blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
be01270ef851b0b4da9dee308ece39ab9622afed
6bea1cce82a7c0ef4eafb33b75360c0830d7bdff
/R/quantities.R
96965e5bac262544776022d78ee50f4d9e81374c
[]
no_license
krlmlr/quantities
cb86a48da7dc11f16ba03d8d1d42e86d1e2c2ea2
5cd26e488835e672d8cd529e7f7c40f4e67023c9
refs/heads/master
2023-03-07T13:19:54.686971
2021-02-21T16:20:01
2021-02-21T16:20:01
null
0
0
null
null
null
null
UTF-8
R
false
false
4,010
r
quantities.R
#' \pkg{quantities}: Quantity Calculus for R Vectors #' #' Support for painless automatic units and uncertainty propagation in numerical #' operations. Both \pkg{units} and \pkg{errors} are integrated into a complete #' quantity calculus system within the R language. R vectors, matrices and arrays #' automatically propagate those attributes when you operate with \code{quantities} #' objects. #' #' @author Iñaki Ucar #' #' @references Edzer Pebesma, Thomas Mailund and James Hiebert (2016). #' Measurement Units in \R. \emph{The R Journal}, 8(2), 486--494. #' \doi{10.32614/RJ-2016-061} #' #' Iñaki Ucar, Edzer Pebesma and Arturo Azcorra (2018). #' Measurement Errors in \R. \emph{The R Journal}, 10(2), 549--557. #' \doi{10.32614/RJ-2018-075} #' #' @docType package #' @import units #' @import errors #' @import stats #' @import utils #' @name quantities-package NULL #' Handle Measurement Units and Uncertainty on a Numeric Vector #' #' Set or retrieve measurement units and uncertainty to/from numeric vectors. #' #' @param x a numeric object, or object of class \code{quantities}, \code{units} #' or \code{errors}. #' #' @details \code{quantities} returns a named list with the \code{units} and #' \code{errors} attributes. #' #' \code{`quantities<-`} sets the units and error values (and converts \code{x} #' into an object of class \code{quantities}). \code{set_quantities} is a #' pipe-friendly version of \code{`quantities<-`} and returns an object of class #' \code{quantities}. #' #' @seealso #' \code{\link{errors}}, \code{\link{units}}, \code{\link{groupGeneric.quantities}}. #' \code{\link{Extract.quantities}}, \code{\link{c.quantities}}, #' \code{\link{rep.quantities}}, \code{\link{cbind.quantities}}. #' \code{\link{as.data.frame.quantities}}, \code{\link{as.matrix.quantities}}, #' \code{\link{t.quantities}}. 
#' #' @examples #' x = 1:3 #' class(x) #' x #' quantities(x) <- list("m/s", 0.1) #' class(x) #' x #' #' (x <- set_quantities(x, m/s, seq(0.1, 0.3, 0.1))) #' #' @export quantities <- function(x) UseMethod("quantities") #' @export quantities.quantities <- function(x) { list(units=attr(x, "units"), errors=attr(x, "errors")) } #' @name quantities #' @param value a list of two components: an object of class \code{units} or #' \code{symbolic_units} (see \code{\link[units]{units}}), and a numeric vector #' of length 1 or the same length as \code{x} (see \code{\link[errors]{errors}}). #' @export `quantities<-` <- function(x, value) UseMethod("quantities<-") #' @export `quantities<-.quantities` <- function(x, value) { if (is.null(value)) return(drop_quantities(x)) stopifnot(length(value) == 2) units(x) <- value[[1]] errors(x) <- value[[2]] x } #' @export `quantities<-.numeric` <- function(x, value) { if (is.null(value)) return(x) `quantities<-.quantities`(x, value) } #' @export `quantities<-.units` <- function(x, value) { if (is.null(value)) return(drop_units(x)) `quantities<-.quantities`(x, value) } #' @export `quantities<-.errors` <- function(x, value) { if (is.null(value)) return(drop_errors(x)) `quantities<-.quantities`(x, value) } #' @name quantities #' @param unit a \code{units} object, or something coercible to one with #' \code{as_units} (see \code{\link[units:units]{set_units}}). #' @param errors a numeric vector of length 1 or the same length as \code{x} #' (see \code{\link[errors:errors]{set_errors}}). 
#' @inheritParams units::set_units #' @export set_quantities <- function(x, unit, errors=0, ..., mode=units_options("set_units_mode")) UseMethod("set_quantities") #' @export set_quantities.numeric <- function(x, unit, errors=0, ..., mode=units_options("set_units_mode")) { if (missing(unit)) unit <- unitless else if (mode == "symbols") unit <- substitute(unit) quantities(x) <- list(as_units(unit), errors) x } #' @export set_quantities.quantities <- set_quantities.numeric #' @export set_quantities.units <- set_quantities.numeric #' @export set_quantities.errors <- set_quantities.numeric
7fc13a68af829ac36dc8322186123fa648f3b2ce
014ea6defc872a40b68259a7d14c2402cf7571aa
/plot4.R
e965a07de3c9549ab8792302fb7531e75ba589c9
[]
no_license
OlaKahina/Exploratory-Data-Analysis
b115c1efa03aa8b6a693c3643096fb1a53633497
3cb14b8c33cab99db058ee7e99ffd0248164434b
refs/heads/master
2021-09-03T01:15:28.307421
2018-01-04T14:09:21
2018-01-04T14:09:21
null
0
0
null
null
null
null
UTF-8
R
false
false
1,109
r
plot4.R
library(ggplot2) #Set working directory where it contains the data setwd("./exdata%2Fdata%2FNEI_data/") #Read data NEI and SCC NEI <- readRDS("summarySCC_PM25.rds") SCC <- readRDS("Source_Classification_Code.rds") # ---------------------------------- QUESTION 4 ----------------------------------- #Across the United States, how have emissions from coal combustion-related sources #changed from 1999-2008? # 1- Subsetting only the SCC rows that correspond to coal combustion coal <- NEI[which(NEI$SCC %in% SCC[grep("coal",SCC$Short.Name,ignore.case = TRUE),"SCC"]),] # 2- Drawing the fourth plot png(file = "plot4.png",width = 800,height = 400,units = "px") g_coal <- ggplot(coal, aes(x=year, y=Emissions)) g_coal+geom_line(stat = "summary", fun.y="sum",size=1, color ="blue")+ labs(x="year",y="PM2.5 Emissions from coal combustion-related sources") + labs(title = "PM2.5 Emissions from coal combustion-related sources across the US") dev.off() # 3- Answer to the question # In total, there is a decrease of Emissions from coal combusion related sources # from 1999 to 2008 across the US.
6b3f84ddd89f9b809983c57418d6d84050e43423
d5d861f87ba038b533c064e9481a3a397889b1a3
/nnet.R
474a3b785274ad99270af21ddc08bb12e63bfb7c
[]
no_license
HashRoot97/ML-Workshop
2256dfed9d22638b85a496336685434587a994c4
b2857f78f5f9a060916fc79c2e78fe09b0afa671
refs/heads/master
2021-06-28T21:30:23.694335
2017-09-19T09:23:29
2017-09-19T09:23:29
103,620,225
1
0
null
null
null
null
UTF-8
R
false
false
757
r
nnet.R
library(neuralnet) df <- read.csv(file = '/home/bigdata17/Documents/ML-Workshop/concrete.csv', header = TRUE, sep = ',') str(df) class (df) dim(df) normalize <- function(x){ return ((x - min(x))/ (max(x)-min(x))) } concrete_norm = as.data.frame(lapply(df, normalize)) summary(concrete_norm$strength) concrete_train = df[1:773,] concrete_test = df[774:1030,] concrete_model = neuralnet(strength ~ cement + slag + ash + water + superplastic + coarseagg + fineagg + age, data=concrete_train) plot(concrete_model) concrete_model = neuralnet(strength ~ cement + slag + ash + water + superplastic + coarseagg + fineagg + age, data=concrete_train, hidden = 2) plot(concrete_model) head(df) print (mtcars)
ac58bdfd61a2aedf4f58276be657576604a62c2f
3dc8f46f8cf29903a0acd60eff607d23ede9f170
/newprodforecast/man/repeatmodel.Rd
94f843eb369d8e70657c12193680a338d32216e2
[]
no_license
conoorss/NewProductForecasting
699e94171c8a71cdb6e427fe68388295c3ce6f7f
3a1aa65418066ef37aac6b8c8c6f8373105c2987
refs/heads/master
2020-05-19T12:10:55.741898
2014-01-09T17:19:33
2014-01-09T17:19:33
null
0
0
null
null
null
null
UTF-8
R
false
false
1,508
rd
repeatmodel.Rd
\name{repeatmodel} \alias{repeatmodel} \title{Estimation for the Repeat Model} \usage{ repeatmodel(formula, data, group, startvals = numeric(), repSpec = list(), sf = FALSE, sfControl = list(), estimation = c("OLS"), method = "Nelder-Mead", optimControl = list(maxit = 20000)) } \arguments{ \item{formula}{is a two-sided formula object which specifies the dependent variable and the covariates if any.} \item{data}{is either a \code{data.frame} or \code{data.table} with the variables needed for the model.} \item{group}{is a string with the name of the group variable.} \item{startvals}{is a numeric vector with the starting values for the model.} \item{repSpec}{is a list which specifies the model specification (family, p0, acvMultiplier, number of covariates).} \item{sf}{is a logical flag for usage of the \code{snowfall} package for parallelization (currently not implemented)} \item{sfControl}{is a list of control parameters for \code{snowfall}} \item{estimation}{is a string which is either "MLE" or "OLS"} \item{method}{is a string which specifies the optimization method} \item{optimControl}{is a list of control parameters for \code{optim}} } \value{ A list of lists with each sublist containing either the results from the estimation or an object of class \code{try-error} if maximum likelihood fails } \description{ Fits a repeat model using either least squares. Returns an object of class \code{repeatmodel} }
dbcab3ca6b52defd2b0b459a376b1873ea9534f9
59c770cd3731ed3bbc177ea90eafda077d5cec6f
/tests/testthat/test_modularity_matrix.R
95ca3e76b385d39ce6d0fb5d4be5499704c7cc17
[]
no_license
vishalbelsare/rigraph
e52af967467ebe453bd07cfba0555354cc182a36
b1ae1de3aca4e2b7eedb4d0f00b8a5f1df35b78d
refs/heads/dev
2023-01-21T13:25:31.175473
2022-04-27T11:02:53
2022-04-27T11:02:53
129,304,592
0
0
null
2022-04-28T12:22:47
2018-04-12T19:58:07
R
UTF-8
R
false
false
688
r
test_modularity_matrix.R
context("modularity_matrix") test_that("modularity_matrix works", { library(igraph) kar <- make_graph("zachary") fc <- cluster_fast_greedy(kar) m1 <- modularity(kar, membership(fc)) m2 <- modularity(kar, membership(fc), weights=rep(1, ecount(kar))) expect_that(m1, equals(m2)) B1 <- modularity_matrix(kar) B2 <- modularity_matrix(kar, weights=rep(1, ecount(kar))) expect_that(B1, equals(B2)) }) test_that("modularity_matrix still accepts a membership argument for compatibility", { library(igraph) kar <- make_graph("zachary") expect_warning( modularity_matrix(kar, membership=rep(1, vcount(kar))), "membership argument is deprecated" ) })
ce30e8b2b7f40ee06c7f728dc4bc0419ec657924
d68441b6311721a84d0210c371a1a94b2eb5f261
/man/adf.Rd
22f27e4ac8e032be45b6c0aa15c48d515068b21a
[]
no_license
jasdumas/dumas
0e787cb29037cbfac331af108cff0f28c758b513
84aedfdd0e095e3a20d07877120a86e7b5d64f8b
refs/heads/master
2020-04-06T05:37:36.930368
2017-07-17T19:24:24
2017-07-17T19:24:24
38,554,253
3
2
null
null
null
null
UTF-8
R
false
true
355
rd
adf.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/adf.R \name{adf} \alias{adf} \title{Type less words for as.data.frame} \usage{ adf(x) } \arguments{ \item{x}{a object to transform to a data.frame()} } \value{ a object of class data.frame } \description{ Type less words for as.data.frame } \examples{ class(adf(Orange$age)) }
f9280b192be81ec4e7bd5b32ba2b860c778ae814
6cbb51fe996e65a51a8d9f2f35e3159721933f25
/man/runPerCellQC.Rd
6ae148da62d0a7380f173dce84b147c70fec7b50
[ "MIT" ]
permissive
compbiomed/singleCellTK
927fb97e257ba89cddee9a90f9cb7cb375a5c6fb
990e89e7ccfbf663f23c793454f72fb8c6878a32
refs/heads/master
2023-08-11T09:17:41.232437
2023-07-26T20:43:47
2023-07-26T20:43:47
68,756,293
144
89
NOASSERTION
2023-09-06T18:22:08
2016-09-20T21:50:24
R
UTF-8
R
false
true
6,676
rd
runPerCellQC.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runPerCellQC.R \name{runPerCellQC} \alias{runPerCellQC} \title{Wrapper for calculating QC metrics with scater.} \usage{ runPerCellQC( inSCE, useAssay = "counts", mitoGeneLocation = "rownames", mitoRef = c(NULL, "human", "mouse"), mitoIDType = c("ensembl", "symbol", "entrez", "ensemblTranscriptID"), mitoPrefix = "MT-", mitoID = NULL, collectionName = NULL, geneSetList = NULL, geneSetListLocation = "rownames", geneSetCollection = NULL, percent_top = c(50, 100, 200, 500), use_altexps = FALSE, flatten = TRUE, detectionLimit = 0, BPPARAM = BiocParallel::SerialParam() ) } \arguments{ \item{inSCE}{A \linkS4class{SingleCellExperiment} object.} \item{useAssay}{A string specifying which assay in the SCE to use. Default \code{"counts"}.} \item{mitoGeneLocation}{Character. Describes the location within \code{inSCE} where the gene identifiers in the mitochondrial gene sets should be located. If set to \code{"rownames"} then the features will be searched for among \code{rownames(inSCE)}. This can also be set to one of the column names of \code{rowData(inSCE)} in which case the gene identifies will be mapped to that column in the \code{rowData} of \code{inSCE}. See \code{\link{featureIndex}} for more information. If this parameter is set to \code{NULL}, then no mitochondrial metrics will be calculated. Default \code{"rownames"}.} \item{mitoRef}{Character. The species used to extract mitochondrial genes ID from build-in mitochondrial geneset in SCTK. Available species options are \code{"human"} and \code{"mouse"}. Default is \code{"human"}.} \item{mitoIDType}{Character. Types of mitochondrial gene id. SCTK supports \code{"symbol"}, \code{"entrez"}, \code{"ensembl"} and \code{"ensemblTranscriptID"}. It is used with \code{mitoRef} to extract mitochondrial genes from build-in mitochondrial geneset in SCTK. Default \code{NULL}.} \item{mitoPrefix}{Character. 
The prefix used to get mitochondrial gene from either \code{rownames(inSCE)} or columns of \code{rowData(inSCE)} specified by \code{mitoGeneLocation}. This parameter is usually used to extract mitochondrial genes from the gene symbol. For example, \code{mitoPrefix = "^MT-"} can be used to detect mito gene symbols like "MT-ND4". Note that case is ignored so "mt-" will still match "MT-ND4". Default \code{"^MT-"}.} \item{mitoID}{Character. A vector of mitochondrial genes to be quantified.} \item{collectionName}{Character. Name of a \code{GeneSetCollection} obtained by using one of the \code{importGeneSet*} functions. Default \code{NULL}.} \item{geneSetList}{List of gene sets to be quantified. The genes in the assays will be matched to the genes in the list based on \code{geneSetListLocation}. Default \code{NULL}.} \item{geneSetListLocation}{Character or numeric vector. If set to \code{'rownames'}, then the genes in \code{geneSetList} will be looked up in \code{rownames(inSCE)}. If another character is supplied, then genes will be looked up in the column names of \code{rowData(inSCE)}. A character vector with the same length as \code{geneSetList} can be supplied if the IDs for different gene sets are found in different places, including a mixture of \code{'rownames'} and \code{rowData(inSCE)}. An integer or integer vector can be supplied to denote the column index in \code{rowData(inSCE)}. Default \code{'rownames'}.} \item{geneSetCollection}{Class of \code{GeneSetCollection} from package GSEABase. The location of the gene IDs in \code{inSCE} should be in the \code{description} slot of each gene set and should follow the same notation as \code{geneSetListLocation}. The function \code{\link[GSEABase]{getGmt}} can be used to read in gene sets from a GMT file. If reading a GMT file, the second column for each gene set should be the description denoting the location of the gene IDs in \code{inSCE}. 
These gene sets will be included with those from \code{geneSetList} if both parameters are provided.} \item{percent_top}{An integer vector. Each element is treated as a number of top genes to compute the percentage of library size occupied by the most highly expressed genes in each cell. Default \code{c(50, 100, 200, 500)}.} \item{use_altexps}{Logical scalar indicating whether QC statistics should be computed for alternative Experiments in \code{inSCE} (\code{altExps(inSCE)}). If \code{TRUE}, statistics are computed for all alternative experiments. Alternatively, an integer or character vector specifying the alternative Experiments to use to compute QC statistics. Alternatively \code{NULL}, in which case alternative experiments are not used. Default \code{FALSE}.} \item{flatten}{Logical scalar indicating whether the nested \link[S4Vectors]{DataFrame-class} in the output should be flattened. Default \code{TRUE}.} \item{detectionLimit}{A numeric scalar specifying the lower detection limit for expression. Default \code{0}} \item{BPPARAM}{A \link{BiocParallelParam} object specifying whether the QC calculations should be parallelized. Default \code{BiocParallel::SerialParam()}.} } \value{ A \link[SingleCellExperiment]{SingleCellExperiment} object with cell QC metrics added to the \link{colData} slot. } \description{ A wrapper function for \link[scater]{addPerCellQC}. Calculate general quality control metrics for each cell in the count matrix. } \details{ This function allows multiple ways to import mitochondrial genes and quantify their expression in cells. \code{mitoGeneLocation} is required for all methods to point to the location within inSCE object that stores the mitochondrial gene IDs or Symbols. The various ways mito genes can be specified are: \itemize{ \item A combination of \code{mitoRef} and \code{mitoIDType} parameters can be used to load pre-built mitochondrial gene sets stored in the SCTK package. 
These parameters are used in the \link{importMitoGeneSet} function. \item The \code{mitoPrefix} parameter can be used to search for features matching a particular pattern. The default pattern is an "MT-" at the beginning of the ID. \item The \code{mitoID} parameter can be used to directy supply a vector of mitochondrial gene IDs or names. Only features that exactly match items in this vector will be included in the mitochondrial gene set. } } \examples{ data(scExample, package = "singleCellTK") mito.ix = grep("^MT-", rowData(sce)$feature_name) geneSet <- list("Mito"=rownames(sce)[mito.ix]) sce <- runPerCellQC(sce, geneSetList = geneSet) } \seealso{ \code{\link[scater]{addPerCellQC}}, \code{link{plotRunPerCellQCResults}}, \code{\link{runCellQC}} }
16b1f9269a0498a57a75c993696da512b261a00a
2656e71f4f685ad24e638b520e4a7cfaba44767c
/src/wrapper_theta_sampled.R
2a0d9893811e0611268e788c4d4a3d36094092e3
[]
no_license
varao/diffusionMCMC_JCGS
1764be0e9e70d1915a92817af648e97721b8c2b1
3b050d3c9fa2eb3cfc88260f6ed3804d1263dc25
refs/heads/master
2022-07-14T21:10:18.517132
2020-05-11T19:35:25
2020-05-11T19:35:25
263,072,243
1
0
null
null
null
null
UTF-8
R
false
false
1,857
r
wrapper_theta_sampled.R
source('./sder.R') if(hyp_sin == 1) { source('./ea1_func.R') ea_func <- ea1_func drft <- drift1 } else { source('./ea2_func.R') ea_func <- ea2_func drft <- drift2 } x0 <- 0; tEnd <- 20; nObs <- 9 sdv <- .5 sample_size = 5000 Prt = 50 step_size=.01 theta=1 set.seed(1) dat <- data_gen(x0 = 0, t0 = 0, sd = sdv, tEnd = tEnd, nObs = nObs,drift=function(x) drift(x,theta)) tDat <- dat$tDat xDat <- dat$xDat # Run HMC sampler (update theta) time_spend1 <- system.time(rslt_hmc <- sde_hmc(reps = sample_size, x0=x0, t0=0, tEnd = tEnd, xDat = xDat, tDat = tDat, func = ea_func, sdv = sdv, theta = NULL, M=100, eps=.2, L=5, )) # Run Euler pMCMC sampler (update theta) time_spend2 <- system.time(rslt_eul <- pmcmc_Eul(reps = sample_size, P=Prt, t0=0, stepSize=0.01, tEnd=tEnd, xDat=xDat, tDat=tDat, sdv = sdv, theta=NULL, func=drft)) ############ # Summarize results timeCost_hmc <- time_spend1["elapsed"] timeCost_eul <- time_spend2["elapsed"] hmc_theta <- rslt_hmc$theta es_hmc <- effectiveSize(hmc_theta) ess_hmc <- es_hmc / timeCost_hmc es_eul <- effectiveSize(rslt_eul$theta) ess_eul <- es_eul / timeCost_eul Idx <- seq(1, sample_size, by = 50) ksRslt <- ks.test(rslt_hmc$theta[Idx], rslt_eul$theta[Idx]) pValue <- ksRslt$p.value ksDist <- ksRslt$statistic dfm_cmp <- tibble(p=Prt, T=tEnd, N=nObs, tHMC=timeCost_hmc, esHMC=es_hmc, essHMC=ess_hmc, tEul=timeCost_eul, esEul=es_eul, essEul=ess_eul, pVal=pValue, ks=ksDist) dfm_cmp
081dc03978aaf8a4207bcbb926b86958bfbfe789
990defc7dfa8b37192d20c628f6dfd7471b5cddb
/R/stationarity.R
1347eb3341ec61527bc92724b27c230518ee34a5
[]
no_license
hoanguc3m/ccgarch
247cbda115769c0f4bcdd87a5088e000b6bd73a0
dbd84bacf56d09538d90fd687c2131d52e5dc7dd
refs/heads/master
2020-03-13T17:29:12.532948
2018-04-29T01:22:42
2018-04-29T01:22:42
131,218,138
1
1
null
null
null
null
UTF-8
R
false
false
209
r
stationarity.R
# stationariry condition. # for details, see He and Ter\"{a}svirta (2004) and Nakatani and Ter\"{a}svirta (2007). stationarity <- function(A,B){ G <- A + B max(Mod(eigen(G)$values)) }
737d9188ff93ac7d138698daab267b7b53011de6
f502efd2fa153c225643b2d960dd33de277e024f
/graph.R
0d2e0faacd571a044d0aca87fbb202fb9f2cb571
[]
no_license
seanlth/Cuda-Rcpp
caa2f10e9e629b4bde68dd0764d9d6d4279b8fa0
79a8169d8a4cc2868e60df57d98a5ec22bd2abbd
refs/heads/master
2020-12-26T03:01:18.744511
2015-04-13T11:53:48
2015-04-13T11:53:48
29,820,908
2
0
null
null
null
null
UTF-8
R
false
false
969
r
graph.R
d1 <- read.csv('timings_2015-03-19.csv') d1 library(ggplot2) library(reshape2) # we only want the important columns as the rest other two are fixed at 22,10000 d2 <- d1[,c('Iterations','Rtime','CUDAtime')] d2 # convert from "wide" to "tall" format d3 <- melt(d2, id='Iterations') d3 qplot(Iterations, value, colour=variable, data=d3, log='xy', ylab='Time (Secs)', geom=c('line','point'))+ geom_line(size=3) + theme_grey(base_size = 28) + theme(axis.title.x = element_text(size = 28), axis.title.y = element_text(size = 28), axis.text = element_text(size = 20)) # dimensions in inches ggsave('performance_plot_2015-03-20.pdf', height=7, width=7) # nb. can also save to say .png with dimensions also in inches, defaulting to 300dpi #ggsave('performance_plot_2015-03-20.png', height=7, width=7) d2$Ratio <- d2$Rtime/d2$CUDAtime qplot(Iterations, Ratio, data=d2, log='x', geom=c('point','line')) # ggsave('performance_ratio_plot_2015-03-22.pdf', height=7, width=7)
cc7da0f06bcfd966b0196aa34950d6712e25c56d
b1e1a193db8d4647a2ae1566724beebcfbc2c167
/index/data/mediation/scripts/001_adiposity_endometrial/forestplot.R
99fe7c45e6a1161a7b69100378fa49e5965e81b0
[]
no_license
mattlee821/000_thesis
166cef4616ad70ea47a6d558c77c8c4ec0a021b3
867c9f08daea61ecca7aa73e660d5001d1315a1b
refs/heads/master
2022-05-08T08:26:24.394209
2022-04-07T09:30:51
2022-04-07T09:30:51
229,047,207
1
1
null
null
null
null
UTF-8
R
false
false
2,109
r
forestplot.R
rm(list=ls()) ## set environment ==== directory_1 <- Sys.getenv("directory_1") setwd(directory_1) # source ==== library(ggplot2) library(dplyr) library(knitr) library(patchwork) library(tidyr) library(purrr) library(ggforestplot) library(wesanderson) colours <- names(wes_palettes) discrete_palette <- wes_palette(colours[8], type = "discrete") # data ==== data <- read.table("007_metabolites_outcomes/analysis/001_adiposity_endometrial/001_MR_results.txt", header = T, sep = "\t") data$group[data$exposure == "Locke BMI EU sex combined 77 SNPs clumped"] <- "BMI" data$group[data$exposure == "Shungin WHR EU sex combined 26 SNPs"] <- "WHR" data$group[data$exposure == "Lu BF EU sex combined 5 SNPs"] <- "BF" data$outcome_label[data$outcome == "Endometrial cancer (endometrioid histology) || id:ebi-a-GCST006465"] <- "Endometrioid" data$outcome_label[data$outcome == "Endometrial cancer (Non-endometrioid histology) || id:ebi-a-GCST006466"] <- "Non-endometroid" data$outcome_label[data$outcome == "Endometrial cancer || id:ebi-a-GCST006464"] <- "Endometrial cancer" data$method[data$method == "Inverse variance weighted (multiplicative random effects)"] <- "IVW-MRE" plot_data <- data plot_data$group <- factor(plot_data$group, levels = c("BMI", "WHR", "BF")) plot_data$outcome_label <- factor(plot_data$outcome_label, levels = c("Endometrial cancer", "Endometrioid", "Non-endometroid")) plot_data <- droplevels(plot_data) xmin <- min(plot_data$lower_ci) xmax <- max(plot_data$upper_ci) psignif <- 0.05 ci <- 0.95 pdf("007_metabolites_outcomes/analysis/001_adiposity_endometrial/figures/forestplot.pdf", width = 10, height = 6, pointsize = 10) forestplot(df = plot_data, name = outcome_label, estimate = b, pvalue = pval, psignif = psignif, ci = ci, se = se, colour = group, shape = method, logodds = TRUE) + scale_color_manual(values = c(discrete_palette[3], discrete_palette[1], discrete_palette[5])) + theme(axis.title.x = element_blank()) + theme(legend.title = element_blank()) dev.off()
ae6b799f6ec382f43c1e45326722f225897827ba
b67d72740059ddb2e988c9faad6f390fb14e3f49
/R/plotStateGraph.R
8eedd3b97b12ffe5308046fd1068f1262a4da427
[]
no_license
cran/BoolNet
e365c17c324a18fa023cc6968461385965e6be7f
4d3fe9d97a3dd6178e9b932cc9a231ec405587a1
refs/heads/master
2023-03-09T20:54:46.608871
2023-02-27T12:52:30
2023-02-27T12:52:30
17,678,159
3
4
null
null
null
null
UTF-8
R
false
false
8,883
r
plotStateGraph.R
# Plots a graph that visualizes the state transitions and attractor basins. <attractorInfo> is an object # of class AttractorInfo. This requires the igraph package. # If <highlightAttractors> is set, attractor edges are drawn bold. # If <colorBasins> is true, each basin is drawn in a different color. # Colors can be provided in <colorSet>. # <layout> specifies the graph layouting function. # If <piecewise> is true, subgraphs are layouted separately. # <basin.lty> and <attractor.lty> specify the line types used to draw states in the basins # and in the attractors (if <highlightAttractor> is set). # If <plotIt> is not set, only the igraph object is returned, but no graph is plotted. # ... provides further graphical parameters for the plot. # Returns an object of class igraph plotStateGraph <- function(stateGraph, highlightAttractors = TRUE, colorBasins = TRUE, colorSet, drawLegend = TRUE, drawLabels = FALSE, layout = layout.kamada.kawai, piecewise = FALSE, basin.lty = 2, attractor.lty = 1, plotIt = TRUE, colorsAlpha = c(colorBasinsNodeAlpha = .3, colorBasinsEdgeAlpha = .3, colorAttractorNodeAlpha = 1, colorAttractorEdgeAlpha = 1), ...) { stopifnot(inherits(stateGraph,"AttractorInfo") || inherits(stateGraph,"TransitionTable") || inherits(stateGraph,"SymbolicSimulation")) args <- list(...) if (!is.null(args$attractorInfo)) { warning("The parameter \"attractorInfo\" is deprecated. Use \"stateGraph\" instead!") stateGraph <- args$attractorInfo } if(is.null(colorsAlpha) | (length(colorsAlpha) != 4)) { warning("colorsAlpha parameter not properly specified. 
Parameter will be set to opaque values (1,1,1,1).") colorsAlpha <- c(1,1,1,1) } if (any(colorsAlpha < 0 | colorsAlpha > 1)) { warning("colorsAlpha parameters are not in range [0,1] - they will be normalized.") colorsAlpha <- colorsAlpha/sum(colorsAlpha) } if (installed.packages()["igraph","Version"] < package_version("0.6")) bias <- 1 else bias <- 0 symbolic <- FALSE if (inherits(stateGraph,"AttractorInfo")) { stateGraph <- getTransitionTable(stateGraph) } else if (inherits(stateGraph,"SymbolicSimulation")) { symbolic <- TRUE if (is.null(stateGraph$graph)) stop(paste("This SymbolicSimulation structure does not contain transition table information.", "Please re-run simulateSymbolicModel() with returnGraph=TRUE!")) stateGraph <- stateGraph$graph } geneCols <- setdiff(colnames(stateGraph), c("attractorAssignment","transitionsToAttractor")) numGenes <- (length(geneCols)) / 2 from <- apply(stateGraph[ , 1:numGenes, drop=FALSE], 1, paste, collapse="") to <- apply(stateGraph[ , ((numGenes+1):(2*numGenes)), drop=FALSE], 1, paste, collapse="") vertices <- unique(c(from, to)) edges <- data.frame(from, to) res <- graph.data.frame(edges, vertices = as.data.frame(vertices), directed=TRUE) res <- set.vertex.attribute(res, "name", value = vertices) if ("attractorAssignment" %in% colnames(stateGraph)) attractorAssignment <- stateGraph$attractorAssignment else { attractorAssignment <- c() colorBasins <- FALSE drawLegend <- FALSE } if ("transitionsToAttractor" %in% colnames(stateGraph)) attractorIndices <- to[stateGraph$transitionsToAttractor == 0] else { if (highlightAttractors) { warning("The parameter \"highlightAttractors\" is set to true although not enough information is available in stateGraph. 
Highlightning of attractors will be set to FALSE.") } attractorIndices <- c() highlightAttractors <- FALSE } # determine nodes and edges that belong to attractors # set default edge width and line type res <- set.edge.attribute(res, "width" , value = 0.8) res <- set.edge.attribute(res, "lty", value = basin.lty) if (highlightAttractors) { attractorEdgeIndices <- which(apply(edges, 1 , function(edge){ return( (edge[1] %in% attractorIndices) & (edge[2] %in% attractorIndices) ) })) - bias # set different edge width and line type for attractor edges res <- set.edge.attribute(res, "width", index = attractorEdgeIndices, value = 2) res <- set.edge.attribute(res, "lty", index = attractorEdgeIndices, value = attractor.lty) } if (missing(colorSet)) { # define default colors colorSet <- c("blue","green","red","darkgoldenrod","gold","brown","cyan", "purple","orange","seagreen","tomato","darkgray","chocolate", "maroon","darkgreen","gray12","blue4","cadetblue","darkgoldenrod4", "burlywood2") } # check for certain graphical parameters in ... 
# that have different default values in this plot if (is.null(args$vertex.size)) args$vertex.size <- 2 if (is.null(args$edge.arrow.mode)) args$edge.arrow.mode <- 2 if (is.null(args$edge.arrow.size)) args$edge.arrow.size <- 0.3 if (is.null(args$vertex.label.cex)) args$vertex.label.cex <- 0.5 if (is.null(args$vertex.label.dist)) args$vertex.label.dist <- 1 attractors <- unique(attractorAssignment) attractors <- attractors[!is.na(attractors)] if (colorBasins) { res <- set.edge.attribute(res, "color", value = "darkgrey") for (attractor in attractors) { # determine nodes and edges belonging to the basin of <attractor> attractorGraphIndices <- NULL basinIndices <- which(attractorAssignment == attractor) if(!is.null(stateGraph$transitionsToAttractor)) { attractorGraphIndices <- intersect(basinIndices, which(stateGraph$transitionsToAttractor == 0)) basinIndices <- base::setdiff(basinIndices, attractorGraphIndices) } if (!symbolic) { # change vertex color res <- set.vertex.attribute(res, "color", basinIndices - bias, value = adjustcolor(colorSet[(attractor-1) %% length(colorSet) + 1], alpha.f = colorsAlpha[1])) res <- set.vertex.attribute(res, "frame.color", basinIndices - bias, value = adjustcolor("black", alpha.f = colorsAlpha[1])) if(!is.null(attractorGraphIndices)) { res <- set.vertex.attribute(res, "color", attractorGraphIndices - bias, value = adjustcolor(colorSet[(attractor-1) %% length(colorSet) + 1], alpha.f = colorsAlpha[3])) res <- set.vertex.attribute(res, "frame.color", attractorGraphIndices - bias, value = adjustcolor("black", alpha.f = colorsAlpha[3])) } if (drawLabels) res <- set.vertex.attribute(res,"label.color",basinIndices - bias, value=colorSet[(attractor-1) %% length(colorSet) + 1]) } # change edge color res <- set.edge.attribute(res, "color", index = basinIndices - bias, value = adjustcolor(colorSet[(attractor-1) %% length(colorSet) + 1], alpha.f = colorsAlpha[2])) if(!is.null(attractorGraphIndices)) { res <- set.edge.attribute(res, "color", index = 
attractorGraphIndices - bias, value = adjustcolor(colorSet[(attractor-1) %% length(colorSet) + 1], alpha.f = colorsAlpha[4])) } } } if(plotIt) { if (drawLabels) labels <- vertices else labels <- NA if (piecewise) layout <- piecewise.layout(res, layout) if (symbolic) autocurve.edges(res) do.call("plot",c(list(res),args,"vertex.label"=list(labels), "layout"=list(layout))) #plot(res,vertex.size=args$vertex.size,layout=layout, # edge.arrow.mode=args$edge.arrow.mode, # vertex.label=labels,vertex.label.cex=args$vertex.label.cex, # vertex.label.dist=args$vertex.label.dist, # ...) if (colorBasins & drawLegend) legend(x="bottomleft",pch=15,ncol=1, col=colorSet[attractors-1 %% length(colorSet) + 1], legend = paste("Attractor",seq_along(attractors)), cex=0.5) } return(invisible(res)) }
1a5b96b61e7eaa744e116bb65e194f66ab2f0fd8
136452900447274023984e23e3a87baf8a87d999
/figures/SupplementalFigure11_Network/NetworkCyto.R
4457b9c16ec2d55eaa3808952405ee4850639d52
[ "MIT" ]
permissive
GenomicsNX/JACI-BioinformaticsGraphics
1d42b916736500ee0d3a094629cb6a7c96f97203
3ce8fb9b04b1f5712b195c8a447f5a7c9d397ed0
refs/heads/master
2023-03-16T04:12:13.274211
2019-04-05T17:56:32
2019-04-05T17:56:32
null
0
0
null
null
null
null
UTF-8
R
false
false
1,539
r
NetworkCyto.R
#' ---
#' title: Network
#' author: A Calatroni & J Wildfire
#' date: "`r format(Sys.time(), '%d %B, %Y')`"
#' output:
#'   github_document:
#'     toc: true
#' ---

#' ### set defaults
knitr::opts_knit$set(root.dir = '../..')
knitr::opts_chunk$set(warning = FALSE, message = FALSE, comment = NA)

#' ### packages
pacman::p_load(tidyverse, rio, reshape2)
pacman::p_load(RColorBrewer)
pacman::p_load(qgraph)

#' ### citation
citation("qgraph", auto = FALSE) %>% toBibtex()

#' ### Import and Symmetric Matrix
# Drop the first (label) column and coerce the association table to a matrix.
dd <- import("figures/SupplementalFigure11_Network/NetworkCyto.csv") %>%
  select(-1) %>%
  as.matrix()
# Mirror the upper triangle so the matrix is symmetric.
dd <- Matrix::forceSymmetric(dd,"U")

#' ### Names Construction
nn <- colnames(dd)
nn <- as.data.frame(nn)
# Column names split into four dot-separated pieces <ca>.<cb>.<s>.<y>;
# presumably cytokine name parts, stimulus and year -- TODO confirm.
# cy  = cytokine label, cy2 = label with the stimulus on a second line.
nn <- nn %>%
  separate(nn,c("ca","cb","s","y")) %>%
  mutate(cy=paste(ca,cb,sep="."),
         cy2=paste(cy,s,sep="\n"))
# One colour per year group (y == 0, 1, otherwise 3).
col <- brewer.pal(3, "Set1")
nn$col <- with(nn,ifelse(y==0,col[1],ifelse(y==1,col[2],col[3])))
rownames(dd) <- nn$cy2
colnames(dd) <- nn$cy2

#' ### QGraph
#+ fig_height=7, fig_eidth=10
# Fixed seed so the force-directed ("spring") layout is reproducible.
set.seed(57817)
# NOTE(review): graph='assosciation' looks misspelled ('association');
# verify qgraph does not silently fall back to its default for unknown values.
qgraph(dd,
       graph='assosciation',
       layout='spring', layout.par = list(niter=5000),
       minimum=0.35,maximum=1,            # hide edges with |weight| < 0.35
       arrows=F,
       bg='white',label.color='black',
       color=nn$col,
       groups= factor(nn$y, labels=c("Birth","Year 1", "Year 3")),
       labels=as.factor(nn$cy2),
       legend=T, legend.cex=0.80,
       overlay=F,overlaySize=0.70,
       shape='circle',
       curve=0.2,
       vsize=2.5,label.cex=0.5,
       borders=FALSE, vTrans=200)
7bd0d81cdfdc17c6412f5fd5b55be56dcb3a61a0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/metaBMA/examples/dtriangular.Rd.R
ccb62bc23edcb07401e3ed2cf238699008a72c37
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
317
r
dtriangular.Rd.R
# Examples for metaBMA: triangular prior density and random sampling
# (extracted from the dtriangular/rtriangular help page).
library(metaBMA)

# Visualise a triangular prior with support [0.2, 1.3] and mode 0.6.
plot(prior("triangular", c(.2, .6, 1.3)), 0, 2)

# Draw 100,000 samples from triangular(min = 0.2, peak = 0.5, max = 1)
# and overlay the theoretical density on their histogram.
draws <- rtriangular(1e5, .2, .5, 1)
hist(draws, 200, FALSE)
curve(dtriangular(x, .2, .5, 1), col = 2, add = TRUE, lwd = 2)
c409e2d19c08b90e5a7a823ce39b94a8ad85f06c
4e6476d3607a76f36c9ed3bcf269100d12cbcfbd
/man/matrixly.Rd
0d0e9fb745c2292cfbafad17105e8179b26ddadc
[]
no_license
kmaheshkulkarni/corrly
239e70e2ba1e53b83b2a700ec51d4ee1476bd0ef
aa1f465b9a73fcbe8cd1d5e68fa42983127ce9e8
refs/heads/master
2021-12-22T21:56:27.068956
2021-12-15T05:59:13
2021-12-15T05:59:13
189,001,782
6
0
null
null
null
null
UTF-8
R
false
true
369
rd
matrixly.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/matrixly.R \name{matrixly} \alias{matrixly} \title{matrixly Correlation matrix Plot using R Plotly} \usage{ matrixly(data = NULL) } \arguments{ \item{data}{is a Data Frame} } \value{ plot } \description{ matrixly Correlation matrix Plot using R Plotly } \examples{ matrixly(data = mtcars) }
cb81e762ce84af699d76032d2bbd541035e5167a
e1b6203a7b5ecf24d8100141196f602a78a8db8b
/Lab4_Calculus.R
9f2e6acd4899625cc8656c4707a6a30e618d3341
[]
no_license
nightwolfer1/Mathemathics_data_science
ca2b48e9317bc7603026e649c08bb99010556c1f
43cf036b1800855028f3cfad0bd7f5f4f4fd5779
refs/heads/main
2023-02-12T12:15:28.276882
2021-01-08T17:53:25
2021-01-08T17:53:25
315,022,749
0
0
null
null
null
null
UTF-8
R
false
false
1,047
r
Lab4_Calculus.R
# Lab 4: Calculus
# 1. Define and plot f(x, y) = x^2 + x*y + y^2 + y for x, y in [-10, 10].
f <- function(x, y) x^2 + x * y + y^2 + y

library(rgl)
plot3d(f, xlim = c(-10, 10), ylim = c(-10, 10), col = "red")

library(Deriv)

# 2. First and second partial derivatives; closed forms in the comments.
f.x <- Deriv(f, x = "x")     # 2x + y
f.y <- Deriv(f, x = "y")     # 2y + x
f.xx <- Deriv(f.x, x = "x")  # 2
f.yy <- Deriv(f.y, x = "y")  # 2
f.xy <- Deriv(f.x, x = "y")  # 1

# 3. The critical point is (1/3, -2/3): verify both partials vanish there,
#    both analytically and by plotting the partials around the point.
f.x(1/3, -2/3)  # 0
f.y(1/3, -2/3)  # 0

plot3d(f.x, xlim = c(-1, 2), ylim = c(-1, 2), zlim = c(0, 10))
plot3d(f.y, xlim = c(-1, 2), ylim = c(-1, 2), zlim = c(0, 10))
plot3d(f, add = TRUE, xlim = c(-1, 2), ylim = c(-1, 2), zlim = c(0, 10), col = "red")

# Second-derivative test: D = f.xx * f.yy - f.xy^2.
# D > 0 together with f.xx > 0 at (1/3, -2/3) confirms a local minimum.
D <- f.xx(c(1/3, -2/3)) * f.yy(c(1/3, -2/3)) - f.xy(c(1/3, -2/3))^2
D  # 3
5778bf5f8f56d995ec7eacb6f4bde41cf024c6c4
5fe35ca981886c8ec689af788ac6b8db8291b4a0
/Post-processing/visualization.R
8ad179d06461cadc2aecd31c14cd6c41642418e9
[]
no_license
DienesB/VG2.0
cba5e04181f2b5dbd07cdc47fc272f3a87493976
ad9d5fed803e8b1ae28c5a1aa9dbf1cc67809ebe
refs/heads/master
2020-04-26T05:56:48.964595
2019-08-23T07:45:14
2019-08-23T07:45:14
173,349,210
0
0
null
null
null
null
UTF-8
R
false
false
2,112
r
visualization.R
# Post-processing: turn per-hour irradiance text tables into GeoTIFFs and
# plot them as a 4-scenario x 11-hour level-plot panel.

#Import libraries
library(raster)
library(rgdal)
library(dplyr)
library(rasterVis)
# NOTE(review): brewer.pal() is used below but RColorBrewer is never attached
# explicitly; it presumably comes in via rasterVis -- confirm.

#Read tables
# Every .txt file in the working directory is treated as one irradiance grid.
temp <- list.files(pattern="*.txt", full.names = TRUE)
temp
structuredTables <- lapply(temp, function(x){
  # skip the header line; 12 columns = row index, col index, 10 measurements
  DF <- read.table(x, header = FALSE, skip = "1", col.names = paste0("v",seq_len(12)))
  colnames(DF) <- c("row", "col", paste0("m",seq_len(10)))
  #Filter southern wall
  DF <- dplyr::filter(DF, row==21 & (col >= 11 & col <=20 ))
  #Rotate tables counter-clockwise: this way irradiance values increase vertically (bottom to top)
  foo <- apply(t(DF),2,rev)
  # keep only the 10 measurement rows (drops the transposed row/col rows)
  mymatrix <- foo[1:10,]
  #Create tifs
  myraster <- raster(mymatrix, xmn=0, xmx=10, ymn=0, ymx=10)
  return(myraster)
})

#Write rasters with automatic naming
# Names encode scenario prefix (B/P/QF/X -- presumably the four vegetation
# scenarios used below, TODO confirm) plus the hour of day.
rasterNames <- setNames(structuredTables, c(
  'B09h','B10h','B11h','B12h','B13h','B14h','B15h','B16h','B17h','B18h','B19h',
  'P09h','P10h','P11h','P12h','P13h','P14h','P15h','P16h','P17h','P18h','P19h',
  'QF09h','QF10h','QF11h','QF12h','QF13h','QF14h','QF15h','QF16h','QF17h','QF18h','QF19h',
  'X09h','X10h','X11h','X12h','X13h','X14h','X15h','X16h','X17h','X18h','X19h'))
rasterNames
mapply(writeRaster, rasterNames, names(rasterNames), 'GTiff', overwrite = TRUE)

#Plot raster time series
#Stack tifs
myraster_all_files <- list.files(full.names = TRUE, pattern = ".tif$")
myraster_all_files
myraster_stack <- stack(myraster_all_files)
# Quick interactive sanity checks on the stack geometry.
crs(myraster_stack)
extent(myraster_stack)
yres(myraster_stack)
xres(myraster_stack)
names(myraster_stack)

#Common plot
# Reversed RdBu palette: blue = low, red = high irradiance.
cols <- colorRampPalette(rev(brewer.pal(11,"RdBu")))
# Blank per-panel labels (44 layers = 4 scenarios x 11 hours).
rasterNames <- paste0(" ",seq_len(44))
levelplot(myraster_stack,
          #main = "Irradiance values on the southern wall",
          legend=list(top=list(fun=grid::textGrob("kWh /"~m^{2}, y=0.25, x=1.015))),
          col.regions = cols,
          names.attr=rasterNames,
          layout = c(11,4),   # 11 hours across, 4 scenarios down
          xlab = c("9:00", "10:00", "11:00", "12:00", "13:00", "14:00", "15:00",
                   "16:00", "17:00", "18:00", "19:00"),
          ylab = c("No vegetation", "Fraxinus", "Pinus", "Betula"),
          scales = list(draw=FALSE)) #removes scales (i.e. width and height)
999fe38bce56b002cbdf5ed957668038c297f1d2
c6f93a7ebbaacbe667e8061b2eb5913a850a08c6
/R/plots.R
7691cc47482132d52c6f4486399e33d31ef8b6ef
[]
no_license
dajmcdon/dpf
6a8436d45be1229fd79b1fe604578060a9a636ae
1d7e8abe05b65e44c0765761dd7b5caacac4d3a3
refs/heads/master
2021-06-03T20:17:20.530700
2021-03-10T16:35:30
2021-03-10T16:35:30
107,696,996
0
1
null
2020-10-21T22:10:57
2017-10-20T15:49:37
TeX
UTF-8
R
false
false
2,511
r
plots.R
#' Plots the observed tempo and estimated discrete states given parameters.
#'
#' Builds the switching state-space model for one performance, finds the most
#' likely discrete-state path with a beam search, smooths the tempo with a
#' Kalman filter conditional on that path, and plots observed vs. inferred
#' tempo coloured by the inferred state.
#'
#' @param performance The named performance, e.g. 'Richter_1976'. See \code{names(tempos)}.
#' @param params A vector of parameters of length 14. No checks are performed.
#' @param y A vector of tempos.
#' @param onset A vector of note onset times.
#' @param particleNumber Number of particles for \code{beamSearch()}. Default is 200.
#' @param initialMean Mean for the first note in constant tempo. Length 2. Default is (132,0).
#' @param initialVariance Variance for the first note in constant tempo. Length 2. Default is (400,10).
#'
#' @export
#' @examples
#' params = c(426.69980736, 136.33213703, -11.84256691, -34.82234559,
#'            439.37886221, 1, 1, 0.84916635, 0.04611644, 0.74119571,
#'            0.43966082, 0.02116317, 0.24513563, 0.17253254)
#' data(tempos)
#' y = tempos[,'Richter_1976']
#' onset = tempos$note_onset
#' plotStates('Richter_1976', params, y, onset)
plotStates <- function(performance, params, y, onset, particleNumber = 200,
                       initialMean = c(132,0), initialVariance = c(400,10)){
  if(is.list(params)) params = unlist(params)
  # observations as a 1 x n matrix, the layout beamSearch()/kalman() expect
  y = matrix(y, nrow = 1)
  # note lengths: onset differences, closing the final note at measure 61
  lt = diff(c(onset, 61))
  # state-space matrices from the 14 parameters, split into their groups
  mats = musicModel(lt, params[1], params[2:4], params[5:7], params[8:14],
                    initialMean, initialVariance)
  # beam search over 11 discrete states, starting in state 1
  bs = beamSearch(mats$a0, mats$P0, c(1,rep(0,10)), mats$dt, mats$ct, mats$Tt,
                  mats$Zt, mats$HHt, mats$GGt, y, mats$transMat, particleNumber)
  # highest-weight surviving particle gives the best discrete-state path
  bestpath = bs$paths[which.max(bs$weights),]
  # Kalman smoothing conditional on the chosen path
  kal = kalman(mats, bestpath, y)
  df = data.frame(
    measure = onset,
    tempo = c(y),
    inferred = c(kal$ests),
    # collapse the 11 internal states to 4 interpretable labels
    state = factor(
      c('constant tempo', 'decelerating','accelerating','stress')[convert11to4(bestpath)]
    )
  )
  ggplot2::ggplot(df) +
    # shaded band over measures 33-45; presumably a section of musical
    # interest -- TODO confirm why these bounds are hard-coded
    ggplot2::geom_rect(
      data=data.frame(xmin = 33, xmax = 45, ymin = -Inf, ymax = Inf),
      mapping=ggplot2::aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),
      fill = 'gray90', color = 'gray90') +
    ggplot2::geom_line(ggplot2::aes(x=measure, y=tempo), color='black') +
    ggplot2::geom_point(ggplot2::aes(x=measure, y=inferred,
                                     color=state)) +
    ggplot2::scale_color_brewer(palette = 'Spectral') +
    ggplot2::theme_minimal() +
    ggplot2::theme(legend.position = 'bottom',
                   legend.title = ggplot2::element_blank()) +
    ggplot2::ggtitle(performance)
}
c6f912cea76974e8f1d2909051a20132e2bb2b91
dab97f897f8a1d7a8b2c6f39ed267218cd02673e
/man/NormData_English.Rd
e7b79170525dde43f9df193179916cc667529f03
[]
no_license
cran/MFAg
098d37233e562d23c7f0c0da1f13a6ea84c4d9f5
0c4323811550abdad4f8145360f86560ee252cf1
refs/heads/master
2023-09-02T17:49:25.931693
2023-08-19T15:02:35
2023-08-19T15:30:30
41,166,681
0
0
null
null
null
null
UTF-8
R
false
false
926
rd
NormData_English.Rd
\name{NormData} \alias{NormData} \title{Normalizes the data.} \description{Function that normalizes the data globally, or by column.} \usage{NormData(data, type = 1)} \arguments{ \item{data}{Data to be analyzed.} \item{type}{1 normalizes overall (default),\cr 2 normalizes per column.} } \value{\item{dataNorm}{Normalized data.}} \author{ Paulo Cesar Ossani Marcelo Angelo Cirillo } \examples{ data(DataQuan) # set of quantitative data data <- DataQuan[,2:8] res <- NormData(data, type = 1) # normalizes the data globally res # Globally standardized data sd(res) # overall standard deviation mean(res) # overall mean res <- NormData(data, type = 2) # normalizes the data per column res # standardized data per column apply(res, 2, sd) # standard deviation per column colMeans(res) # column averages } \keyword{Normalizes the data.}
4cf699fbb0a7d81c3593dd14c653f87418ea8908
bf3e3afd77b6c8fb9b7dff811b1f4b057a9efa1b
/cleansing.R
ab5bc0286a6a82c2e6a5bd7d475beaf0548ac77f
[]
no_license
jonnygame88/tennis-scraping
76ab917a6a428293353ff7c1abf53a2a859b0adf
587604e0430521b992615af18c91f3fb3c0a7289
refs/heads/master
2021-01-21T14:43:50.499984
2016-07-23T14:42:32
2016-07-23T14:42:32
59,190,491
0
0
null
null
null
null
UTF-8
R
false
false
3,501
r
cleansing.R
# Clean raw on-court ATP scrapes into analysis-ready RDS tables:
# match history, players, tournaments and rounds.
# NOTE(review): f_conv(), give_names() and order_vector() are presumably
# defined in scripts/_setup.R -- confirm.
source('scripts/_setup.R')

list.files('data/on-court/')

## match history
# An empty match_date means the match ran on the tournament start date.
matches <- read.csv('data/on-court/atp-match-history.csv') %>%
  f_conv() %>%
  mutate(match_date = ifelse(match_date == '', tour_date, match_date),
         match_date = as.Date(match_date, '%d/%m/%Y')) %>%
  select(-tour_date)

# Distinct result strings (e.g. "6-4 3-6 7-6(4)") and distinct set scores.
unique_results <- matches$result %>% unique() %>% order_vector()
unique_sets <- matches$result %>% strsplit(' ') %>% unlist() %>% unique() %>% order_vector()

# Split one result string into five set strings, padding with "0-0" so every
# match has exactly five set columns.
result_split <- function(r) {
  x <- strsplit(r, ' ')[[1]]
  if (length(x) < 5) {x <- c(x, rep('0-0', 5-length(x)))}
  data.frame(result_string = r,
             s1 = x[1], s2 = x[2], s3 = x[3], s4 = x[4], s5 = x[5])
}

# Parse one set string into games won by each player. Strings containing
# letters (retirements, walkovers, ...) become (-1, -1); tiebreak scores like
# "7-6(4)" keep only the leading games digit.
set_split <- function(s) {
  if (grepl('[A-Za-z]', s)) {
    data.frame(set_string = s, g1 = -1, g2 = -1)
  } else{
    x <- strsplit(s, '-')[[1]]; g1 <- x[1]; g2 <- x[2]
    if (grepl('\\(', g2)) {g2 <- substr(g2, 1, 1)}
    data.frame(set_string = s, g1 = as.numeric(g1), g2 = as.numeric(g2))
  }
}

# Lookup tables: set string -> games won; result string -> five row indices
# into set_mapping (one per set).
set_mapping <- lapply(unique_sets, set_split) %>% bind_rows()
result_mapping <- lapply(unique_results, result_split) %>%
  bind_rows() %>%
  mutate(
    s1 = match(s1, set_mapping$set_string),
    s2 = match(s2, set_mapping$set_string),
    s3 = match(s3, set_mapping$set_string),
    s4 = match(s4, set_mapping$set_string),
    s5 = match(s5, set_mapping$set_string))

# Expand set column s ("s1".."s5") into per-result game counts s<N>g1, s<N>g2.
gmap <- function(s) {
  gms <- set_mapping[result_mapping[[s]], 2:3]
  names(gms) <- paste0(s, names(gms))
  gms
}
game_df <- data.frame(result = result_mapping$result_string) %>%
  cbind(lapply(paste0('s', 1:5), gmap) %>% bind_cols())

# Weight of a set for the winner score: decided sets count 1 or 0; sets won
# past 6 games (tight sets) count 5/6 for the winner, 1/6 for the loser.
wset <- function(g1, g2) {
  ifelse(g1 > g2, ifelse(g2 < 6, 1, 5/6), ifelse(g1 < 6, 0, 1/6))
}

# Attach per-set games, set-win flags, number of sets played and the
# aggregate winner score to every match.
matches <- inner_join(matches, game_df, by = 'result') %>%
  mutate(s1w = as.numeric(s1g1>s1g2), s2w = as.numeric(s2g1>s2g2),
         s3w = as.numeric(s3g1>s3g2), s4w = as.numeric(s4g1>s4g2),
         s5w = as.numeric(s5g1>s5g2)) %>%
  mutate(nsets = (s1g1+s1g2 > 0) + (s2g1+s2g2 > 0) + (s3g1+s3g2 > 0)+(s4g1+s4g2 > 0) +
           (s5g1+s5g2 > 0),
         wscore = (1 + wset(s1g1, s1g2) + wset(s2g1, s2g2) + wset(s3g1, s3g2) +
                     wset(s4g1, s4g2) + wset(s5g1, s5g2))/(nsets + 1))

# Matches whose result contains letters (RET/WO/...) were not completed.
matches$completed <- 1
matches$completed[grep('[A-Za-z]', matches$result)] <- 0
# saveRDS(matches, 'data/cleaned/atp-match-history.RDS')
# rm(matches)

## player list
# Strip spaces from player names so they join cleanly across tables.
read.csv('data/on-court/atp-players.csv') %>%
  f_conv() %>%
  give_names(c('player_id', 'player_name', 'birth_date', 'country')) %>%
  mutate(player_name = gsub(' ', '', player_name, fixed = TRUE)) %>%
  saveRDS('data/cleaned/atp-players.RDS')

## tournament list
surface_list <- c('Clay', 'Hard', 'I.hard', 'Grass', 'Carpet', 'Acrylic')
level_list <- c('Futures', 'Challenger', 'MainTour', 'MastersSeries', 'GrandSlam',
                'DavisFedCup', 'NonTour&Juniors')
tour <- read.csv('data/on-court/atp-tournaments.csv') %>%
  f_conv() %>%
  rename(level_id = tour_level) %>%
  mutate(start_date = as.Date(start_date, '%d/%m/%Y'))
tour$surface_id <- match(tour$surface, surface_list)
# level_id is 0-based, hence the +1 when indexing level_list.
tour$level <- level_list[tour$level_id+1]
select(tour, tour_id, tour_name, surface_id, surface, level_id, level, start_date,
       prize_money, country, latitude, longitude, link_id) %>%
  saveRDS('data/cleaned/atp-tournaments.RDS')

## rounds list
read.csv('data/on-court/rounds.csv') %>%
  give_names(c('round_id', 'round')) %>%
  saveRDS('data/cleaned/rounds.RDS')
a0fdf9ff5eec5ab0dd3394b6522e4a0499c5b12f
cfd67d4a5283734793517d73fe243c6badf6c244
/R/new.harvest.R
8bf16fcbaa262867e8364c017bb80dc465d79fbd
[]
no_license
cran/tossm
8df83466d4b75dd009c706ac9a19dcd39487f804
5f3a3cf64628b186fd2321eb8018db4157e8a14d
refs/heads/master
2021-01-15T12:25:26.888726
2009-07-28T00:00:00
2009-07-28T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,918
r
new.harvest.R
"harvest" <- function(mu.polys, interval.polys, bp.polys, rland, TAC){ # Genetic data from harvested individuals is discarded. However, this could # be easily changed in the future if we want to assume genetic data is collected if (sum(TAC)==0){ return (list(rland=rland, goners=NULL, actual.catch=TAC)) } else { goners <- vector("list",length(mu.polys)) bp.union <- poly.union(bp.polys) for (i.m in 1:length(mu.polys)){ if (TAC[i.m] > 0){ remaining.TAC <- TAC[i.m] ID.tracker<-NULL mu.interval.union <- lapply(interval.polys,function(int) { poly.p <- poly.intersect(list(mu.polys[[i.m]],int,bp.union)) if (area.poly(poly.p)>0) return(poly.p) else return(NULL) }) for (int in length(mu.interval.union):1){ #remove NULL elements from the list if (is.null(mu.interval.union[[int]])) mu.interval.union[[int]] <- NULL } n.in.int <- get.n.in.ss(rland,mu.interval.union,bp.polys) for (int in 1:length(mu.interval.union)){ gen.samp <- def.genetic.sampler(rland,mu.interval.union[int],bp.polys,remaining.TAC,n.in.ss=matrix(n.in.int[,int],nrow=length(bp.polys)),ID.tracker)[[1]] new.IDs <- unclass(attr(gen.samp,"coords"))[,1] ID.tracker<-unique(c(ID.tracker,new.IDs)) #add gen.samp to goners[[i.m]] only if it contains some individuals if (nrow(gen.samp)>0) goners[[i.m]] <- rbind(goners[[i.m]],unclass(attr(gen.samp,"coords"))) num.goners <- ifelse(is.null(goners[[i.m]]),0,nrow(goners[[i.m]])) if (num.goners == TAC[i.m]) { break } else {remaining.TAC <- TAC[i.m]-num.goners} } } } actual.catch <- sapply(goners,function(x){ return(if(is.null(x)){0} else { dim(x)[1]})}) goners <- do.call("rbind",lapply(goners, function(x) x)) rland$individuals <- subset(rland$individuals,!rland$individuals[,4] %in% goners[,1]) return(list(rland=rland, goners=goners, actual.catch=actual.catch)) } }
aa4a128600621595b9948f5ee9af709a1731784b
75bdecb6b71b6b4f97548d914561961dc359069c
/code/plot.R
988a068c9ce0d40869fb27a763d2010a979f939b
[]
no_license
jdmumm/P04_newVsOldStations
8ca326450ee54942717c7ff479d74f75b2bc8de6
aebb4a86f16de29e5c450ed7e0201a3e847e5503
refs/heads/master
2020-03-30T14:17:55.410516
2018-10-03T21:51:16
2018-10-03T21:51:16
151,311,441
0
0
null
null
null
null
UTF-8
R
false
false
3,333
r
plot.R
## TabsFigs ####
# make figures comparing survey results using core vs non-core.
# CPUE (lbs/pot) with 95% CIs, survey-wide and by area, for large and all crab.

## Load ####
library(tidyverse)
library(reshape2)
library(extrafont)
# NOTE(review): font_import() rescans all system fonts and is very slow;
# it normally only needs running once per machine -- confirm it is intended
# on every execution.
font_import()
loadfonts(device="win")
windowsFonts(Times=windowsFont("TT Times New Roman"))
# Global theme: black/white, Times, no grid lines.
theme_set(theme_bw(base_size=12,base_family='Times New Roman')+
            theme(panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()))

# NOTE(review): surv_n and surv_o are read but never used below -- confirm
# whether they are still needed.
read.csv("output/byYear_xz_w17_new.csv")-> surv_n
read.csv("output/byYear_xz_w17_old.csv")-> surv_o
read.csv("output/long_lbs.csv") -> long #cpue and se, surveywide, new and old stations. From byYear_xz_w17_old.csv and byYear_xz_w17_new.csv.
read.csv("output/long_lbs_byArea.csv") ->long_byArea #cpue and se, by area, new and old stations. From byArea_xz_w17_old.csv and byArea_xz_w17_new.csv.

#SURVEY_WIDE ####
long %>% transmute(
  Year = as.factor(year),
  Stations = stations,
  Size = size,
  cpue, se) -> dat
dat$Stations <- factor(dat$Stations, levels = c("old", "new")) # reorder old on left
dat %>% filter (Size == "lrg") -> lrg
dat %>% filter (Size == "all") -> all

# Dodged points with 1.96*SE error bars (approximate 95% CI).
lrg %>% ggplot(aes(x = Year, y = cpue, color = Stations)) +
  scale_y_continuous(breaks = seq(0,5,.5), lim = c(0,3.5)) +
  geom_point(position=position_dodge(.3)) +
  geom_errorbar(aes(ymin= cpue - (1.96 * se), ymax = cpue + (1.96 * se)),
                width =.2, position=position_dodge(.3)) +
  labs( title = "Survey wide, Larges", x = "Year", y = "CPUE (lbs/pot)")
ggsave("./figs/surveyWideCPUE_lbs_newVsOld_Lrg.png", dpi=300, height=3.75, width=6.5, units="in")

all %>% ggplot(aes(x = Year, y = cpue, color = Stations)) +
  scale_y_continuous(breaks = seq(0,8.0,.5), lim = c(0,8.0)) +
  geom_point(position=position_dodge(.3)) +
  geom_errorbar(aes(ymin= cpue - (1.96 * se), ymax = cpue + (1.96 * se)),
                width =.2, position = position_dodge(.3) ) +
  labs( title = "Survey wide, All", x = "Year", y = "CPUE (lbs/pot)")
ggsave("./figs/surveyWideCPUE_lbs_newVsOld_All.png", dpi=300, height=3.75, width=6.5, units="in")

#BY_AREA ####
long_byArea %>% transmute(
  Area = as.factor(area),
  Year = as.factor(year),
  Stations = stations,
  Size = size,
  cpue, se) -> dat_byArea
dat_byArea$Stations <- factor(dat_byArea$Stations, levels = c("old", "new")) # reorder old on left
dat_byArea %>% filter (Size == "lrg") -> lrg_byArea
dat_byArea %>% filter (Size == "all") -> all_byArea

# Same plots as above, faceted by survey area.
lrg_byArea %>% ggplot(aes(x = Year, y = cpue, color = Stations)) +
  scale_y_continuous(breaks = seq(0,5,.5), lim = c(0,3.5)) +
  geom_point(position=position_dodge(.3)) +
  geom_errorbar(aes(ymin= cpue - (1.96 * se), ymax = cpue + (1.96 * se)),
                width =.2, position=position_dodge(.3) ) +
  labs( title = "By area, Larges", x = "Year", y = "CPUE (lbs/pot)")+
  facet_wrap(~Area)
ggsave("./figs/byAreaCPUE_lbs_newVsOld_Lrg.png", dpi=300, height=4, width=6.5, units="in")

all_byArea %>% ggplot(aes(x = Year, y = cpue, color = Stations)) +
  scale_y_continuous(breaks = seq(0,8.0,.5), lim = c(0,8.0)) +
  geom_point(position=position_dodge(.3)) +
  geom_errorbar(aes(ymin= cpue - (1.96 * se), ymax = cpue + (1.96 * se)),
                width =.2, position=position_dodge(.3) ) +
  labs( title = "By area, All", x = "Year", y = "CPUE (lbs/pot)")+
  facet_wrap(~Area)
ggsave("./figs/byAreaCPUE_lbs_newVsOld_All.png", dpi=300, height=4, width=6.5, units="in")
1ae469b2c7c212aaaf2f2a4927aabf734724d041
8f9c1016da2e360e3602c2c06fe1b55d112c3bc3
/man/parl.questions.Rd
0fd2583bca4e7b3b17db1aaeecffae7d22f7aa90
[]
no_license
martigso/stortingAlpha
3aba1fe5ca707e7634e9e0c7e31dd4dc11a1c627
093b7e7560648ff422c5a49d884b5eea11314b08
refs/heads/master
2021-01-10T07:17:48.688872
2015-11-06T16:44:16
2015-11-06T16:44:16
36,719,111
0
0
null
null
null
null
UTF-8
R
false
false
1,196
rd
parl.questions.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/parl.questions.R
\name{parl.questions}
\alias{parl.questions}
\title{Scrape data on questions in the Storting}
\usage{
parl.questions(url, nPages)
}
\arguments{
\item{url}{String specifying one of the pages for the session to gather the questions from}

\item{nPages}{Integer specifying how many pages of questions the relevant session has (20 questions per page)}
}
\value{
Returns a data frame with one row for each question
}
\description{
A function to collect all questions asked by MPs to ministers in the Storting from 1996-1997.
}
\details{
Each session must be run separately. All sessions can be combined together with \code{\link{rbind}}.
}
\examples{
url1314 <- "https://www.stortinget.no/no/Saker-og-publikasjoner/Sporsmal/Sporretimesporsmal/?pid=2013-2014&qtid=all&qsqid=all&page=1#list"
quest1314 <- parl.questions(url1314, nPages = 21)
quest1314$session <- "2013-2014"
url1415 <- "https://www.stortinget.no/no/Saker-og-publikasjoner/Sporsmal/Sporretimesporsmal/?pid=2014-2015&qtid=all&qsqid=all&page=1#list"
quest1415 <- parl.questions(url1415, nPages = 19)
quest1415$session <- "2014-2015"
}
f600bef50b4ef37873ab33a2b2bab503b1f0cc9d
53e85ebc6c2e391f37f8bfedc2ee8ed39f787086
/tests/testthat/test_plots.R
c902d6ef76bafec8708974a728050920da130bb1
[ "MIT" ]
permissive
David-Salazar/ggsupplyDemand
43eeb25844b665a1ce76ae4cbde54df499c0d1a8
376e5aad83f8b4e997b317fc235803c82b25f6ff
refs/heads/master
2020-03-18T00:41:12.625376
2018-05-20T14:50:38
2018-05-20T14:50:38
134,107,534
1
1
null
null
null
null
UTF-8
R
false
false
782
r
test_plots.R
context("Basic Plots")

# Visual regression tests: vdiffr compares each plot against a stored SVG
# snapshot. The doppelganger names are deliberately left exactly as-is
# (including the inconsistent "first_example" vs "second-example"), since
# renaming them would orphan the existing snapshot files.

test_that("Basic plot still works", {
  create_supply_and_demand() %>%
    shift_demand(outwards = TRUE) %>%
    plot_supply_and_demand(consumer_surplus = TRUE) -> g1
  vdiffr::expect_doppelganger("first_example", g1)
})

test_that("Second plot still works", {
  create_supply_and_demand() %>%
    shift_demand(outwards = TRUE) %>%
    shift_supply(outwards = FALSE) %>%
    plot_supply_and_demand(consumer_surplus = TRUE) -> g2
  vdiffr::expect_doppelganger("second-example", g2)
})

test_that("Third plot still works", {
  create_supply_and_demand() %>%
    shift_supply() %>%
    shift_supply(shifter = 250) %>%
    shift_demand(outwards = FALSE, shifter = 400) %>%
    plot_supply_and_demand() -> g3
  vdiffr::expect_doppelganger("third-example", g3)
})
c9ac57880732462e1fcf0df7152eb08dd8bcae91
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/mvdalab/examples/contr.niets.Rd.R
9c49774f707744658993304e1455e4e637d09115
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
240
r
contr.niets.Rd.R
# Example usage of mvdalab::contr.niets (cell-means contrast matrix).
library(mvdalab)

# Cell-means contrasts for a three-level factor.
three_levels <- LETTERS[1:3]
contr.niets(three_levels)

# Cell-means contrasts for a two-level factor.
two_levels <- LETTERS[1:2]
contr.niets(two_levels)
3afa8b82fd720e2aa0f1264b8b726a6e06236af3
7379c8d889fad0119bb0b5457bc6c54638faf534
/common_readrnaatacchip.R
8e1707701dfd2bf41b60cab636f38b2fb3eadb0e
[ "BSD-2-Clause" ]
permissive
mahogny/th2crispr
941db5200bd50f7dc1966cf83a320dbcc2957a3b
ad1257cbec423ff6b131ec5b2b2f39f339721c27
refs/heads/master
2019-04-11T02:57:35.123862
2018-09-19T08:48:49
2018-09-19T08:48:49
105,709,522
4
0
null
null
null
null
UTF-8
R
false
false
38,463
r
common_readrnaatacchip.R
############################################################################################# ### ### ### Part of the paper ... ### ### Author: Johan Henriksson (mahogny@areta.org) ### ### ### ### This code ... ### ### ### ############################################################################################# #note: stat6 read depth is fine. but stat6 72h b is the wrong file! library(Rtsne) library(gplots) library(RColorBrewer) library(stringr) library(sqldf) library(reshape2) library(limma) library(GenomicRanges) #library(BiocParallel) #register(MulticoreParam(4)) ## Cut-offs used. TODO move ATAC cutoff here maxTSSdistCHIP <- 20e3 ################################################################## ####### Common helper functions ################################## ################################################################## ######### Clean up memory showmemuse <- function(){ for (thing in ls()) { s <- object.size( get(thing) ) if(s>20000000){ cat(sprintf("%s\t%s\n", round(s/1000000),thing)) #in MB } } } #showmemuse() ## Not in-operator '%!in%' <- function(x,y) !('%in%'(x,y)) ## Rbind the elements of a list rbindlist <- function(thelist){ as.data.frame(data.table::rbindlist(thelist)) } qtextscatter <- function(x,y,labels,cex=1){ plot(x,y,cex=0, xlab=deparse(substitute(x)), ylab=deparse(substitute(y))) text(x,y,labels = labels,cex=cex) } detach_package <- function(pkg, character.only = FALSE){ if(!character.only){ pkg <- deparse(substitute(pkg)) } search_item <- paste("package", pkg, sep = ":") while(search_item %in% search()) { detach(search_item, unload = TRUE, character.only = TRUE) } } bpsapply <- function (X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE) { FUN <- match.fun(FUN) answer <- bplapply(X = X, FUN = FUN, ...) 
## ---------------------------------------------------------------------------
## NOTE(review): flat analysis script, not a package. It depends on objects
## created elsewhere in this file / the session (ensconvert, human_ensconvert,
## ortho_mouse_human_unique, sgenescorer2_matrix, maxTSSdistCHIP, dxbp1, ...)
## and on stringr, sqldf, reshape2 and GenomicRanges being attached --
## confirm before running any part standalone.
## ---------------------------------------------------------------------------

## Tail of a local sapply-style wrapper (its head is above this chunk):
## name the answers by X when X is character, then optionally simplify the
## list to an array -- mirrors base::sapply's post-processing.
if (USE.NAMES && is.character(X) && is.null(names(answer)))
    names(answer) <- X
if (!identical(simplify, FALSE) && length(answer))
    simplify2array(answer, higher = (simplify == "array"))
else answer
}

## Merge two data frames on their row names (adds a "Row.names" column).
mergebyrow <- function(x,y){
  merge(x,y, by="row.names")
}

## Return c(min, max) of x.
minmax <- function(x){
  c(min(x),max(x))
}

## Symmetric range around zero covering all of x: c(-max|x|, +max|x|).
symrange <- function(x){
  c(-max(abs(x)),max(abs(x)))
}

## Capitalize the first character, lower-case the rest ("FOO" -> "Foo").
## NOTE(review): redefined below (after the "#changed" marker) WITHOUT the
## upper-casing of the first letter; the later definition wins from then on.
normalizesym <- function(s) paste(str_to_upper(str_sub(s,1,1)),str_to_lower(str_sub(s,2)),sep="")

#########
## Function to calculate a "correlation matrix" of jaccard indices
## Pairwise Jaccard index of the columns of vd, treating value > 0 as
## "present". Returns a symmetric ncol(vd) x ncol(vd) matrix.
corjaccard <- function(vd){
  v <- matrix(NA,ncol(vd),ncol(vd))
  colnames(v)<-colnames(vd)
  rownames(v)<-colnames(vd)
  for(i in 1:ncol(vd))
    for(j in 1:ncol(vd)){
      # |intersection| / |union| of the two presence sets
      v[i,j] <- sum(vd[,i]>0 & vd[,j]>0) / sum(vd[,i]>0 | vd[,j]>0)
    }
  v
}

## Jaccard index of two vectors (presence = value > 0).
vecjaccard <- function(vi,vj){
  sum(vi>0 & vj>0) / sum(vi>0 | vj>0)
}

#########
### Merge columns with the same name by taking max value
### NOTE(review): for a base matrix, x[,cn[i]] selects only the FIRST column
### with that name, so duplicates may not actually be max-merged -- confirm
### the class of x at the call sites.
mergecolmax <- function(x){
  cn <- unique(colnames(x))
  nx <- matrix(0,nrow = nrow(x), ncol=length(cn))
  colnames(nx) <- cn
  rownames(nx) <- rownames(x)
  for(i in 1:length(cn)){
    # per-row max across the column(s) selected by this name
    nx[,i] <- apply(x[,cn[i],drop=FALSE],1,max)
  }
  nx
}

#########
### Return object but with class set to double
as.class.double <- function(x){
  class(x) <- "double"
  x
}

#########
### Function: geometric mean
### NOTE(review): prod() will overflow/underflow for long vectors; fine for
### the short vectors used here.
gm_mean <- function(a){prod(a)^(1/length(a))}

#########
### Function: Scale values to a range to 0-1 (min -> 0, max -> 1)
scale01 <- function(x){
  x<-x-min(x)
  x<-x/max(x)
  x
}

#########
### Function: Turn a boolean matrix into a 1/0 matrix
binarize <- function(m2){
  m2[m2]<-1
  m2[!m2]<-0
  m2
}

#########
### Safe merge: like merge() but first checks that there is at least one common column
### NOTE(review): the inner merge() passes everything positionally; in
### R >= 3.5 merge.data.frame has a `no.dups` parameter BEFORE
### `incomparables`, so the `incomparables` value here actually lands on
### `no.dups` -- confirm against the R version in use.
smerge <- function(x, y, by = intersect(names(x), names(y)), by.x = by, by.y = by,
                   all = FALSE, all.x = all, all.y = all, sort = TRUE,
                   suffixes = c(".x",".y"), incomparables = NULL){
  if(length(by)==0){
    # Show both schemas before failing, to ease debugging
    print(colnames(x))
    print(colnames(y))
    stop("No overlap between tables")
  } else {
    merge(x, y, by, by.x, by.y, all, all.x , all.y, sort , suffixes , incomparables )
  }
}

## Screens retained for analysis, and their display names (same order).
keepscreens <- c("s8a_stl", "sx2_stl","first_il4",
                 "s11_il13","sc1_il13",
                 "sx4_irf4","sc2a_irf4",
                 "s8b_xbp1","sc2b_xbp1",
                 "sc3_gata3","s9_stg")
keepscreens_ren2 <- c("IL4 a", "IL4 b", "IL4 c",
                      "IL13 a","IL13 b",
                      "Irf4 a","Irf4 b",
                      "Xbp1 a","Xbp1 b",
                      "Gata3 a","Gata3 b")

## Screen IDs grouped by the gene each screen targets.
screens_il4 <- c("s8a_stl", "sx2_stl", "first_il4")
screens_il13 <- c("s11_il13","sc1_il13")
screens_irf4 <- c("sx4_irf4","sc2a_irf4")
screens_xbp1 <- c("s8b_xbp1","sc2b_xbp1")
screens_gata3 <- c("sc3_gata3","s9_stg")
list_screen_genes <- c("Il4","Il13","Irf4","Xbp1","Gata3")
list_screens <- list(il4=screens_il4, il13=screens_il13, irf4=screens_irf4, xbp1=screens_xbp1, gata3=screens_gata3)

##################################################################
## Read ATAC motifs
## Normalize motif symbols, rename columns to (sym, p), and patch a few
## motif names that are not current MGI symbols.
atactf <- read.csv("out_motif/atactf.csv",stringsAsFactors=FALSE)
for(i in 1:nrow(atactf))
  atactf$motif[i] <- normalizesym(atactf$motif[i])
colnames(atactf)<-c("sym","p")
atactf$sym[atactf$sym=="Bhlh2b"] <- "Bhlhe40"
atactf$sym[atactf$sym=="Bhlh3b"] <- "Bhlhe41"
atactf$sym[atactf$sym=="Hinfp1"] <- "Hinfp"
#Tcfap2a -> ??? Ap2
#"Zbed1" -> ???

##################################################################
## Read TF name <-> TF ID
## Build a JASPAR-motif-name -> mouse-gene-symbol map, routing human-only
## symbols through a human->mouse orthology table.
## NOTE(review): reads the global `ensconvert` to decide which symbols are
## already mouse symbols.
read.jaspar_namegenesym <- function(){
  #Read orthology map. Only consider unique mappings human->mouse
  map_ortho_humanmouse <- read.csv("map_ortho_humanmouse.csv",stringsAsFactors = FALSE)
  map_ortho_humanmouse <- map_ortho_humanmouse[!duplicated(map_ortho_humanmouse$human),]
  #map_ortho_humanmouse <- map_ortho_humanmouse[!duplicated(map_ortho_humanmouse$mouse),]
  # Row names keyed by lower-cased human symbol for lookup below
  rownames(map_ortho_humanmouse) <- str_to_lower(map_ortho_humanmouse$human)

  #Read multimap from jasparname to several genes involved.
  #Remap human gene names to mouse gene names
  map_jaspar_namegenesym <- read.csv("map_jasparname_sym.csv",stringsAsFactors = FALSE)
  altmap <- map_ortho_humanmouse[str_to_lower(map_jaspar_namegenesym$mgi_symbol),]$mouse
  # Keep a symbol if it is already a known mouse symbol; otherwise substitute
  # its mouse ortholog; drop rows with no usable mapping.
  ismouse <- map_jaspar_namegenesym$mgi_symbol %in% ensconvert$mgi_symbol
  map_jaspar_namegenesym$mgi_symbol[!ismouse] <- altmap[!ismouse]
  map_jaspar_namegenesym <- map_jaspar_namegenesym[!is.na(map_jaspar_namegenesym$mgi_symbol),]
  map_jaspar_namegenesym
}
map_jaspar_namegenesym <- read.jaspar_namegenesym()

###########################################################
############# read atac and rnaseq data ###################
###########################################################

##################################################################
### Read mapping TF name <-> motifID
genemotif <- read.csv("out_tc/JASPAR2016_MA_PB_C2H2_nonred.meme.names",header=FALSE,sep=" ",stringsAsFactors = FALSE)[,c(2,3)]
colnames(genemotif) <- c("motifid","jasparname")

#changed
## Second definition of normalizesym: only lower-cases from the second
## character on (does NOT upper-case the first). Shadows the earlier one.
normalizesym <- function(s) paste(str_sub(s,1,1),str_to_lower(str_sub(s,2)),sep="")
## NOTE(review): `genemotif$tf` does not exist at this point, so the RHS
## reads from a NULL column -- presumably `jasparname` was intended; confirm.
for(i in 1:nrow(genemotif))
  genemotif$tf[i] <- normalizesym(genemotif$tf[i])

##################################################################
### Read RNAseq time course
## Load the TPM table for `org` ("mouse"/"human"), average replicates per
## time point for Th0 and Th2, derive expressed/DE gene sets, and attach
## per-motif max expression levels. Returns everything in a named list.
## NOTE(review): `expressedGenes10` mixes the global `ensconvert` with the
## parameter `ensconvert_` on the same line -- for the human call this looks
## wrong; confirm whether both should be `ensconvert_`. Also reads the
## globals `genemotif` and `map_jaspar_namegenesym`.
read.mtpm <- function(org, ensconvert_){
  # org <- "mouse"
  # ensconvert_=ensconvert
  mtpm <- read.csv(sprintf("out_tc/%s/tpm.txt",org),sep="\t",row.names = "gene")
  mtpm <- mtpm[,colnames(mtpm)!="row.names"]
  # Strip the "rep" marker so replicate columns share a time-point prefix
  colnames(mtpm) <- str_replace_all(colnames(mtpm),"rep","")

  ### Calculate average TPM over time, Th2
  mtpm_times <- c(0,0.5,1,2,4,6,12,24,48,72)
  av_mtpm <- cbind(
    #Th2
    apply(mtpm[,grep("Naive",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_05h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_1h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_2h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_4h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_6h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_12h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_24h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_48h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th2_72h",colnames(mtpm))],1,mean)
  )
  colnames(av_mtpm) <- sprintf("%sh",mtpm_times)

  ### Calculate average TPM over time, Th0
  av_mtpm0 <- cbind(
    apply(mtpm[,grep("Naive",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_05h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_1h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_2h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_4h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_6h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_12h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_24h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_48h",colnames(mtpm))],1,mean),
    apply(mtpm[,grep("Th0_72h",colnames(mtpm))],1,mean)
  )
  # print(head(av_mtpm0))
  # Per-gene maximum of the Th2 time-averaged TPM
  max_mtpm <- apply(av_mtpm,1,max)

  # Column subsets of the raw TPM table by condition and time window
  mtpm_th0 <- mtpm[,c(
    grep("Naive",colnames(mtpm)),
    grep("Th0_",colnames(mtpm)))]
  mtpm_th2 <- mtpm[,c(
    grep("Naive",colnames(mtpm)),
    grep("Th2_",colnames(mtpm)))]
  mtpm_early <- mtpm[,c(
    grep("Naive",colnames(mtpm)),
    grep("_05h",colnames(mtpm)),
    grep("_1h",colnames(mtpm)),
    grep("_2h",colnames(mtpm)),
    grep("_4h",colnames(mtpm)))]
  mtpm_late <- mtpm[,c(
    grep("_6h",colnames(mtpm)),
    grep("_12h",colnames(mtpm)),
    grep("_24h",colnames(mtpm)),
    grep("_48h",colnames(mtpm)),
    grep("_72h",colnames(mtpm)))]

  # Genes with average TPM > 10 at any Th0/Th2 time point
  expressedGenes10Id <- names(av_mtpm[apply((cbind(av_mtpm,av_mtpm0))>10,1,sum)>0,1])
  # NOTE(review): global `ensconvert` vs parameter `ensconvert_` -- see above.
  expressedGenes10 <- unique(ensconvert$mgi_symbol[ensconvert_$ensembl_gene_id %in% expressedGenes10Id])

  #Differentially expressed genes
  de_early <- read.csv(sprintf("out_tc/%s/early_DE_Th0Th2_genes.txt",org),sep="\t",stringsAsFactors = FALSE)
  de_late <- read.csv(sprintf("out_tc/%s/late_DE_Th0Th2_genes.txt",org),sep="\t",stringsAsFactors = FALSE)
  # Columns 14/15 are assumed to hold the Ensembl ID and symbol -- positional
  colnames(de_early)[14]<-"ensembl_gene_id"
  colnames(de_early)[15]<-"mgi_symbol"
  colnames(de_late)[14]<-"ensembl_gene_id"
  colnames(de_late)[15]<-"mgi_symbol"

  #Get expression levels for the TFs in particular
  motif_explevel <-
    smerge(smerge(smerge(
      data.frame(jasparname=genemotif$tf, stringsAsFactors = FALSE),
      map_jaspar_namegenesym),ensconvert_),
      data.frame(ensembl_gene_id=names(max_mtpm), explevel=max_mtpm, stringsAsFactors = FALSE))
  # Collapse to one max-expression row per (motif, symbol, gene id)
  motif_explevel <- sqldf("select jasparname, mgi_symbol, ensembl_gene_id, max(explevel) as maxexp from motif_explevel group by jasparname,mgi_symbol,ensembl_gene_id")

  list(
    mtpm=mtpm,
    av_mtpm=av_mtpm,
    av_mtpm0=av_mtpm0,
    max_mtpm=max_mtpm,
    mtpm_th0=mtpm_th0,
    mtpm_th2=mtpm_th2,
    mtpm_early=mtpm_early,
    mtpm_late=mtpm_late,
    motif_explevel=motif_explevel,
    #expressed_atacTF=expressed_atacTF,
    expressedGenes10Id=expressedGenes10Id,
    expressedGenes10=expressedGenes10,
    de_late=de_late,
    de_early=de_early,
    ensconvert=ensconvert_
  )
}
tcmouse <- read.mtpm("mouse", ensconvert_ = ensconvert)
tchuman <- read.mtpm("human", ensconvert_ = human_ensconvert)

## JASPAR names of TFs whose max expression in time course `tc` exceeds `tpm`.
expressedTFjaspar <- function(tc, tpm=1){
  unique(tc$motif_explevel$jasparname[tc$motif_explevel$maxexp>tpm])
}

######################################################################
### Check which genes are DE in both mouse and human #################
######################################################################

##################################################################
## DE for human and mouse in one big matrix
## Build a gene table flagging DE status (early/late, mouse/human) at the
## given q-value cutoff, plus several derived "conserved DE" flags.
## NOTE(review): the human rows call getdefromtable.mouse, not
## getdefromtable.human -- the two helpers are currently identical, so no
## behavioral difference, but it reads like a copy-paste slip.
## Reads globals: tcmouse, tchuman, ensconvert, human_ensconvert,
## ortho_mouse_human_unique.
getconservedDE <- function(qval=5e-2){
  getdefromtable.mouse <- function(x){
    (unique(x$ensembl_gene_id[x$qval<qval])) #should really normalize earlier!
  }
  getdefromtable.human <- function(x){
    (unique(x$ensembl_gene_id[x$qval<qval])) #should really normalize earlier!
  }
  # me/ml: mouse DE early/late; he/hl: human DE early/late
  allde.mouse <- smerge(data.frame(
    ens_mouse = ensconvert$ensembl_gene_id,
    me=ensconvert$ensembl_gene_id %in% getdefromtable.mouse(tcmouse$de_early),
    ml=ensconvert$ensembl_gene_id %in% getdefromtable.mouse(tcmouse$de_late),
    stringsAsFactors = FALSE), ortho_mouse_human_unique, all.x = TRUE)
  allde.human <- smerge(data.frame(
    ens_human = human_ensconvert$ensembl_gene_id,
    he=human_ensconvert$ensembl_gene_id %in% getdefromtable.mouse(tchuman$de_early),
    hl=human_ensconvert$ensembl_gene_id %in% getdefromtable.mouse(tchuman$de_late),
    stringsAsFactors = FALSE), ortho_mouse_human_unique, all.x = TRUE)
  allde <- smerge(allde.mouse, allde.human, all=TRUE)
  # Genes absent from one species get FALSE rather than NA
  allde$me[is.na(allde$me)] <- FALSE
  allde$ml[is.na(allde$ml)] <- FALSE
  allde$he[is.na(allde$he)] <- FALSE
  allde$hl[is.na(allde$hl)] <- FALSE
  # Attach readable symbols for both species
  allde <- smerge(allde, data.frame(
    ens_human=human_ensconvert$ensembl_gene_id,
    sym_human=human_ensconvert$mgi_symbol,
    stringsAsFactors = FALSE), all.x=TRUE)
  allde <- smerge(allde, data.frame(
    ens_mouse=ensconvert$ensembl_gene_id,
    sym_mouse=ensconvert$mgi_symbol,
    stringsAsFactors = FALSE), all.x=TRUE)
  repNAfalse <- function(x) {
    x[is.na(x)] <- FALSE
    x
  }
  # Derived flags: DE at any time per species, and various conservation
  # definitions combining the two species
  allde$anytime_mouse <- repNAfalse(allde$me | allde$ml)
  allde$anytime_human <- repNAfalse(allde$he | allde$hl)
  allde$conserved_loose <- repNAfalse((allde$me | allde$he) | (allde$ml | allde$hl))
  allde$conserved_alltime <- repNAfalse((allde$me & allde$he) & (allde$ml & allde$hl))
  allde$conserved_special <- repNAfalse(((allde$he | allde$hl) & allde$me) | allde$ml)
  allde$conserved_anytime <- repNAfalse((allde$he & allde$me) | (allde$hl & allde$ml))
  allde
}
allde <- getconservedDE()
## Interactive sanity check for two genes of interest
na.omit(allde[allde$sym_mouse %in% c("Il4","Fli1"),])

##################################################################
## Output data to put in Venn diagram (made manually)
if(FALSE){
  x <- getconservedDE(0.001)
  c( sum(x$me), sum(x$me & x$ml), sum(x$ml))
  c( sum(x$me & x$he), sum(x$ml & x$hl & x$me & x$he), sum(x$ml & x$hl))
  c( sum(x$he), sum(x$he & x$hl), sum(x$hl))
  sum(
    c( sum(x$me & x$he),
       sum(x$ml & x$hl & x$me & x$he),
       sum(x$ml & x$hl)))
  x[x$conserved_alltime,]$sym_mouse #Gata3, il2rb, mapkapk3 etc

  #How many % of the DE genes are in at least one of our screens?
  hitsinanyscreen <- names(which(sgenescorer2_matrix[,1]<500 | apply(sgenescorer2_matrix[,-1]<1000,1,any)))
  mean(unique(x$sym_mouse[x$anytime_mouse]) %in% hitsinanyscreen)
}

##################################################################
## For each of the 5 screens, print the given genes' ranks (rank < 500 for
## the first screen, < 1000 for the rest) and return them as a data frame.
## Reads the global `sgenescorer2_matrix`.
printconservedde_all <- function(conservedde_all){
  out <- NULL
  for(i in 1:5){
    x<-sgenescorer2_matrix[conservedde_all,i]
    names(x) <- rownames(sgenescorer2_matrix[conservedde_all,])
    # Screen 1 uses a stricter rank cutoff than the others
    if(i==1)
      x <- x[x<500]
    else
      x <- x[x<1000]
    print(colnames(sgenescorer2_matrix)[i])
    print(sort(x))
    out <- rbind(out, data.frame(screen=colnames(sgenescorer2_matrix)[i], gene=names(sort(x))))
  }
  out
  # vc <- vennCounts(allde)
  # vennDiagram(vc,cex=c(1.5,1.5,1.5))
}
#printconservedde_all(allde_conserved_alltime)
# x <- getconservedDE(0.001)
## NOTE(review): `x` is only assigned inside the if(FALSE) block above (and
## in the commented line here), so this live call needs `x` from an
## interactive session -- confirm intent.
printconservedde_all(x$sym_mouse[x$conserved_anytime])

###########################################################
############# read chip data ##############################
###########################################################

##################################################################
## Read a merged ChIP peak table ("chip/<fname>_total.csv") plus its
## annotation, and for each (gene, time point) with narrowPeak columns
## present, record the fraction of replicates (a/b) supporting each peak.
## Returns peaks merged with the annotation table.
readallchiptot <- function(chipgenes = c("Gata3","Batf","Irf4","Stat6","Stat6m","Xbp1"),fname="gbi"){
  foo <- read.csv(sprintf("chip/%s_total.csv",fname),sep="\t",stringsAsFactors = FALSE)
  times <- c(2, 4,24,48,72)
  # NOTE(review): `tp` is computed but not used in this function
  tp <- c("peak","Naive",sprintf("Th2_%sh",times))
  out <- as.data.frame(matrix(ncol=0, nrow=nrow(foo),0))
  out$peak <- foo[,1]
  chiprep<-c("a","b")
  for(g in chipgenes)
    for(time in times){
      # print(g=="Stat6m" & time==72)
      # Stat6m at 72h uses a file name without the time component
      if(g=="Stat6m" & time==72)
        f<-sprintf("out.%s_%s_peaks.narrowPeak",g,chiprep)
      else {
        f<-sprintf("out.%s_%sh_%s_peaks.narrowPeak",g,time,chiprep)
      }
      # print(f)
      # Only keep replicate columns that actually exist in the table
      f<-intersect(f,colnames(foo))
      if(length(f)>0){
        print(f)
        rf <- sprintf("%s_%sh",g,time)
        # Fraction of replicates with a non-empty entry per peak (0, 0.5, 1)
        w <- apply(foo[,f,drop=FALSE]!="",1,mean)
        out[,rf] <- w
        # out[w,rf] <- w
      }
    }
  ann <- read.csv(sprintf("chip/%s_ann.csv",fname),sep="\t",stringsAsFactors = FALSE)
  colnames(ann)[1] <- "peak"
  out <- merge(out,ann)
}
## gata + batf + irf4 + raw stat6
dchiptot<- readallchiptot(fname="chip")
#colnames(dchiptot)
## gata + batf + irf4 + xbp1
dgbix <- readallchiptot(fname="chip3")
## gata + batf + irf4 + merged stat6
dgbis <- readallchiptot(fname="chip2")
## gata + batf + irf4
dgbi <- readallchiptot(fname="gbi")
dgata3<- readallchiptot(fname="Gata3")
dbatf <- readallchiptot(fname="Batf")
dirf4 <- readallchiptot(fname="Irf4")

##################################################################
## get if there is a chip peak for certain time points
## Count, per nearest gene, the TSS-proximal peaks present (value == 1) in
## any of the time-point columns `tp`. Reads the global `maxTSSdistCHIP`.
chip_tp <- function(tf,out,tp){
  out <- out[abs(out$Distance.to.TSS)<maxTSSdistCHIP,] ##TSS distance cut-off
  li <- apply(out[,tp]==1,1,any)
  v <- table(out$Nearest.Ensembl[li])
  v <- data.frame(
    jasparname=rep(tf,length(v)),
    TSS_ensg=names(v),
    cnt=as.double(v)
  )
  v
}

## get if there is a peak at any time
## NOTE(review): `dxbp1` is not defined anywhere in this section -- confirm
## it is created elsewhere in the file before this runs.
chip_alltime <- function(){
  tp <- 2:7
  rbind(
    #chip_tp("chip_Stat6",dstat6,tp),
    chip_tp("chip_Gata3",dgata3,tp),
    chip_tp("chip_Batf",dbatf,tp),
    chip_tp("chip_Irf4",dirf4,tp),
    chip_tp("chip_Xbp1",dxbp1,tp)
  )
}
c_alltime <- chip_alltime()

##################################################################
#### Read and prepare ATAC peaks #################################
##################################################################

##################################################################
## Read ATAC peak counts for `org`, normalize by background read depth and
## peak length, then by the second time point, and return per-TF summed
## site activity over the 6 time points (rows = jasparname).
## `atac` must provide $mapPeakInfo (from readnormATAC below).
getnormATAC <- function(org, atac){
  # Sum replicate column pairs: (1,2)->1, (3,4)->2, ...
  sumcolpairs <- function(x){
    y <- matrix(0,ncol=ncol(x)/2,nrow=nrow(x))
    for(i in 1:ncol(y)){
      y[,i] <- x[,i*2-1]+x[,i*2]
    }
    y
  }
  newatac_ann <- read.csv(sprintf("atac/%s/ATACall_peaks.red.ann.csv",org),sep="\t",stringsAsFactors = FALSE)
  colnames(newatac_ann)[1] <- "peakid" #or something
  newatac_ann$Gene.Name <- normalizesym(newatac_ann$Gene.Name)

  ### Read background counts and figure out average counts
  newatac_inv <- read.table(sprintf("atac/%s/ATACall_peaks.inv.bed",org),sep="\t",stringsAsFactors = FALSE)
  # Total length of the background (inverse-peak) regions
  newatac_inv_sum <- sum(as.numeric(newatac_inv$V3-newatac_inv$V2))
  newatac_bg <- read.table(sprintf("atac/%s/counts.f.bg.csv",org),sep="\t",stringsAsFactors = FALSE)
  newatac_bg <- apply(sumcolpairs(newatac_bg[,-(1:3)]),2,sum)
  newatac_bg_avgreads <- newatac_bg/newatac_inv_sum

  ### Read peak counts
  newatac_peaks <- read.table(sprintf("atac/%s/counts.f.peaks.csv",org),sep="\t",stringsAsFactors = FALSE)
  newatac_peaks <- cbind(newatac_peaks[,4,drop=FALSE],sumcolpairs(newatac_peaks[,-(1:6)]))
  colnames(newatac_peaks)
  colnames(newatac_peaks) <- c("peakid","Naive","Th2_2h","Th2_4h","Th2_24h","Th2_48h","Th2_72h") #consider other parts of the code
  head(newatac_peaks)
  newatac_peaks <- smerge(newatac_peaks,newatac_ann)

  #Normalize peaks by background and length
  newatac_peaks_norm <- newatac_peaks
  newatac_peakslen <- (newatac_peaks$End-newatac_peaks$Start)
  for(i in 1:6){
    #Arbitrary unit to make it easier to think. now most peaks in 0-2. with up to 10
    newatac_peaks_norm[,i+1] <- 1e5*(newatac_peaks[,i+1]-newatac_bg_avgreads[i])/newatac_bg[i]/newatac_peakslen
  }
  print("Normalized ATAC peak counts by background/length")

  #Normalize over time
  newatac_peaks_norm_time <- newatac_peaks_norm
  ts <- newatac_peaks_norm_time[,1+2] #normalize by second time point
  #print(head(ts))
  for(i in 1:6){
    newatac_peaks_norm_time[,i+1] <- newatac_peaks_norm_time[,i+1]/ts
  }
  print("Normalized ATAC peak counts over time")

  ### Peak -> Scaled size over time and distance to TSS
  mapPeakGene <- newatac_peaks_norm_time[,c("peakid","Nearest.Ensembl","Gene.Name","Naive","Th2_2h","Th2_4h","Th2_24h","Th2_48h","Th2_72h", "Distance.to.TSS")]
  colnames(mapPeakGene) <- c("peakid","TSS_ensg","gene","Naive","Th2_2h","Th2_4h","Th2_24h","Th2_48h","Th2_72h","TSS_distance")
  #mapPeakGene_unfiltered <- mapPeakGene
  # Keep only peaks within 30 kb of a TSS
  mapPeakGene <- mapPeakGene[abs(mapPeakGene$TSS_distance)<30e3,]

  # Attach the motif (TF site) annotation per peak
  mapSiteGene <- smerge(mapPeakGene,atac$mapPeakInfo[,c("jasparname","peakid")])
  print("site->gene mapping done")

  # Sum normalized accessibility per TF across all its sites, per time point
  tfattall <- sqldf("select distinct jasparname, sum(`Naive`) as cnt1, sum(`Th2_2h`) as cnt2, sum(`Th2_4h`) as cnt3, sum(`Th2_24h`) as cnt4, sum(`Th2_48h`) as cnt5, sum(`Th2_72h`) as cnt6 from `mapSiteGene` group by jasparname")
  rownames(tfattall) <- tfattall$jasparname
  tfattall <- tfattall[,-1]
  tfattall
}

## Divide every time-point column by the second column (2h) and sort rows by
## the final time point.
normlevatacTime <- function(tfatall){
  tfatall_normtime2 <- tfatall
  temp <- tfatall_normtime2[,2]
  for(i in 1:6){
    tfatall_normtime2[,i] <- tfatall_normtime2[,i]/temp
  }
  tfatall_normtime2[order(tfatall_normtime2[,6]),]
}
## NOTE(review): `atac.mouse` is only assigned in the cached-load block near
## the end of this section -- this call requires that block (or a previous
## session) to have run first; confirm execution order.
levatac.mouse <- getnormATAC("mouse", atac.mouse)
levatac.mouse.norm <- normlevatacTime(levatac.mouse)
### Store for website
write.csv(levatac.mouse.norm,"out_teichlab/th2crispr_mouse_TFchrom_data.csv",row.names = TRUE, quote = FALSE)

##################################################################
## Read ATAC peak annotation and FIMO motif hits for `org`, compute
## absolute genomic coordinates of each motif site, write them as a BED
## file ("atac/<org>/sites.bed"), and return the site/peak tables.
readnormATAC <- function(org){
  ### Read peak annotation
  newatac_ann <- read.csv(sprintf("atac/%s/ATACall_peaks.red.ann.csv",org),sep="\t",stringsAsFactors = FALSE)
  colnames(newatac_ann)[1] <- "peakid" #or something
  newatac_ann$Gene.Name <- normalizesym(newatac_ann$Gene.Name)
  head(newatac_ann)

  ### Peak -> global position of peak
  mapPeakPos <- newatac_ann[,c("Chr","Start","End","peakid","Nearest.Ensembl","Gene.Name","Distance.to.TSS")]
  colnames(mapPeakPos) <- c("Chr","peakstart","peakend","peakid","TSS_ensg","gene","TSS_distance")

  ### Peak -> global position of peak
  # mapPeakPos <- newatac_peaks_norm[,c("Chr","Start","End","peakid")]
  # colnames(mapPeakPos) <- c("Chr","peakstart","peakend","peakid")

  ### Peak -> local info about peak and TFs in it
  mapPeakInfo <- read.csv(sprintf("atac/%s/fimo.txt",org),stringsAsFactors = FALSE,sep="\t") #not convinced this is right
  #head(mapPeakInfo)
  colnames(mapPeakInfo)[1]<-"motifid"
  colnames(mapPeakInfo)[2]<-"jasparname"
  colnames(mapPeakInfo)[3]<-"peakid"
  #mapPeakInfo <- mapPeakInfo[mapPeakInfo$p.value<1e-5,] #did not seem to filter before!
  mapPeakInfo$jasparname <- normalizesym(mapPeakInfo$jasparname)
  print("Got local coordinates of motifs")

  ### Figure out absolute position of motifs
  mapSiteInfo <- smerge(mapPeakInfo[,c("motifid","jasparname","peakid","start","stop","strand")],
                        newatac_ann[,c("peakid","Chr","Start","Nearest.Ensembl","Gene.Name","Distance.to.TSS")])
  # mapMotifPos <- smerge(mapMotifPos, mapPeakInfo)
  # Motif coordinates are 1-based within the peak; shift to genome coordinates
  mapSiteInfo$motifstart <- mapSiteInfo$start + mapSiteInfo$Start-1
  mapSiteInfo$motifend <- mapSiteInfo$stop + mapSiteInfo$Start-1 #I suspect this -1 is correct
  mapSiteInfo <- mapSiteInfo[,c("peakid","motifid","jasparname","strand","Chr","motifstart","motifend","Nearest.Ensembl","Gene.Name","Distance.to.TSS")]
  colnames(mapSiteInfo)[colnames(mapSiteInfo)=="Nearest.Ensembl"] <- "TSS_ensg"
  colnames(mapSiteInfo)[colnames(mapSiteInfo)=="Gene.Name"] <- "gene"
  colnames(mapSiteInfo)[colnames(mapSiteInfo)=="Distance.to.TSS"] <- "TSS_distance"
  print("Got global coordinates of motifs")

  ### Write BED file with absolute coordinates of the sites
  abed <- mapSiteInfo[,c("Chr","motifstart","motifend","jasparname")]
  # format() prevents scientific notation for large coordinates in the BED
  abed$motifstart <- format(abed$motifstart , scientific = FALSE)
  abed$motifend <- format(abed$motifend , scientific = FALSE)
  write.table(abed,sprintf("atac/%s/sites.bed",org),row.names = FALSE,col.names=FALSE, quote = FALSE)
  print("Wrote TF site bed file")

  #Cache and return result
  list(
    mapSiteInfo=mapSiteInfo,
    mapPeakInfo=mapPeakInfo
  )
}

##################################################################
## Write a BED file of TF site coordinates (same output as the in-line BED
## writing inside readnormATAC, but reusable with a custom output path).
writeBEDforATACsites <- function(org, mapSiteInfo, outf=sprintf("atac/%s/sites.bed",org)){
  abed <- mapSiteInfo[,c("Chr","motifstart","motifend","jasparname")]
  abed$motifstart <- format(abed$motifstart , scientific = FALSE)
  abed$motifend <- format(abed$motifend , scientific = FALSE)
  write.table(abed,outf,row.names = FALSE,col.names=FALSE, quote = FALSE)
  print("Wrote TF site bed file")
}

############################
## Calculate # sites over time. Need to be rewritten
#colnames(newatac_peaks_norm)
## NOTE(review): `d$mapPeakGene` is not produced by readnormATAC above --
## confirm where the expected input comes from ("needs to be rewritten" per
## the original author).
mapsitelevelATAC <- function(d){
  ### Site -> Scaled size over time and distance to TSS
  mapMotifGene <- smerge(d$mapPeakInfo,d$mapPeakGene)

  ### TF -> Summed activity over sites at different times
  tfattall <- sqldf("select distinct jasparname, sum(`Naive`) as cnt1, sum(`Th2_2h`) as cnt2, sum(`Th2_4h`) as cnt3, sum(`Th2_24h`) as cnt4, sum(`Th2_48h`) as cnt5, sum(`Th2_72h`) as cnt6 from mapMotifGene group by jasparname")
  rownames(tfattall)<-tfattall$jasparname
  tfattall<-tfattall[,-1]
  colnames(tfattall)<-c("0h","2h","4h","24h","48h","72h")
  d$tfattall <- tfattall
  d
}

##################################################################
#### Putative binding site conservation ##########################
##################################################################

##################################################################
## Keep only TF sites that overlap a liftOver-mapped site from the other
## species. The chromosome name is suffixed with the TF name on both sides
## so overlaps are only counted within the same TF. Prints the fraction of
## sites conserved and returns the conserved subset of mapSiteInfo.
getConservedSites <- function(mapSiteInfo, flifted){
  #Turn peak info into a grange
  grPeakInfo<-makeGRangesFromDataFrame(data.frame(
    chr=sprintf("%s_%s",mapSiteInfo$Chr,mapSiteInfo$jasparname),
    start =mapSiteInfo$motifstart,
    end =mapSiteInfo$motifend,
    strand =mapSiteInfo$strand,
    peakid =mapSiteInfo$peakid # 1:nrow(x)
  ), keep.extra.columns=TRUE)

  #Get the lifted sequence and turn into a grange
  lifted <- read.table(flifted,sep="\t")
  colnames(lifted) <- c("chr","start","end","jasparname")
  lifted$chr <- sprintf("%s_%s", lifted$chr, lifted$jasparname)
  grLifted<-makeGRangesFromDataFrame(lifted)

  #See which TF sites are preserved
  grPeakInfo_int <- findOverlaps(grPeakInfo, grLifted, ignore.strand=TRUE)
  mapSiteInfoConserved <- mapSiteInfo[unique(from(grPeakInfo_int)),]
  print(nrow(mapSiteInfoConserved)/nrow(mapSiteInfo))
  mapSiteInfoConserved
}

#Write new human BED file for all peaks
# writehumanBedATAC <- function(){
#   newatac_ann <- read.csv(sprintf("atac/human/ATACall_peaks.red.ann.csv"),sep="\t",stringsAsFactors = FALSE)
#   f <- function(y) format(y , scientific = FALSE)
#
#   bedhumanatac <- data.frame(
#     chr=sprintf("chr%s",newatac_ann$Chr),
#     start=f(newatac_ann$Start),
#     end=f(newatac_ann$End),
#     strand=newatac_ann$Strand,
#     stringsAsFactors = FALSE
#   )
#   write.table(bedhumanatac,sprintf("atac/lift/human.bed"),row.names = FALSE,col.names=FALSE, quote = FALSE)
# }
# writehumanBedATAC()

##################################################################
#### ATAC peak conservation ######################################
##################################################################

##################################################################
## Check conservation on peak level
## Count how many of `org`'s own ATAC peaks overlap peaks lifted over from
## the other species (BED file `flifted`). Returns a one-row data frame:
## total own peaks, own peaks with an overlap, and number of lifted peaks.
getConservedPeaks <- function(org, flifted, istm=FALSE){
  # org <- "mouse"
  # istm selects the alternative "tm" peak set
  if(istm){
    ownpeak <- read.csv(sprintf("atac/%s/tm/ATACall_peaks.red.ann.csv",org),sep="\t",stringsAsFactors = FALSE)
  } else{
    ownpeak <- read.csv(sprintf("atac/%s/ATACall_peaks.red.ann.csv",org),sep="\t",stringsAsFactors = FALSE)
  }
  # flifted="atac/lift/lifted.peaks.human.bed"
  #mapPeakInfo <- atac.human$mapPeakInfo
  #flifted="atac/lift/lifted.sites.mouse.bed"

  #Turn peak info into a grange
  grOwnPeak<-makeGRangesFromDataFrame(data.frame(
    chr=ownpeak$Chr,
    start=ownpeak$Start,
    end=ownpeak$End,
    strand=ownpeak$Strand
  ))

  #Get the lifted sequence and turn into a grange
  lifted <- read.table(flifted,sep="\t")
  colnames(lifted) <- c("chr","start","end","xxx")
  grLifted<-makeGRangesFromDataFrame(lifted)

  #See which peaks are preserved
  grPeakInfo_int <- findOverlaps(grOwnPeak, grLifted, ignore.strand=TRUE)
  data.frame(
    nownTot=nrow(ownpeak),
    nownOverlap=length(unique(from(grPeakInfo_int))),
    nLifted=nrow(lifted))
}

##################################################################
## Plot how many peaks overlap
## Writes a horizontal stacked barplot of peak-conservation counts for
## mouse and human to atac/lift/overlap.pdf (Okabe-Ito-style colors).
makeATACPeakOverlapPlot <- function(){
  #Note: no big difference with 0.2 and 0.6 cutoff in sequence conservation
  pdf("atac/lift/overlap.pdf",height = 3)
  if(FALSE){
    statPeakOverlap <- rbind(
      getConservedPeaks("mouse", "atac/lift.tm/lifted.peaks.human.bed", TRUE),
      getConservedPeaks("human", "atac/lift.tm/lifted.peaks.mouse.bed", TRUE))
  } else {
    statPeakOverlap <- rbind(
      getConservedPeaks("mouse", "atac/lift/lifted.peaks.human.bed"),
      getConservedPeaks("human", "atac/lift/lifted.peaks.mouse.bed"))
  }
  # nLifted was computed in the other species' coordinates; swap rows so
  # each row pairs a species' own peaks with the peaks lifted INTO it
  statPeakOverlap$nLifted <- rev(statPeakOverlap$nLifted)
  statPeakOverlap$notinother <- statPeakOverlap$nownTot - statPeakOverlap$nLifted
  statPeakOverlap$liftedbutnotoverlap <- statPeakOverlap$nLifted - rev(statPeakOverlap$nownOverlap)
  barplot(
    t(as.matrix(statPeakOverlap[,c("nownTot","liftedbutnotoverlap","nownOverlap")])),
    col=c(rgb(230,159,0,maxColorValue = 255),rgb(86,180,233,maxColorValue = 255),rgb(0,158,115,maxColorValue = 255)),
    horiz=TRUE,
    names.arg=c("Mouse","Human")
  )
  dev.off()
  ####TODO hmm... where are the missing peaks? are they closer to genes or anything?
}

##################################################################
#### Merge peaks and detected motifs in them
#### Only for mouse right now
##################################################################

############
##### combine ATAC peak input files and count peaks per gene
## Count TF sites per (jasparname, nearest gene) pair.
calcgenetfcount <- function(mapMotifGene){
  zz<-mapMotifGene[,c("jasparname","TSS_ensg")]
  sqldf("select distinct jasparname, TSS_ensg, count(TSS_ensg) as cnt from zz group by jasparname, TSS_ensg")
}

##################################################################
## Get TF site count per gene
## Pivot the (jasparname, TSS_ensg, cnt) table into a genes x TFs numeric
## matrix of site counts (missing combinations filled with 0).
getmarasitecountmatrix <- function(genetfcount){
  #returns an annoying jasparname-row. but don't change, breaks code
  d <- dcast(genetfcount, jasparname~TSS_ensg, fill=0, value.var = "cnt")
  #colnames(d)[1:3]
  rownames(d) <- d[,1]
  d<-t(d)[-1,] #removes jasparname row - might break some code!!!
  class(d) <- "double"
  d
}

##################################################################
## ....  Use cached result if possible
## Load the ATAC site/peak tables from an RData cache when present;
## otherwise rebuild them from the raw inputs and attach the non-conserved
## TF-per-gene count tables (mouse additionally gets the ChIP counts
## prepended via c_alltime).
fname_atac_mouse <- sprintf("atac/%s.RData","mouse")
fname_atac_human <- sprintf("atac/%s.RData","human")
if(file.exists(fname_atac_mouse)){
  atac.mouse <- readRDS(fname_atac_mouse)
  atac.human <- readRDS(fname_atac_human)
} else {
  atac.mouse <- readnormATAC("mouse")
  atac.human <- readnormATAC("human")
  object.size(atac.mouse)/1e6

  #Non-conserved TF-gene matrix
  #atac.mouse$noncons_tfc <- calcgenetfcount(atac.mouse$mapSiteInfo)
  atac.mouse$noncons_tfc <- rbind(c_alltime,calcgenetfcount(atac.mouse$mapSiteInfo))
  atac.human$noncons_tfc <- calcgenetfcount(atac.human$mapSiteInfo)
  #TODO: should ideally have chipseq data here too?
  #object.size(atac.mouse$noncons_tfc)/1e6

  #Note: lifting, 0.2 vs 0.6: seems 25% more peaks are lifted over.
but this has little improvement on the site overlap atac.mouse$mapSiteInfo <- getConservedSites(atac.mouse$mapSiteInfo, "atac/lift/lifted.sites.human.bed") #20% of mouse peaks left atac.human$mapSiteInfo <- getConservedSites(atac.human$mapSiteInfo, "atac/lift/lifted.sites.mouse.bed") #12% of human peaks left # need to rethink # atac.mouse <- mapsitelevelATAC(atac.mouse) # atac.human <- mapsitelevelATAC(atac.human) atac.mouse$cons_tfc <- rbind(c_alltime,calcgenetfcount(atac.mouse$mapSiteInfo)) atac.human$cons_tfc <- calcgenetfcount(atac.human$mapSiteInfo) #TODO chipseq? writeBEDforATACsites("mouse",atac.mouse$mapSiteInfo, "conservedsite.bed") #Cache saveRDS(atac.mouse, fname_atac_mouse) saveRDS(atac.human, fname_atac_human) } ################################################################## ### Conserved ATAC peaks ######################################### ################################################################## #Do they go up and down the same way? are the sizes similar? ################################################################## ### Extract absolute ATAC motif coordinates ###################### ################################################################## writeMotifBed <- function(motif=NULL){ mapMotifPosBed <- unique(data.frame( chr=mapMotifPos$Chr, start=as.integer(mapMotifPos$motifstart), end=as.integer(mapMotifPos$motifend), name=mapMotifPos$jasparname, score=rep(1000,nrow(mapMotifPos)), strand=mapMotifPos$strand)) list_interesting_tf if(is.null(motif)){ mapMotifPosBed <- mapMotifPosBed[mapMotifPosBed$name %in% c(expressed_atacTF_50,"Etv2"),] write.table(mapMotifPosBed,sprintf("out_motif/motifs.ALL.bed"),col.names = FALSE, row.names = FALSE,quote = FALSE,sep="\t") } else { red <- mapMotifPosBed[mapMotifPosBed$name %in% motif,] write.table(red,sprintf("out_motif/motifs.red.bed"),col.names = FALSE, row.names = FALSE,quote = FALSE,sep="\t") } } # writeMotifBed() # 
writeMotifBed(c("Yy1","Tbx21","Pou6f1","Pou2f2","Etv2","Etv6","E2f4","Runx1","Foxo1","Ctcf","Ewsr1-fli1","Nrf1","Spib","Spi1","Ikzf3",
#                "Stat6","Stat4","Epas","Nfil3"))
# writeMotifBed(list_interesting_tf)   #from the atac-screen combination

##################################################################
####### Score ATAC motifs as early/late ##########################
##################################################################

# Cluster mean-normalized TF activity trends over the time course with k-means
# (k = 5) and plot the average trend of each cluster.
# NOTE(review): the plotting code subsets by `rownames(tfattall)`, a bare name
# that is not defined inside this function (the normalized local copy is
# `forkm`); this appears to rely on a global `tfattall` -- verify before reuse.
kmeans.atacT <- function(atac, tc){
  ## Perform k-kmeans on normalized trends.
  ## Groups are boring - from early to late.
  set.seed(0)
  # Normalize each TF's trend to its own mean so clusters reflect shape,
  # not overall magnitude.
  forkm <- atac$tfattall
  for(i in 1:nrow(forkm)){
    forkm[i,] <- forkm[i,]/mean(forkm[i,])
  }
  atackm <- kmeans(forkm,5)
  kmcol <- brewer.pal(max(atackm$cluster),"Set1")

  ## Show the k-means groups
  plot(apply(forkm[atackm$cluster==1 & rownames(tfattall) %in% tc$expressed_atacTF,],2,mean),type="l",ylim=c(0,2),col=kmcol[1])
  for(i in 2:max(atackm$cluster))
    lines(apply(forkm[atackm$cluster==i & rownames(tfattall) %in% tc$expressed_atacTF,],2,mean),col=kmcol[i])
}

## Base color on when the motif is present
## This does the job as well as k-means. score=0 early. score=1 late
# Score each TF by comparing activity at the 2h column against the 48h column
# of tfattall (columns are "0h","2h","4h","24h","48h","72h"), then rescale so
# scores lie in [-1, 1] around the median: negative = early, positive = late.
calc_score_ael <- function(tfattall_red){
  #wt <- apply(tfattall_red,1,function(x) sum(x*(1:6))/sum(x))
  wt <- 1-tfattall_red[,2]/tfattall_red[,5]
  wt <- (wt-min(wt))/(max(wt)-min(wt))
  # Center on the median, then scale the two halves independently to [-1, 0]
  # and [0, 1].
  wt <- wt - median(wt)
  wt[wt<0] <- wt[wt<0]/-min(wt)
  wt[wt>0] <- wt[wt>0]/max(wt)
  wt
}

# Map early/late scores to colors: negative (early) scores in red, positive
# (late) scores in blue, with square-root scaling to emphasize values near 0.
col_from_ael <- function(wt){
  thecol <- rep("black",length(wt))
  rsc <- function(x) abs(x)^0.5
  r<-rsc(wt)
  x<-r[wt<0]
  thecol[wt<0] <- rgb(x,0,0)
  x<-r[wt>=0]
  thecol[wt>=0] <- rgb(0,0,x)
  thecol
  ##http://www.somersault1824.com/tips-for-designing-scientific-figures-for-color-blind-readers/
}

# Attach early/late scores to the cached mouse and human ATAC objects.
atac.mouse$score_ael <- calc_score_ael(atac.mouse$tfattall)
atac.human$score_ael <- calc_score_ael(atac.human$tfattall)

##################################################################
####### Curated list of genes ####################################
##################################################################

### Read list of transcription factors
list_cm <- rownames(read.csv("tflist/Mus_musculus_chromatin_remodeling_factors_gene_list.txt",sep="\t",stringsAsFactors = FALSE))
list_co <- rownames(read.csv("tflist/Mus_musculus_transcription_co-factors_gene_list.txt",sep="\t",stringsAsFactors = FALSE))
list_tf <- read.csv("tflist/Mus_musculus_transcription_factors_gene_list.txt",sep="\t",stringsAsFactors = FALSE)[,1]

### Read protein atlas protein annotation
protatlas <- read.csv("tflist/proteinatlas.tab",sep="\t",stringsAsFactors = FALSE)
#unique(protatlas$Subcellular.location)
# Ensembl gene IDs for genes whose protein class matches "secreted"
# (case-insensitive match of symbols via ensconvert).
list_protatlas_secreted <- ensconvert$ensembl_gene_id[
  str_to_lower(ensconvert$mgi_symbol) %in% str_to_lower(protatlas$Gene[c(
    grep("secreted",protatlas$Protein.class),
    grep("membraneNOPE",protatlas$Protein.class)
  )])]
# Ensembl gene IDs for membrane-protein-class genes.
list_protatlas_membrane <- ensconvert$ensembl_gene_id[
  str_to_lower(ensconvert$mgi_symbol) %in% str_to_lower(protatlas$Gene[c(
    grep("membrane",protatlas$Protein.class)
  )])]

##################################################################
####### ThExpress data ###########################################
##################################################################

# Read one ThExpress comparison table and keep only the fold-change
# (column 2) and p-value (column 6) columns.
read_thexpress <- function(fname) {
  read.csv(fname, sep = "\t", stringsAsFactors = FALSE)[, c(2, 6), drop = FALSE]
}

# Assemble the fold-change/p-value column pairs for Th2 versus each other
# T-cell subset, in a fixed order.
d <- read_thexpress("thexpress/Th2_vs_naive.txt")
for (other_file in c("thexpress/Th2_vs_Th1.txt", "thexpress/Th2_vs_Th17.txt",
                     "thexpress/Th2_vs_iTreg.txt", "thexpress/Th2_vs_nTreg.txt")) {
  d <- cbind(d, read_thexpress(other_file))
}

# Even columns of d hold p-values, odd columns hold fold changes.
thep  <- d[, c(2, 4, 6, 8, 10)]
thefc <- d[, c(1, 3, 5, 7, 9)]
colnames(thep)  <- c("Naive/Th0", "Th1", "Th17", "iTreg", "nTreg")
colnames(thefc) <- c("Naive/Th0", "Th1", "Th17", "iTreg", "nTreg")

##################################################################
####### DMDD project data ########################################
##################################################################

dmdd <- read.csv("dmdd/dmdd_embryo_annotations_20170306.tsv", sep = "\t", stringsAsFactors = FALSE)
# Keep embryos annotated with the phenotype (MP) terms of interest.
dmdd <- dmdd[dmdd$MP.ID %in% c(
  "MP:0000690", "MP:0000692", "MP:0000694", "MP:0000703", "MP:0000705", "MP:0000706",
  "MP:0001879", "MP:0002368", "MP:0010200", "MP:0013970"), ]
#"MP:0001914","MP:0001916","MP:0002633","MP:0002725","MP:0013970"

# Xbp1 screen scores for the DMDD genes, sorted from highest to lowest.
s <- sgenescorer2_matrix[rownames(sgenescorer2_matrix) %in% dmdd$Gene, "Xbp1", drop = FALSE]
s <- s[order(s[, 1], decreasing = TRUE), , drop = FALSE]
s
a8d2352b5082381cf035b36e453972c0ae3aa8a7
80b4f7435b8aab542b7565e3fdd4c242b6fc0d89
/analysis/sequence_tolerance.R
bcf03a04b9430481413d4a94597935893a6669d4
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
Kortemme-Lab/sequence-tolerance
a4a78e6302e32be7358ae72d2a3b411c9deb5064
1dbb7650f335210af2e61a2bedfa69d3d369c623
refs/heads/master
2021-01-22T07:11:18.640763
2017-05-25T00:05:43
2017-05-25T00:05:43
21,290,959
1
0
null
null
null
null
UTF-8
R
false
false
26,928
r
sequence_tolerance.R
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.

# Amino acid code lookup tables:
#   aa3: one-letter code -> three-letter code (e.g. aa3["W"] == "TRP")
#   aa1: three-letter code -> one-letter code (e.g. aa1["TRP"] == "W")
aa3 <- c(
  W = "TRP", F = "PHE", Y = "TYR", M = "MET", L = "LEU", I = "ILE", V = "VAL",
  A = "ALA", G = "GLY", S = "SER", T = "THR", R = "ARG", K = "LYS", H = "HIS",
  N = "ASN", Q = "GLN", D = "ASP", E = "GLU", P = "PRO", C = "CYS"
)
aa1 <- setNames(names(aa3), aa3)

# read_ga_entities (below) parses *.ga.entities checkpoint files.  It assumes
# all entities share the same composition and reads checkpointed Entity and
# MultiStateEntity objects.  It returns a data.frame with one row per entity:
# the traits at each sequence position first, then the overall fitness, then
# the state fitnesses and metric values.
read_ga_entities <- function(filename, restypes = NULL) {
    # Empty prototype vectors giving each checkpoint metric its proper R type.
    metric_types <- list(Real = numeric(), Int = integer(), Size = integer(), Bool = logical())

    # Transparently read gzip-compressed checkpoints; the first pass scans
    # only the header (up to 300 tokens) to learn the record layout.
    file_con <- if (length(grep("\\.gz$", filename))) gzfile(filename) else file(filename)
    tokens <- scan(file_con, character(), 300, quiet = TRUE)
    close(file_con)

    # Old-format files lack the "AA:" prefix on trait tokens.
    oldformat <- length(grep("AA:", tokens[2])) == 0

    if (tokens[1] != "traits") stop(paste(filename, "does not appear to be an entities file"))

    # scan_what is built up as the `what` list for the second scan() pass;
    # entries named "" (the NULLs) are skipped fields.
    scan_what <- list(NULL)

    # set up the traits input: trait tokens run from position 2 up to the
    # token before "fitness"; residue numbers are embedded in each token.
    num_traits <- grep("fitness", tokens)[1]-2
    if (oldformat) {
        res_nums <- lapply(strsplit(tokens[seq_len(num_traits)+1], "\\."), "[[", 1)
    } else {
        res_nums <- lapply(strsplit(tokens[seq_len(num_traits)+1], ":"), "[[", 2)
    }
    traits_what <- rep(list(character()), num_traits)
    names(traits_what) <- paste("AA", res_nums, sep = "")
    scan_what <- c(scan_what, traits_what, list(NULL, fitness = numeric()))

    # handle additional MultiStateEntity data (per-state fitnesses/metrics)
    if (tokens[num_traits+4] == "states") {
        scan_what <- c(scan_what, list(NULL, NULL))
        # iterate over the number of states
        num_states <- as.integer(tokens[num_traits+5])
        token_offset <- num_traits+6
        for (i in seq_len(num_states)) {
            state_what <- list(NULL, numeric(), NULL, NULL)
            names(state_what) <- c("", paste("state", i, "_fitness", sep = ""), "", "")
            scan_what <- c(scan_what, state_what)
            # iterate over the number of metrics
            num_metrics <- as.integer(tokens[token_offset+3])
            token_offset <- token_offset+4
            for (j in seq_len(num_metrics)) {
                metric_name <- paste("state", i, "_", tokens[token_offset], sep = "")
                metric_type <- tokens[token_offset+1]
                scan_what <- c(scan_what, list(NULL, NULL))
                token_offset <- token_offset + 2
                if (length(grep("\\[$", metric_type))) {
                    # handle vector metrics: a "Type[" token followed by the
                    # metric values and a closing "]" token
                    metric_length <- which(tokens[-seq_len(token_offset-1)] == "]")[1] - 1
                    metric_type <- substr(metric_type, 1, nchar(metric_type)-1)
                    metric_what <- rep(list(metric_types[[metric_type]]), metric_length)
                    names(metric_what) <- paste(metric_name, seq_len(metric_length), sep = "")
                    scan_what <- c(scan_what, metric_what, list(NULL))
                    token_offset <- token_offset + metric_length + 1
                } else {
                    # handle scalar metrics
                    metric_what <- list(metric_types[[metric_type]])
                    names(metric_what) <- metric_name
                    scan_what <- c(scan_what, metric_what)
                    token_offset <- token_offset + 1
                }
            }
        }
    }

    # Second pass: read the whole file with the layout discovered above and
    # drop the skipped (unnamed) fields.
    file_con <- if (length(grep("\\.gz$", filename))) gzfile(filename) else file(filename)
    result <- scan(file_con, scan_what, quiet = TRUE)[names(scan_what) != ""]
    close(file_con)

    # Strip the per-format prefix from each trait token, leaving one-letter
    # residue codes; with `restypes` given, convert to factors on those levels.
    if (is.null(restypes)) {
        for (i in seq_len(num_traits)) {
            if (oldformat) {
                result[[i]] <- unname(aa1[sub(".+\\.", "", result[[i]])])
            } else {
                result[[i]] <- sub(".+:", "", result[[i]])
            }
        }
    } else {
        for (i in seq_len(num_traits)) {
            if (oldformat) {
                result[[i]] <- factor(unname(aa1[sub(".+\\.", "", result[[i]])]), restypes)
            } else {
                result[[i]] <- factor(sub(".+:", "", result[[i]]), restypes)
            }
        }
    }

    as.data.frame(result)
}

# Read a list of *.ga.entities checkpoint files out of a directory.  Parsed
# data can optionally be cached to (and reloaded from) an .Rda file.
read_ga_entities_list <- function(dirpath, filepattern = NULL, recompute = FALSE, savedata = FALSE, readgen = FALSE) {
    filename <- file.path(dirpath, paste("entities", filepattern, ".Rda", sep = ""))
    if (file.exists(filename) && !recompute) {
        # Reload the cached `entitieslist` object.
        load(filename)
    } else {
        simpattern <- if (is.null(filepattern)) "" else filepattern
        entitiesfiles <- list.files(dirpath, pattern = paste(simpattern, ".*\\.ga\\.entities", sep = ""), full.names = TRUE, recursive = TRUE)
        # Ignore empty checkpoint files.
        entitiesfiles <- entitiesfiles[file.info(entitiesfiles)$size > 0]
        entitieslist <- vector("list", length(entitiesfiles))
        generationslist <- vector("list", length(entitiesfiles))
        for (i in seq(along = entitiesfiles)) {
            print(entitiesfiles[i])
            entitieslist[[i]] <- read_ga_entities(entitiesfiles[i], unname(aa1))
            # The matching generations file shares the name of the entities file.
            if (readgen) generationslist[[i]] <- read_ga_generations(sub("entities", "generations", entitiesfiles[i]), entitieslist[[i]])
        }
        names(entitieslist) <- gsub(".+/", "", entitiesfiles)
        if (readgen)
            attr(entitieslist, "generations") <- generationslist
        if (savedata == TRUE) save(entitieslist, file = filename)
    }
    entitieslist
}

# This function reads *.ga.generations checkpoint files.  It requires that the
# output of the read_ga_entities() function also be provided.  It returns a
# list of integer vectors with the indices to the entities in each generation.
read_ga_generations <- function(filename, entities) {
    file_con <- if (length(grep("\\.gz$", filename))) gzfile(filename) else file(filename)
    gen_lines <- readLines(file_con)
    close(file_con)

    oldformat <- length(grep("AA:", gen_lines[2])) == 0

    # Normalize every line to a space-separated string of one-letter residue
    # codes ("generation N" lines are left as-is).
    if (oldformat) {
        genenerations_traits <- gsub("[^ ]+\\.", "", gen_lines)
        replacefunc <- function(x) {
            paste(if (x[1] == "generation") x else unname(aa1[x]), collapse=" ")
        }
        genenerations_traits <- sapply(strsplit(genenerations_traits, " "), replacefunc)
    } else {
        genenerations_traits <- gsub("[^ ]+:", "", gen_lines)
    }

    # Match each generation line's sequence against the entities table.
    entities_traits <- do.call("paste", entities[,grep("^AA", names(entities))])
    trait_matches <- match(genenerations_traits, entities_traits)

    gen_line_indices <- grep("^generation ", gen_lines)
    gen_line_numbers <- as.integer(sub("^generation ", "", gen_lines[gen_line_indices]))
    # Number of entity lines between consecutive "generation" headers.
    gen_lengths <- diff(c(gen_line_indices, length(gen_lines)+1)) - 1

    # NOTE(review): length(max(gen_line_numbers)) is always 1, so this
    # allocates a length-1 list; the assignments below auto-grow it, so the
    # code works, but the intent was probably max(gen_line_numbers).
    result <- vector("list", length(max(gen_line_numbers)))
    for (i in seq_along(gen_line_indices)) {
        result[[i]] <- trait_matches[seq_len(gen_lengths[i])+gen_line_indices[i]]
    }
    result
}

# This function writes *.ga.generations checkpoint files.  It takes a filename
# and a data.frame, matrix, or list thereof.
write_ga_generations <- function(filename, traits, oldformat=FALSE) {
    # Write gzipped output when the file name ends in .gz; the connection is
    # closed on exit even if an error occurs.
    file_con <- if (length(grep("\\.gz$", filename))) gzfile(filename, "w") else file(filename, "w")
    on.exit(close(file_con))
    # A single table is treated as a single generation.
    if (is.data.frame(traits) || is.matrix(traits)) {
        traits <- list(traits)
    }
    for (i in seq_along(traits)) {
        # Sequence positions live in the "AA<num>" columns.
        poscols <- grep("^AA[0-9]+$", colnames(traits[[i]]))
        posnums <- as.integer(sub("AA", "", colnames(traits[[i]])[poscols]))
        posmat <- as.matrix(traits[[i]][,poscols])
        for (j in seq_len(ncol(posmat))) {
            if (oldformat) {
                # Old format: "<resnum>.<THREE-LETTER-CODE>"
                posmat[,j] <- paste(posnums[j], aa3[posmat[,j]], sep = ".")
            } else {
                # New format: "AA:<resnum>:<one-letter-code>"
                posmat[,j] <- paste("AA", posnums[j], posmat[,j], sep = ":")
            }
        }
        cat("generation ", i, "\n", sep="", file=file_con)
        cat(apply(posmat, 1, paste, collapse=" "), sep = "\n", file=file_con)
    }
}

# This function returns the fitness of a given set of entities as a one-column
# matrix.  `fitness_coef` is a named numeric vector of column weights; by
# default the "fitness" column is used with weight 1.
entities_fitness <- function(entities, fitness_coef = NULL) {
    if (is.null(fitness_coef)) {
        fitness_coef <- c(fitness = 1)
    }
    fitness_matrix <- as.matrix(entities[,names(fitness_coef),drop=FALSE])
    # Weighted sum of the selected fitness columns.
    fitness_matrix %*% fitness_coef
}

# This function takes an entities data frame as read by read_ga_entities and
# determines a position weight matrix for the sampled sequence positions.  It
# uses either a fitness cutoff above the sequence with the best fitness, or
# Boltzmann weighting of those energies.  The total fitness is calculated by
# weighting numeric data read along with the sequences.  A list of data frames
# can also be provided, in which case the PWM will be based on merging the
# sequences from all data frames; the minimum fitness from each individual
# data frame will be used as its reference.
# WARNING: This function assumes the levels of all factors are identical!
entities_pwm <- function(entities, temp_or_thresh, fitness_coef = NULL, type = c("boltzmann", "cutoff")) {
    type <- match.arg(type)
    # A bare list (not itself a data.frame) means several replicates are
    # pooled, each keeping its own reference fitness.
    entities_list <- is.list(entities) && !is.data.frame(entities)
    if (entities_list) {
        nrows <- sapply(entities, nrow)
        offsets <- c(0,cumsum(nrows))
        entities <- do.call(rbind, entities)
    }
    fitness <- entities_fitness(entities, fitness_coef)
    # Reference (minimum) fitness, computed per input data frame when pooled.
    if (entities_list) {
        min_fitness <- numeric(length(fitness))
        for (i in seq_along(nrows)) min_fitness[seq_len(nrows[i])+offsets[i]] <- min(fitness[seq_len(nrows[i])+offsets[i]])
    } else {
        min_fitness <- min(fitness)
    }
    if (type == "cutoff") {
        # Every sequence within the threshold of the best counts equally.
        weight <- fitness <= min_fitness+temp_or_thresh
    } else {
        if (temp_or_thresh != 0) {
            # Boltzmann weighting at "temperature" temp_or_thresh.
            weight <- exp(-(fitness-min_fitness)/temp_or_thresh)
        } else {
            # Zero temperature: only the best sequence(s) contribute.
            weight <- fitness == min_fitness
        }
    }
    pos_columns <- grep("^AA", colnames(entities))
    freqmat <- matrix(nrow = length(levels(entities[,1])), ncol = length(pos_columns))
    weight_sum <- sum(weight)
    for (i in seq_along(pos_columns)) {
        # Summed weight per residue type, normalized to frequencies; residue
        # types never observed at a position get frequency 0.
        freqmat[,i] <- tapply(weight, entities[,pos_columns[i]], sum)/weight_sum
        freqmat[is.na(freqmat[,i]),i] <- 0
    }
    rownames(freqmat) <- levels(entities[,1])
    #print(freqmat)
    freqmat
}

# This function takes a list of entities data frames and returns a list of
# position weight matrices, where each matrix corresponds to a single sequence
# position.  The matrices will have one column for every input data frame.
# If combine is used, the PWMs will be combined by weighting all sequences
# together.
# WARNING: This function assumes the levels of all factors are identical!
entities_pwms <- function(entitieslist, temp_or_thresh, fitness_coef = NULL, type = c("boltzmann", "cutoff"), combine=FALSE) {
  type <- match.arg(type)

  # One (initially zero-column) frequency matrix per "AA*" trait column; the
  # rows are the residue types, taken from the factor levels of the first
  # trait of the first data frame.
  n_restypes <- length(levels(entitieslist[[1]][, 1]))
  n_positions <- length(grep("^AA", colnames(entitieslist[[1]])))
  pwmlist <- rep(list(matrix(nrow = n_restypes, ncol = 0)), n_positions)

  # With combine=TRUE a single PWM is computed from all data frames pooled
  # (one column per position); otherwise every data frame contributes one
  # column per position.
  pwm_sources <- if (combine) list(entitieslist) else entitieslist
  for (src in pwm_sources) {
    freqmat <- entities_pwm(src, temp_or_thresh, fitness_coef, type)
    for (pos in seq_along(pwmlist)) {
      pwmlist[[pos]] <- cbind(pwmlist[[pos]], freqmat[, pos])
    }
  }
  pwmlist
}

# This function takes a list of entities data frames and returns a 3-d array
# of position weight matrices: dimension 1 ("aa") is the amino acid type,
# dimension 2 ("rep") the replicate, and dimension 3 ("pos") the sequence
# position.
entities_list_pwms <- function(entities, fitness_coef = c(1/2.5, 1/2.5, 1/2.5, 1), temp_or_thresh = 0.228, type = c("boltzmann", "cutoff")) {
  type <- match.arg(type)
  # Unnamed coefficients default to the state-1 multistate fitness components.
  if (is.null(names(fitness_coef))) {
    names(fitness_coef) <- paste0("state1_fitness_comp", seq_along(fitness_coef))
  }
  per_position <- entities_pwms(entities, temp_or_thresh, fitness_coef, type)
  # Residue numbers are recovered from the leading "AA<num>" trait columns.
  posnames <- gsub("AA", "", colnames(entities[[1]])[seq_along(per_position)])
  pwms <- array(
    do.call("c", per_position),
    dim = c(dim(per_position[[1]]), length(per_position)),
    dimnames = list(aa = rownames(per_position[[1]]), rep = NULL, pos = posnames)
  )
  pwms
}

# This function takes an array of pwms as returned by entities_list_pwms and
# collapses the array into a single PWM, using a provided percentile cutoff.
collapse_pwms <- function(pwms, percentile = .5) {
  # Quantile across replicates for every (residue, position) pair.
  collapsed <- apply(pwms, c(1, 3), quantile, percentile)

  # Smallest nonzero frequency seen for a (residue, position) pair across the
  # replicates, or NA when the residue never appeared at that position.
  smallest_nonzero <- function(freqs) {
    nonzero <- freqs[freqs != 0]
    if (length(nonzero)) min(nonzero) else NA
  }
  least_seen <- apply(pwms, c(1, 3), smallest_nonzero)

  # Fallback per-position distribution: an indicator of the residue(s) whose
  # smallest nonzero frequency is minimal at that position.
  fallback <- apply(least_seen, 2, function(x) as.numeric(!is.na(x) & x == min(x, na.rm = TRUE)))

  # Positions whose quantile column collapsed to all zeros get the fallback.
  for (pos in which(colSums(collapsed) == 0)) {
    collapsed[, pos] <- fallback[, pos]
  }

  # Renormalize every position so its frequencies sum to one.
  apply(collapsed, 2, function(x) x / sum(x))
}

# Extract the sequence from a PDB file.  The residue IDs
# (<chainID><resSeq><iCode>) are used as the element names; a blank chain
# identifier is rendered as "_".
pdb_sequence <- function(pdbpath) {
  # Read all lines, transparently handling gzip-compressed PDBs.
  if (length(grep(".gz$", pdbpath))) {
    pdb_con <- gzfile(pdbpath)
    pdb_lines <- readLines(pdb_con)
    close(pdb_con)
  } else {
    pdb_lines <- readLines(pdbpath)
  }

  # Only ATOM records carry residue identity.
  atom_lines <- grep("^ATOM", pdb_lines, value = TRUE)

  # PDB fixed columns: 18-20 = residue name, 22-27 = chain/resSeq/iCode.
  res_name <- substr(atom_lines, 18, 20)
  res_id <- substr(atom_lines, 22, 27)
  res_id <- gsub("^ ", "_", res_id)  # blank chain ID becomes "_"
  res_id <- gsub(" ", "", res_id)
  names(res_name) <- res_id

  # One entry per residue: keep the first atom record of each residue ID.
  res_name[!duplicated(res_id)]
}

# The function below converts a position weight matrix to a matrix of
# sequences with the same approximate distribution as the original PWM.
pwm_to_seqmat <- function(pwm, numseq=100) {
  # Deterministically build one column of residue codes whose counts
  # approximate the given frequency column.
  sample_column <- function(freqs) {
    # Step function mapping a point of [0, 1] to the index of the residue
    # whose cumulative-frequency interval contains it.
    pick <- stepfun(c(0, cumsum(freqs)), c(1, seq_along(freqs), length(freqs)))
    # Evaluate at the midpoints of numseq equal-width bins so the resulting
    # counts track the frequencies as closely as possible.
    grid <- seq(0, 1, length.out = numseq + 1)
    grid <- grid[-1] - mean(diff(grid)) / 2
    names(freqs)[pick(grid)]
  }
  # With zero-length data, matrix() fills with NA of the appropriate type;
  # every cell is overwritten below.
  seqmat <- matrix(character(), nrow = numseq, ncol = ncol(pwm))
  for (i in seq_len(ncol(pwm))) {
    seqmat[, i] <- sample_column(pwm[, i])
  }
  colnames(seqmat) <- colnames(pwm)
  seqmat
}

# Plot a character matrix on the current plot region (or the given xlim/ylim),
# with optional foreground (col) and background (bg) color matrices.  Columns
# run left to right along xlim and rows run along ylim.
plot_charmat <- function(charmat, col = NULL, bg = NULL, cex=1, xlim=par("usr")[1:2], ylim=par("usr")[3:4]) {
  # Cell boundary grid; coordinate vectors are laid out in column-major order
  # to match how R linearizes `charmat` in the text() call below.
  xlines <- seq(xlim[1], xlim[2], length.out=ncol(charmat)+1)
  xleft <- rep(xlines[-length(xlines)], each=nrow(charmat))
  xright <- rep(xlines[-1], each=nrow(charmat))
  ylines <- seq(ylim[1], ylim[2], length.out=nrow(charmat)+1)
  # BUG FIX: the y coordinates were previously replicated nrow(charmat) times
  # instead of ncol(charmat) times; for non-square matrices the coordinate
  # vectors had the wrong length and only rendered correctly through
  # accidental periodic recycling (drawing duplicate rectangles/labels).
  ybottom <- rep(ylines[-length(ylines)], ncol(charmat))
  ytop <- rep(ylines[-1], ncol(charmat))
  xcenter <- (xleft+xright)/2
  ycenter <- (ybottom+ytop)/2
  if (!is.null(bg)) {
    rect(xleft, ybottom, xright, ytop, col=bg, border=NA)
  }
  text(xcenter, ycenter, charmat, col = col, cex = cex)
}

# The function below plots a ranked table of amino acid types for each
# position.  It takes a PWM, an optional experimental PWM, and an optional
# wild type sequence.
plot_seqrank <- function(freq_mat, exp_freq_mat = NULL, wt_seq = NULL, star_mat = NULL, rank_line = 0, wt_col = "red", other_col = "black") {
    # Create three matrices of the dimensions of the pwm:
    #   char_mat    - residue names (1-character codes), ordered by increasing
    #                 rank of the residue in the pwm
    #   bg_freq_mat - the frequency used to shade each cell (taken from the
    #                 experimental PWM when one is supplied)
    #   col_mat     - text color names, defaulting to other_col; wild-type
    #                 residues are highlighted with wt_col
    char_mat <- matrix(nrow=nrow(freq_mat), ncol=ncol(freq_mat))
    col_mat <- matrix(other_col, nrow=nrow(freq_mat), ncol=ncol(freq_mat))
    bg_freq_mat <- matrix(nrow=nrow(freq_mat), ncol=ncol(freq_mat))
    # Loop over all columns (residue positions)
    for (i in seq_len(ncol(freq_mat))) {
        char_mat[,i] <- rownames(freq_mat)[order(freq_mat[,i])]
        if (!is.null(star_mat)) {
            # NOTE(review): rev() combined with name-indexing reorders the
            # star column to the plotted rank order -- verify star placement
            # against a known example before relying on it.
            star_mat[,i] <- rev(star_mat[char_mat[,i],i])
        }
        # Loop over all amino acids
        for (j in seq_len(nrow(freq_mat))) {
            if (is.null(exp_freq_mat)) {
                bg_freq_mat[j,i] <- freq_mat[char_mat[j,i],i]
            } else {
                bg_freq_mat[j,i] <- exp_freq_mat[char_mat[j,i],i]
            }
            if (!is.null(wt_seq)) {
                if (char_mat[j,i] == wt_seq[i]) col_mat[j,i] <- wt_col
            }
        }
    }

    # Color shading: bins of 10% up to the largest observed frequency, white
    # for the lowest bin.
    col_levels <- seq(0,1,by=.1)
    col_levels <- seq(0,ceiling(max(bg_freq_mat)/.1)*.1,by=.1)
    #cols <- gray(seq(1,0,length.out=length(col_levels)-1))
    cols <- rev(c(topo.colors(length(col_levels)-2), "white"))
    bg_mat <- matrix(cols[pmin(floor((bg_freq_mat)*length(cols)/max(col_levels))+1, length(cols))], nrow=nrow(bg_freq_mat))

    # Restore all graphical parameters when leaving the function.
    op <- par(no.readonly = TRUE)
    on.exit(par(op))

    # Split the device into the rank table (left) and the color key (right);
    # widths are computed in cm from the margins and character size.
    mar1 <- c(0.6, 2.7, 2.7, 0.4)
    mar2 <- c(0.6, 0.4, 2.7, 3.4)
    devwidth <- par("din")[1]*2.54
    charheight <- par("cin")[2]*2.54
    width1 <- (mar1[2]+mar1[4])*charheight
    width2 <- (mar2[2]+mar2[4])*charheight
    boxwidth <- (devwidth - sum(width1+width2))/(ncol(freq_mat)+1)

    layout(matrix(1:2, nrow=1,ncol=2), widths=c(width1+boxwidth*ncol(freq_mat),width2+boxwidth))

    # Left panel: the ranked residue table.  Note the reversed ylim so rank 1
    # is drawn at the top.
    par(mar=mar1, mgp=c(1.5, .25, 0), cex=1)
    plot(0, 0, type="n", xlim=c(0.5,0.5+ncol(freq_mat)), ylim=c(20.5,0.5), xaxs="i", yaxs="i", xaxt="n", yaxt="n", xlab="", ylab="")
    plot_charmat(char_mat, col_mat, bg_mat)
    mtext("Predicted Rank", 2, par("mgp")[1])
    axis(2, 1:20, tick=FALSE, las=2)
    mtext("Residue", 3, par("mgp")[1])
    # Position labels are drawn in two alternating passes to avoid dropping
    # overlapping axis labels.
    residx <- seq(1, ncol(freq_mat), by=2)
    axis(3, residx, colnames(freq_mat)[residx], tick=FALSE)
    if (ncol(freq_mat) >= 2) {
        residx <- seq(2, ncol(freq_mat), by=2)
        axis(3, residx, colnames(freq_mat)[residx], tick=FALSE)
    }
    box(lwd=.5)

    if (!is.null(star_mat)) {
        points(t(t(which(t(star_mat), arr.ind=TRUE))+c(.3,0)), pch="*")
    }

    # Optional dashed cutoff line below the given rank.
    if (rank_line) {
        abline(h=rank_line+0.5, lty="dashed")
    }

    # Right panel: the color key, vertically centered against the table.
    maradj <- (1-length(cols)/nrow(col_mat))*0.5*par("pin")[2]/par("cin")[2]
    mar2[1] <- mar2[1]+maradj
    mar2[3] <- mar2[3]+maradj
    par(mar=mar2, mgp=c(2.2, .25, 0), cex=1)
    plot.new()
    plot.window(xlim = c(0, 1), ylim = range(col_levels), xaxs = "i", yaxs = "i")
    rect(0, col_levels[-length(col_levels)], 1, col_levels[-1L], col = cols, lwd=.5)
    axis(4, col_levels[seq(1,length(col_levels),1)], paste(round(col_levels[seq(1,length(col_levels),1)]*100), "%", sep=""), tick=FALSE, las=2)
    bg_title <- "Predicted Frequency"
    if (!is.null(exp_freq_mat)) bg_title <- "Experimental Frequency"
    mtext(bg_title, 4, par("mgp")[1])
    box(lwd=.5)

    # Return the shading frequencies invisibly for downstream inspection.
    invisible(bg_freq_mat)
}
plot_gen_contrib <- function(entitieslist, generationslist, fitness_coef = c(1/2.5, 1/2.5, 1/2.5, 1), temp_or_thresh = 0.228, type = c("boltzmann", "cutoff"), main = "") {
    type <- match.arg(type)
    names(fitness_coef) <- paste("state1_fitness_comp", seq_along(fitness_coef), sep="")
    # gen_contrib[i, j]: fraction of run i's total PWM weight contributed by
    # sequences first seen in generation j.
    gen_contrib <- matrix(nrow=length(entitieslist), ncol=length(generationslist[[1]]))
    for (i in seq_along(entitieslist)) {
        fitness <- entities_fitness(entitieslist[[i]], fitness_coef)
        min_fitness <- min(fitness)
        # Same weighting scheme as entities_pwm: cutoff or Boltzmann.
        if (type == "cutoff") {
            weight <- fitness <= min_fitness+temp_or_thresh
        } else {
            if (temp_or_thresh != 0) {
                weight <- exp(-(fitness-min_fitness)/temp_or_thresh)
            } else {
                weight <- fitness == min_fitness
            }
        }
        weight <- weight/sum(weight)
        # Record, per entity, the earliest generation it appears in; iterating
        # the generations in reverse lets later assignments be overwritten by
        # earlier ones.
        first_gen <- integer(length(fitness))
        for (j in rev(seq_along(generationslist[[i]]))) {
            first_gen[generationslist[[i]][[j]]] <- j
        }
        for (j in seq_along(generationslist[[i]])) {
            gen_contrib[i,j] <- sum(weight[first_gen == j])
        }
    }
    colnames(gen_contrib) <- seq_along(generationslist[[1]])
    # One box per generation, over all runs.
    boxplot(as.data.frame(gen_contrib), xlab="Generation", ylab="Sequence Contribution", main=main, ylim=c(0,1), yaxt="n")
    axis(2, seq(0, 1, by=.25), labels=FALSE)
    axis(2, seq(0, 1, by=.5), seq(0, 1, by=.5), tick=FALSE, line=FALSE)
}
plot_seq_contrib <- function(entitieslist, fitness_coef = c(1/2.5, 1/2.5, 1/2.5, 1), temp_or_thresh = 0.228, type = c("boltzmann", "cutoff"), main = "") {
    type <- match.arg(type)
    # Upper bound of the plotted fitness range: for Boltzmann weighting, the
    # normalized fitness where the weight drops to about 1% (times 2); for
    # the cutoff scheme, three times the threshold.
    if (temp_or_thresh == 0) {
        maxfit <- 1
    } else if (type == "boltzmann") {
        maxfit <- -log(.01)*temp_or_thresh*2
    } else {
        maxfit <- 3*temp_or_thresh
    }
    # Top panel (20%): the weighting function; bottom panel (80%): the
    # per-backbone fitness distributions.
    layout(matrix(1:2, nrow=2), heights=c(0.2, 0.8))
    mar1 <- mar2 <- par("mar")
    mar1[1] <- 0.1
    mar2[3] <- 0.1
    par(mar=mar1)
    plot(0, 0, xlim=c(0, maxfit), type="n", ylim=c(0, 1), xaxt="n", yaxt="n", xlab="", ylab="Weight", main=main)
    if (type == "cutoff") {
        # Step function: weight 1 up to the threshold, then 0.
        segments(0, 1, temp_or_thresh, 1)
        segments(temp_or_thresh, 1, temp_or_thresh, 0)
        segments(temp_or_thresh, 0, maxfit*1.1, 0)
    } else {
        # Boltzmann decay curve.
        # NOTE(review): with temp_or_thresh == 0 this divides by zero; the
        # zero-temperature case may never reach here in practice -- verify.
        fitval <- seq(0, maxfit*1.1, length.out=100)
        points(fitval, exp(-fitval/temp_or_thresh), type="l")
    }
    axis(2, labels=FALSE)
    axis(2, c(0,1), tick=FALSE)
    par(mar=mar2)
    plot(0, 0, xlim=c(0, maxfit), type="n", ylim=c(length(entitieslist), 1), xlab="Normalized Fitness", ylab="Backbone")
    for (i in seq_along(entitieslist)) {
        fitness <- entities_fitness(entitieslist[[i]], fitness_coef)
        min_fitness <- min(fitness)
        # Fitness relative to the best sequence of this backbone; points past
        # the plotted range are omitted.
        norm_fit <- fitness-min_fitness
        plot_idx <- which(norm_fit < maxfit*1.1)
        points(norm_fit[plot_idx], rep(i, length(plot_idx)), pch=20, cex=.5)
    }
    if (type == "cutoff") abline(v=temp_or_thresh, lty="dashed")
}
# Main driver for a sequence-tolerance analysis of one directory of GA results.
#
# Reads all *.ga.entities files in `dirpath`, computes per-run position weight
# matrices (PWMs), collapses them into one PWM, writes the PWM and a FASTA
# sample of sequences, and renders boxplots / rank plots / contribution plots
# as PDF and PNG files prefixed with `prefix`.
#
# Args:
#   dirpath        directory containing the *.ga.entities files.
#   fitness_coef   weights for the fitness components (named state1_fitness_comp*).
#   temp_or_thresh Boltzmann temperature or cutoff, see plot_seq_contrib().
#   type           "boltzmann" or "cutoff" weighting.
#   percentile     percentile used when collapsing the per-run PWMs.
#   prefix         file-name prefix for every output file.
#   plotgen        also produce the generation-contribution plot (requires
#                  generation data to have been read).
#   plotseq        also produce the sequence-contribution plot.
#
# Relies on helpers defined elsewhere in this file: read_ga_entities_list,
# entities_list_pwms, collapse_pwms, pdb_sequence, pwm_to_seqmat,
# plot_seqrank, plot_gen_contrib, plot_seq_contrib, and the `aa1` lookup
# (3-letter -> 1-letter amino-acid codes -- TODO confirm).
process_seqtol <- function(dirpath = ".", fitness_coef = c(1/2.5, 1/2.5, 1/2.5, 1),
                           temp_or_thresh = 0.228, type = c("boltzmann", "cutoff"),
                           percentile = .5, prefix = "specificity",
                           plotgen = FALSE, plotseq = TRUE) {
  type <- match.arg(type)
  names(fitness_coef) <- paste("state1_fitness_comp", seq_along(fitness_coef), sep="")
  entities <- read_ga_entities_list(dirpath, readgen=plotgen)
  pwms <- entities_list_pwms(entities, fitness_coef, temp_or_thresh, type)
  pwm <- collapse_pwms(pwms, percentile)
  posnames <- colnames(pwm)
  inputseq <- NULL
  # Try to recover the input (wild-type) sequence from the seqtol stdout log;
  # fall back to a name derived from the first entities file.
  seqtoloutput <- file.path(dirpath, "seqtol_1_stdout.txt")
  if (!file.exists(seqtoloutput)) {
    seqtoloutput <- file.path(dirpath, sub(".ga.entities.*", "_seqtol.out", names(entities)[1]))
  }
  if (file.exists(seqtoloutput)) {
    seqtolcmd <- readLines(seqtoloutput, 2)
    seqtolcmd <- grep("core.init: command", seqtolcmd, value=TRUE)
    if (length(seqtolcmd)) {
      # Extract the "-s <pdbfile>" argument from the logged command line.
      startpdbfile <- gsub("^.+ -s ([^ ]+) .+$", "\\1", seqtolcmd)
      if (!file.exists(startpdbfile)) {
        startpdbfile <- paste(startpdbfile, ".gz", sep="")
      }
      if (file.exists(startpdbfile)) {
        # Parse the file into a set of unique residue names/position ID pairs
        # (e.g. GLU with name A318, ...)
        pdbseq <- pdb_sequence(file.path(dirpath, startpdbfile))
        # Index pdbseq by the PWM column positions and store the one-character
        # residue codes (e.g. E) as the wild-type sequence.
        inputseq <- aa1[pdbseq[as.integer(posnames)]]
        # Relabel PWM columns with the PDB position IDs (e.g. A318).
        colnames(pwm) <- posnames <- names(pdbseq)[as.integer(posnames)]
      }
    }
  }
  # Write pwm to a tab-separated file (no quoting); row.names defaults to TRUE
  # so col.names=NA adds a blank header cell for the row-name column.
  write.table(pwm, paste(prefix, "_pwm.txt", sep=""), quote=FALSE, sep="\t", col.names=NA)
  # Sample sequences from the PWM and dump them as a FASTA file (for logos).
  seqmat <- pwm_to_seqmat(pwm)
  cat(paste(">", seq_len(nrow(seqmat)), "\n", apply(seqmat, 1, paste, collapse=""), sep=""),
      file=paste(prefix, "_sequences.fasta", sep=""), sep="\n")
  plotwidth <- 7
  plotheight <- 3
  pointsize <- 12
  # Open a PDF and a tall multi-panel PNG device simultaneously and draw each
  # residue's boxplot on both (plus a separate per-residue PNG).
  pdf(paste(prefix, "_boxplot.pdf", sep=""), width=plotwidth, height=plotheight, pointsize=pointsize)
  pdfdev <- dev.cur()
  png(paste(prefix, "_boxplot.png", sep=""), width=plotwidth*72, height=plotheight*72*length(posnames), pointsize=3/2*pointsize)
  pngdev <- dev.cur()
  par(mfrow=c(length(posnames), 1))
  for (i in seq_along(posnames)) {
    for (imgtype in c("pdf", "png", "pngsep")) {
      if (imgtype == "pdf") dev.set(pdfdev)
      if (imgtype == "png") dev.set(pngdev)
      if (imgtype == "pngsep") png(paste(paste(prefix, "_boxplot_", sep=""), posnames[i],".png", sep=""), width=plotwidth*72, height=plotheight*72, pointsize=pointsize)
      par(mar = c(2.8, 2.8, 1.5, 0.1), mgp = c(1.7, 0.6, 0))
      main <- paste("Residue", posnames[i], "Sequence Tolerance Boxplot")
      plot(0, 0, type="n", xlim=c(1,20), ylim=c(0,1), main=main, xlab="Amino Acid", ylab="Predicted Frequency", axes=FALSE)
      abline(h=seq(0, 1, by=.2), col="gray")
      # Distribution across runs for each of the 20 amino acids at position i.
      boxplot(as.data.frame(t(pwms[,,i])), col="white", add=TRUE)
      # Blue crosses: the collapsed PWM value.
      points(1:20, pwm[,i], pch=4, col="blue")
      if (imgtype == "pngsep") dev.off()
    }
  }
  dev.off(pdfdev)
  dev.off(pngdev)
  # Sequence-rank plot, sized to the number of PWM columns.
  seqrank_width <- 2.921+(1+ncol(pwm))*.2
  seqrank_height <- 4
  png_scale <- 1.5
  pdf(paste(prefix, "_seqrank.pdf", sep=""), width=seqrank_width, height=seqrank_height, pointsize=10)
  # inputseq (when recovered above) is the wild-type one-letter sequence.
  plot_seqrank(pwm, wt_seq=inputseq, rank_line=5)
  dev.off()
  png(paste(prefix, "_seqrank.png", sep=""), width=seqrank_width*72*png_scale, height=seqrank_height*72*png_scale, pointsize=10*png_scale)
  plot_seqrank(pwm, wt_seq=inputseq, rank_line=5)
  dev.off()
  if (plotgen) {
    pdf(paste(prefix, "_gencontrib.pdf", sep=""), width=7, height=3, pointsize=pointsize)
    par(mar=c(2.7,2.7,1.5,0.2), mgp=c(1.7, 0.6, 0))
    plot_gen_contrib(entities, attr(entities, "generations"), fitness_coef, temp_or_thresh, type, "Generation Contributions")
    dev.off()
  }
  if (plotseq) {
    pdf(paste(prefix, "_seqcontrib.pdf", sep=""), width=6, height=6, pointsize=pointsize)
    par(mar=c(2.7,2.7,1.5,0.2), mgp=c(1.7, 0.6, 0))
    plot_seq_contrib(entities, fitness_coef, temp_or_thresh, type, "Sequence Contributions")
    dev.off()
  }
}
981a4396b4b4c3d82dbfe7b1e8f3deee275aba50
f72f31c41043c5735d7beb81a0e43d1ae4400d45
/R/format.R
8c508347fc7bd02f5d3b32b7dcb0bdf9e5d65e33
[ "Apache-2.0" ]
permissive
kevinykuo/pins
dbd07e4fb87270b9f9ed563636907819e27dbcea
ac74f7066d0d2b981b4bae93755d7b41c81e53e2
refs/heads/master
2020-07-02T19:51:57.229387
2019-08-10T01:53:38
2019-08-10T01:53:38
201,645,472
1
0
Apache-2.0
2019-08-10T15:05:30
2019-08-10T15:05:30
null
UTF-8
R
false
false
294
r
format.R
# Convert a data frame to a tibble when possible and wanted.
#
# Non-data-frame inputs are returned unchanged. A data frame is converted to a
# tibble only when the tibble package is installed AND the user has not opted
# out via options(pins.tibble = FALSE). tibble is accessed via asNamespace()
# so it is never a hard dependency.
format_tibble <- function(data) {
  # Anything that is not a data frame passes through untouched.
  if (!is.data.frame(data)) {
    return(data)
  }

  tibble_installed <- length(find.package("tibble", quiet = TRUE)) > 0
  tibble_enabled <- !identical(getOption("pins.tibble"), FALSE)

  if (tibble_installed && tibble_enabled) {
    convert <- get("as_tibble", envir = asNamespace("tibble"))
    convert(data)
  } else {
    data
  }
}
e094fed9e1d8be1389a93e6a97e17b7788b0545f
0186581204db672fb8e14684910cd5ce994aa7ef
/execution.R
e2d8538f6f16435fb4c0499370b3312859624b5b
[]
no_license
Cyrille620/FAO
49bba230599e838fe084b7df92f3b58d76a09296
b5abf10c490dd658cebf3c84ecb9db3a6142440c
refs/heads/main
2023-03-02T17:58:45.332751
2021-02-03T00:57:04
2021-02-03T00:57:04
335,103,682
0
0
null
null
null
null
UTF-8
R
false
false
93
r
execution.R
# Launch the FAO Shiny application in showcase mode (code shown next to app).
# NOTE(review): the path below is a hard-coded absolute Windows path and only
# works on the original author's machine -- consider a relative path or
# here::here() before reuse.
library(shiny)
runApp("C:\\Users\\Cyrille\\Documents\\FAO\\test", display.mode = "showcase")
96a68bb586ae4bc34ab1492afba841e842c23bcd
81ad7a31f9b17b0247b9b8ae9e70734e81bb537a
/OtherCode/demo.R
1261c137c872472f82503b7384261ff20a0c9e8d
[]
no_license
avvo/Traffic-Potential
d7c9d4c1becc77253b3d5034197b8efda33fef01
a5bec2b3c948b44f78f5239f65bbe55c8b915b49
refs/heads/master
2021-01-19T16:42:19.537831
2016-11-11T20:50:11
2016-11-11T20:50:11
73,510,345
0
0
null
null
null
null
UTF-8
R
false
false
363
r
demo.R
# Demo: cluster counties on citizenship rate and median household income,
# then visualize the clustering.
# NOTE(review): `ctylevel` (a data.table) and `km()` (a k-means wrapper
# returning a list of data and a clustering) are defined elsewhere in the
# project -- this script does not run standalone.
library(cluster)
library(fpc)
# Two derived columns: non-citizen share of population, and median income.
tempd=ctylevel[,.(cit.not_citizen/pop.base_count,hh.median_income)]
plot(tempd,pch='+')
# Cluster into 5 groups (km presumably wraps kmeans -- TODO confirm).
del=km(tempd,5)
plotcluster(del[[1]][,-3,with=FALSE],del[[2]]$cluster,method = 'anc',clnum=5,pch='+')
# Example parameter record for a downstream call.
input=data.table(ncluster=5,statename='washington',practice_area='Immigration',variables=paste('pop.base_count','hh.median_income',sep=','))
023651f658248a8275122ea43c94479f0a83c4ea
7895999e1d7fd1ca93dac6d3b7a5309014e50fa9
/R/plots_from_ldats.R
e7a0d3c41f637161a9d9f262cfcd14e09e98f25b
[ "MIT" ]
permissive
wlandau/MATSS-pipeline
7f4a7c54311803a3a16e42976290f93d0e0efe6c
fc309800ef79c9800d35aea7e2e6c0985dc418c3
refs/heads/master
2020-04-20T08:46:07.790847
2019-02-01T19:02:20
2019-02-01T19:02:20
null
0
0
null
null
null
null
UTF-8
R
false
false
3,116
r
plots_from_ldats.R
#' @name plot_lda
#' @title Plot LDA topic composition and time series
#' @description Plot the species composition of LDA topics, and the
#'   topic composition of the community over the time series.
#'
#' @param x an LDATS LDA object (slots \code{gamma}, \code{beta},
#'   \code{terms} are read).
#' @param observed_dates a vector of dates (\code{"\%d/\%m/\%Y"}) for the
#'   observed samples; only the year is shown on the axis. If
#'   \code{observed_dates == NULL}, samples will be labelled sequentially
#'   starting from 1.
#' @param ... currently unused -- NOTE(review): accepted but never forwarded.
#' @param select_samples optional index vector restricting which rows of
#'   \code{gamma} (samples) are plotted.
#'
#' @export
plot_lda <- function(x, observed_dates = NULL, ...,select_samples = NULL){
  gamma <- x@gamma          # samples x topics proportions
  beta <- exp(x@beta)       # beta is stored on the log scale
  nobs <- nrow(gamma)
  ntopics <- ncol(gamma)
  nwords <- ncol(beta)
  # Per-word ordering of topic contributions, used to stack rectangles.
  beta_order <- apply(beta, 2, order)
  beta_sorted <- apply(beta, 2, sort)
  if(!is.null(select_samples)) {
    gamma <- gamma[select_samples, ]
    nobs <- nrow(gamma)
  }
  if (is.null(observed_dates)) {
    observed_dates <- seq(nobs)
  } else {
    # Reduce full dates to the year for axis labelling.
    observed_dates <- as.numeric(format(as.Date(observed_dates, format="%d/%m/%Y"),'%Y'))
  }
  if(!is.null(select_samples)) observed_dates <- observed_dates[select_samples]
  gamma <- cbind(gamma, observed_dates)
  # Colour-blind-friendly palette.
  cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
  cols <- cbPalette[1:ntopics]
  # Precompute one stacked rectangle per (word, topic) for the top panel.
  counter <- 1
  rect_mat <- matrix(NA, nrow = nwords * ntopics, ncol = 4)
  rect_col <- rep(NA, length = nwords * ntopics)
  for (i in 1:nwords){
    x1 <- i - 0.4
    x2 <- i + 0.4
    y1 <- 0
    y2 <- 0
    for (j in 1:ntopics){
      y1 <- y2
      y2 <- y1 + beta_sorted[j, i]   # stack in ascending order of contribution
      rect_mat[counter, ] <- c(x1, y1, x2, y2)
      rect_col[counter] <- cols[beta_order[j, i]]
      counter <- counter + 1
    }
  }
  # Bottom 70% of the figure: topic proportions through time.
  par(fig = c(0, 1, 0, 0.7), mar = c(3.25, 4, 1, 1))
  plot(gamma[ , 1], type = "n", bty = "L", xaxt = 'n', ylab = "", las = 1, ylim = c(0, 1))
  mtext(side = 1, "Observation", line = 2.2, cex = 1.25)
  mtext(side = 2, "Proportion", line = 2.8, cex = 1.25)
  for (i in 1:ntopics){
    points(gamma[ , i], col = cols[i], type = "l", lwd = 1)
  }
  axis(side = 1, at = seq(1, nobs, by = 10), labels = observed_dates[seq(1, nobs, by = 10)])
  # Top-left panel: stacked species composition of each topic.
  par(fig = c(0, 0.85, 0.7, 1), new = TRUE, mar = c(1, 3, 1, 0))
  max_y <- max(rect_mat[,4]) * 1.05
  plot(1, 1, type = "n", bty = "L", xlab = "", ylab = "", las = 1, ylim = c(0, max_y), xlim = c(1, nwords), xaxt = "n", cex.axis = 0.75)
  mtext(side = 2, "Total Proportion", line = 2.125, cex = 0.75)
  for(i in 1:(nwords * ntopics)){
    rect(rect_mat[i, 1], rect_mat[i, 2], rect_mat[i, 3], rect_mat[i, 4], col = rect_col[i])
  }
  axis(2, at = seq(0, max_y, 0.1), labels = FALSE, tck = -0.02)
  mtext(side = 1, at = seq(1, nwords, 1), text = x@terms, tck = 0, cex = 0.5, line = 0)
  # Top-right panel: colour legend for the topics.
  par(fig = c(0.85, 1, 0.7, 1), new = TRUE, mar = c(0, 0, 0, 0))
  plot(1, 1, type = "n", bty = "n", xlab = "", ylab = "", xaxt = "n", yaxt = "n", ylim = c(0, 1), xlim = c(0,1))
  ypos <- (0.9 / ntopics) * (ntopics:1)
  ttext <- paste("Topic ", 1:ntopics, sep = "")
  for (i in 1:ntopics){
    text(ttext[i], x = 0.1, y = ypos[i], col = cols[i], adj = 0, cex = 0.75)
  }
}
d8bcd92853dcf41204cc59d975d83084096d9f74
98963e32d96bf2d2aa901ccc7ea325031d418fff
/codes/regre func-glm.R
794949d9d83bfd3d9cfcdc95318a21195455e28d
[]
no_license
tozammel/eBayPaper
9fb9de7d2be1eed35558f1ecd5606523b07f299b
7d297807d4ac2d6326e58bdd4b43ca946f1f493a
refs/heads/master
2020-03-27T11:55:06.844821
2016-03-11T17:50:35
2016-03-11T17:50:35
null
0
0
null
null
null
null
UTF-8
R
false
false
6,808
r
regre func-glm.R
# Fit a sequence of Poisson GLMs over a grid of time points for eBay auction
# data; one model per grid point, using auction covariates and a
# self-exciting term.
#
# NOTE(review): this function reads several globals -- `plotpoints` (the time
# grid; the parameter is misspelled `plotpoins` and never used), `binary`
# (the response matrix), and `StepCheck()` (step-function lookup) -- confirm
# they are defined before calling.
#
# Args:
#   data     list of auctions; each element has $Value, $Price, $Time,
#            $Reserve, $Condition, $Seller, $Bidder, $Early, $Jump.
#   plotpoins  (sic) unused; the global `plotpoints` is used instead.
#   r        decay rate of the self-exciting (Hawkes-style) term.
#   reduced  if TRUE, fit a reduced covariate set.
# Returns: a list of fitted glm objects, one per grid point.
reg.ebay <- function(data = EBAY, plotpoins, r, reduced = FALSE){
  NT.grid <- length(plotpoints)
  N.data <- length(data)
  ###############################################
  # Average final price by item Value group (1 vs 0), used as a proxy for
  # willingness-to-pay (WTP) below.
  p1 <- p2 <- n1 <- n2 <- 0
  for(i in 1:length(data)){
    if(data[[i]]$Value == 1){
      p1 <- p1 + max(data[[i]]$Price)
      n1 <- n1 + 1
    }
    if(data[[i]]$Value == 0){
      p2 <- p2 + max(data[[i]]$Price)
      n2 <- n2 + 1
    }
  }
  p1 <- p1 / n1
  p2 <- p2 / n2
  ###############################################
  Y <- value <- reserve <-condition <- seller <- bidder <- early <- jump <- open <- PR <- rep(0, N.data)
  FuncReg <- NULL
  last.coef <- rep(1,10)
  for( j in 1:NT.grid){
    t <- rep(0, N.data)   # self-exciting covariate at grid point j
    for(i in 1:N.data){
      #Y[i] <- dens[i, j] # let Y[i] be index function?
      Y[i] <- binary[i,j]
      value[i] <- data[[i]]$Value
      reserve[i] <- data[[i]]$Reserve
      condition[i] <- data[[i]]$Condition
      seller[i] <- data[[i]]$Seller
      bidder[i] <- StepCheck(data[[i]]$Time, data[[i]]$Bidder, plotpoints[j])
      early[i] <- data[[i]]$Early
      jump[i] <- StepCheck(data[[i]]$Time, data[[i]]$Jump, plotpoints[j])
      open[i] <- data[[i]]$Price[1]
      current.time <- plotpoints[j]
      current.num <- length(which(data[[i]]$Time <= current.time))
      # since Time starts at 0, current.num is always at least 1
      for(k in 1:current.num){
        t[i] <- t[i] + exp(r * (current.time - data[[i]]$Time[k]))
      }
      current.p <- StepCheck(data[[i]]$Time, data[[i]]$Price, plotpoints[j])
      if(data[[i]]$Value == 1) WTP <- p1
      if(data[[i]]$Value == 0) WTP <- p2
      # Price-relative covariate: estimated WTP minus current price.
      PR[i] <- WTP - current.p
    }
    allv <- data.frame(Self.Exciting=t, Price.Relative=PR, Value=value, Reserve = reserve, Condition=condition, Early.Bidding=early, Seller = seller, Bidder = bidder, Jump.Bidding=jump, Opening.Price=open)
    if(reduced){
      allv <- data.frame(Self.Exciting=t, Price.Relative=PR, #Reserve = reserve,
                         Condition=condition, Early.Bidding=early) #Opening.Price=open
    }
    # allv <- data.frame(Self.Exciting=t, Price.Relative=PR, Value=value,
    #                    Condition=condition)
    # No intercept (-1); Poisson GLM at this grid point.
    FuncReg[[j]] <- glm(Y~.-1,data=allv,na.action=na.exclude, family = poisson)
    last.coef <- FuncReg[[j]]$coefficients
  }
  return(FuncReg)
}

# Extract (and optionally plot) the adjusted R-squared of each fitted model.
# NOTE(review): adj.r.squared is NULL for glm summaries -- presumably a
# leftover from an lm version; confirm before relying on the output.
plot.rsq <- function(FuncReg, plotpoints, noplot){
  rsq <- rep(0, length(FuncReg))
  for(i in 1:length(FuncReg)){
    rsq[i] <- summary(FuncReg[[i]])$adj.r.squared
  }
  if(!noplot) plot(plotpoints, rsq, type = "l", ylim = c(0, 1))
  return(rsq)
}

# Extract (and optionally plot, from grid point 240 onward) the p-value of
# coefficient `order` across the fitted models. Aliased coefficients are
# left at 0.
plot.beta <- function(FuncReg, plotpoints, order, noplot){
  beta <- rep(0, length(FuncReg))
  for(i in 1:length(FuncReg)){
    if(summary(FuncReg[[i]])$aliased[order] == FALSE){
      beta[i] <- summary(FuncReg[[i]])$coefficients[order, 4]
    }
  }
  if(!noplot) plot(plotpoints[240:length(plotpoints)], beta[240:length(plotpoints)],
                   type = "l",
                   main = rownames(summary(FuncReg[[100]])$coefficients)[order],
                   xlab = "time",
                   ylab = "p-value",
                   ylim = c(0, 1),
                   cex.main = 0.7)
  return(beta)
}

# Extract coefficient `order` and its standard error across the models.
# With noplot=FALSE returns a ggplot of the coefficient with a 95% ribbon
# (from grid point 240 onward); with noplot=TRUE returns cbind(beta, beta.sd).
plot.beta.coef <- function(FuncReg, plotpoints, order, noplot){
  beta <- rep(0, length(FuncReg) )
  beta.sd <- rep(0, length(FuncReg) )
  for(i in 1:length(FuncReg)){
    if(summary(FuncReg[[i]])$aliased[order] == FALSE){
      beta[i] <- summary(FuncReg[[i]])$coefficients[order, 1]
      beta.sd[i] <- summary(FuncReg[[i]])$coefficients[order, 2]
    }
  }
  if(!noplot){
    y.min <- min((beta - 1.96*beta.sd)[240:length(plotpoints)], 0)
    y.max <- max((beta + 1.96*beta.sd)[240:length(plotpoints)])
    pl <- data.frame(xx = plotpoints[240:length(plotpoints)],
                     yy = beta[240:length(plotpoints)],
                     yy.min = beta[240:length(plotpoints)] - 1.96 * beta.sd[240:length(plotpoints)],
                     yy.max = beta[240:length(plotpoints)] + 1.96 * beta.sd[240:length(plotpoints)])
    ppp <- ggplot(pl, aes(xx))+
      geom_line(aes(y = yy), color = "blue")+
      geom_ribbon(aes(ymin = yy.min, ymax = yy.max), alpha = 0.2)+
      geom_hline(yintercept = 0, color = "red") +
      xlab("time")+
      ylab("coefficient")+
      ggtitle(rownames(summary(FuncReg[[100]])$coefficients)[order])
    # (The original source kept a long commented-out base-graphics version of
    # this confidence-band plot here; it has been condensed to this note.)
  }
  if(noplot) return(cbind(beta, beta.sd))
  if(!noplot) return(ppp)
}

# Lay out several ggplot objects on one page using the grid package.
# Pass plots via ... or `plotlist`; `cols` or an explicit `layout` matrix
# controls placement. (`file` is accepted but unused.)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  require(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
bfc01821f6909f25bc7d2a2857c4f643860445b8
6cddbcf9922aeb1138d8b3388fe9f3d181bbad82
/R/status.R
fbccc8329d299fba528632e2321acea702b55251
[]
no_license
waynenilsen/prairie
92d444a6332a3a1a7e637bc696d4957d7abcdf90
5eebd679eb6c2e4e11d83b8078d6d582cc5a5dbe
refs/heads/master
2021-01-12T04:30:09.772039
2016-01-23T19:50:45
2016-01-23T19:50:45
77,629,323
0
0
null
2016-12-29T17:41:11
2016-12-29T17:41:10
null
UTF-8
R
false
false
2,300
r
status.R
#' HTTP Response Status Code
#'
#' Get or set the status code of a \code{response} object.
#'
#' @export
#' @name status
NULL

#' @param x An \R object
#' @export
#' @rdname status
status <- function(x) UseMethod('status')

#' @param value HTTP status code, 1xx through 5xx
#' @export
#' @rdname status
`status<-` <- function(x, value) UseMethod('status<-')

#' @export
#' @rdname status
status.response <- function(x) {
  x$status
}

#' @export
#' @rdname status
`status<-.response` <- function(x, value) {
  # NOTE(review): the getter above reads `x$status` while this setter writes
  # `x$status_code`; one of the two field names is likely wrong -- confirm
  # against the response constructor before changing either.
  x$status_code <- value
  invisible(x)
}

#' Status Code Reason Phrase
#'
#' Get the corresponding reason phrase for a status code.
#'
#' @param status_code An HTTP status code (numeric or character).
#'
#' @return
#'
#' The RFC 2616 reason phrase for \code{status_code}; if \code{status_code}
#' is not found the empty string is returned.
#'
#' @keywords internal
#' @export
#' @examples
#' reason_phrase(200)
#' reason_phrase('404')
#'
#' reason_phrase(531)
reason_phrase <- function(status_code) {
  assert_that(is.numeric(status_code) || is.character(status_code))
  # switch() falls through to the final unnamed value ("") for unknown codes.
  switch(
    as.character(status_code),
    '100' = "Continue",
    '101' = "Switching Protocols",
    '200' = "OK",
    '201' = "Created",
    '202' = "Accepted",
    '203' = "Non-Authoritative Information",
    '204' = "No Content",
    '205' = "Reset Content",
    '206' = "Partial Content",
    '300' = "Multiple Choices",
    '301' = "Moved Permanently",
    '302' = "Found",
    '303' = "See Other",
    '304' = "Not Modified",
    '305' = "Use Proxy",
    '307' = "Temporary Redirect",
    '400' = "Bad Request",
    '401' = "Unauthorized",
    '402' = "Payment Required",
    '403' = "Forbidden",
    '404' = "Not Found",
    '405' = "Method Not Allowed",
    '406' = "Not Acceptable",
    '407' = "Proxy Authentication Required",
    '408' = "Request Timeout",
    '409' = "Conflict",
    '410' = "Gone",
    '411' = "Length Required",
    '412' = "Precondition Failed",
    '413' = "Request Entity Too Large",
    '414' = "Request-URI Too Long",
    '415' = "Unsupported Media Type",
    # Fixed typo: was "Requested Range Not Satisifable" (RFC 2616 spelling).
    '416' = "Requested Range Not Satisfiable",
    '417' = "Expectation Failed",
    '500' = "Internal Server Error",
    '501' = "Not Implemented",
    '502' = "Bad Gateway",
    '503' = "Service Unavailable",
    '504' = "Gateway Timeout",
    '505' = "HTTP Version Not Supported",
    ""
  )
}
05b584b10dcded35a556e4f3e67e4ea13bf6cef8
99cded7454d091c0e0ddbe32e096ea87ede0eeab
/cachematrix.R
016b0f49888cc423fe2bdbde69c810816d3c145d
[]
no_license
Pik000/ProgrammingAssignment2
5139b94f683ee71758d07aae433b77f2e370fd30
b11a710dcb8e604e97fed86f8c865833ca80c7c9
refs/heads/master
2021-01-18T20:29:08.787473
2016-09-19T06:23:59
2016-09-19T06:23:59
68,566,886
0
0
null
2016-09-19T03:43:36
2016-09-19T03:43:35
null
UTF-8
R
false
false
1,259
r
cachematrix.R
## This script creates a matrix wrapper that caches the inverse of the matrix.
## makeCacheMatrix builds the wrapper; cacheSolve returns the inverse,
## computing it only on the first call and serving the cached copy afterwards.

# Create a cache-aware matrix wrapper.
#
# Args:
#   x  a (square, invertible) matrix.
# Returns: a list of four closures sharing state via <<-:
#   set(y)            replace the matrix and invalidate the cached inverse
#   get()             return the stored matrix
#   setInverse(inverse) store a computed inverse in the cache
#   getInverse()      return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL   # new matrix invalidates any cached inverse
  }
  get <- function() x
  # BUGFIX: this previously was `function() inv <<- solve(x)` (no argument),
  # so cacheSolve's call x$setInverse(inv) failed with "unused argument".
  # It now stores the inverse supplied by the caller, as the cache contract
  # requires.
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

# Return the inverse of the matrix wrapped by makeCacheMatrix.
#
# If the inverse has already been computed it is retrieved from the cache
# (with a message); otherwise it is computed with solve(), cached, and
# returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if(!is.null(inv)) {
    message("retrieving inverse matrix")
    return(inv)
  }
  ma <- x$get()
  inv <- solve(ma, ...)
  x$setInverse(inv)
  inv
}
64443f1d0c8332df113bc3f0ae11752f569520ee
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/moko/examples/max_EHVI.Rd.R
3b6e947990a7bbff0230ccedb3c0ea4109572697
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
474
r
max_EHVI.Rd.R
# Extracted Rd example for moko::max_EHVI (not standalone: requires the moko
# package, which provides nowacki_beam and mkm).
library(moko)

### Name: max_EHVI
### Title: max_EHVI: Maximization of the Expected Hypervolume Improvement
###   criterion
### Aliases: max_EHVI

### ** Examples

# ------------------------
# The Nowacki Beam
# ------------------------
# Random design of experiments on the unit square (non-deterministic: uses
# sample() without a seed).
n <- 20
d <- 2
doe <- replicate(d,sample(0:n,n))/n
# Evaluate the two-objective Nowacki beam problem on each design point.
res <- t(apply(doe, 1, nowacki_beam, box = data.frame(b = c(10, 50), h = c(50, 250))))
# Fit the multi-objective kriging model and maximize expected hypervolume
# improvement.
model <- mkm(doe, res, modelcontrol = list(objective = 1:2, lower=c(0.1,0.1)))
max_EHVI(model)
9b6836eebaef6e9a8265f39296b9565d98342d49
84cb934fc740af94806f96e89c916b04458237b4
/code/plot_resampling_shared_GO_pvalues.R
abd738be2194d86e7342561610e5de079c6889a3
[]
no_license
maudf/gtex_condor
007bb97d2d2afd6b74d55575cd7d6939b9836883
3942080f94a788bafa522c7d0e3f7b411054b1eb
refs/heads/master
2023-07-16T01:00:02.435731
2021-09-03T15:29:19
2021-09-03T15:29:19
113,999,734
5
0
null
null
null
null
UTF-8
R
false
false
3,602
r
plot_resampling_shared_GO_pvalues.R
### plot_resampling_shared_GO_pvalues.R
###
### Compute empirical (resampling-based) p-values for GO enrichment results
### shared across tissues, write them back into the GO results table, and
### plot resampled p-value distributions for GO:0010468.
###
### NOTE(review): depends on variables sourced from
### code/variables_definition.R (FDRcis, FDRtrans, window, cluster.dir,
### tissue.file, resampling.GO.dir, figure.dir) -- not standalone.

### Load variables
source("code/variables_definition.R")
args = commandArgs(trailingOnly=TRUE)

### Set variables
genes.file <- paste0("all_tissues_genes_fdr", FDRcis, FDRtrans, "_", window, "MB.Rdata")
go.results.txt <- paste0('alltissues_fdr', FDRcis, FDRtrans, "_", window, 'MB_go_results_bp.txt')

### Load data
load(paste0(cluster.dir, genes.file))   # provides `genes`
load(tissue.file)                       # provides `Tissues`
all.tis <- names(genes)
tislist<-rownames(Tissues)
names(tislist)<-as.character(Tissues[,1])
tislist <- tislist[tislist %in% all.tis]
go.bp <- read.table(paste0(cluster.dir, go.results.txt), header=T, sep="\t", stringsAsFactors=F)
# Keep significant, enriched terms shared by at least 12 tissues.
a <- go.bp[go.bp$Tissue %in% names(tislist) & go.bp$p.adjust<=0.05 & go.bp$OddsRatio>1 & go.bp$NumberTissues>=12,]
shared.com <- tapply(a$com, a$Tissue, unique)
names(shared.com) <- tislist[names(shared.com)]
a$Tissue <- tislist[a$Tissue]

### Functions
# Empirical p-value for one GO result row: the fraction of resampled
# p-values at least as small as the observed one. Rows whose GO term never
# appears in the resampling (or never beats the observed value) get the
# detection-limit string "<1/iter".
compute.resample.pval <- function(x, resample){
  tis <- x[1]
  com <- x[2]
  goid <- x[3]
  pval.real <- as.numeric(x[5])
  iter <- as.numeric(x[13])
  pval.res <- paste0("<", 1/iter)
  if (goid %in% names(resample[[`tis`]][[`com`]])){
    tmp <- resample[[`tis`]][[`com`]][[`goid`]]
    # Pad missing iterations with p = 1 (term absent in those resamples).
    if (length(tmp)<iter){
      tmp <- c(tmp, rep(1, (iter-length(tmp))))
    }
    s <- sum(tmp<=pval.real)
    if(s>0){pval.res <- s/iter}
  }
  return(pval.res)
}

### Read resampling files and store p-values, nested as
### resample[[tissue]][[community]][[GOID]] -> vector of resampled p-values.
resample <- list()
for(i in 1:length(tislist)){
  tis <- tislist[i]
  print(tis)
  resample.files <- list.files(resampling.GO.dir, pattern=paste0(tislist[i], "_resampling_GO_shared_results_*"))
  tmp <- NULL
  for(f in resample.files){
    print(f)
    tmp <- rbind(tmp, read.table(paste0(resampling.GO.dir, f), header=T, sep="\t", stringsAsFactors=F, quote=""))
  }
  # Map community indices to the shared community IDs for this tissue.
  tmp$comm.id <- shared.com[[`tis`]][tmp$comm.id]
  resample[[`tis`]] <- tapply(1:nrow(tmp), tmp$comm.id, function(x, tab){
    res <- tapply(tab$Pvalue[x], tab$GOID[x], function(y){y})
  }, tab=tmp)
  rm(tmp)
}

### Compute real p-value rank against 1000 resampling iterations.
a$iter <- 1000
resample.pval <- apply(a, 1, compute.resample.pval, resample=resample)
go.bp$resample.pval <- NA
# Write the empirical p-values back into the matching rows of the full table.
go.bp$resample.pval[which(go.bp$Tissue %in% names(tislist) & go.bp$p.adjust<=0.05 & go.bp$OddsRatio>1 & go.bp$NumberTissues>=12)] <- resample.pval
write.table(go.bp, file=paste0(cluster.dir, go.results.txt), row.names=F, sep="\t", quote=F)

### Plot distribution of resampled p-values for term GO:0010468
### (regulation of gene expression -- TODO confirm), one panel per tissue,
### using the row with the smallest observed p-value per tissue.
terms <- "GO:0010468"
b<- a[grep(terms, a$GOID),]
d<-tapply(b$Pvalue, b$Tissue, function(x){which(x==min(x))})
b.tmp <- NULL
for(i in 1:length(d)){
  b.tmp <- rbind(b.tmp, b[b$Tissue==names(d)[i],][d[i],])
}
pdf(paste0(figure.dir, "hist_GO_terms_resampling_", terms, ".pdf"), width=8, height=11.5)
par(mfrow=c(4,4), mar=c(4,5,3,0)+.1)
for(i in 1:nrow(b.tmp)){
  tis <- b.tmp[i,1]
  com <- as.character(b.tmp[i,2])
  goid <- b.tmp[i,3]
  iter <- as.numeric(b.tmp[i,13])
  tmp <- resample[[`tis`]][[`com`]][[`goid`]]
  if (length(tmp)<iter){
    tmp <- c(tmp, rep(1, (iter-length(tmp))))
  }
  h <- hist(tmp, plot=F, breaks=seq(0,1,0.01))
  h$density <- h$counts/sum(h$counts)   # normalize counts to proportions
  plot(h, col="dodgerblue", xlab="P-values", ylab="Density", main=as.character(Tissues[tis,1]), freq=F)
  par(xpd=T)
  # Annotate the panel with the observed p-value just above the tallest bar.
  text(0.5,(max(h$density)-0)*21/20, label=paste0("P = ", sprintf("%.2e", b$Pvalue[i])), font=2)
}
dev.off()
71a8d855940edeb4a039038bc6f336b7de5564d6
e4475f9d147174edb0c057af45f2afc60ddc7e4b
/scripts/Angela/Angela_CountReadsAssignedToGenes.R
aac2ba48e0fd4c2fd4774c8b8e610351498bb45a
[]
no_license
srmarzec/albopictus_remapping
04859ceaff6820985199ab041271d9f06a0b7d51
ccfffe46f200506108f9183df2af52c022984ed6
refs/heads/main
2023-04-26T11:46:57.565451
2021-04-22T18:20:58
2021-04-22T18:20:58
335,630,859
0
1
null
null
null
null
UTF-8
R
false
false
1,994
r
Angela_CountReadsAssignedToGenes.R
# Script to count up the reads assigned to genes by HTSeq for each of the samples.
# NOTE(review): paths below are hard-coded absolute paths to one user's
# machine, and the script calls setwd() -- both should be parameterized
# before reuse.

#if (!require("readbulk")) install.packages("readbulk")
#if (!require("tidyverse")) install.packages("tidyverse")
#tidyverse can be used to reformat tables
library(readbulk)
library(tidyverse)

# Read in all the HTSeq output files at once; read_bulk stacks them into one
# long table called "raw_data" with a `File` column identifying the sample.
raw_data <- read_bulk(directory = "/Users/cottonellezhou/OneDrive - Georgetown University/Differential Expression Analysis/Data", fun = read.table, header = F)

# Spread into wide format: one column per sample (File), one row per gene.
# spread(table, key_column, value_column) -- key values become column names.
spread_dat <- spread(raw_data, File, V2)

# Remove the first 5 rows: these are HTSeq's special counters
# (__no_feature, __ambiguous, ...), not reads assigned to genes --
# NOTE(review): this assumes those rows sort first; verify for your files.
spread_dat <- spread_dat[c(-1,-2,-3,-4,-5),]

# Column sums (excluding the gene-name column) = number of reads assigned to
# genes per sample.
col_dat <- colSums(spread_dat[,-1])

# Convert the named vector into a small data frame for export.
col_dat <- as.data.frame(col_dat)

# Write out as CSV so the assigned-read totals can be referenced later.
setwd("~/OneDrive - Georgetown University/Differential Expression Analysis/Script")
write.csv(col_dat, file = "../misc/htseqCount_ReadsAssignedGene.csv")
00d521c584ce662099ba1dbd7bc794927ad6f2d6
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/komaletter/examples/komaletter.Rd.R
0a6b13967be8f80ca19ed92c49e84a19fb0e825d
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
274
r
komaletter.Rd.R
# Extracted Rd example for the komaletter package (KOMA-Script LaTeX letter
# format for R Markdown). The example itself is wrapped in "Not run" guards.
library(komaletter)

### Name: komaletter
### Title: KOMA-Script LaTeX Letter Format
### Aliases: komaletter

### ** Examples

## Not run:
##D rmarkdown::draft("MyLetter.Rmd", template="pdf", package="komaletter")
##D rmarkdown::render("MyLetter.Rmd")
## End(Not run)
68cb6f74a6dc84755c3987f51a96b0bed31f4652
7218206cfc1b9d7d40d79dae94dbac99820fc794
/ch3/exercises.R
f2c55dcdbdaa8ed7c46d9ab94173be293f7d9237
[ "MIT" ]
permissive
azu/stats_with_R
ac7fbd6395c508f77ccc8be19977fe288221aa6f
54ae7db49b881f6e9062ccc0f15e6e5f8fb077f6
refs/heads/master
2021-01-19T11:14:50.527047
2013-12-31T12:35:13
2013-12-31T12:35:13
15,205,100
1
0
null
null
null
null
UTF-8
R
false
false
993
r
exercises.R
# (1) Data for 10 university students: daily study time (hours) and score on
# a term exam (out of 100). Draw a scatter plot with study time on the
# horizontal axis and exam score on the vertical axis.
currentDirectory <- (function() {
  # Resolve the directory of the currently sourced script from srcfile info.
  path <- (function() attr(body(sys.function(-1)), "srcfile"))()$filename
  dirname(path)
})()
dataFrame <- read.csv(file.path(currentDirectory, "exercises_1.csv") , header=TRUE)
# Column names are Japanese: 勉強時間 = study time, 定期試験の得点 = exam score.
時間 <- dataFrame$勉強時間
得点 <- dataFrame$定期試験の得点
plot(時間, 得点)
# (2) Correlation coefficient between study time and exam score.
cor(時間, 得点)
# (3) Qualitative variables -- cross tabulation
# (洋食派か和食派か = Western vs Japanese food preference,
#  甘党か辛党か = sweet tooth vs spicy preference).
quality <- read.csv(file.path(currentDirectory, "exercises_3.csv") ,header=TRUE)
洋食or和食 <- quality$洋食派か和食派か
甘党or辛党 <- quality$甘党か辛党か
table(洋食or和食, 甘党or辛党)
# (4) Phi coefficient: encode each binary variable as 0/1 and correlate.
phi和洋 <- ifelse(洋食or和食 == "和食" , 1, 0);
phi甘辛 <- ifelse(甘党or辛党 == "甘党" , 1, 0);
cor(phi和洋, phi甘辛)
7e9e784f3504829d7aadacd1647160f5c4070ba0
455d5dadcc54cf2ae2b2b65b74a31cc6c81e178d
/02_Simulating_Networks.R
49ab33d582d02fd41efb71fe51a71e1a9868edf9
[]
no_license
albina23/Statistical-Analysis-of-Networks
f157f344ac5fbc50cae7173546418b6c029474cc
06b9d567fd3c1f9c3d6fcf002b81237887aa97f9
refs/heads/master
2021-09-18T11:35:54.826260
2018-07-13T11:03:43
2018-07-13T11:03:43
null
0
0
null
null
null
null
UTF-8
R
false
false
2,228
r
02_Simulating_Networks.R
# Exploring ERGM through simulation in R and MPNet (Chapters 1-4 in Lusher et al., 2013)
# NOTE(review): `friendMat` (the observed friendship adjacency matrix) is
# created elsewhere in the project -- this script is not standalone.

# Set working directory
setwd("~/MethodsR/Statistical-Analysis-of-Networks")

# Install (if needed) and attach the listed packages.
install_package <- function(pkg){
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  if (length(new.pkg))
    install.packages(new.pkg, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
install_package(c(
  "dplyr",
  "igraph",
  "data.table",
  "RSiena",
  "network",
  "sna",
  "ergm"
))

###############################
# Observed(friendMat) vs Random
###############################
# Comparing the in-degrees of random vs observed
par(mfrow=c(1,2))
plot(table(colSums(friendMat)))
plot(table(colSums(g[1,,])), xlim=range(colSums(friendMat)))
# (g[1,,]) = first slice (graph), all rows, all columns

# Uniform MAN: random digraphs conditioned on the dyad census
num_rand <- 1 # How many random graphs to generate.
dyad.census(friendMat) # Run to find out how many dyads of each type to specify
g <- rguman(num_rand,50, mut =39 , asym =35 , nul =1151 , method = 'exact') # 'exact' reproduces the dyad census
# We observe that both graphs have the same density
gden(g)
gden(friendMat)

# Why use exact?
# It produces the same dyad census as observed; without it the census only
# converges in expectation as enough random graphs are generated.
num_rand <- 1
dyad.census(friendMat)
g <- rguman(num_rand,50, mut =39 , asym =35 , nul =1151) # probabilistic (non-exact) version
hist(dyad.census(g)[,1])

# Let's compare the exact random versus observed
par(mfrow=c(1,2))
plot(as.network(g), main = 'Exact Random Graph')
plot(as.network(friendMat), main = 'Friend Mat Graph')

# The triad census is still different
triad.census(g)
triad.census(friendMat)

# Exploring the distribution of triad counts across many random graphs
num_rand <- 1000
g <- rguman(num_rand,50, mut =39 , asym =35 , nul =1151)
BigTriad <- triad.census(g)
# 030T (column 9 of the triad census)
hist(BigTriad[,9],xlab="030T", xlim=c(0,5))
# Observed.030T = 5
sum(BigTriad[,9]>=5)/1000 # Calculate p-value as a proportion

# Triad Checker: same test for an arbitrary triad type
name = '120C'
position = 14
hist(BigTriad[,position],xlab=name, xlim=c(0,5), main = name)
# Observed.120U = 5
sum(BigTriad[,position]>=2)/1000 # proportion of graphs with at least 2 such triads
4e02094fd0b1a3df0f5e71beb2e9f61ad759af30
34ea6f84847771a57523b4bcd53a1b94ff8903d7
/iso.R
6d2cc127ef3877c7464b3ef558666567b867e3ea
[]
no_license
cfshi/fgc
52d929473b0a08be4b9e184f77d0df3517c17e68
b6c7c07489cb10341997f5241c062debb57be92c
refs/heads/master
2021-05-16T05:36:56.210568
2017-09-11T20:39:32
2017-09-11T20:39:32
103,170,756
0
0
null
null
null
null
UTF-8
R
false
false
1,967
r
iso.R
# Build per-predictor ordinal-prediction plots and arrange them in 4x3 grids.
# NOTE(review): depends on objects created elsewhere (modAns, mod, varlvlsum)
# and helper functions ordpred(), varPlot() -- not standalone.
library(ordinal)
library(splines)
library(gridExtra)
library(ggplot2)
library(reshape)
theme_set(theme_bw())

# Drop the stored terms attribute (presumably to shrink the object before
# prediction -- TODO confirm).
attr(modAns,"terms") <- NULL
catNames <- c("religion","urRural","job","maritalStat")
# Predictor columns: all but the first column and the last three.
predNames <- colnames(modAns)[2:(ncol(modAns)-3)]
# One ordinal-prediction object per predictor.
isoList <- lapply(predNames, function(n){
  ordpred(mod, n, modAns)
})

# First page: predictors 1-11 with their likelihood-ratio p-values.
print(
  grid.arrange(varPlot(isoList[[1]],P=varlvlsum$`Pr(>Chisq)`[1]),
               varPlot(isoList[[2]],P=varlvlsum$`Pr(>Chisq)`[2]),
               varPlot(isoList[[3]],P=varlvlsum$`Pr(>Chisq)`[3]),
               varPlot(isoList[[4]],P=varlvlsum$`Pr(>Chisq)`[4]),
               varPlot(isoList[[5]],P=varlvlsum$`Pr(>Chisq)`[5]),
               varPlot(isoList[[6]],P=varlvlsum$`Pr(>Chisq)`[6]),
               varPlot(isoList[[7]],P=varlvlsum$`Pr(>Chisq)`[7]),
               varPlot(isoList[[8]],P=varlvlsum$`Pr(>Chisq)`[8]),
               varPlot(isoList[[9]],P=varlvlsum$`Pr(>Chisq)`[9]),
               varPlot(isoList[[10]],P=varlvlsum$`Pr(>Chisq)`[10]),
               varPlot(isoList[[11]],P=varlvlsum$`Pr(>Chisq)`[11]),
               nrow=4,ncol=3)
)
# NOTE(review): predictor 12 is plotted both here and in the >16 branch
# below, so it appears twice when nrow(varlvlsum) > 16 -- possible
# copy-paste issue; confirm intended branching.
if(nrow(varlvlsum)>11){
  print(
    grid.arrange(varPlot(isoList[[12]],P=varlvlsum$`Pr(>Chisq)`[12]),
                 nrow=4,ncol=3)
  )
}
if(nrow(varlvlsum)>16){
  print(
    grid.arrange(varPlot(isoList[[12]],P=varlvlsum$`Pr(>Chisq)`[12]),
                 varPlot(isoList[[13]],P=varlvlsum$`Pr(>Chisq)`[13]),
                 varPlot(isoList[[14]],P=varlvlsum$`Pr(>Chisq)`[14]),
                 varPlot(isoList[[15]],P=varlvlsum$`Pr(>Chisq)`[15]),
                 varPlot(isoList[[16]],P=varlvlsum$`Pr(>Chisq)`[16]),
                 varPlot(isoList[[17]],P=varlvlsum$`Pr(>Chisq)`[17]),
                 nrow=4,ncol=3)
  )
}
if(nrow(varlvlsum)>18){
  print(
    grid.arrange(varPlot(isoList[[18]],P=varlvlsum$`Pr(>Chisq)`[18]),
                 varPlot(isoList[[19]],P=varlvlsum$`Pr(>Chisq)`[19]),
                 nrow=4,ncol=3)
  )
}

# print(listPlot(isoList))
#rdsave(isoList,varlvlsum)
95a0c18b9e0dd5167a776d5f31d0a9143a442a26
62cc5c60c55b857860b5b6bc62529a096c54f060
/FeatureBuilding.R
db44d94ecefa2ede51f5ae401e0ab5a46004d2e4
[]
no_license
ChrisKye/TempPilot
7bbea72c23a9f2c56f59e2abe5a1d6d2e4c78feb
a9a92a638dc8454aa06f0bbf16f30910dc7d2707
refs/heads/main
2023-02-01T10:36:14.218951
2020-12-13T05:42:19
2020-12-13T05:42:19
317,439,664
0
0
null
null
null
null
UTF-8
R
false
false
1,582
r
FeatureBuilding.R
## FeatureBuilding.R
## Builds the binary emotion labels and the music-feature block: each of the
## 40 trials is expanded to 60 windows and tiled across 32 participants.
library(data.table)
library(dplyr)
library(R.matlab)

############# Creating y_labels for modelling #############
##Reading in data
ratings <- fread("./Data/metadata_csv/participant_ratings.csv")
data_y <- ratings[,c("Participant_id","Trial", "Valence", "Arousal", "Dominance", "Liking")]

##Relabelling to High/Low for emotion dimensions
# Each rating is dichotomized at 5: > 5 -> 1 (high), otherwise 0 (low).
labels <- apply(data_y, 1, function(x) {
  if (x[3] > 5) val = 1 else val = 0
  if (x[4] > 5) arou = 1 else arou = 0
  if (x[5] > 5) dom = 1 else dom = 0
  if (x[6] > 5) lik = 1 else lik = 0
  return (c("Sub" = x[1], "Trial" = x[2], "Valence" = val,"Arousal" = arou, "Dominance" = dom, "Liking" = lik))
})
##Reshape and repeat labels to match training set
# apply() over rows yields one *column* per input row, so transpose back,
# then repeat every row 60 times (one copy per analysis window).
labels <- t(labels)
labels <- as.data.table(labels)
colnames(labels)[1:2] <- c("SubNo","TrialNo")
finalLabs <- labels[rep(seq_len(nrow(ratings)), each=60),]
saveRDS(object = finalLabs,file = "./Data/finalLabs.rds")

############# Music Features Reshaping #############
##Initial Formatting
# Flatten the MATLAB object into a 40-trial x 13-feature data.frame.
df <- readMat('music_anal/musicFeatures.mat')
df <- df[[1]]
df <- as.data.frame(df)
df <- as.data.frame(t(df))
names <- colnames(df)
df1 <- as.matrix(df)
df1 <- matrix(unlist(df1), nrow = 40, ncol=13)
df1 <- as.data.frame(df1)
colnames(df1) <- names
##Repeat to concatenate
# Expand to 60 windows per trial, then tile the whole block 32 times
# (once per participant) to align with finalLabs.
x_music <- df1[rep(seq_len(nrow(df1)), each=60),]
x_music <- x_music[rep(seq_len(nrow(x_music)), 32), ]
saveRDS(x_music, "./Data/x_music.rds")

##Full Feature Set
# NOTE(review): x_freq is not defined in this script -- it must already
# exist in the session (presumably built by a frequency-feature script)
# before this line runs; confirm.
featureFull <- cbind(x_freq, x_music)
saveRDS(object = featureFull, file = "./Data/featureFull.rds")
db3ec7cc6706b48e4f7660c616b8aad009e3afc9
1e620d83967acb48dfba21436d88bf4697904ba0
/scripts/12-GRN_final.R
67893d9022790ddf7ce9e0bfc0526a6cddf90728
[ "MIT" ]
permissive
umr1283/LGA_HSPC_PAPER
e6c32af5fd7303dd93b802b7a2d04704d421b305
5ff879dc64d555452d8ee980b242a957f412b6c6
refs/heads/main
2023-04-17T15:57:57.025596
2022-07-18T15:27:35
2022-07-18T15:27:35
404,302,229
0
0
null
null
null
null
UTF-8
R
false
false
13,089
r
12-GRN_final.R
### Project Setup ==================================================================================
## 12-GRN_final.R -- filter SCENIC regulons using ATAC-seq motif evidence,
## then build and plot the EGR1/KLF2/KLF4 gene-regulatory network annotated
## with expression, methylation and chromatin-accessibility changes.
## NOTE(review): fp() and data.table helpers are presumably loaded by
## scripts/utils/new_utils.R -- confirm.
out<-"outputs/12-GRN_final"
dir.create(out)
source("scripts/utils/new_utils.R")
library(Seurat)
library(Signac)

####Functions####
# Map TF names to the motif IDs stored in the assay's motif object.
# With return_dt = TRUE, returns a data.table with columns motif.name /
# motif.id; otherwise a character vector of motif IDs.
GetMotifIDs<-function(object,motif.names,assay=NULL,return_dt=FALSE){
  if(is.null(assay))assay<-DefaultAssay(object)
  idx<-match(motif.names,object@assays[[assay]]@motifs@motif.names)
  if(return_dt){
    return(
      data.table(motif.name=motif.names,
                 motif.id=names(object@assays[[assay]]@motifs@motif.names[idx]))
    )
  }else{
    return(names(object@assays[[assay]]@motifs@motif.names[idx]))
  }
}

# For a set of peaks, report which ones contain the motif of a given TF.
# With return.peaks = TRUE, returns the peak names carrying the motif;
# otherwise a named logical vector over all requested peaks.
CheckMotif<-function(object,peaks,motif.name,assay = NULL,return.peaks=FALSE){
  require("Signac")
  if(is.null(assay))assay<-DefaultAssay(object)
  # NOTE(review): GetMotifID (singular) is not defined in this file -- it is
  # presumably provided by new_utils.R, or this should call GetMotifIDs;
  # confirm.
  motif<-GetMotifID(object,motif.name,assay=assay)
  motif.all <- GetMotifData(
    object = object, assay = assay, slot = "data"
  )
  motifs_peaks_tf <- motif.all[peaks,motif , drop = FALSE]
  if(return.peaks){
    motifs_peaks_tf<-rownames(motifs_peaks_tf)[as.vector(motifs_peaks_tf==1)]
    return(motifs_peaks_tf)
  }else{
    motifs_peaks_tf_vec<-as.vector(motifs_peaks_tf==1)
    names(motifs_peaks_tf_vec)<-rownames(motifs_peaks_tf)
    return(motifs_peaks_tf_vec)
  }
}

### Analysis =======================================================================================
#clean regulons list based on atac
regulons_list<-readRDS("outputs/09-SCENIC/cbps_14k/regulons_list.rds")
atacs<-readRDS("outputs/07-DMCs_atac_integr/cbps_atacs.rds")
atacs[["lin_peaks"]]<-readRDS("outputs/07-DMCs_atac_integr/cbps_lin_spe_peaks_assay.rds")
atacs@assays$lin_peaks@motifs<-readRDS("outputs/07-DMCs_atac_integr/atacs_cbps_lin_peaks_motif_object.rds")
DefaultAssay(atacs)<-"lin_peaks"

#for EGR1
peaks_hsc_genes<-fread("outputs/07-DMCs_atac_integr/peaks_hsc_genes_anno.csv.gz")
peaks_close_EGR1_target<-peaks_hsc_genes[gene_name%in%regulons_list$EGR1]$query_region
egr1_peaks<-CheckMotif(atacs, peaks =peaks_close_EGR1_target , motif.name = "EGR1", return.peaks = TRUE)
length(egr1_peaks)/length(peaks_close_EGR1_target) #30%
egr1_genes<-intersect(peaks_hsc_genes[query_region%in%egr1_peaks]$gene_name,
                      regulons_list$EGR1)
length(egr1_genes)/length(regulons_list$EGR1) #96% ! (25/26)

#egr1_extended
peaks_close_EGR1_target<-peaks_hsc_genes[gene_name%in%regulons_list$EGR1e]$query_region
egr1_peaks<-CheckMotif(atacs, peaks =peaks_close_EGR1_target , motif.name = "EGR1", return.peaks = TRUE)
length(egr1_peaks)/length(peaks_close_EGR1_target) #17%
egr1_genes<-intersect(peaks_hsc_genes[query_region%in%egr1_peaks]$gene_name,
                      regulons_list$EGR1e)
length(egr1_genes)/length(regulons_list$EGR1e) #93% (393/424)

#for all
# Keep only regulons whose TF has a motif in the ATAC motif object
# ("e$" suffix marks SCENIC extended regulons).
tfs_scenic<-unique(str_remove(names(regulons_list),"e$"))
regulons_tf_atac<-unlist(atacs@assays$lin_peaks@motifs@motif.names[atacs@assays$lin_peaks@motifs@motif.names%in%tfs_scenic])
length(regulons_tf_atac)/length(tfs_scenic)#107/157
regulons_atac_list<-regulons_list[str_remove(names(regulons_list),"e$")%in%regulons_tf_atac]
length(regulons_atac_list) #174
length(regulons_list) #250

# For every regulon, keep targets whose nearby peaks carry the TF's motif.
regulons_atac_listf<-lapply(names(regulons_atac_list), function(regulon_name){
  targets<-regulons_atac_list[[regulon_name]]
  motif_name<-str_remove(regulon_name,"e$")
  peaks_close_targets<-peaks_hsc_genes[gene_name%in%targets]$query_region
  tf_peaks<-CheckMotif(atacs, peaks =peaks_close_targets , motif.name = motif_name, return.peaks = TRUE)
  filtered_targets<-intersect(peaks_hsc_genes[query_region%in%tf_peaks]$gene_name,
                              targets)
  return(filtered_targets)
})
names(regulons_atac_listf)<-names(regulons_atac_list)

cat(unlist(lapply(1:length(regulons_atac_listf),
                  function(i)paste(names(regulons_atac_listf)[i],"=",round(length(regulons_atac_listf[[i]])/length(regulons_atac_list[[i]])*100),"%"))),sep = "\n")
mean(unlist(lapply(1:length(regulons_atac_listf),
                   function(i)length(regulons_atac_listf[[i]])/length(regulons_atac_list[[i]])))) #59%

#make a df of interactions tf > targets
regulons<-Reduce(rbind,lapply(names(regulons_atac_listf),
                              function(t)data.table(tf=rep(t,length(regulons_atac_listf[[t]])),target=regulons_atac_listf[[t]])))
regulons[,extended:=str_detect(tf,"e$")]
regulons[,tf:=str_remove(tf,"e$")]
regulons[(extended)] #25397 tf > target interaction
regulons[(!extended)] #4808 tf > target interaction with high confidence
fwrite(regulons,fp(out,"tf_target_interactions.csv"))
regulons<-fread(fp(out,"tf_target_interactions.csv"))

#%TF-target conserved
# Fraction of previously-called targets (10-SCENIC) still present after the
# ATAC filtering, per TF.
regulonsf<-fread(fp(out,"tf_target_interactions.csv"))[!(extended)]
regulons_old<-fread("outputs/10-SCENIC/regulons.csv")
res_conserved<-sapply(unique(regulons_old$tf),
                      function(t)length(intersect(regulons_old[tf==t]$gene,regulonsf[tf==t]$target))/nrow(regulons_old[tf==t]))
res_conserved[c("EGR1","KLF2","KLF4")]

#start build network only with tf> interact with high conf
regf<-regulons[(!extended)]
length(unique(regf$tf)) #72 tfs
regf[,n.target:=.N,by="tf"]
summary(unique(regf,by="tf")$n.target)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 4.00 16.50 32.50 66.78 75.50 462.00

#show network using ggnet
#renv::install("briatte/ggnet")
library(ggnet)
library(network)
library(sna)
?network
regf<-regf[!is.na(target)]
net<-as.network(regf[,.(tf,target)],loops = T,directed = T)
net
# Network attributes:
#  vertices = 1802
#  directed = TRUE
#  hyper = FALSE
#  loops = TRUE
#  multiple = FALSE
#  bipartite = FALSE
#  total edges= 4808
#    missing edges= 0
#    non-missing edges= 4808
#
#  Vertex attribute names:
#    vertex.names
#
# Edge attribute names not shown
saveRDS(net,fp(out,"network_tf_target_hi_conf.rds"))

#only with tf of interest
egr1_modul<-c("KLF2","EGR1","KLF4")
reg_egr1<-regf[tf%in%c(egr1_modul)] #add only targets of the tfs altered
fwrite(reg_egr1,fp(out,"egr1_KLF2_KLF4_network_tf_target_interactions.csv"))
net_genes<-union(reg_egr1$tf,reg_egr1$target)
reg_egr1r1<-unique(rbind(reg_egr1,regf[target%in%egr1_modul&tf%in%net_genes])) #add also regulators of this tfs in this newtwork
fwrite(reg_egr1r1,fp(out,"egr1_network_plus_tf_regulators_tf_target_interactions.csv"))
# reg_egr1r2<-regf[tf%in%c(egr1_modul)|target%in%egr1_modul] #add upstream regulators of egr1_modul
# fwrite(reg_egr1r2,fp(out,"egr1_network_plus_tf_regulators.csv"))

#reg_egr1<-unique(rbind(reg_egr1,regf[tf%in%net_genes&target%in%net_genes])) #add all interactions of this genes presents
tfs<-unique(reg_egr1r1$tf)
net_egr1<-as.network(reg_egr1r1[,.(tf,target)],loops = T,directed = T)
net_egr1
# Network attributes:
#  vertices = 123
#  directed = TRUE
#  hyper = FALSE
#  loops = TRUE
#  multiple = FALSE
#  bipartite = FALSE
#  total edges= 161
#    missing edges= 0
#    non-missing edges= 161
#
#  Vertex attribute names:
#    vertex.names
#
# No edge attributes

#add a vertex attributes wich indicates if the gene is a tf or not
net_egr1 %v% "type" = ifelse(network.vertex.names(net_egr1) %in% regf$tf, "tf", "gene")

#add methyl info
# Vertex label colour: darkred = high gene methylation score, black = low.
res_m<-fread("outputs/02-gene_score_calculation_and_validation/res_genes.csv.gz")
res_m[gene_score_add>500,meth:="darkred"]
res_m[gene_score_add<=500,meth:="black"]
net_egr1 %v% "meth" = sapply(res_m[network.vertex.names(net_egr1),on="gene"]$meth,function(x)ifelse(is.na(x),"cornsilk3",x))

#add expr info
# Vertex fill: coral = up-regulated, cadetblue = down-regulated (darker =
# stronger fold change), cornsilk3 = not significant.
res_e<-fread("outputs/06-LGA_vs_Ctrl_RNA/res_pseudobulkDESeq2_by_lineage.csv.gz")[lineage=="HSC"]
res_e[padj>0.05,deg:="cornsilk3"]
res_e[padj<=0.05&log2FoldChange>0,deg:="coral2"]
res_e[padj<=0.05&log2FoldChange>0.5,deg:="coral3"]
res_e[padj<=0.05&log2FoldChange<(0),deg:="cadetblue3"]
res_e[padj<=0.05&log2FoldChange<(-0.5),deg:="cadetblue4"]
res_e[padj<=0.05&log2FoldChange<(-0.25)]
net_egr1 %v% "deg" = res_e[network.vertex.names(net_egr1),on="gene"]$deg

#add atac info
#on vertice
#need add target info
res_a<-fread("outputs/08-chromatin_change_LGA_vs_Ctrl/differential_peaks_accessibility_lga_vs_ctrl_hsc_logFC0.csv.gz")
res_a<-res_a[!str_detect(peak,"chr[XY]")]
peaks_hsc_genes[,peak:=query_region]
res_at<-merge(res_a,peaks_hsc_genes,by="peak")
res_at[,target:=gene_name]
res_at[p_val_adj<0.001&avg_log2FC>0.25,da:="red"]
res_at[p_val_adj<0.001&avg_log2FC<(-0.25),da:="blue"]
res_at[is.na(da),da:="grey75"]
net_egr1 %v% "da" = res_at[network.vertex.names(net_egr1),on="target"]$da

#on edge : df tf-target link with if peak with motif found, FC / pval of the change
#need merge network df with res_atac df
#add TF info on res_atac => for each peak, merge with tf(of the network)-peak dt
tfs<-unique(reg_egr1r1[,.(tf,target)]$tf) #"KLF4" "EGR1" "KLF2" "ATF4" "ATF3" "JUN" "FOS" "JUNB"
peaks<-unique(res_at[target%in%reg_egr1r1$target]$peak)
length(peaks)#690
motif.all <- GetMotifData(
  object = atacs, assay = "lin_peaks", slot = "data"
)
motifs_peaks_tfs <- motif.all[peaks,GetMotifIDs(atacs,tfs) , drop = FALSE]
# Long-format peak x motif table, keeping only peaks that carry the motif.
tf_peak_dt<-melt(data.table(data.frame(as.matrix(motifs_peaks_tfs==1)),keep.rownames = "peak"),id.vars = "peak",variable.name ="motif.id" ,value.name = "is_present")
tf_peak_dt<-merge(tf_peak_dt,GetMotifIDs(atacs,tfs,return_dt=TRUE))
tf_peak_dt<-tf_peak_dt[is_present==TRUE]
res_at_tf<-merge(res_at,tf_peak_dt,by="peak")
res_at_tf[,tf:=motif.name]

#merge with network df
reg_egr1r1_peaks<-merge(reg_egr1r1,
                        res_at_tf[,.(tf,target,peak,p_val,p_val_adj,avg_log2FC,pct.1,pct.2,type,da)],
                        by = c("tf","target"),
                        all.x = T)
reg_egr1r1_peaks[,n.tf.target.peaks:=.N,by=.(tf,target)]
# Keep, per tf-target pair, the peak with the most significant change.
reg_egr1r1_peaks[,biggest_change:=p_val==min(p_val),.(tf,target)]
reg_egr1r1_peaks[(biggest_change)|is.na(biggest_change)]
unique(reg_egr1r1_peaks[(biggest_change)|is.na(biggest_change)],by=c("tf","target"))#ok
reg_egr1r1_peaks[,da.peak:=p_val_adj<0.001&abs(avg_log2FC)>0.25]
reg_egr1r1_peaks[,n.da.tf.target.peaks:=sum(da.peak),.(tf,target)]
reg_egr1r1_peaks[da.peak==T] #9
reg_egr1r1_peak1<-reg_egr1r1_peaks[(biggest_change)|is.na(biggest_change)]

#add DMCs infos on edge
#need merge peaks DMCs df with reg_egr1 df
peaks_cpgs<-fread("outputs/07-DMCs_atac_integr/cpgs_in_lin_OCRs.csv.gz")
peaks_meth<-merge(peaks_cpgs,fread("outputs/01-lga_vs_ctrl_limma_DMCs_analysis/res_limma.tsv.gz"))
peaks_meth[,peak:=peaks]
peaks_meth_hsc<-peaks_meth[peak%in%peaks_hsc_genes$peak]
reg_egr1r1_peak1_meth<-merge(reg_egr1r1_peak1,
                             peaks_meth_hsc[,.(peak,cpg_id,logFC,P.Value,adj.P.Val)],
                             by="peak",
                             all.x=T)
reg_egr1r1_peak1_meth[,n.cpg.peak:=.N,by=.(peak,tf)]
reg_egr1r1_peak1_meth[,biggest_meth_change:=P.Value==min(P.Value),.(peak,tf)]
reg_egr1r1_peak1_meth[(biggest_meth_change)|is.na(biggest_meth_change)] #ok
reg_egr1r1_peak1_meth[,dmcs:=P.Value<0.001&abs(logFC)>25]
reg_egr1r1_peak1_meth[,n.dmcs.peak:=sum(dmcs),.(peak,tf)]
reg_egr1r1_peak1_meth[dmcs==T] #24
reg_egr1r1_peak1_meth1<-reg_egr1r1_peak1_meth[(biggest_meth_change)|is.na(biggest_meth_change)]

#ADD edge atttibute (tf> target) : color depend of if atac based tf> target interact is altered by chromatin change
#if tf-gene peak dn : blue if tf-gene peak up : red
net_egr1_a<-network(reg_egr1r1_peak1_meth1[,-c("extended","biggest_change","biggest_meth_change","peak")],loops = T,directed = T)
list.edge.attributes(net_egr1_a)
as.matrix(net_egr1_a,attrname='da')
net_egr1_a %e% "da"
net_egr1_a %e% "da"=sapply(net_egr1_a %e% "da",function(x)ifelse(x=="grey75","darkgrey",x))
# Edge line type encodes the number of DMCs in the linking peak (+1 so that
# edges without any DMC still draw with lty = 1).
net_egr1_a %e% "dmc_line"=net_egr1_a %e% "n.dmcs.peak"+1
net_egr1_a %e% "dmc_line"=sapply(net_egr1_a %e% "dmc_line",function(x)ifelse(is.na(x),1,x))
#set.edge.attribute(net_egr1, "color", ifelse(net_egr1 %e% "dap" > 1, "black", "grey75"))

#add vertices attributes
net_egr1_a %v% "type" = ifelse(network.vertex.names(net_egr1_a) %in% regf$tf, "tf", "gene")
net_egr1_a %v% "deg" = res_e[network.vertex.names(net_egr1_a),on="gene"]$deg
net_egr1_a %v% "deg" = sapply(net_egr1_a %v% "deg",function(x)ifelse(is.na(x),"cornsilk3",x))
net_egr1_a %v% "meth" = sapply(res_m[network.vertex.names(net_egr1_a),on="gene"]$meth,function(x)ifelse(is.na(x),"black",x))

#genes_of_interest<-union(res_e[padj<=0.05&abs(log2FoldChange)>0.5]$gene,union(res_m[gene_score_add>500]$gene,unique(reg_egr1$tf)))

#GRN sans selection,label all genes
ggnet2(net_egr1_a,
       color = "deg",
       label = T,label.color = "meth",label.size = 2,
       size = "type" ,size.palette = c("tf"=3,"gene"=1),
       shape = "type",
       edge.alpha = 0.8, edge.size=0.5,
       edge.color = "da",
       arrow.size = 5,
       edge.lty = "dmc_line",
       arrow.gap =0.02) +
  theme(panel.background = element_rect(fill = "white"))
ggsave(fp(out,"final_network_EGR1_KLF2_KLF4_tf_targets_2.pdf"))

reg_egr1r1_peak1_meth1[n.dmcs.peak>1]
c248a90e1602f8f61c83d30dac321c07edec7261
9b224706fa92a3aefd056a297830ada20dc063ae
/cachematrix.R
5ecf0ab534309f94f3e4e403c424b8f3a71cb38c
[]
no_license
dasctobo/ProgrammingAssignment2
3d80ed0634d2318bd84c1cefe683ba5685f0cf7d
69c51f020bde381d35fef1e0f059625480821365
refs/heads/master
2021-01-09T06:41:10.317884
2014-10-17T14:23:28
2014-10-17T14:23:28
null
0
0
null
null
null
null
UTF-8
R
false
false
989
r
cachematrix.R
## Cache-aware matrix wrapper: pairs a matrix with a memoised inverse so the
## (potentially costly) solve() is computed at most once per matrix value.

## Build the wrapper around matrix `x`.  Returns a list of four accessors:
## set/get for the matrix itself, setinverse/getinverse for the cached
## inverse.  Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Return the inverse of the matrix held by a makeCacheMatrix() wrapper.
## On a cache hit it emits the "getting cached inverse" message and returns
## the stored value; otherwise it computes the inverse with solve()
## (forwarding any extra arguments), stores it, and returns it.
cacheSolve <- function(x, ...) {
  result <- x$getinverse()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached inverse")
  }
  result
}
379d03152cf1591005a17febc52f5c49127415fe
e08cbe1280758705991a3fecd2137fe262eaea3a
/peak_statistics.R
21bbdb0e5d88162c4df15a98b00ebe2524d8ce7e
[]
no_license
cwarden45/peak_calling_template
9272ba8274c93c03602cad737d8c4b918cee4e4e
d5aa9bf407cf95f5d84f8ccb98ed0dc1efe70dd9
refs/heads/master
2021-10-16T22:09:32.765005
2019-02-13T01:04:00
2019-02-13T01:04:00
103,324,290
0
0
null
null
null
null
UTF-8
R
false
false
5,644
r
peak_statistics.R
## peak_statistics.R
## Summarizes MACS2 peak calls per sample: plots each sample's peak-length
## density (gray) plus the merged-peak distribution (black), writes a
## per-sample peak-count table, then annotates merged peaks with overlapping
## promoter genes and emits them as a GTF.
param.file = "parameters.txt"
peak.length.dist = "peak_length_dist.png"
peak.length.stats = "peak_length_dist.txt"
max.plot = 1000  # peak lengths are clamped to this value for plotting

param.table = read.table(param.file, header=T, sep="\t")
genome = as.character(param.table$Value[param.table$Parameter == "genome"])
alignment.folder = as.character(param.table$Value[param.table$Parameter == "Alignment_Folder"])
peakType = as.character(param.table$Value[param.table$Parameter == "peakType"])
nodup.bam = as.character(param.table$Value[param.table$Parameter == "Remove_Duplicates"])
annoType = as.character(param.table$Value[param.table$Parameter == "gtfID"])
sample.description.file = as.character(param.table$Value[param.table$Parameter == "sample_description_file"])
tss.GTF = as.character(param.table$Value[param.table$Parameter == "promoterGTF_MAC"])
merged.GTF = as.character(param.table$Value[param.table$Parameter == "mergedPeakGTF"])

library("GenomicRanges")

# Validate the duplicate-removal flag (bamSuffix itself is not used below,
# but an invalid flag should still abort early).
if(nodup.bam == "yes"){
	bamSuffix = ".nodup.bam$"
}else if(nodup.bam == "no"){
	bamSuffix = ".bam$"
}else{
	print("'Remove_Duplicates' must be 'yes' or 'no'")
	stop()
}

if(peakType == "broad"){
	peakSuffix = "_peaks.broadPeak"
}else if(peakType == "narrow"){
	peakSuffix = "_peaks.narrowPeak"
}else{
	print("'peakType' must be 'broad' or 'narrow'")
	stop()
}

sample.description.table = read.table(sample.description.file, sep="\t", header=T)
longID = sample.description.table$sampleID
sample.label = sample.description.table$userID
# NOTE(review): this expects an `alignment.folder` column in the sample
# table; the `Alignment_Folder` parameter above is read but never used --
# confirm which one is authoritative.
bedFiles = paste(sample.description.table$alignment.folder,"/", longID,"/macs_",longID,peakSuffix,sep="")

# Preallocated instead of growing with num.peaks[i] on an empty vector.
num.peaks = integer(length(bedFiles))

png(peak.length.dist)
for (i in seq_along(bedFiles)){
	print(paste(longID[i]," : ",bedFiles[i],sep=""))
	input.table = read.table(bedFiles[i], head=F, sep="\t")
	print(dim(input.table))
	num.peaks[i] = nrow(input.table)

	# Clamp lengths at max.plot so one density x-axis fits every sample.
	peak.length = input.table$V3 - input.table$V2
	print(max(peak.length))
	peak.length[peak.length > max.plot] = max.plot
	print(length(peak.length[peak.length == max.plot]))

	den = density(peak.length, na.rm=T, from=0, to=max.plot)
	if(i == 1) {
		plot(den$x, den$y, type="l",
			xlab = "Peak Length", ylab = "Density",
			xlim=c(0,max.plot), ylim=c(0,0.01), col="gray")
		legend("top", legend=c("sample","merged"), col=c("gray","black"),
			lwd=2, xpd=T, inset =-0.1, ncol=2)
	} else {
		lines(den$x, den$y, type = "l", col="gray")
	}

	# Accumulate the union of reduced peak intervals across samples.
	gr = reduce(GRanges(Rle(input.table$V1),
		IRanges(start=input.table$V2, end=input.table$V3)))
	if(i == 1){
		peakGr = gr
	}else{
		peakGr = union(peakGr, gr)
	}
}#end for (i in seq_along(bedFiles))

peakGr = reduce(peakGr)

print("Create merged peak table")
merged.peaks = data.frame(peakGr)

peak.length = merged.peaks$width
print(max(peak.length))
peak.length[peak.length > max.plot] = max.plot
print(length(peak.length[peak.length == max.plot]))

# BUG FIX: recompute the density for the *merged* peaks before drawing the
# black curve.  Previously `den` still held the last sample's density, so
# the "merged" legend entry actually re-plotted the final sample.
den = density(peak.length, na.rm=T, from=0, to=max.plot)
lines(den$x, den$y, type = "l", col="black")
dev.off()

stat.table = data.frame(SampleID=longID, userID=sample.label,
	bed.file=bedFiles, num.peaks)
write.table(stat.table, peak.length.stats, quote=F, sep="\t", row.names=F)

promoter.table = read.table(tss.GTF, head=F, sep="\t")
print(dim(promoter.table))
# Drop alternate/patch contigs (chromosome names containing "_").
if(length(grep("_",promoter.table$V1)) > 0){
	promoter.table = promoter.table[-grep("_",promoter.table$V1),]
	promoter.table$V1 = as.factor(as.character(promoter.table$V1))
	print(dim(promoter.table))
}

# Parse the gene_name value out of a GTF attribute string.
extract.gene = function(char){
	char.info = unlist(strsplit(char,split=";"))
	anno.info = as.character(char.info[grep("gene_name", char.info)])
	anno = gsub("\"","",anno.info)
	anno = gsub("gene_name","",anno)
	anno = gsub(" ","",anno)
	return(anno)
}#end def extract.gene

gene = unlist(sapply(as.character(promoter.table$V9), extract.gene))

refGR = GRanges(Rle(promoter.table$V1),
	IRanges(start=promoter.table$V4, end=promoter.table$V5),
	Names=gene, Rle(strand(promoter.table$V7)))
testGR = GRanges(Rle(merged.peaks$seqnames),
	IRanges(start=merged.peaks$start, end=merged.peaks$end))
hits = findOverlaps(refGR, testGR)
overlaps = data.frame(pintersect(refGR[queryHits(hits)], testGR[subjectHits(hits)]))
print(head(overlaps))

regionID = paste(merged.peaks$seqnames,":",merged.peaks$start,"-",merged.peaks$end,sep="")
overlapsID = paste(overlaps$seqnames,":",overlaps$start,"-",overlaps$end,sep="")

# Collapse the (possibly several) promoter genes hitting one region into a
# single "_"-joined label; regions with no gene become "other".
combine_genes = function(gene.arr){
	gene.arr = unique(gene.arr)
	gene.arr = gene.arr[!is.na(gene.arr)]
	if(length(gene.arr) == 0){
		return("other")
	}else{
		return(paste(gene.arr,collapse="_"))
	}
}#end def combine_genes

region.gene = tapply(overlaps$Names, overlapsID, combine_genes)
region.gene = region.gene[match(regionID, names(region.gene))]
region.gene[!is.na(region.gene)] = paste("TSS_",region.gene[!is.na(region.gene)],sep="")
region.gene[is.na(region.gene)] = "other"

tabularID = paste(region.gene,regionID,sep="_")
attribute = paste("gene_id \"",regionID,"\"; gene_name \"",region.gene,"\"; transcript_id \"",tabularID,"\"",sep="")

# NOTE(review): data.frame(GRanges) coordinates are already 1-based, so the
# +1 below shifts the emitted GTF by one base -- confirm this offset is
# intentional before changing it.
gtf.table = data.frame(chr=merged.peaks$seqnames,
	source=rep("merged_MACS2_peaks",nrow(merged.peaks)),
	feature=rep("peak",nrow(merged.peaks)),
	start = merged.peaks$start+1,
	end = merged.peaks$end+1,
	score = rep(".",nrow(merged.peaks)),
	strand = rep(".",nrow(merged.peaks)),
	frame = rep(".",nrow(merged.peaks)),
	attribute)
gtf.table = apply(gtf.table, 2, as.character)
print(dim(gtf.table))
write.table(gtf.table,merged.GTF, sep="\t", row.names=F, col.names=F, quote=F)
c2787a70190882cf9325ad8cdbbaac1eed90b6d2
ed9f2693c0afb85aaebf08250f45d70f621feabc
/man/PhenoComp.Rd
a36ac75a178337597ebea11689193891d56a8e30
[]
no_license
XJJ-student/PhenoComp1
87e1ead0be89c389cbf8d37484bb80cb73973b0d
8fb3e3206f45653c6284317e200720f0f8bf36d4
refs/heads/master
2021-04-05T06:19:39.843300
2020-03-19T15:21:48
2020-03-19T15:21:48
248,529,086
0
0
null
null
null
null
UTF-8
R
false
false
1,850
rd
PhenoComp.Rd
\name{PhenoComp}
\alias{PhenoComp}
\title{Identification of population-level differentially expressed genes in one-phenotype data}
\usage{
PhenoComp(expdata,label,gene,freq,method,freq1,outfile1,outfile2)
}
\arguments{
\item{expdata}{a (non-empty) numeric gene expression matrix with both disease and control samples.}
\item{label}{a (non-empty) numeric vector of data values where '0' represents a control sample label and '1' represents a disease sample (default). The length of label must be the same as the number of columns in the expdata.}
\item{gene}{a (non-empty) numeric vector of Entrez gene IDs. The length of gene must be the same as the number of rows in the expdata.}
\item{freq}{the criterion for identifying stable gene pairs in control samples. The default setting of freq is 0.99.}
\item{method}{Method determines how to estimate p_up and p_down. Method=1: p_up and p_down are estimated as the median values of the frequencies of up-regulated and down-regulated genes for individual disease samples. Method=2: p_up and p_down are estimated as the mean values of the frequencies of up-regulated and down-regulated genes for individual disease samples.}
\item{freq1}{the threshold of FDR for identifying population-level differentially expressed genes.}
\item{outfile1}{The file name used to save the identified population-level up-regulated genes.}
\item{outfile2}{The file name used to save the identified population-level down-regulated genes.}
}
\description{
PhenoComp is an algorithm to identify population-level differentially expressed genes in one-phenotype data. This algorithm is based on RankComp, an algorithm used to identify individual-level differentially expressed genes in each sample.
}
\examples{
PhenoComp(expdata,label,gene,0.99,1,0.05,"gene_up.txt","gene_down.txt")
}
8b4d87f34c91cecb8d6e438d56b9fcdfbfb06a4c
957ecf14135b19cadd1314c2528628fbbe239b5a
/warp10r/man/pushWarp10.Rd
91201364d5cacc9e3c5f26a6ae9efebe14156294
[]
no_license
senx/warp10-r
b0af0334ff1de6ecff4cb49d3d6129826232efc3
199a09d26a530cf77b4eda38d8fe1d9ef587ee8c
refs/heads/master
2023-09-01T01:31:20.425182
2019-11-15T16:01:58
2019-11-15T16:01:58
82,603,784
1
2
null
2021-09-23T19:44:08
2017-02-20T21:19:48
R
UTF-8
R
false
true
522
rd
pushWarp10.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/client.R \name{pushWarp10} \alias{pushWarp10} \title{Push data points} \usage{ pushWarp10(data, token, endpoint = "http://localhost:8080/api/v0/update") } \arguments{ \item{data}{data points in GTS input format as a character vector or a filename ending with .data or .gz} \item{token}{write token} \item{endpoint}{ingress endpoint. Default to "http://localhost:8080/api/v0/update"} } \description{ Push data points to an ingress instance. }
94df102c5bc622e351c65e255ad6a7d914bb7a53
6e9b340fbad1378474ab0447dc8b822580b6bb2f
/assignment3/rankall.R
0689a7dab99ed44dd7e227b30ed05d72165b75c1
[]
no_license
kingname/datasciencecoursera
2cb290738e147d9c7b81a33bf6ef5543f7e473a2
8417869d3138b8e80a866364d2ea8f6edb6a1d7b
refs/heads/master
2023-08-16T16:51:12.082302
2015-02-01T04:45:19
2015-02-01T04:45:19
28,945,870
1
8
null
null
null
null
UTF-8
R
false
false
872
r
rankall.R
rankall <- function(outcome, num = "best") {
  ## Find the hospital of a given rank for every state.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num:     rank to extract ("best", "worst", or a numeric rank),
  ##            forwarded unchanged to rankhospital().
  ## Returns a data.frame with columns `hospital` and `state`.
  ## Stops with "invalid outcome" for an unknown outcome.

  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Check that outcome is valid; %in% replaces the previous
  ## build-a-logical-vector-and-sum-it check.
  valid.outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% valid.outcomes)) {
    stop("invalid outcome")
  }

  state_list <- unique(data$State)

  ## For each state, find the hospital of the given rank.  vapply()
  ## replaces the previous c()-append loop (quadratic growth) and
  ## guarantees a character result; rankhospital() may return NA for
  ## states with too few hospitals, hence the as.character() wrap.
  hospital_vector <- vapply(
    state_list,
    function(st) as.character(rankhospital(st, outcome, num)),
    character(1),
    USE.NAMES = FALSE
  )

  ## Return a data frame with the hospital names and the (abbreviated)
  ## state name
  data.frame(hospital = hospital_vector, state = state_list)
}
1e57a2a25a22991a79ddada3d2c20df530b71447
e39cd762e483cb80774aec5c89a333e3ff4cfa36
/practice.R
3aaa8b013e2cda48a38037b42dcf7c4b471fbd5f
[]
no_license
CodeFire98/impCodes
eaad7171bd77f489822f8998b5bed35c7fa947ca
9740a3d53eedd71718e60d8b6f0e969f15afcdcb
refs/heads/master
2020-03-21T07:02:42.985096
2018-06-22T05:10:38
2018-06-22T05:10:38
138,257,124
0
0
null
null
null
null
UTF-8
R
false
false
5,303
r
practice.R
## practice.R -- scratch/prototype file with successive drafts of a Shiny
## app for plotting weather time series.  Depends on helpers sourced from
## queplot.R (queplot / queplotuser / queplotfix) and on data objects
## (iig_bharati, mytable) created elsewhere in the session.
library(shiny)
library(ggplot2)
library(reshape)
library(scales)
source("queplot.R")

# --- Draft 1: fixed 2012-2016 date range, plot drawn by queplotuser() ---
ui <- fluidPage(
  titlePanel("Plots"),
  sidebarPanel(
    h3("Constraints (Between 2012 and 2016)"),
    dateInput("date1", "Start Date", value = "2012-01-28"),
    dateInput("date2", "End Date", value = "2012-01-28"),
    h3("Parameters:"),
    checkboxInput('valtempr', 'Temperature', FALSE),
    checkboxInput('valrh', 'Humidity', FALSE),
    checkboxInput('valws', 'Wind Speed', FALSE),
    checkboxInput('valap', 'Air Pressure', FALSE),
    actionButton("submit","Submit")
  ),
  mainPanel(
    h2("Time Series Plot\n"),
    plotOutput("view"),
    verbatimTextOutput("d")
  )
)

server <- function(input, output) {
  mytable = iig_bharati
  observeEvent(input$submit, {
    output$d = renderText({
      paste(input$date1, " to ", input$date2)
    })
    output$view = renderPlot({
      #queplot(input$date1, input$date2, input$valtempr, input$valrh, input$valws, input$valap)
      queplotuser(input$date1, input$date2, input$valtempr, input$valrh, input$valws, input$valap)
    })
  })
}

shinyApp(ui, server)
# NOTE(review): the two calls below only run after shinyApp() returns when
# sourced interactively; deployApp() needs rsconnect attached, and
# "172.0.0.1" is presumably meant to be 127.0.0.1 -- confirm.
deployApp()
runApp(host = "172.0.0.1", port = 7200)

#############################################################################

# --- Draft 2: 2005-2015 range, plot built inline with qplot() ---
library(shiny)
library(ggplot2)
library(reshape)
library(scales)
library(reshape2)
source("queplot.R")

ui <- fluidPage(
  titlePanel("Plots"),
  sidebarPanel(
    h3("Constraints (Between 2005 and 2015)"),
    dateInput("date1", "Start Date", value = "2007-03-01"),
    dateInput("date2", "End Date", value = "2007-03-01"),
    h3("Parameters:"),
    checkboxInput('valtempr', 'Temperature', FALSE),
    checkboxInput('valrh', 'Humidity', FALSE),
    checkboxInput('valws', 'Wind Speed', FALSE),
    checkboxInput('valap', 'Air Pressure', FALSE),
    actionButton("submit","Submit")
  ),
  mainPanel(
    h2("Your Plot\n"),
    plotOutput("view"),
    verbatimTextOutput("d")
  )
)

#server = function(input, output) {}
server <- function(input, output) {
  observeEvent(input$submit, {
    output$d = renderText({
      paste(input$date1, " to ", input$date2)
    })
    #subse = queplot(input$date1, input$date2)
    # Filter mytable rows to the selected date range (obstime is d/m/Y H:M).
    subs = subset(mytable, as.character(as.Date(mytable$obstime, "%d/%m/%Y")) >= input$date1 & as.character(as.Date(mytable$obstime, "%d/%m/%Y")) <= input$date2)
    subs$time_only = strptime(subs$obstime, "%d/%m/%Y %H:%M")
    format(subs$time_only, "%H:%M:%S")
    output$view = renderPlot({
      qplot(subs$time_only, subs$tempr, geom = "line", xlab = "Time", ylab = "Temperature", main = "Temperature vs Time")
    })
  })
}

shinyApp(ui, server)

#########################################################################

# Loose snippets kept for reference; not runnable outside a server/ui
# context (output/input are undefined at top level).
output$view = renderPlot({
  queplot(input$date1, input$date2)
})

radioButtons("choice", "Type of data?", c("Daily"="day", "Monthly"="month", "yearly"="year"))
conditionalPanel(condition = "choice==day", numericInput("date","Date",1))

#########################################################################

# --- Draft 3: choose between fixed intervals and a user-defined range ---
ui = fluidPage(
  titlePanel("Plots"),
  sidebarPanel(
    radioButtons("choice", "Choice", c("Fixed Intervals"="ch1", "User Defined Range"="ch2"), selected = NULL),
    conditionalPanel(
      condition = "input.choice == ch2",
      h3("Constraints (Between 2005 and 2015)"),
      dateInput("date1", "Start Date", value = "2007-03-01"),
      dateInput("date2", "End Date", value = "2007-03-01")
    ),
    conditionalPanel(
      condition = "input.choice == ch1",
      radioButtons("choice1", "Type of data?", c("Daily"="day", "Monthly"="month", "yearly"="year"), selected = NULL),
      conditionalPanel(condition = "input.choice1 == year",
        numericInput("yea", "Enter Year:", 2007,2015,2007)
      ),
      conditionalPanel(condition = "input.choice1 == month",
        numericInput("yea", "Enter Year:", 2007,2015,2007),
        numericInput("mon", "Enter Month:", 01,12,01)
      ),
      conditionalPanel(condition = "input.choice1 == day",
        numericInput("yea", "Enter Year:", 2007,2015,2007),
        numericInput("mon", "Enter Month:", 01,12,01),
        numericInput("dat", "Enter Date:", 01,31,01)
      )
    ),
    h3("Parameters:"),
    checkboxInput('valtempr', 'Temperature', FALSE),
    checkboxInput('valrh', 'Humidity', FALSE),
    checkboxInput('valws', 'Wind Speed', FALSE),
    checkboxInput('valap', 'Air Pressure', FALSE),
    actionButton("submit","Submit")
  ),
  mainPanel(
    h2("Time Series Plot\n"),
    plotOutput("view")
  )
)

server = function(input, output) {
  observeEvent(input$submit, {
    output$view = renderPlot({
      # NOTE(review): ch1/ch2 are unquoted here, so these compare against
      # (undefined) R variables rather than the strings "ch1"/"ch2" --
      # confirm intended behavior before reusing this draft.
      if(input$choice == ch2) {
        queplotuser(input$date1, input$date2, input$valtempr, input$valrh, input$valws, input$valap)
      }
      if(input$choice == ch1) {
        queplotfix(input$valtempr, input$valrh, input$valws, input$valap, input$choice1, input$yea, input$mon, input$dat)
      }
    })
  })
}
f932455f2c024755e56e79264ed1f6511d808fde
1845007bf50e3cfa8a499fc7ab2c445e8b95a742
/R/p1p2.c.h.R
62bb410386e0c5f77e32e25d9855e8c881236672
[]
no_license
cran/adaptTest
ebc77eee366622735cd385c9fae0ccff8598f507
c6d8ce1c12ac7437024b77f603df703da4807260
refs/heads/master
2022-04-30T22:27:38.072271
2022-03-04T11:00:06
2022-03-04T11:00:06
17,677,105
0
0
null
null
null
null
UTF-8
R
false
false
115
r
p1p2.c.h.R
## Combination-function component: returns p2 when both p-values lie in the
## unit interval [0, 1] (membership tested with the package comparator
## `le`), and NA otherwise.
`p1p2.c.h` <- function(p1, p2) {
  inside_unit_square <- le(0, p1) && le(p1, 1) && le(0, p2) && le(p2, 1)
  ifelse(!inside_unit_square, NA, p2)
}
7f3c10cd823223209c574a977edd42395e710290
fd55181a8fea8504b5d84b481970c1e75e0e28ae
/init.R
e7670ef6856c5770d015e034f3b94a52bed514f4
[]
no_license
andreypeshev/SimBsc
12fbb0c8ee8b1b834c9a2579e440ffca67955443
e018c259a8dd08aff63b09a944193ce06340422d
refs/heads/main
2023-03-15T06:57:53.316506
2021-03-14T11:04:28
2021-03-14T11:04:28
347,609,889
0
0
null
2021-03-14T10:49:29
2021-03-14T10:49:28
null
UTF-8
R
false
false
570
r
init.R
# Simulation configuration ----
parameters <- list()

# Variable settings ----
M <- 10                                        # number of Monte Carlo iterations
nvec <- seq(from = 100, to = 10000, by = 100)  # sample sizes to simulate
r.squared <- c(0.1, 0.33, 0.5, 0.66, 0.9)      # target R-squared values
# Coefficients for the predictor variables; each entry also fixes the number
# of predictors (length of the vector minus one for the intercept).
coeff_list <- list(
  c(1, 0.5),
  c(1, 0.5, 3.3),
  c(1, 0.5, 3.3, 2),
  c(1, 0.5, 3.3, 2, 1.5)
)

# Fixed settings ----
parameters$covariance <- 0.1                   # covariance between predictors
de7ae864443ea5039543e3b3301100bc47f6105e
893f643927525c9eb1de6fa9719463252c9cfbba
/tests/testthat.R
3164cb034670cfc396f8ee84fab56ac0efa53ad5
[]
no_license
cran/quantspec
89b9b3f360686f877fcfae55c429a1641396fe42
755dad94c2dea5830625769c04c35c7b0aceba2a
refs/heads/master
2021-01-24T16:09:48.007048
2020-07-14T21:50:02
2020-07-14T21:50:02
17,698,910
0
0
null
null
null
null
UTF-8
R
false
false
66
r
testthat.R
# Package test entry point: runs the full testthat suite shipped in
# tests/testthat/ for the quantspec package (invoked by R CMD check).
library(testthat)
library(quantspec)
test_check("quantspec")
dc41f7fe3a06a388378880f1e7de16a84ac11525
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/highcharter/examples/hc_add_series_scatter.Rd.R
678c72ce1037343c31698cbb933e959fac41204f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
914
r
hc_add_series_scatter.Rd.R
# Auto-extracted example for highcharter::hc_add_series_scatter (shortcut for
# building scatter plots).  The example body is wrapped in a "Not run" block
# (##D comment lines), so only the library() call executes when this file is
# sourced.
library(highcharter)


### Name: hc_add_series_scatter
### Title: Shortcut for create scatter plots
### Aliases: hc_add_series_scatter

### ** Examples

## Not run:
##D hc <- highchart()
##D
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg)
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg, mtcars$drat)
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg, mtcars$drat, mtcars$am)
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg, mtcars$drat, mtcars$qsec)
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg, mtcars$drat, mtcars$qsec, rownames(mtcars))
##D
##D # Add named attributes to data (attributes length needs to match number of rows)
##D hc_add_series_scatter(hc, mtcars$wt, mtcars$mpg, mtcars$drat, mtcars$qsec,
##D   name = rownames(mtcars), gear = mtcars$gear) %>%
##D   hc_tooltip(pointFormat = "<b>{point.name}</b><br/>Gear: {point.gear}")
##D
## End(Not run)
f49ddb2e93822990e3d56f11f4790fee2eb40fc0
bfdaf30a13fa2a7e32c76d416992869a251a4814
/WRF_0708_composite_impacts.R
3b661c43f130295fd53bfafc436fbd789869ea79
[]
no_license
apepler/Code-R
f214b76bd683ddee7c68eeefe63cfefe95aad524
a6f51ce604af52495d4b356185bf260131979c84
refs/heads/master
2020-05-21T16:45:09.502219
2017-04-19T03:28:13
2017-04-19T03:28:13
60,828,413
0
0
null
null
null
null
UTF-8
R
false
false
7,287
r
WRF_0708_composite_impacts.R
# Part 1: attach ECL (East Coast Low) rain/wind impact statistics, computed
# within 500 km and 250 km of each low centre, to the per-fix and per-event
# CSVs for each WRF experiment, then write "*_impactsC.csv" files.
rm(list=ls())
setwd("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_wrf_2007_all/typing/")
library(ggplot2)
library(reshape2)
library(abind)
library(RNetCDF)

# 101x101 grid of distances (km) from the low centre; dist2/dist3 are
# NaN-outside masks for the 500 km and 250 km radii respectively.
lat=seq(-500,500,10)
lon=seq(-500,500,10)
library(sp)
dist=matrix(0,101,101)
for(i in 1:101) for(j in 1:101) dist[i,j]=sqrt(lat[i]^2 + lon[j]^2)
dist2<-dist3<-matrix(NaN,101,101)
dist2[dist<=500]=1
dist3[dist<=250]=1

dom="d02"
cat="rad2_p100"
# Output directories for the 15 WRF experiments, parallel to wnames below.
wdirs=c("/srv/ccrc/data36/z3478332/WRF/output/ERAI_R1_nudging_default_2007/out/",
        "/srv/ccrc/data36/z3478332/WRF/output/ERAI_R2_nudging_default_2007/out/",
        "/srv/ccrc/data36/z3478332/WRF/output/ERAI_R3_nudging_default_2007/out/",
        "/srv/ccrc/data36/z3478332/WRF/output/ERAI_R1_nudging_default_2007_notopo/out/",
        "/srv/ccrc/data36/z3478332/WRF/output/ERAI_R2_nudging_default_2007_notopo/out/",
        "/srv/ccrc/data36/z3478332/WRF/output/ERAI_R3_nudging_default_2007_notopo/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R1_nudging_default_2007_BRAN/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R2_nudging_default_2007_BRAN/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R3_nudging_default_2007_BRAN/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R1_nudging_default_2007_BRAN_noeac/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R2_nudging_default_2007_BRAN_noeac/out/",
        "/srv/ccrc/data37/z3478332/WRF/output/ERAI_R3_nudging_default_2007_BRAN_noeac/out/",
        "/srv/ccrc/data45/z3478332/WRF/output/ERAI_R1_nudging_default_2007_BRAN_2eac/out/",
        "/srv/ccrc/data45/z3478332/WRF/output/ERAI_R2_nudging_default_2007_BRAN_2eac/out/",
        "/srv/ccrc/data45/z3478332/WRF/output/ERAI_R3_nudging_default_2007_BRAN_2eac/out/")
wnames=c("R1","R2","R3",
         "R1_notopo","R2_notopo","R3_notopo",
         "R1_BRAN","R2_BRAN","R3_BRAN",
         "R1_BRAN_noeac","R2_BRAN_noeac","R3_BRAN_noeac",
         "R1_BRAN_2eac","R2_BRAN_2eac","R3_BRAN_2eac")

# NOTE: only experiment 1 is processed here; widen to 1:length(wnames) to
# regenerate the impact files for every experiment.
for(w in 1)
{
  data=read.csv(paste("ECLfixes_",dom,"_0708_",wnames[w],"_",cat,"_typing.csv",sep=""),stringsAsFactors=F)
  events=read.csv(paste("ECLevents_",dom,"_0708_",wnames[w],"_",cat,"_typing.csv",sep=""),stringsAsFactors=F)
  data=data[,-1]      # drop the row-number column written by write.csv
  events=events[,-1]

  filelistC=paste(wdirs[w],"ECLrain_d02_0708_",cat,"_centred.nc",sep="")
  filelistW=paste(wdirs[w],"ECLwind_d02_0708_",cat,".nc",sep="")

  # Low-centred rain: mask implausible values, then mean/max within 500 km
  # and (after further masking) within 250 km, per fix.
  a=open.nc(filelistC)
  tmp=var.get.nc(a,"ECLrain")
  tmp[tmp>=500]=NaN
  close.nc(a)
  a=dim(tmp)
  for(x in 1:a[3]) tmp[,,x]=tmp[,,x]*dist2
  data$MeanRain500=apply(tmp,3,mean,na.rm=T)
  data$MaxRain500=apply(tmp,3,max,na.rm=T)
  for(x in 1:a[3]) tmp[,,x]=tmp[,,x]*dist3
  data$MeanRain250=apply(tmp,3,mean,na.rm=T)
  data$MaxRain250=apply(tmp,3,max,na.rm=T)

  # Low-centred 10 m wind speed, same two radii.
  a=open.nc(filelistW)
  tmp=var.get.nc(a,"ECL_WS10")
  close.nc(a)
  a=dim(tmp)
  for(x in 1:a[3]) tmp[,,x]=tmp[,,x]*dist2
  data$MeanWind500=apply(tmp,3,mean,na.rm=T)
  data$MaxWind500=apply(tmp,3,max,na.rm=T)
  for(x in 1:a[3]) tmp[,,x]=tmp[,,x]*dist3
  data$MeanWind250=apply(tmp,3,mean,na.rm=T)
  data$MaxWind250=apply(tmp,3,max,na.rm=T)

  # Per-event summaries over the fixes flagged Location==1.
  events$MaxPointWind500<-events$MaxMeanWind500<-events$MaxPointRain500<-events$MaxMeanRain500<-events$TotalRain500<-0
  events$MaxPointWind250<-events$MaxMeanWind250<-events$MaxPointRain250<-events$MaxMeanRain250<-events$TotalRain250<-0
  for(k in 1:length(events$ID))
  {
    I=which(data$ID==events$ID[k] & data$Location==1)
    events$TotalRain500[k]=sum(data$MeanRain500[I],na.rm=T)
    events$MaxMeanRain500[k]=max(data$MeanRain500[I],na.rm=T)
    events$MaxPointRain500[k]=max(data$MaxRain500[I],na.rm=T)
    events$MaxMeanWind500[k]=max(data$MeanWind500[I],na.rm=T)
    events$MaxPointWind500[k]=max(data$MaxWind500[I],na.rm=T)
    events$TotalRain250[k]=sum(data$MeanRain250[I],na.rm=T)
    events$MaxMeanRain250[k]=max(data$MeanRain250[I],na.rm=T)
    events$MaxPointRain250[k]=max(data$MaxRain250[I],na.rm=T)
    events$MaxMeanWind250[k]=max(data$MeanWind250[I],na.rm=T)
    events$MaxPointWind250[k]=max(data$MaxWind250[I],na.rm=T)
  }
  write.csv(data,paste("ECLfixes_",dom,"_0708_",wnames[w],"_",cat,"_typing_impactsC.csv",sep=""))
  write.csv(events,paste("ECLevents_",dom,"_0708_",wnames[w],"_",cat,"_typing_impactsC.csv",sep=""))
}

####### Part 2: Now, analyse! (fresh workspace; reads the *_impacts.csv files)
rm(list=ls())
setwd("/srv/ccrc/data34/z3478332/ECLtracks/outputUM_wrf_2007_all/typing/")
library(ggplot2)
library(reshape2)
library(abind)
library(RNetCDF)

dom="d01"
cat="rad2_p100"
# FIX: the 13th entry previously read "R2_BRAN_2eac" twice, so the
# R2_BRAN_2eac files were loaded into two slots and R1_BRAN_2eac was never
# analysed.  Corrected to match the experiment list in Part 1.
wnames=c("R1","R2","R3",
         "R1_notopo","R2_notopo","R3_notopo",
         "R1_BRAN","R2_BRAN","R3_BRAN",
         "R1_BRAN_noeac","R2_BRAN_noeac","R3_BRAN_noeac",
         "R1_BRAN_2eac","R2_BRAN_2eac","R3_BRAN_2eac")

events<-fixes<-list()
for(w in 1:length(wnames))
{
  fixes[[w]]=read.csv(paste("ECLfixes_",dom,"_0708_",wnames[w],"_",cat,"_typing_impacts.csv",sep=""),stringsAsFactors=F)
  # Date2 = POSIXct timestamp built from the YYYYMMDD date plus the hour.
  fixes[[w]]$Date2=as.POSIXct(paste(as.character(fixes[[w]]$Date),substr(fixes[[w]]$Time,1,2),sep=""),format="%Y%m%d%H",tz="GMT")
  events[[w]]=read.csv(paste("ECLevents_",dom,"_0708_",wnames[w],"_",cat,"_typing_impacts.csv",sep=""))
}

### Average of each statistic by dataset
statave=matrix(0,15,9)
rownames(statave)=wnames
colnames(statave)=c("Count","Mean(meanR)","Mean(maxR)","Mean(meanW)","Mean(maxW)","MeanRain>=6","MaxRain>=50","MeanWind>=50km/h","MaxWind>=80km/h")
impcol=17:20 ## For 500, 22:25 for 250
xthresh=c(6,50,13.9,22.2)   # thresholds matching the four impact columns
for(w in 1:length(wnames))
{
  tmp=events[[w]]
  statave[w,1]=length(tmp[,1])
  statave[w,2:5]=apply(tmp[,impcol],2,mean,na.rm=T)
  for(x in 1:4) statave[w,x+5]=length(which(tmp[,impcol[x]]>=xthresh[x]))
}

## What about by type, across the different versions
#types=c("EC","SC","TC","Mixed")
types=c("ET","IT","SSL","CL")
statave=array(0,c(15,4,9))
dimnames(statave)[[1]]=wnames
dimnames(statave)[[2]]=types
dimnames(statave)[[3]]=c("Count","Mean(meanR)","Mean(maxR)","Mean(meanW)","Mean(maxW)","MeanRain>=6","MaxRain>=50","MeanWind>=50km/h","MaxWind>=80km/h")
impcol=17:20 ## For 500, 22:25 for 250
xthresh=c(6,50,13.9,22.2)
for(w in 1:length(wnames))
  for(t in 1:length(types))
  {
    I=which(events[[w]]$TypeSB==types[t])
    tmp=events[[w]][I,]
    statave[w,t,1]=length(tmp[,1])
    statave[w,t,2:5]=apply(tmp[,impcol],2,mean,na.rm=T)
    for(x in 1:4) statave[w,t,x+5]=length(which(tmp[,impcol[x]]>=xthresh[x]))
  }

### Intensity/lifecycle statistics per dataset
statave2=matrix(0,15,10)
colnames(statave2)<-c("Count","Length2","MSLP2","CV2","CV>2","Bomb","Deepening rate","Formed","Entered","Intensified")
for(w in 1:length(wnames))
{
  # MaxNDR = maximum normalised deepening rate over the in-region fixes.
  events[[w]]$MaxNDR=0
  for(i in 1:length(events[[w]]$ID))
  {
    I=which(fixes[[w]]$ID==events[[w]]$ID[i] & fixes[[w]]$Location==1 & !is.na(fixes[[w]]$NDR))
    if(length(I)>0) events[[w]]$MaxNDR[i]=max(fixes[[w]]$NDR[I],na.rm=T)
  }
  tmp=events[[w]]
  statave2[w,1]=length(tmp[,1])
  statave2[w,2:4]=apply(tmp[,8:10],2,mean,na.rm=T)
  statave2[w,5]=length(which(tmp$CV2>=2))
  statave2[w,6]=sum(tmp$Bomb)
  statave2[w,7]=mean(tmp$MaxNDR,na.rm=T)
  for(i in 1:3) statave2[w,7+i]=length(which(tmp$EnteredFormed==i))
}
96bac72f449ff921069948d74d83d22e09dc7514
9eb874c69af07d94fc20a53ab733a27c35c29fb3
/R/12__Hierarchical_clustering__Bulk.R
fbbb7a0cf7efd34053fd78bfa1842cddf910e7e4
[ "MIT" ]
permissive
ms-balzer/ZSF1_sGC
e7d50823fb542bef9f4985f6ab204dcd2593c43d
4f12d8927f8d6a67c720f27e39c8017c2c5a1e2c
refs/heads/main
2023-05-26T13:37:28.768985
2023-05-20T12:40:56
2023-05-20T12:40:56
521,015,301
0
0
null
null
null
null
UTF-8
R
false
false
2,302
r
12__Hierarchical_clustering__Bulk.R
# Hierarchical clustering of human bulk tubule RNA-seq samples on the genes
# of the rat sGC co-expression WGCNA modules, lifted over to human symbols.
library(ggplot2)
library(stringr)
library(dplyr)
library(broom)
library(data.table)
library(cluster)
library(factoextra)
library(tidyverse)
library(dendextend)
library(stringr)
library(ggpubr)
# FIX: useMart()/getLDS() below come from biomaRt, which was never attached,
# so Step 2 failed with "could not find function" when run as a script.
library(biomaRt)

set.seed(123)

#======== STEP 1: load human data ========
meta <- read.csv("/data/Biobank-Tubule/tubule_metadata.csv", na.strings='.')
dim(meta) #991 43
# NOTE(review): the leading "/" before "~" looks wrong ("/~/..." is not a
# home-relative path) — confirm against the intended data location.
dat <- read.csv("/~/data/Biobank-Tubule/HK.Biobank.Tubule.TPM.csv")
dim(dat) #44328 991

#======== STEP 2: get composite sGC co-expression WGCNA score genes and lift over from rat to human ========
geneInfo = read.csv('~/WGCNA/geneInfoSigned_UCI_all_by_clusters2.csv', sep = ",", header = TRUE)
# Keep only genes assigned to the five sGC-associated module colours.
geneInfo <- subset(geneInfo, Initially.Assigned.Module.Color%in%c("red", "green", "black","blue","yellow"))
genelist <- geneInfo$X
length(unique(genelist)) #2198
ratgenes <- as.data.frame(unique(genelist))
colnames(ratgenes) <- "gene"
# Rat -> human orthologue mapping via Ensembl BioMart (network access).
human = useMart("ensembl", dataset = "hsapiens_gene_ensembl")
rat = useMart("ensembl", dataset = "rnorvegicus_gene_ensembl")
genesV2 = getLDS(attributes = c("rgd_symbol"), filters = "rgd_symbol", values = ratgenes$gene , mart = rat, attributesL = c("hgnc_symbol"), martL = human, uniqueRows=T)
genesV2
humangenes <- unique(genesV2$HGNC.symbol)
length(humangenes) #1901

#======== STEP 3: do cluster analysis ========
#subset TPM matrix on humangenes
dat <- dat[which(rownames(dat)%in% humangenes),]
df <- as.data.frame(dat)
dim(df) #1755 991
df2 <- t(df)                      # samples in rows, genes in columns
df_sc <- as.data.frame(scale(df2))

#create distance matrix and dendrogram
dist_mat <- dist(df_sc, method ="euclidean")
# FIX: method "ward" is the pre-R-3.1.0 name; hclust() silently remaps it to
# "ward.D" with a message.  Name it explicitly — the result is identical.
hclust_avg <- hclust(dist_mat, method="ward.D")
pdf('dendrogram.pdf')
plot(hclust_avg)
dev.off()

#determine optimal number of clusters and cut dendrogram
# NOTE(review): silhouette scan runs on the unscaled df2 while the
# clustering above used the scaled df_sc — confirm this is intentional.
pdf('optimal_n_of_clusters.pdf')
fviz_nbclust(df2, kmeans, method = "silhouette")
dev.off()
k=2 #choose k based on plot above
cut_avg <- cutree(hclust_avg, k=k)
pdf(paste0('dendrogram_with_clustering_k',k,'_color.pdf'), width=6, height=4)
avg_dend_obj <- as.dendrogram(hclust_avg)
avg_col_dend <- color_branches(avg_dend_obj, k = k, col = c("#990000", "gray"))
plot(avg_col_dend)
dev.off()

#================= EXIT =================
q(save = "no", status = 0, runLast = TRUE)
65b57fe7cb20cd1976572cd0a7e98def9e95d83c
7e8aedcf677232937597d109b13e8dabe061672f
/integerextent.R
607b014c7e756de720f83149f2e4a7b95b6803ea
[]
no_license
rral0/rgrass
4a94abdb72720cc7d14df113e0ad22043994ed19
bf0bbf650fc2628683617bce1032c96dc375b9ef
refs/heads/master
2022-01-11T12:46:21.154917
2019-05-10T00:23:16
2019-05-10T00:23:16
null
0
0
null
null
null
null
UTF-8
R
false
false
992
r
integerextent.R
intext <- function(e, r, type = c('inner', 'outer')){
  # Snap the corners of an extent object to integer multiples of the
  # (rounded) resolution.
  #   e    Extent object (raster/sp)
  #   r    resolution; rounded to the nearest whole number before snapping
  #   type 'inner' moves each edge towards the centre, 'outer' away from it
  # Returns a new extent whose xmin/xmax/ymin/ymax are multiples of round(r).
  #
  # FIX: resolve `type` with match.arg().  Previously the unevaluated default
  # c('inner', 'outer') reached `if (type == 'inner')`, which is an error for
  # a length-2 condition in R >= 4.2 (and a warning before that); the default
  # remains 'inner', matching the old first-element behaviour.
  type <- match.arg(type)

  # Largest multiple of the rounded resolution that is <= floor(coord).
  xyfloor <- function(coord, res = r){
    m <- as.integer(floor(coord))
    n <- as.integer(round(res, 0))
    i <- 0
    for (i in 0:n){
      if((m - i) %% n == 0){
        break
      }
    }
    mu <- m - i
    return(mu)
  }

  # Smallest multiple of the rounded resolution that is >= ceiling(coord).
  xyceiling <- function(coord, res = r){
    m <- as.integer(ceiling(coord))
    n <- as.integer(round(res, 0))
    i <- 0
    for (i in 0:n){
      if((m + i) %% n == 0){
        break
      }
    }
    mu <- m + i
    return(mu)
  }

  if(type == 'inner'){
    # Inner extent: round the minima up and the maxima down.
    outextent <- extent(
      xyceiling(slot(e, 'xmin')),
      xyfloor(slot(e, 'xmax')),
      xyceiling(slot(e, 'ymin')),
      xyfloor(slot(e, 'ymax'))
    )
  } else {
    # Outer extent: round the minima down and the maxima up.
    outextent <- extent(
      xyfloor(slot(e, 'xmin')),
      xyceiling(slot(e, 'xmax')),
      xyfloor(slot(e, 'ymin')),
      xyceiling(slot(e, 'ymax'))
    )
  }
  return(outextent)
}
c66e046a692a6e71ccc4647e86384aa1f5909ee9
5bac2f75038a4737e362763e8304e5b5b27bda89
/plot3.R
5cb4c5fa56e3f82d1b9818e720d977c21d00afeb
[]
no_license
mingjiezhao/Exploratory-data-analysis
60fe56fa0bd1341fc75f3e61121edf63586d27f9
239ea6b610faae270fc9770bea485790bb13fcb3
refs/heads/master
2021-01-16T23:23:17.199803
2014-05-11T19:36:24
2014-05-11T19:36:24
null
0
0
null
null
null
null
UTF-8
R
false
false
932
r
plot3.R
# Plot 3 of the Exploratory Data Analysis course assignment: line plot of the
# three sub-metering series for 2007-02-01 and 2007-02-02, saved to plot3.png.
# Selectively read tables from the txt file, with dates ranging from
# 2007-02-01 to 2007-02-02 (dates are stored as d/m/Y in the raw file).
library ("sqldf")
file = read.csv.sql(file = "household_power_consumption.txt", sql= "select * from file where Date = '1/2/2007' OR Date = '2/2/2007'", sep=";")
# Sanity checks on the filtered rows (results printed, not stored): any
# remaining "?" placeholders or NA values in the sub-metering columns.
grep("\\?",file[,c("Sub_metering_1","Sub_metering_2","Sub_metering_3")])
sum(is.na(file[,c("Sub_metering_1","Sub_metering_2","Sub_metering_3")]))
# Combine Date (col 1) and Time (col 2) into a POSIXlt timestamp vector.
date = file[,1]
time = file[,2]
datetime = paste(date,time)
datetime = strptime(datetime,"%d/%m/%Y %H:%M:%S")
# Draw the three series onto a 480x480 PNG device and close it.
png(file="plot3.png",width=480,height=480)
par(lab = c(2,3,7),bg="transparent")
plot(datetime,file$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
lines(datetime,file$Sub_metering_1,col="Black")
lines(datetime,file$Sub_metering_2,col="Red")
lines(datetime,file$Sub_metering_3,col="Blue")
legend("topright",lwd=2,col=c("Black","Red","Blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
b5a0011959508cfb7e7ea997ef681cd2e628ab8c
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Omisc/examples/zScoreData.Rd.R
e4aa634c6783c0400d4f824672763255f0e8011c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
152
r
zScoreData.Rd.R
library(Omisc) ### Name: zScoreData ### Title: centerData ### Aliases: zScoreData ### ** Examples X<-data.frame(X=c(1:4),Y=c(6:9)) zScoreData(X)
3c6c6b79068d5e122853833e10e992c05a1bcf9f
ab8546534ae13d36906286c82ac4324f2077a7e9
/code/ind_mh.R
f1cf3527854b70ce86bfc26f053bee24a8945907
[]
no_license
kaijennissen/mcmc
8301fd5021e07bf45fa1db6cd8c228a7b068e1d7
7ab5da6f312ab62068b6e4c2dcc708784d0e366e
refs/heads/master
2022-08-01T17:05:22.188258
2020-03-06T21:10:32
2020-03-06T21:10:32
212,301,105
0
0
null
2022-06-22T01:22:21
2019-10-02T09:29:25
Jupyter Notebook
UTF-8
R
false
false
3,400
r
ind_mh.R
# Metropolis-Hastings demo: estimate a Poisson rate beta where
# mean(y[i]) = beta * x[i], sampling on the log(beta) scale with three
# interchangeable proposal schemes (random-walk, independence, "vanilla"
# location-dependent-scale).
# We'll assume estimation of a Poisson mean as a function of x
x <- runif(100)
y <- rpois(100,5*x) # beta = 5 where mean(y[i]) = beta*x[i]

# Prior distribution on log(beta): t(5) with mean 2
# (Very spread out on original scale; median = 7.4, roughly)
log_prior <- function(log_beta) dt(log_beta-2, 5, log=TRUE)

# Log likelihood
log_lik <- function(log_beta, y, x) sum(dpois(y, exp(log_beta)*x, log=TRUE))

# Random Walk Metropolis-Hastings
# Proposal is centered at the current value of the parameter
rw_proposal <- function(current) rnorm(1, current, 0.25)
rw_p_proposal_given_current <- function(proposal, current) dnorm(proposal, current, 0.25, log=TRUE)
rw_p_current_given_proposal <- function(current, proposal) dnorm(current, proposal, 0.25, log=TRUE)

rw_alpha <- function(proposal, current) {
  # Due to the structure of the rw proposal distribution, the rw_p_proposal_given_current and
  # rw_p_current_given_proposal terms cancel out, so we don't need to include them - although
  # logically they are still there: p(prop|curr) = p(curr|prop) for all curr, prop
  exp(log_lik(proposal, y, x) + log_prior(proposal) - log_lik(current, y, x) - log_prior(current))
}

# Independent Metropolis-Hastings
# Note: the proposal is independent of the current value (hence the name), but I maintain the
# parameterization of the functions anyway. The proposal is not ignorable any more
# when calculating the acceptance probability, as p(curr|prop) != p(prop|curr) in general.
ind_proposal <- function(current) rnorm(1, 2, 1)
ind_p_proposal_given_current <- function(proposal, current) dnorm(proposal, 2, 1, log=TRUE)
ind_p_current_given_proposal <- function(current, proposal) dnorm(current, 2, 1, log=TRUE)

ind_alpha <- function(proposal, current) {
  exp(log_lik(proposal, y, x) + log_prior(proposal) + ind_p_current_given_proposal(current, proposal)
      - log_lik(current, y, x) - log_prior(current) - ind_p_proposal_given_current(proposal, current))
}

# Vanilla Metropolis-Hastings - the independence sampler would do here, but I'll add something
# else for the proposal distribution; a Normal(current, 0.1+abs(current)/5) - symmetric but with a different
# scale depending upon location, so can't ignore the proposal distribution when calculating alpha as
# p(prop|curr) != p(curr|prop) in general
van_proposal <- function(current) rnorm(1, current, 0.1+abs(current)/5)
van_p_proposal_given_current <- function(proposal, current) dnorm(proposal, current, 0.1+abs(current)/5, log=TRUE)
van_p_current_given_proposal <- function(current, proposal) dnorm(current, proposal, 0.1+abs(proposal)/5, log=TRUE)

van_alpha <- function(proposal, current) {
  # FIX: this previously plugged in the independence-sampler densities
  # (ind_p_*), contradicting the comment above — the vanilla proposal's
  # scale depends on its centre, so its own forward/reverse densities
  # (van_p_*) must appear in the Hastings ratio.
  exp(log_lik(proposal, y, x) + log_prior(proposal) + van_p_current_given_proposal(current, proposal)
      - log_lik(current, y, x) - log_prior(current) - van_p_proposal_given_current(proposal, current))
}

# Generate the chain
values <- rep(0, 10000)
u <- runif(length(values))
naccept <- 0
current <- 1 # Initial value
propfunc <- van_proposal # Substitute ind_proposal or rw_proposal here
alphafunc <- van_alpha # Substitute ind_alpha or rw_alpha here
for (i in 1:length(values)) {
  proposal <- propfunc(current)
  alpha <- alphafunc(proposal, current)
  if (u[i] < alpha) {
    values[i] <- exp(proposal)
    current <- proposal
    naccept <- naccept + 1
  } else {
    values[i] <- exp(current)
  }
}
naccept / length(values)   # acceptance rate
summary(values)            # posterior summary of beta = exp(log_beta)
404713a1dc32037318ac582d29d4ff32096e8ed3
863aa7e71911423a9096c82a03ef755d1cf34654
/man/dendrogram_plot.Rd
75154b29bedeb378026bcead8f44d30dde51a30b
[]
no_license
BioSystemsUM/specmine
8bd2d2b0ee1b1db9133251b80724966a5ee71040
13b5cbb73989e1f84e726dab90ff4ff34fed68df
refs/heads/master
2023-08-18T05:51:53.650469
2021-09-21T13:35:11
2021-09-21T13:35:11
313,974,923
1
1
null
2021-09-21T13:35:12
2020-11-18T15:22:49
R
UTF-8
R
false
false
1,126
rd
dendrogram_plot.Rd
\name{dendrogram_plot} \alias{dendrogram_plot} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Plot dendrogram } \description{ Plot dendrogram of hierarchical clustering results. } \usage{ dendrogram_plot(dataset, hc.result, column.metadata = 1, labels = NULL, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{dataset}{ list representing the dataset from a metabolomics experiment. } \item{hc.result}{ object of class hclust with the clustering results. } \item{column.metadata}{ string or index indicating what metadata to use to name the leafs. } \item{labels}{ vector with the leaf names (optional). } \item{\dots}{ other parameters for plotting. } } \examples{ \donttest{ ### Example of a dendrogram library(specmine.datasets) data(cachexia) hc.result = hierarchical_clustering(cachexia) dendrogram_plot(cachexia, hc.result) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ clustering } \keyword{ dendrogram } \keyword{ hclust }% __ONLY ONE__ keyword per line
7f5a7d9c348279f6f3bd0f599260b3d513cea892
25ecb2809ae8dc5aa90094c51fcf72537e6a194c
/man/get_property_tags.Rd
561d9e5aedbd795829fb9bdc90817b6c1a42b780
[]
no_license
mrc-ide/specio
fbe01c8374dcca4f00806580ea5d940358d94243
11535cf22294071bb6e6b7ed8409823cac2a7ddc
refs/heads/master
2021-06-11T20:34:21.662465
2021-04-12T16:43:50
2021-04-12T16:43:50
164,118,723
2
0
null
2021-04-12T11:09:39
2019-01-04T14:41:29
R
UTF-8
R
false
true
857
rd
get_property_tags.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dp_tags.R \name{get_property_tags} \alias{get_property_tags} \title{Get tags mapping for a property.} \usage{ get_property_tags(property) } \arguments{ \item{property}{Property to get the tags for.} \item{proj_years}{Years of the projection.} } \value{ List of possible tags used to refer to the property, in order of which they should be used. Also returns the function which should be used to parse the data for the property from the full set of DP data. } \description{ This gets list of tags which may be used to store data for the property within the DP file. Also specifies for each of these properties the function which should be used to extract the data from full set of DP data. Plus any other metadata related to property required for accessing. } \keyword{internal}
8322c254eb3c3b4a953d7e8056dc6e06fb12a685
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/dynamicGraph/examples/setTreeBlocks.Rd.R
aef4028de518ce836f7f33d713b92efd98534f41
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
5,768
r
setTreeBlocks.Rd.R
# Auto-extracted examples for dynamicGraph::setTreeBlocks.  Example 1 builds
# a small nested block tree and inspects its strata / ancestor structure;
# Example 2 builds a pedigree-shaped block tree and draws it with dg(),
# which opens a Tk window when run interactively.
library(dynamicGraph)


### Name: setTreeBlocks
### Title: The block tree
### Aliases: setTreeBlocks Closed Closed<- Parents<- Parents Children<-
###   Children NodeAncestors NodeAncestors<- NodeDescendants
###   NodeDescendants<-
### Keywords: methods dynamic graphs

### ** Examples

# Example 1:

Block.tree <- list(label = "W", Vertices = c("country"),
                   X = list(Vertices = c("race", "sex"),
                            A = list(Vertices = c("hair", "eye"),
                                     horizontal = FALSE),
                            B = list(Vertices = c("education"),
                                     C = list(Vertices = c("age")))))

V.Names <- unlist(Block.tree)
vertices <- returnVertexList(V.Names[grep("Vertices", names(V.Names))])

blocktree <- setTreeBlocks(Block.tree, vertices)

Positions(blockTreeToList(blocktree$BlockTree))
Positions(blocktree$Vertices)

NodeAncestors(blockTreeToList(blocktree$BlockTree))
NodeDescendants(blockTreeToList(blocktree$BlockTree))

vertexStrata <- Strata(blocktree$Vertices)
vertexStrata

vertexNames <- Names(blocktree$Vertices)
names(vertexNames) <- NULL
vertexNames

# Indices of the vertices in blocks:

indicesInBlock <- vector("list", max(vertexStrata))
for (i in seq(along = vertexStrata))
  indicesInBlock[[vertexStrata[i]]] <- append(indicesInBlock[[vertexStrata[i]]], i)
str(indicesInBlock)

# Names of the vertices in blocks:

vertexNamesInblock <- vector("list", max(vertexStrata))
for (i in seq(along = vertexStrata))
  vertexNamesInblock[[vertexStrata[i]]] <- append(vertexNamesInblock[[vertexStrata[i]]],
                                                  vertexNames[i])
str(vertexNamesInblock)

# A useful function, replace "k" (block index k)
# in block "i" by "x[k]", the content "x[k]" of block "k":

f <- function(A, x) {
  result <- vector("list", length(A))
  names(result) <- names(A)
  for (i in seq(along = A))
    if ((length(A[[i]]) > 0) && (A[[i]] != 0))
      for (k in A[[i]]) result[[i]] <- append(result[[i]], x[k])
  return(result)
}

# For each block, names of vertices in ancestor blocks:

vertexAncOfBlock <- f(NodeAncestors(blockTreeToList(blocktree$BlockTree)),
                      vertexNamesInblock)
str(vertexAncOfBlock)
for (i in seq(along = vertexAncOfBlock))
  if (length(vertexAncOfBlock[[i]]) > 0)
    vertexAncOfBlock[[i]] <- unlist(vertexAncOfBlock[[i]])
str(vertexAncOfBlock)

# For each block, names of vertices in descendant blocks:

vertexDesOfBlock <- f(NodeDescendants(blockTreeToList(blocktree$BlockTree)),
                      vertexNamesInblock)
str(vertexDesOfBlock)
for (i in seq(along = vertexDesOfBlock))
  if (length(vertexDesOfBlock[[i]]) > 0)
    vertexDesOfBlock[[i]] <- unlist(vertexDesOfBlock[[i]])
str(vertexDesOfBlock)

# Example 2:

Block.tree <- list(g = 0, G = 54, label = "Pedegree.G",
                   Male.Side = list(g = 0, G = 33,
                     Father = list(g = 0, G = 12,
                       P.G.Father = list(Vertices = c("P.G.Father.1")),
                       P.G.Mother = list(Vertices = c("P.G.Mother.1")),
                       common.children = list(g = 0, label = "Father.1",
                                              Vertices = c("Father.1"))),
                     Mother = list(g = 0, G = 12,
                       M.G.Father = list(Vertices = c("M.G.Father.1")),
                       M.G.Mother = list(Vertices = c("M.G.Mother.1")),
                       common.children = list(g = 0, label = "Mother.1",
                                              Vertices = c("Mother.1"))),
                     common.children = list(g = 2, Vertices = c("Male"))),
                   Female.Side = list(g = 0, G = 12,
                     P.G.Father = list(Vertices = c("P.G.Father.2")),
                     P.G.Mother = list(Vertices = c("P.G.Mother.2")),
                     M.G.Father = list(Vertices = c("M.G.Father.2")),
                     M.G.Mother = list(Vertices = c("M.G.Mother.2")),
                     common.children = list(g = 0, G = 12, label = "Female",
                       Father = list(Vertices = c("Father.2")),
                       Mother = list(Vertices = c("Mother.2")),
                       common.children = list(g = 2, Vertices = c("Female")))),
                   common.children = list(Vertices = c("Marriage"), g = 3,
                                          label = "Children",
                     Son = list(Vertices = c("Son"), g = 3,
                       P.G.Son = list(Vertices = c("P.G.Son"), g = 2),
                       P.G.Dat = list(Vertices = c("P.G.Dat"), g = 1)),
                     Dat = list(Vertices = c("Dat"), g = 2,
                       M.G.Son = list(Vertices = c("M.G.Son")),
                       M.G.Dat = list(Vertices = c("M.G.Dat"))) ) )

v <- unlist(Block.tree)
V.Names <- v[grep("Vertices", names(v))]
rm(v)

# Parent/child edges of the pedigree, as (from, to) name pairs.
FromTo <- matrix(c("P.G.Father.1", "Father.1", "P.G.Father.2", "Father.2",
                   "P.G.Mother.1", "Father.1", "P.G.Mother.2", "Father.2",
                   "M.G.Father.1", "Mother.1", "M.G.Father.2", "Mother.2",
                   "M.G.Mother.1", "Mother.1", "M.G.Mother.2", "Mother.2",
                   "Father.1", "Male", "Father.2", "Female",
                   "Mother.1", "Male", "Mother.2", "Female",
                   "Male", "Marriage", "Female", "Marriage",
                   "Marriage", "Son", "Marriage", "Dat",
                   "Son", "P.G.Son", "Dat", "M.G.Son",
                   "Son", "P.G.Dat", "Dat", "M.G.Dat"), byrow = TRUE, ncol = 2)

From <- match(FromTo[,1], V.Names)
To <- match(FromTo[,2], V.Names)

V.Types <- rep("Discrete", length(V.Names))

Object <- NULL
graph <- new("dg.simple.graph", vertex.names = V.Names, types = V.Types,
             from = From, to = To, block.tree = Block.tree)

W <- dg(graph, control = dg.control(width = 600, height = 600,
                                    drawblocks = TRUE, drawBlockFrame = TRUE,
                                    overlaying = TRUE, title = "Pedegree.G"))
103ef4f26dc009a35cad8ef379dd3ddad1563291
c9e02923d819d62aa4b460a89e26db86bd2f18a9
/ui.R
d0226955d5b87cb9d3054bec93d28ec9d69893f5
[]
no_license
hjermann/DataApplication
410f76c0592fb674d248a308670eb5b340153d83
74156643ac089745e8569c3c0acefa3a9c804bab
refs/heads/master
2021-01-10T10:01:27.025544
2015-11-22T14:45:50
2015-11-22T14:45:50
46,664,405
0
0
null
null
null
null
UTF-8
R
false
false
3,167
r
ui.R
# Shiny UI for the "Daily horoscope" app: the user picks a birthday and a
# prediction date, the server (see server.R) derives a personal "day number"
# from them, and the main panel shows the matching influence description.
library(shiny)
shinyUI(pageWithSidebar(
  headerPanel("Daily horoscope"),
  # Input controls: two dates plus a submit button (form-style updates).
  sidebarPanel(
    dateInput('bd','Please select your birthday',"1971-09-19" ),
    dateInput('pd','Please select date for prediction' ),
    helpText("Note: Just fill the dates with proper values and press the button.",
             "Application will calculate regarding this data your personal day number"),
    submitButton("Update View"),
    a("Click here to get more Help",href="help.html")
  ),
  # Output: the computed number plus the static list of meanings 1-9.
  mainPanel(
    textOutput("dailyHoroscope"),
    h2(textOutput("dailyNumber")),
    helpText("Influence number descriptions"),
    h4(1),
    p("A day for action and purpose. New beginnings are smiled upon on this day. Legal matters, partnerships formed, business deals, and contracts can be promising in this vibratory period. Caveat: Matters are promising only if they are kept simple."),
    h4(2),
    p("A day for balance, weighing decisions, and planning. This period is about harmonizing, and not taking too hasty action. This day may start with disasterous appearances, but will wrap up quite nicely, and will end very well."),
    h4(3),
    p("A day of accomplishment. You will find cooperation where you need it in matters of new projects. Problems are quickly ended. This is a good day for meeting people, travel, and fun."),
    h4(4),
    p("A day for catching up on matters that have fallen to the wayside. Deal with routine issues, and deal with chores accordingly. It may seem boring or redundant, but doing dealing with practical matters will assure order and steadiness in your future."),
    h4(5),
    p("A day to expect the unexpected. This is the wild card of vibrational days. Avoid taking unnecessary risks as they can backfire on you. Travel and new projects may be taken, but they should be taken only if they involve a distinct purpose."),
    h4(6),
    p("A day take take things easy, be comfortable, and rest. Not a day for quick action, excitement or new enterprise. Avoid contact.
This is a day of culmination, gather around friends or family and enjoy the moment."),
    h4(7),
    p("A day of deeper meaning. Meditate, study, research, and investigate artistic subjects. Expand your creativity, and intuitive abilities. This is a psychically powerful day; take advantage of it. You may want to play your hunches on this day."),
    h4(8),
    p("A day sweeping change that bring great results. Now is the time to tackle complex issues and conquer difficulties. Today's numbers indicate a good day for business ventures, promising financial turns, and mergers."),
    h4(9),
    p("A day to announce important plans, and make promising contacts. This day promises achievement in most areas of life. Personal growth, triumph and success in competitions are at hand this day.")
  )
))
33c64e21554eddce41b0546875d298f40e8b4c83
2b9c0c507aa0d049cd13885d0126a8996a2cdf83
/4c Total costs.R
39cda275d5333e59e452425d6b0e961fc1e050bc
[]
no_license
sabwongct/DM-Net-value
55c87353239889c8b6be4bde953f1d37868517a8
7f19cb66bbeded481f4faf5b88748fdc01f169c1
refs/heads/master
2020-03-28T00:00:48.645709
2018-08-24T11:00:46
2018-08-24T11:00:46
null
0
0
null
null
null
null
UTF-8
R
false
false
2,160
r
4c Total costs.R
# Step 4c of the DM net-value pipeline: combine annual medication and visit
# costs into one per-patient-per-year spending table (2006-2014), pro-rating
# the enrolment year, and save the result for the next step.
#(P.2) Spending #1: Convert nominal spending into real spending in local currency units (preferred method is using GDP deflator, and preferred base year is 2010)
#(P.2) Spending #2: All spending for a given individual is included, with no attempt to isolate DM-specific spending

library(data.table)
medcost <- readRDS("4a medcost.rds") # medcost = medication costs
visit <- readRDS("4b visitcost.rds") # all = visit costs
mylist <- readRDS("pt_clinicalvalues.rds") # mylist = clinical values
# costs are already converted to real spending

# create empty dataframe (d) for combining medication and visit costs, where each serial id have 9 separate entries from 2006 to 2014
d <- mylist[[1]][, c("serial_no", "entry.date")]
d <- as.data.table(d)
d <- d[rep(seq_len(nrow(d)), each=9),]   # 9 rows per patient, one per year
d$yr <- rep(2006:2014, length.out = nrow(d))
d$yr <- as.factor(d$yr)   # factor so it matches the yr column in the cost tables

# insert visit and medication costs into the dataframe
d <- merge(d, visit[, c("serial_no", "visit_cost", "yr")], all.x = TRUE, by = c("serial_no", "yr"))
d <- merge(d, medcost[, c("serial_no", "med_cost", "yr")], all.x = TRUE, by = c("serial_no", "yr"))

# change all NA costs to 0 (patient-years with no recorded visits/meds)
d[is.na(d)] <- 0

## spending = visit cost + drug cost
d$spending <- d$med_cost + d$visit_cost
setkeyv(d, c("serial_no", "yr"))

### Adjust for late enrolment into cohort
# (P.2 Spending 3.) Aggregate up to total annual spending, for each year in the study period (using fraction of year enrolled/observed, if individual only in sample part of a year)
# Calculate difference between yr-01-01 and dm date
# Sum up total costs for each individual
# Adjust cost by (original cost / (missed period / total period)), where period = 365
d$yr.begins <- as.Date(paste0(d$yr, "-01-01"))
# adj = days missed in the enrolment year (0 for all full years)
d$adj <- ifelse((d$entry.date>d$yr.begins & d$yr == format(d$entry.date, "%Y")), d$entry.date-d$yr.begins, 0)
d$spend.adj <- d$spending
# Scale the enrolment-year spending up to a full-year equivalent:
# spend.adj = spending / (observed fraction of the year).
spending_adjust <- d[!(d$adj==0), c("spending")]
days_adjust <- d[!(d$adj==0), c("adj")]
d[!(d$adj==0), c("spend.adj")] <- spending_adjust / ((365-days_adjust)/365)

# tidy up dataframe: keep only the key columns for downstream steps
setkey(d, "serial_no")
d <- d[, c("serial_no", "yr", "spend.adj")]
saveRDS(d, file="4c total adjusted spending.rds")
67e608928a4c81f07773cb53b9b2d1323a985e6d
ccdc0d1d0e5ab160c5fa1672c2fccc7c7b13b6d9
/plot4.R
d9142019348e895039b8934e975a90ec0a081fe2
[]
no_license
sjkunnen/ExData_Plotting1
9001999f94db7fef4fbd5a3615acfea7e7c090b0
5fd02a0e36b637efdc80083ec1d48b9fb03fbb4a
refs/heads/master
2021-01-01T16:24:01.421927
2017-07-24T10:50:28
2017-07-24T10:50:28
97,826,105
0
0
null
2017-07-20T11:10:31
2017-07-20T11:10:31
null
UTF-8
R
false
false
2,006
r
plot4.R
## plot4.R: draw a 2x2 panel of household power-consumption plots for the
## two-day window 2007-02-01 .. 2007-02-02 and save it as plot4.png.

## Read the data from the txt file and use the headers from the file
## ("?" marks missing values in this dataset)
data <- read.table("exdata_data_household_power_consumption/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")

## Make a POSIXlt class by combining Date and Time
DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")

## Combine the DateTime vector with the data.frame
data2 <- cbind(DateTime, data)

## Select a subset of the data.frame covering 2007-02-01 and 2007-02-02
## (upper bound is exclusive), dropping rows with missing values
dataset <- na.omit(subset(data2, DateTime >= "2007-02-01 00:00:00" & DateTime < "2007-02-03 00:00:00"))

## make a frame of 2x2 plots to fill with your plots (column-major order)
par(mfcol = c(2, 2))

## Plot the Global active power data in time, including the labels top left
plot4.1 <- plot(dataset$DateTime, dataset$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "", lty = 1)

## Plot the sub_metering data, including the labels at the bottom left
plot4.2 <- plot(dataset$DateTime, dataset$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", lty = 1)
points(dataset$DateTime, dataset$Sub_metering_2, type = "l", col = "red", lty = 1)
points(dataset$DateTime, dataset$Sub_metering_3, type = "l", col = "blue", lty = 1)
legend("topright", lty = 1, bty = "n", col = c("black", "red", "blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), text.width = strwidth(" Sub_metering_1 "))

## Plot the Voltage data in time at the top right
plot4.3 <- plot(dataset$DateTime, dataset$Voltage, type = "l", ylab = "Voltage", xlab = "datetime", lty = 1)

## Plot the Reactive power data in time at the bottom right
plot4.4 <- plot(dataset$DateTime, dataset$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime", lty = 1)

## Copy the on-screen device to a PNG file, 480x480 pixels
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
04c53d19e7ca52dc30f0ef5c937bd048bae205b3
beecb0fa770e19f2fa20e7767705f075ff787c02
/2018March_Ellipse/2018march21_ellipseplot.r
17a53c3128bf04b390188a32dd553b097cadba9f
[]
no_license
email-clm/R_stat_visualization
dfc9e39875efbb87dc55c0c32a484dd984e46bf2
deddc032c4d54b1d5bda17c48cad38906ddd19da
refs/heads/master
2023-06-11T18:15:05.869138
2023-06-02T07:51:12
2023-06-02T07:51:12
134,325,739
0
0
null
null
null
null
UTF-8
R
false
false
10,425
r
2018march21_ellipseplot.r
# March 21, 2018 for Dona'a paper in Arctic C
# Per-site confidence ellipses of growing-season vs September carbon fluxes
# (Figures 3a/3b at the 0.32 level, Figure 3a2 at the 0.68 level).
library(ellipse)
library(ggplot2)
library(car)
setwd("/Users/xxuadmin/BUSINESS/PUBLICATIONS/WorkingOn_Zona_SnowC/20180315_plot")
obs <- read.table("obdata",header=TRUE)
attach(obs)
library(Hmisc)
minor.tick()

# Figure 3a: cumulative Jun-Aug C vs cumulative Sep C, one ellipse per site
# at the 0.32 confidence level. The initial plot() call only sets up axes
# (the point at (-1000,-1000) is off-scale by design).
pdf("Figure3a.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-200,0), ylim=c(0,40),xlab=expression("Cum C Jun-Aug (g C-CO"[2]*" m"^-2*")"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
Atq <- obs[which(obs$Site=='Atq'),]
dataEllipse(Atq$GS,Atq$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-Atq", plot.points = FALSE,add=TRUE,col="darkgreen")
Bes <- obs[which(obs$Site=='Bes'),]
dataEllipse(Bes$GS,Bes$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-Bes", plot.points = FALSE,add=TRUE,col="darkmagenta")
DL1 <- obs[which(obs$Site=='DL1'),]
dataEllipse(DL1$GS,DL1$SEP,levels=c(0.32), center.pch=0, ellipse.label="CA-DL1", plot.points = FALSE,add=TRUE,col="lightcoral")
ZaH <- obs[which(obs$Site=='ZaH'),]
dataEllipse(ZaH$GS,ZaH$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-ZaH", plot.points = FALSE,add=TRUE,col="darkred")
Ict <- obs[which(obs$Site=='Ict'),]
dataEllipse(Ict$GS,Ict$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-Ict", plot.points = FALSE,add=TRUE,col="navy")
Ivo <- obs[which(obs$Site=='Ivo'),]
dataEllipse(Ivo$GS,Ivo$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-Ivo", plot.points = FALSE,add=TRUE,col="purple")
Che <- obs[which(obs$Site=='Che'),]
dataEllipse(Che$GS,Che$SEP,levels=c(0.32), center.pch=0, ellipse.label="US-Che", plot.points = FALSE,add=TRUE,col="blue")
CoK <- obs[which(obs$Site=='CoK'),]
dataEllipse(CoK$GS,CoK$SEP,levels=c(0.32), center.pch=0, ellipse.label="RU-CoK", plot.points = FALSE,add=TRUE,col="black")
Sam <- obs[which(obs$Site=='Sam'),]
dataEllipse(Sam$GS,Sam$SEP,levels=c(0.32), center.pch=0, ellipse.label="RU-Sam", plot.points = FALSE,add=TRUE,col="red")
dev.off()

# Figure 3b: September soil temperature vs cumulative September C release,
# same sites and colours, 0.32 confidence level.
hrt <- read.table("HRTemperature",header=TRUE)
attach(hrt)
pdf("Figure3b.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-2,8), ylim=c(0,40),xlab=expression("Sep Soil Temperature ("^o*"C)"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
Atq <- hrt[which(hrt$Site=='Atq'),]
dataEllipse(Atq$soilT,Atq$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-Atq", plot.points = FALSE,add=TRUE,col="darkgreen")
Bes <- hrt[which(hrt$Site=='Bes'),]
dataEllipse(Bes$soilT,Bes$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-Bes", plot.points = FALSE,add=TRUE,col="darkmagenta")
DL1 <- hrt[which(hrt$Site=='DL1'),]
dataEllipse(DL1$soilT,DL1$sepHR,levels=c(0.32), center.pch=0, ellipse.label="CA-DL1", plot.points = FALSE,add=TRUE,col="lightcoral")
ZaH <- hrt[which(hrt$Site=='ZaH'),]
dataEllipse(ZaH$soilT,ZaH$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-ZaH", plot.points = FALSE,add=TRUE,col="darkred")
Ict <- hrt[which(hrt$Site=='Ict'),]
dataEllipse(Ict$soilT,Ict$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-Ict", plot.points = FALSE,add=TRUE,col="navy")
Ivo <- hrt[which(hrt$Site=='Ivo'),]
dataEllipse(Ivo$soilT,Ivo$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-Ivo", plot.points = FALSE,add=TRUE,col="purple")
Che <- hrt[which(hrt$Site=='Che'),]
dataEllipse(Che$soilT,Che$sepHR,levels=c(0.32), center.pch=0, ellipse.label="US-Che", plot.points = FALSE,add=TRUE,col="blue")
CoK <- hrt[which(hrt$Site=='CoK'),]
dataEllipse(CoK$soilT,CoK$sepHR,levels=c(0.32), center.pch=0, ellipse.label="RU-CoK", plot.points = FALSE,add=TRUE,col="black")
Sam <- hrt[which(hrt$Site=='Sam'),]
dataEllipse(Sam$soilT,Sam$sepHR,levels=c(0.32), center.pch=0, ellipse.label="RU-Sam", plot.points = FALSE,add=TRUE,col="red")
dev.off()

# Second pass: the same two figures redrawn at the 0.68 confidence level
# (1-sigma ellipses). Libraries and data are re-loaded so this section can be
# run on its own.
library(ellipse)
library(ggplot2)
library(car)
setwd("/Users/xxuadmin/BUSINESS/PUBLICATIONS/WorkingOn_Zona_SnowC/20180315_plot")
obs <- read.table("obdata",header=TRUE)
attach(obs)
library(Hmisc)
minor.tick()

# Figure 3a2: same as Figure 3a but with 0.68-level ellipses.
pdf("Figure3a2.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-200,0), ylim=c(0,40),xlab=expression("Cum C Jun-Aug (g C-CO"[2]*" m"^-2*")"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
Atq <- obs[which(obs$Site=='Atq'),]
dataEllipse(Atq$GS,Atq$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-Atq", plot.points = FALSE,add=TRUE,col="darkgreen")
Bes <- obs[which(obs$Site=='Bes'),]
dataEllipse(Bes$GS,Bes$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-Bes", plot.points = FALSE,add=TRUE,col="darkmagenta")
DL1 <- obs[which(obs$Site=='DL1'),]
dataEllipse(DL1$GS,DL1$SEP,levels=c(0.68), center.pch=0, ellipse.label="CA-DL1", plot.points = FALSE,add=TRUE,col="lightcoral")
ZaH <- obs[which(obs$Site=='ZaH'),]
dataEllipse(ZaH$GS,ZaH$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-ZaH", plot.points = FALSE,add=TRUE,col="darkred")
Ict <- obs[which(obs$Site=='Ict'),]
dataEllipse(Ict$GS,Ict$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-Ict", plot.points = FALSE,add=TRUE,col="navy")
Ivo <- obs[which(obs$Site=='Ivo'),]
dataEllipse(Ivo$GS,Ivo$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-Ivo", plot.points = FALSE,add=TRUE,col="purple")
Che <- obs[which(obs$Site=='Che'),]
dataEllipse(Che$GS,Che$SEP,levels=c(0.68), center.pch=0, ellipse.label="US-Che", plot.points = FALSE,add=TRUE,col="blue")
CoK <- obs[which(obs$Site=='CoK'),]
dataEllipse(CoK$GS,CoK$SEP,levels=c(0.68), center.pch=0, ellipse.label="RU-CoK", plot.points = FALSE,add=TRUE,col="black")
Sam <- obs[which(obs$Site=='Sam'),]
dataEllipse(Sam$GS,Sam$SEP,levels=c(0.68), center.pch=0, ellipse.label="RU-Sam", plot.points = FALSE,add=TRUE,col="red")
dev.off()

# Re-load the soil-temperature table for Figure 3b2 (drawn in the next block).
hrt <- read.table("HRTemperature",header=TRUE)
attach(hrt)
# Figure 3b2: September soil temperature vs cumulative September C release,
# per-site ellipses at the 0.68 confidence level (1-sigma).
pdf("Figure3b2.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-2,8), ylim=c(0,40),xlab=expression("Sep Soil Temperature ("^o*"C)"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
Atq <- hrt[which(hrt$Site=='Atq'),]
dataEllipse(Atq$soilT,Atq$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-Atq", plot.points = FALSE,add=TRUE,col="darkgreen")
Bes <- hrt[which(hrt$Site=='Bes'),]
dataEllipse(Bes$soilT,Bes$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-Bes", plot.points = FALSE,add=TRUE,col="darkmagenta")
DL1 <- hrt[which(hrt$Site=='DL1'),]
dataEllipse(DL1$soilT,DL1$sepHR,levels=c(0.68), center.pch=0, ellipse.label="CA-DL1", plot.points = FALSE,add=TRUE,col="lightcoral")
ZaH <- hrt[which(hrt$Site=='ZaH'),]
dataEllipse(ZaH$soilT,ZaH$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-ZaH", plot.points = FALSE,add=TRUE,col="darkred")
Ict <- hrt[which(hrt$Site=='Ict'),]
dataEllipse(Ict$soilT,Ict$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-Ict", plot.points = FALSE,add=TRUE,col="navy")
Ivo <- hrt[which(hrt$Site=='Ivo'),]
dataEllipse(Ivo$soilT,Ivo$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-Ivo", plot.points = FALSE,add=TRUE,col="purple")
Che <- hrt[which(hrt$Site=='Che'),]
dataEllipse(Che$soilT,Che$sepHR,levels=c(0.68), center.pch=0, ellipse.label="US-Che", plot.points = FALSE,add=TRUE,col="blue")
CoK <- hrt[which(hrt$Site=='CoK'),]
dataEllipse(CoK$soilT,CoK$sepHR,levels=c(0.68), center.pch=0, ellipse.label="RU-CoK", plot.points = FALSE,add=TRUE,col="black")
Sam <- hrt[which(hrt$Site=='Sam'),]
dataEllipse(Sam$soilT,Sam$sepHR,levels=c(0.68), center.pch=0, ellipse.label="RU-Sam", plot.points = FALSE,add=TRUE,col="red")
dev.off()

# below for modeling
# BUG FIX: this line originally read "ibrary(RNetCDF)" (missing the leading
# "l"), which would raise "could not find function" at run time. RNetCDF is
# required for open.nc()/var.get.nc() used in the modeling section below.
library(RNetCDF)
library(ellipse)
library(ggplot2)
library(car)
library(Hmisc)
minor.tick()
setwd("/Users/xxuadmin/BUSINESS/PUBLICATIONS/WorkingOn_Zona_SnowC/20180315_plot")
# Modeled fluxes: read 12-year simulated carbon fluxes from NetCDF. Each
# variable is indexed [year, ii, jj] over a 144 x 16 grid of cells.
data <- open.nc("12yearCarbonFlux.nc")
GS_NEP <- var.get.nc(data,"GS_NEP")
SEP_HR <- var.get.nc(data,"SEP_HR")
SEP_TSOI <- var.get.nc(data,"SEP_TSOI")

# Figure 3c: modeled growing-season NEP vs September HR, one grey ellipse per
# valid grid cell (skip cells with NA NEP or non-positive September HR),
# 0.32 confidence level.
pdf("Figure3c.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-200,0), ylim=c(0,40),xlab=expression("Cum C Jun-Aug (g C-CO"[2]*" m"^-2*")"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
for(ii in 1:144) {
for(jj in 1:16) {
if (!is.na(GS_NEP[1,ii,jj]) && SEP_HR[1,ii,jj] > 0.0)
dataEllipse(GS_NEP[,ii,jj],SEP_HR[,ii,jj],levels=c(0.32), center.pch=0, plot.points = FALSE,add=TRUE,col="#555555")
}
}
dev.off()

# Figure 3d: modeled September soil temperature (Kelvin -> Celsius via
# -273.15) vs September HR, same filtering, 0.32 level.
pdf("Figure3d.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-2,8), ylim=c(0,40),xlab=expression("Sep Soil Temperature ("^o*"C)"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
for(ii in 1:144) {
for(jj in 1:16) {
if (!is.na(GS_NEP[1,ii,jj]) && SEP_HR[1,ii,jj] > 0.0)
dataEllipse((SEP_TSOI[,ii,jj]-273.15),SEP_HR[,ii,jj],levels=c(0.32), center.pch=0, plot.points = FALSE,add=TRUE,col="#555555")
}
}
dev.off()

# Grey colour ramp (printed for reference; not used in the plots above).
colfunc <- colorRampPalette(c("black", "white"))
colfunc(10)

# Figure 3c2: as Figure 3c but at the 0.68 confidence level.
pdf("Figure3c2.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-200,0), ylim=c(0,40),xlab=expression("Cum C Jun-Aug (g C-CO"[2]*" m"^-2*")"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
for(ii in 1:144) {
for(jj in 1:16) {
if (!is.na(GS_NEP[1,ii,jj]) && SEP_HR[1,ii,jj] > 0.0)
dataEllipse(GS_NEP[,ii,jj],SEP_HR[,ii,jj],levels=c(0.68), center.pch=0, plot.points = FALSE,add=TRUE,col="#555555")
}
}
dev.off()

# Figure 3d2: as Figure 3d but at the 0.68 confidence level.
pdf("Figure3d2.pdf")
par(mai=c(1.25,1,0.9,0.8))
plot(-1000,-1000, xlim=c(-2,8), ylim=c(0,40),xlab=expression("Sep Soil Temperature ("^o*"C)"), ylab=expression('Cum C Sep (gC-CO'[2]*' m'^-2*")"), cex=1,las=1, cex.lab = 1.5, cex.axis = 1.5)
minor.tick(nx=4,ny=4,tick.ratio=0.5,x.args=list(),y.args=list())
for(ii in 1:144) {
for(jj in 1:16) {
if (!is.na(GS_NEP[1,ii,jj]) && SEP_HR[1,ii,jj] > 0.0)
dataEllipse((SEP_TSOI[,ii,jj]-273.15),SEP_HR[,ii,jj],levels=c(0.68), center.pch=0, plot.points = FALSE,add=TRUE,col="#555555")
}
}
dev.off()

colfunc <- colorRampPalette(c("black", "white"))
colfunc(10)
547e076954e7d2fbb1ba11d7a61662993b306fa2
394b8a5c63d6f087c343e466b3161a0d13cce19c
/Sentiment.R
75708fc578aa4d4847e3cc2c07d96a610483a3eb
[]
no_license
thaque2050/University-Twitter-Sentiment-Score
4339fd222506a74ada01d9d7fb493439087ca1bf
7e11b2639940e664ce8490573ba2d5ba29f5257b
refs/heads/master
2020-05-24T18:11:15.400705
2019-05-18T20:51:02
2019-05-18T20:51:02
187,403,995
0
0
null
null
null
null
UTF-8
R
false
false
1,230
r
Sentiment.R
# Sentiment analysis of university reviews:
#  (1) clean the review text with tm and count sentiment words against the
#      NRC and Bing lexicons (tidytext);
#  (2) extract positive sentiment terms per review with sentimentr.
#
# NOTE(review): this script expects a data frame `reviews_data2` with a
# character column `value` (one review per row) to exist in the workspace —
# confirm against the upstream scraping script.
library(tidytext)
library(dplyr)
library(stringr)
library(sentimentr)
library(ggplot2)
library(ggrepel)
# BUG FIX: tm (Corpus/tm_map/stopwords/...) and stringi (stri_list2matrix)
# are used below but were never attached, so the script failed at run time.
library(tm)
library(stringi)

# COUNT WORDS FOR SENTIMENT BY DICTIONARIES
# Standard clean-up pipeline: strip numbers and punctuation, lower-case,
# squeeze whitespace, then drop English stopwords plus domain terms that
# appear in virtually every review.
corpus <- Corpus(VectorSource(reviews_data2$value))
corpus <- tm_map(x = corpus, removeNumbers)
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
myStopwords <- c(stopwords(kind = 'en'), "students", "school", "university", "college")
corpus <- tm_map(corpus, removeWords, myStopwords)

# Back to a plain data frame; vapply (instead of sapply) guarantees exactly
# one character string per document regardless of corpus size.
df <- data.frame(text = vapply(corpus, as.character, character(1)), stringsAsFactors = FALSE)
reviews_data3 <- cbind(reviews_data2, df)

# One row per token; join against each lexicon and tally sentiment classes.
# (tibble() replaces the deprecated data_frame().)
tokens <- tibble(text = reviews_data3$text) %>% unnest_tokens(word, text)
tokens %>% inner_join(get_sentiments("nrc")) %>% count(sentiment)
tokens %>% inner_join(get_sentiments("bing")) %>% count(sentiment)

# ANALYSIS USING ADVANCED SENTIMENTR
#df<-get_sentences2(reviews_data2$value)
#ll2<-as.data.frame(stri_list2matrix(read_news_headline, byrow=TRUE))
word_df <- extract_sentiment_terms(reviews_data2$value)
# One matrix column per extracted positive term; paste the first 15 columns
# back into a single string per review.
# NOTE(review): assumes at least 15 term columns exist — verify on real data.
positive_df <- as.data.frame(stri_list2matrix(word_df$positive, byrow=TRUE))
positive_df$text <- do.call(paste0, positive_df[1:15])
32356b70dd73670772014609461504439b78642a
f45333719c9d6f1b55c95be0f9dc03dea9b6c086
/.development_files/examples_stemr_0.1_ebola_mod/effpop_sir_covg.R
578909881b95be7bfed278ae730b16b87fb5ea61
[]
no_license
fintzij/stemr
7c95bde20622142c7f62aad49cfb764a46342b47
185375e0933331da49bdc53563ce61338de4c450
refs/heads/master
2022-05-19T18:29:05.622681
2022-03-16T17:16:52
2022-03-16T17:16:52
52,254,628
8
6
null
2022-03-16T01:35:15
2016-02-22T07:16:45
R
UTF-8
R
false
false
8,724
r
effpop_sir_covg.R
# Simulation study: SIR model with an effective population size (effpop),
# fitted with the linear noise approximation (stemr). This chunk draws true
# parameters, builds the stochastic epidemic model, and simulates a dataset.
library(stemr)
library(extraDistr)
library(foreach)
library(doRNG)
library(doParallel)
library(coda)

# Replication index comes from the command line so many replicates can be
# launched in parallel on a cluster; it also offsets the RNG seed.
args <- commandArgs(TRUE)
print(args)
replication <- as.numeric(args[1])

# Fixed total population and initial compartment counts.
popsize <- 1e5
S0 <- 1e5-10
I0 <- 10
R0 <- 0
set.seed(100817 + replication)

# True parameters drawn from the prior: basic reproduction number R0 > 1,
# recovery rate mu, reporting probability rho, NB overdispersion phi, and the
# effective population size.
true_pars <- c(R0 = 1 + exp(rnorm(1, 0, 0.5)), mu = exp(rnorm(1, -0.7, 0.35)), rho = expit(rnorm(1, 0, 1.4)), phi = rexp(1, 0.1), effpop = runif(1, 5e3,5e4))

# no strata the stemr object --------------------------------------------------
strata <- NULL
compartments <- c("S", "I", "R")
# Infection rate uses (S - effpop): "effpop" is parameterized as the number of
# individuals NOT in the effective population (see `parameters` below).
rates <- list(rate("beta * I * (S - effpop)", from = "S", to = "I", incidence = T, lumped = TRUE),
rate("mu", "I", "R"))
state_initializer <- list(stem_initializer(c(S = S0, I = I0, R = R0), fixed = T))
adjacency <- NULL
tcovar <- NULL
# Natural-scale parameters; beta is derived from R0, mu, and effpop.
parameters = c(true_pars["R0"] / true_pars["effpop"] * true_pars["mu"], true_pars["mu"], true_pars["rho"], true_pars["phi"], popsize - true_pars["effpop"])
names(parameters) <- c("beta", "mu", "rho", "phi", "effpop")
constants <- c(t0 = 0)
t0 <- 0; tmax <- 100

# Compile the model dynamics (ODE, exact rates, and LNA representations).
dynamics <- stem_dynamics( rates = rates, tmax = tmax, parameters = parameters, state_initializer = state_initializer, compartments = compartments, constants = constants, strata = strata, adjacency = adjacency, tcovar = tcovar, messages = T, compile_ode = T, compile_rates = T, compile_lna = T, rtol = 1e-6, atol = 1e-6 )

# Weekly-style observation model: negative binomial counts of new infections
# (S2I incidence) thinned by reporting probability rho, observed at t = 1..tmax.
emissions <- list(emission("S2I", "negbinomial", c("phi", "S2I * rho"), incidence = TRUE, obstimes = seq(1, tmax, by =1)))
measurement_process <- stem_measure(emissions = emissions, dynamics = dynamics, messages = T)
stem_object <- stem(dynamics = dynamics, measurement_process = measurement_process)

# Simulate datasets via Gillespie until the epidemic "takes off" (at least one
# observed count of 15 or more).
dat <- matrix(0.0, nrow = tmax, ncol = 2)
while(max(dat[,2]) < 15) {
stem_data <- simulate_stem(stem_object = stem_object, method = "gillespie", paths = TRUE, observations = T, nsim = 1, census_times = unique(c(0:tmax)))
# grab the dataset
true_path <- stem_data$paths[[1]]
dat <- stem_data$datasets[[1]]
}
# Times (after t0) when prevalence has dropped below 25 — used next to
# truncate the observation period after the epidemic dies out.
g <- which(true_path[-1,5] < 25)
# Truncate the dataset once the epidemic has faded: keep at least 15 and at
# most 50 observation times, using the first time prevalence falls below 25
# after the peak.
if(length(g) != 0) {
tmax <- g[which(g >= 15)[1]]
if(tmax <= 15 & !which.max(true_path[,5])>tmax) {
tmax <- 15
} else if(tmax > 50 | (tmax <= 15 & which.max(true_path[,5])>tmax)) {
tmax <- 50
}
dat <- dat[1:tmax,]
true_path <- true_path[1:tmax,]
}

# Rebuild the measurement process against the (possibly truncated) dataset.
emissions <- list(emission("S2I", "negbinomial", c("phi", "S2I * rho"), incidence = TRUE, obstimes = seq(1, tmax, by =1)))
measurement_process <- stem_measure(emissions = emissions, dynamics = dynamics, data = dat)
stem_object <- stem(dynamics = dynamics, measurement_process = measurement_process)

# initialize the inference
### Parameterization in terms of log(R0) and log(mu)
## Priors for log(R0), log(mu), rho, log(phi)
# Parameters (natural scale): beta, mu, rho, phi
# Parameters (estimation scale): log(beta * N / mu), log(mu), logit(rho), log(phi)

# Map natural-scale parameters to the unconstrained estimation scale.
to_estimation_scale = function(params_nat) {
c(log(params_nat[1] * (popsize - params_nat[5]) / params_nat[2] - 1), # (beta,mu,Neff) -> log(R0-1)
log(params_nat[2]), # mu -> log(mu)
logit(params_nat[3]), # rho -> logit(rho)
log(params_nat[4]), # phi -> log(phi)
log(popsize - params_nat[5]) + log(params_nat[3]))
}

# Inverse map: estimation scale back to the natural scale.
from_estimation_scale = function(params_est) {
rho <- expit(params_est[3])
l_effpop <- params_est[5] - log(rho)
c(exp(log(exp(params_est[1])+1) + params_est[2] - l_effpop), # (log(R0), log(mu), N) -> beta = exp(log(R0) + log(mu) - log(N))
exp(params_est[2]), # log(mu) -> mu
rho, # logit(rho) -> rho
exp(params_est[4]), # log(phi) -> phi
popsize - exp(l_effpop)) # log(effpop) -> effpop
}

# Log prior density on the estimation scale; the added params_est / l_effpop
# terms are the log-Jacobians of the transformations.
prior_density = function(params_nat, params_est) {
l_effpop <- params_est[5] - log(expit(params_est[3]))
sum(dnorm(params_est[1], 0, 0.5, log = TRUE),
dnorm(params_est[2], -0.7, 0.35, log = TRUE),
dnorm(params_est[3], 0, 1.4, log = TRUE),
dexp(exp(params_est[4]), 0.1, log = TRUE) + params_est[4],
dunif(exp(l_effpop), 5e3, 5e4, log = T) + l_effpop)
}

priors <- list(prior_density = prior_density, to_estimation_scale = to_estimation_scale, from_estimation_scale = from_estimation_scale)
# MCMC kernel: global adaptive multivariate-normal proposal on the 5
# estimation-scale parameters; adaptation stops after 1e4 iterations.
covmat <- diag(0.01, 5)
rownames(covmat) <- colnames(covmat) <- c("log_R0_m1", "log_mu", "logit_rho", "log_phi", "log_Neff_x_rho")
mcmc_kernel <- kernel( method = "mvn_g_adaptive", stop_adaptation = 1e4, sigma = covmat, scale_constant = 0.5, scale_cooling = 0.99, step_size = 0.1, nugget = 1e-5, messages = FALSE )

# Randomize each chain's starting values by jittering the true parameters on
# the estimation scale.
stem_object$dynamics$parameters <- function() {
priors$from_estimation_scale(priors$to_estimation_scale(parameters) + rnorm(5, 0, 0.01))
}

# register the cluster and set the seed
registerDoParallel(5)

# Run 5 independent chains in parallel with reproducible RNG streams (doRNG).
# NOTE(review): `t0_kernel` is passed below but is not defined anywhere in
# this script — confirm whether it is meant to be NULL or defined upstream.
results <- foreach(chain = 1:5, .packages="stemr", .options.RNG = 52787, .export = ls(all.names = T)) %dorng% {
chain_res <- stem_inference(stem_object = stem_object, method = "lna", iterations = 3.5e4, thin_params = 10, thin_latent_proc = 10, initialization_attempts = 500, priors = priors, mcmc_kernel = mcmc_kernel, t0_kernel = t0_kernel, messages = FALSE)
return(chain_res)
}

# collect the results: stack the post-burn-in (first 1001 thinned draws
# discarded) samples from all chains, tagged with chain and iteration.
posterior_samples <- cbind(chain = rep(1:5, each = 2.5e3), iter = seq(1,25000,by=10), do.call(rbind, lapply(results, function(x) x$results$MCMC_results[-c(1:1001),])))

# Posterior quantiles for the estimation-scale parameters, plus log(Neff)
# recovered from the "non-effective" count stored in `effpop`.
posterior_quantiles <- apply(posterior_samples[,c("log_R0_m1", "log_mu", "logit_rho", "log_phi", "log_Neff_x_rho")], 2, quantile, c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975))
posterior_quantiles <- cbind(posterior_quantiles, log_Neff = quantile(log(1e5 - posterior_samples$effpop), c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975)))

# Pointwise posterior quantiles of the latent incidence paths (S2I and I2R).
latent_posts <- lapply(results, function(x) x$results$lna_paths[,-1,-1])
latent_S2I <- do.call(cbind, lapply(latent_posts, function(x) x[,1,]))
latent_I2R <- do.call(cbind, lapply(latent_posts, function(x) x[,2,]))
latent_quantiles <- list(S2I = apply(latent_S2I, 1, quantile, c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975)),
I2R = apply(latent_I2R, 1, quantile, c(0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975)))

# Convergence diagnostics over the 5 chains (coda): Gelman-Rubin PSRF and
# effective sample sizes, computed on log posterior + parameters.
posterior_res_mcmc <- as.mcmc.list(lapply(results, function(x) mcmc(
cbind(logpost = rowSums(x$results$MCMC_results[-c(1:1001),1:3]),
x$results$MCMC_results[-c(1:1001),c("log_R0_m1", "log_mu", "logit_rho", "log_phi", "log_Neff_x_rho")]))))
psrf <- gelman.diag(posterior_res_mcmc)
effective_samples <- do.call(rbind, lapply(posterior_res_mcmc, effectiveSize))

# Bundle everything needed for downstream coverage analysis and save per
# replication.
posterior_results <- list(true_pars = true_pars, true_path = true_path, dat = dat, posterior_quantiles = posterior_quantiles, latent_quantiles = latent_quantiles, effective_samples = effective_samples, psrf = psrf, times = sapply(results, function(x) x$results$time))
save(posterior_results, file = paste0("sir_effpop_",replication,".Rdata"))
e053f659b2666c9c9578c77b737460034b271311
5d05a691af890d6fa9a8bce4ffe8f976bd786bd7
/Estructura.R
248f1180de681b77bbb2df73fe3ee535b0dbccee
[]
no_license
critinafernadez/ps_apps
1b98d66c4c9cb64e5d04f7151c21d82aab510f3e
1bc919284cc13a7cc0bdc3381db6d4f18d515c9f
refs/heads/master
2020-04-25T11:00:12.863021
2019-03-29T14:06:08
2019-03-29T14:06:08
172,729,778
0
0
null
null
null
null
UTF-8
R
false
false
611
r
Estructura.R
# Skeleton Shiny app: a "GOOGLE APPS" navigation bar with five pages.
# Only the "Distributions" page has content (a two-tab panel); the server
# side has no logic yet.
library(shiny)
library(shinythemes)

# Build the "Distributions" page separately for readability.
distributions_page <- tabPanel(
  "Distributions",
  mainPanel(
    tabsetPanel(
      type = "tabs",
      tabPanel("Hola"),
      tabPanel("Hasta luego")
    )
  )
)

ui <- navbarPage(
  "GOOGLE APPS",
  theme = shinytheme("flatly"),
  distributions_page,
  tabPanel("Correlation"),
  tabPanel("Categorical Analysis"),
  tabPanel("Statistical Learning"),
  tabPanel("References")
)

# Empty server: placeholder until reactive logic is added.
server <- function(input, output) {
}

shinyApp(ui = ui, server = server)
528db173d1d2599b63089067cfe58fe32714d830
2655fcbde895737e36a1f2283e0cd51765e98168
/Taxonomy/R/silhouette_viz.R
7fc273670a8c2c4053ab8b1221c52bd02136465a
[]
no_license
DDTD-IS/DDTD-IS
5b7128df844289fa804bc9a3750c73898001bfb4
eb21f343a7224793af823cd580f206d2fb48b604
refs/heads/master
2020-09-21T19:21:24.316497
2019-11-29T17:38:21
2019-11-29T17:38:21
224,897,542
0
0
null
null
null
null
UTF-8
R
false
false
2,543
r
silhouette_viz.R
#' @title Silhouette display with standard color palette
#' @description This function is based on the function
#'   \link[factoextra]{fviz_silhouette}. It visualizes a silhouette object and
#'   applies the standard color palette (used for the shiny application).
#'   The reader is referred to the original documentation
#'   \link[factoextra]{fviz_silhouette}.
#' @param sil.obj an object of class \code{eclust}, \code{hcut}, \code{pam},
#'   \code{clara}, \code{fanny}, or a raw \code{silhouette} object.
#' @param label logical; if \code{TRUE} observation names are drawn on the x
#'   axis (rotated 45 degrees), otherwise the axis text is suppressed.
#' @param return.summary logical; if \code{TRUE} (default) return a list of
#'   the ggplot object and a per-cluster summary data frame, otherwise return
#'   only the plot.
#' @param ... further arguments passed to \code{\link[ggpubr]{ggpar}}.
#' @return Either \code{list(plot, summary)} (default) or the ggplot object.
#' @family UI
#' @export
silhouette_viz <- function (sil.obj, label = FALSE, return.summary = TRUE, ...) {
  # Fixed palette shared across the shiny application (supports <= 10 clusters).
  colors <- c(
    "#1F77B4", "#FF7F0E", "#2CA02C", "#D62728", "#9575D2",
    "#8C564B", "#E377C0", "#7F7F7F", "#BCBD22", "#17BECF"
  )
  # Accept either a clustering result carrying silhouette info or a raw
  # silhouette matrix (first three columns: cluster, neighbor, sil_width).
  if (inherits(sil.obj, c("eclust", "hcut", "pam", "clara", "fanny"))) {
    df <- as.data.frame(sil.obj$silinfo$widths)
  } else if (inherits(sil.obj, "silhouette")) {
    df <- as.data.frame(sil.obj[, 1:3])
  } else {
    stop("Don't support an object of class ", class(sil.obj))
  }
  # Order observations by cluster, then by decreasing silhouette width, so
  # bars within each cluster form the classic descending profile.
  df <- df[order(df$cluster, -df$sil_width), ]
  # Preserve the new ordering on the x axis via factor levels.
  if (!is.null(rownames(df)))
    df$name <- factor(rownames(df), levels = rownames(df))
  else df$name <- as.factor(1:nrow(df))
  df$cluster <- as.factor(df$cluster)
  mapping <- aes_string(
    x = "name", y = "sil_width", color = "cluster", fill = "cluster"
  )
  p <- ggplot(df, mapping) +
    geom_bar(stat = "identity") +
    scale_fill_manual(values = colors) +
    scale_colour_manual(values = colors) +
    labs(
      y = "Silhouette width Si",
      x = "",
      title = paste0(
        "Clusters silhouette plot ",
        "\n Average silhouette width: ",
        round(mean(df$sil_width), 2)
      )
    ) +
    ggplot2::ylim(c(NA, 1)) +
    # Red dashed line marks the overall average silhouette width.
    geom_hline(
      yintercept = mean(df$sil_width),
      linetype = "dashed",
      color = "red"
    )
  p <- p + theme_minimal()
  p <- ggpubr::ggpar(p, ...)
  if (!label)
    p <- p + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
  else
    p <- p + theme(axis.text.x = element_text(angle = 45))
  # Per-cluster summary: size and average silhouette width.
  ave <- tapply(df$sil_width, df$cluster, mean)
  n <- tapply(df$cluster, df$cluster, length)
  sil.sum <- data.frame(
    cluster = names(ave),
    size = n,
    ave.sil.width = round(ave, 2)
  )
  # BUG FIX: the original returned NULL (invisibly) when return.summary was
  # FALSE; now the plot itself is returned in that case.
  if (return.summary)
    return(list(p, sil.sum))
  p
}
3d8cf34324f938ac129d89ca9583ed5446ae41d3
1bd90b867597ff1fde0cab414f6cc663f96ecbf7
/R/clustering.R
869e02e3df90c8590119675fc1a15c914b5046c3
[]
no_license
c00cjz00/flycircuit
faf32d1ea024e2781fc98bd648bdfa2736aaec80
3c7119f1328e873e8626731ba8784ea7a6a1d576
refs/heads/master
2021-01-18T08:00:06.874459
2014-06-26T18:47:41
2014-06-26T18:47:41
null
0
0
null
null
null
null
UTF-8
R
false
false
4,514
r
clustering.R
#' Cluster a set of FlyCircuit neurons identified by gene_name
#'
#' Given a vector of gene/neuron names or neuronids use hclust to carry out a
#' hierarchical clustering. The default value of distfun will handle square
#' distance matrices and R.
#' @param gns FlyCircuit identifiers (passed to fc_gene_name).
#' @param method Clustering method (default Ward's).
#' @param scoremat Score matrix to use (see \code{fc_subscoremat} for details of
#'   default)
#' @param distfun Function to convert distance matrix returned by
#'   \code{fc_sub_distmat} into R dist object (default=as.dist).
#' @param ... Additional parameters passed to hclust.
#' @inheritParams fc_sub_distmat
#' @return An object of class \code{\link{hclust}} which describes the tree
#'   produced by the clustering process.
#' @export
#' @family scoremats
#' @seealso \code{\link{fc_gene_name}, \link{hclust}, \link{dist}, \link{plot3d.hclust}}
#' @examples
#' data(kcs20, package='nat')
#' hckcs=hclustfc(names(kcs20))
#' # divide hclust object into 3 groups
#' library(dendroextras)
#' plot(colour_clusters(hckcs, k=3))
#' # 3d plot of neurons in those clusters (with matching colours)
#' library(nat)
#' plot3d(hckcs, k=3, db=kcs20)
#' # names of neurons in 3 groups
#' subset(hckcs, k=3)
hclustfc <- function(gns, method='ward', scoremat=NULL, distfun=as.dist, ...,
                     maxneurons=4000) {
  subdistmat <- fc_sub_distmat(gns, scoremat, maxneurons=maxneurons)
  if(min(subdistmat) < 0)
    stop("Negative distances not allowed. Are you sure this is a distance matrix?")
  # BUG FIX: the distfun argument was documented but ignored (as.dist was
  # hard-coded). Honour it; the default distfun=as.dist preserves the old
  # behaviour exactly.
  # NOTE(review): in R >= 3.1 method='ward' is accepted with a warning and
  # mapped to 'ward.D' — consider updating the default.
  hclust(distfun(subdistmat), method=method, ...)
}

#' Return a subset of a distance matrix stored in a file-backed matrix
#'
#' @inheritParams hclustfc
#' @param form The type of object to return.
#' @param maxneurons Set this to a sensible value to avoid loading huge order
#'   N^2 distances directly into memory.
#' @return return An object of class matrix or dist (as determined by the form
#'   argument), corresponding to a subset of the distance matrix
#' @export
#' @family scoremats
fc_sub_distmat <- function(gns, scoremat=NULL, form=c('matrix', 'dist'),
                           maxneurons=NA){
  form <- match.arg(form)
  # Guard against accidentally materialising an enormous N x N matrix.
  if(!is.na(maxneurons) && length(gns) > maxneurons) {
    stop("Too many neurons! Use maxneurons to override if you're sure.")
  }
  # Mean-normalised all-by-all distances for the requested identifiers.
  d <- fc_subscoremat(gns, gns, scoremat=scoremat, distance=TRUE,
                      normalisation='mean')
  if(form=='matrix') d else as.dist(d)
}

#' Methods to identify and plot groups of neurons cut from an hclust object
#'
#' @description \code{plot3d.hclust} uses \code{plot3dfc} to plot neurons from
#'   each group cut from the \code{hclust} object by colour.
#' @details Note that the colours are in the order of the dendrogram as assigned
#'   by colour_clusters.
#' @param x An hclust object generated by \code{hclustfc}
#' @param k Number of clusters to cut from hclust object.
#' @param h Height to cut hclust object.
#' @param groups Numeric vector of groups to plot.
#' @param col Colours for groups (directly specified or a function).
#' @param ... Additional arguments for \code{plot3dfc}
#' @return \code{plot3d.hclust} returns a list of rgl ids for plotted objects
#'   (see \code{\link{plot3dfc}})
#' @export
#' @rdname hclustfc-slice
#' @aliases hclustfc-slice
#' @seealso
#'   \code{\link{hclustfc}, \link{plot3dfc}, \link{slice}, \link{colour_clusters}}
#' @importFrom dendroextras slice
plot3d.hclust <- function(x, k=NULL, h=NULL, groups=NULL, col=rainbow, ...) {
  # Cut the dendrogram into k groups of neurons. Note that these will now have
  # the neurons in dendrogram order
  kgroups <- slice(x, k, h)
  k <- max(kgroups)
  # col may be a palette function (e.g. rainbow) or a pre-built colour vector.
  if(is.function(col)) col <- col(k)
  neurons <- names(kgroups)
  # Optionally restrict plotting to a subset of the cut groups.
  if(!is.null(groups)){
    matching <- kgroups %in% groups
    kgroups <- kgroups[matching]
    neurons <- neurons[matching]
  }
  plot3dfc(neurons, col=col[kgroups], ...)
}

#' @description \code{subset.hclust} Return the labels of items in 1 or more
#'   groups cut from hclust object
#'
#' @details Only one of \code{h} and \code{k} should be supplied
#' @return \code{subset.hclust} returns a character vector of labels of selected
#'   items
#' @export
#' @rdname hclustfc-slice
#' @importFrom dendroextras slice
subset.hclust <- function(x, k=NULL, h=NULL, groups=NULL, ...){
  kgroups <- slice(x, k, h)
  neurons <- names(kgroups)
  if(!is.null(groups)){
    matching <- kgroups %in% groups
    kgroups <- kgroups[matching]
    neurons <- neurons[matching]
  }
  neurons
}
913cf2ff4ad3e3a8378cf3088fff653aa55b627c
d6c0595084b6f9f3a541df39d7e54ad2cdd29d8e
/man/extractDateFilename.Rd
4d45f39d8d1fca3f43201f365b72e05189d223e9
[]
no_license
cran/phenopix
2b6e5b2ea601de51c312e692e04ec050529bf5e8
9220b65ba06c6e08e1df76a365db0b78364ed684
refs/heads/master
2023-08-19T07:31:53.401802
2023-08-09T13:50:02
2023-08-09T15:30:47
94,452,244
7
5
null
null
null
null
UTF-8
R
false
false
1,643
rd
extractDateFilename.Rd
\name{extractDateFilename}
\alias{extractDateFilename}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Extract dates from filenames
%% ~~function to do ... ~~
}
\description{
This function extracts dates from filenames.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
extractDateFilename(filename, date.code)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{filename}{
The filename from which to retrieve the time stamp
%% ~~Describe \code{filename} here~~
}
  \item{date.code}{
The format of your date in the filename, see details.
%% ~~Describe \code{date.code} here~~
}
}
\details{
This function allows the extraction of the date (hour, doy, dayfract) from the filename. The only mandatory rules are (1) that the site name comes first and the date after and (2) the site name and date must be separated by an underscore. In date.code provide the format of your date, using lower-case letters for year (y), month (m) and day (d) and upper-case letters for hour (H) and minute (M). As an example: If your file is named 'sitename_2012_03_03_15-30.jpg' then your date.code is "yyyy_mm_dd_HH-MM". If your file is named 'sitename_12.03.03.1530.jpg' then your date.code is "yy.mm.dd.HHMM". If hours and minutes are missing from your filename, conversion defaults to 12:00.
}
\value{
A POSIX string containing date, Hour, DOY and DOY.dayfract of the entire image time series
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{
Edoardo Cremonese <e.cremonese@arpa.vda.it>
}
35a6a89adf1b79b1b57a8bcbed68a0b7f225bee8
7afbb148ec11b3105aaead6bdd900f847e49eb18
/tests/testthat/test-missing.R
2b15e31d290f1df9ecf816030142e510399a2ee7
[ "MIT" ]
permissive
tidymodels/recipes
88135cc131b4ff538a670d956cf6622fa8440639
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
refs/heads/main
2023-08-15T18:12:46.038289
2023-08-11T12:32:05
2023-08-11T12:32:05
76,614,863
383
123
NOASSERTION
2023-08-26T13:43:51
2016-12-16T02:40:24
R
UTF-8
R
false
false
2,562
r
test-missing.R
library(testthat) library(recipes) set_with_na <- tibble( a = c(1, 2, NA), b = c(1, 2, NA_real_), d = as.integer(c(1, 2, NA_integer_)), e = c(1, 2, NA_character_) ) tst <- function(...) { cols <- quos(...) recipe(set_with_na) %>% check_missing(!!!cols) %>% prep() %>% bake(set_with_na) } test_that("check_missing passes silently when no NA", { no_na_rp <- recipe(mtcars) %>% check_missing(all_numeric()) %>% prep() expect_error(bake(no_na_rp, mtcars), NA) expect_equal(bake(no_na_rp, mtcars), tibble(mtcars)) }) test_that("check_missing throws error on all types", { expect_snapshot(error = TRUE, tst(a)) expect_snapshot(error = TRUE, tst(b)) expect_snapshot(error = TRUE, tst(d)) expect_snapshot(error = TRUE, tst(e)) }) test_that("check_missing works on multiple columns simultaneously", { expect_snapshot(error = TRUE, tst(a, e)) expect_snapshot(error = TRUE, tst(everything())) }) test_that("check_missing on a new set", { no_na <- tibble(a = 1:3) na <- tibble(a = c(1, NA)) rp <- recipe(no_na) %>% check_missing(a) %>% prep(no_na) expect_snapshot(error = TRUE, bake(rp, na) ) }) # Infrastructure --------------------------------------------------------------- test_that("bake method errors when needed non-standard role columns are missing", { rec <- recipe(mtcars) %>% check_missing(all_numeric()) %>% update_role(disp, new_role = "potato") %>% update_role_requirements(role = "potato", bake = FALSE) rec_trained <- prep(rec, training = mtcars) expect_error(bake(rec_trained, new_data = mtcars[, -3]), class = "new_data_missing_column") }) test_that("empty printing", { rec <- recipe(mpg ~ ., mtcars) rec <- check_missing(rec) expect_snapshot(rec) rec <- prep(rec, mtcars) expect_snapshot(rec) }) test_that("empty selection prep/bake is a no-op", { rec1 <- recipe(mpg ~ ., mtcars) rec2 <- check_missing(rec1) rec1 <- prep(rec1, mtcars) rec2 <- prep(rec2, mtcars) baked1 <- bake(rec1, mtcars) baked2 <- bake(rec2, mtcars) expect_identical(baked1, baked2) }) test_that("empty selection tidy 
method works", { rec <- recipe(mpg ~ ., mtcars) rec <- check_missing(rec) expect <- tibble(terms = character(), id = character()) expect_identical(tidy(rec, number = 1), expect) rec <- prep(rec, mtcars) expect_identical(tidy(rec, number = 1), expect) }) test_that("printing", { rec <- recipe(mtcars) %>% check_missing(all_numeric()) expect_snapshot(print(rec)) expect_snapshot(prep(rec)) })
a271fb16f4e6fd880b594d99fc142155b6b94d92
36290132d0cc0940d2925c54c40f154ad4c36c64
/victors_originals/Wes_explore_data_v1.r
0a47d174a0f5846f2f0359119b9269f5850eadf0
[]
no_license
VKON/wesv1
635814cefb58a07d04cc00e64e3028b6bc27782a
6f80d4f85820895f8482bdcc66ba2ee3ffb2e23f
refs/heads/master
2020-04-05T00:19:30.705980
2019-01-13T09:52:58
2019-01-13T09:52:58
156,392,719
1
1
null
2019-01-13T09:52:59
2018-11-06T14:04:22
HTML
UTF-8
R
false
false
2,311
r
Wes_explore_data_v1.r
# # Sort, add time features and limit dataset # # restart R session # setwd("C:/Users/VK/Desktop/VKOR/wes") # Sort1 <- read.csv(file = 'Wesv14.csv') Sort1 <- Join1 View(Sort1) # limit dataset options(max.print=600) # Sort1 <- A[,c(1:8,10,11,13,52,54,561,563,565,567)] # Sort1 <- A[,c(1:8,52,54,561,563,565,567)] # 9 10 and 11 have missing values # Sort1 <- A[,c(3:8,52,561,567)] # smaller dataset # View(Sort1) # add year, month, weekday information # lubridate package is a little easier here Sort1$year <- strptime(Sort1$date, format = '%Y.%m.%d %H:%M')$year + 1900 Sort1$mon <- strptime(Sort1$date, format = '%Y.%m.%d %H:%M')$mon Sort1$wday <- strptime(Sort1$date, format = '%Y.%m.%d %H:%M')$wday #rename wdays to week days Sort1$year <- factor(Sort1$year) Sort1$mon <- factor(Sort1$mon) Sort1$wday <- factor(Sort1$wday) # order of factors, start with sunday and Jan # These lists are stored in R as well, I believe part of Lubridate levels(Sort1$wday) <- c('Sun','Mon','Tue','Wed','Thu','Fri','Sat') levels(Sort1$mon) <- c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec') View(Sort1) # arrange columns by column name Sort1 <- select( Sort1, colnames(Sort1)[1:14], colnames(Sort1)[52:55], colnames(Sort1)[561:570], everything() ) View(Sort1) # write full dataset without NAs # Wesv15 is the shared dataset in initial commit # write.table(Sort1, file = 'Wesv15.csv', append = TRUE, sep = ',', col.names = TRUE, row.names = FALSE, na = '') # # Print all unique variable factors as text file # How did you enforce uniqueness? 
I would use the unique() function here alluniques <- sapply(Sort1, levels) sink("alluniques.txt") print(alluniques) sink() # restart R session # setwd("C:/Users/VK/Desktop/VKOR/wes") # Sort1 <- read.csv(file = 'Wesv15.csv') # # arrange columns by column name library('dplyr') # work with limited dataset first # focus on non-sparse data first, subset dataset for dense matrix Sort2 <- select( Sort1, colnames(Sort1)[1:28]) View(Sort2) dim(Sort2) # write dataset without NAs # write.table(Sort2, file = 'Wesv16.csv', append = TRUE, sep = ',', col.names = TRUE, row.names = FALSE, na = '') # # restart session and read final file # setwd("C:/Users/VK/Desktop/VKOR/wes") # Sort1 <- read.csv(file = 'Wesv16.csv') # Sort1 <- Sort2
f7f26e42966c655973ade72bee5f02a8a0bbf7cd
05e0e111ad0b82e222371bd84d25564e9b1310ae
/02_R_Programming/ProgrammingAssignment1/pollutantmean.R
14d27aeb3bda68083a5887d62b7d491255c3a194
[]
no_license
anuarimanbayev/datasciencecoursera
9f847569bd526a045c69cdb80a70197d3c6bf2d7
d698096e95c91cf526c0c94c6928424ace56564e
refs/heads/master
2021-01-21T21:48:36.731533
2016-04-24T23:42:42
2016-04-24T23:42:42
42,086,705
0
0
null
2015-09-08T03:03:02
2015-09-08T02:59:50
null
UTF-8
R
false
false
1,969
r
pollutantmean.R
pollutantmean <- function(directory, pollutant, id = 1:332) { ## ==== Homework Assignment 1: Part 1 ==== ## Creates a list of files with prepended directory files_list <- list.files(directory, full.names=TRUE) ## Instantiation specdata <- data.frame() ## Loops through the CSV files, rbinding them together for (i in id) { specdata <- rbind(specdata, read.csv(files_list[i])) } # Calculates the mean and strips away the NA values mean(specdata[, pollutant], na.rm=TRUE) } ## 'directory' is a character vector of length 1 indicating ## the location of the CSV files ## 'pollutant' is a character vector of length 1 indicating ## the name of the pollutant for which we will calculate the ## mean; either "sulfate" or "nitrate". ## 'id' is an integer vector indicating the monitor ID numbers ## to be used ## Return the mean of the pollutant across all monitors list ## in the 'id' vector (ignoring NA values) ## NOTE: Do not round the result! ## AI Commentary: ## Place the pollutantmean.R file on the same LEVEL of directory as the specdata directory folder ## DO NOT place the pollutantmean.R file INTO the specdata directory folder. ## This is why I was getting just a numeric vector 1:10 over and over, since the funciton was looking for a specdata folder IN ITS ENVIRONMENT DIRECTORY LEVEL, whie being UNDER or PART of the specdata directory folder ## Much credit and kudos to: https://github.com/rdpeng/practice_assignment/blob/master/practice_assignment.rmd ## Without the above, I was stuck on the the file input handling, but I knew how to read.scv with rbind and looping, I just didn't know how to GET there in the first place. ## Also, after any change/update/save of this pollutantmean.R, in the CONSOLE, the source command had to be rerun to run the function again correctly. Otherwise, R Studio remembers the old function
7d7ded265903a0f27accbe0a24381ecc4577ef86
d59e56c7658f5177551b308b483ab352236da8a2
/paws/man/backup_update_region_settings.Rd
e943d137a6ba46044c991c0f1760b5b2271e4180
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
jcheng5/paws
a09b03b93c6bafdab26c3217c33926b86907276b
9bb49f9a3ba415c3276955fa676bc881bc22fa3e
refs/heads/main
2023-02-01T15:25:58.124905
2020-11-10T22:35:42
2020-11-10T22:35:42
317,394,924
0
0
NOASSERTION
2020-12-01T01:48:12
2020-12-01T01:48:12
null
UTF-8
R
false
true
980
rd
backup_update_region_settings.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backup_operations.R \name{backup_update_region_settings} \alias{backup_update_region_settings} \title{Updates the current service opt-in settings for the Region} \usage{ backup_update_region_settings(ResourceTypeOptInPreference) } \arguments{ \item{ResourceTypeOptInPreference}{Updates the list of services along with the opt-in preferences for the region.} } \description{ Updates the current service opt-in settings for the Region. If the service has a value set to \code{true}, AWS Backup attempts to protect that service\'s resources in this Region, when included in an on-demand backup or scheduled backup plan. If the value is set to \code{false} for a service, AWS Backup does not attempt to protect that service\'s resources in this Region. } \section{Request syntax}{ \preformatted{svc$update_region_settings( ResourceTypeOptInPreference = list( TRUE|FALSE ) ) } } \keyword{internal}
efcee535f2d323ad2b7cb128e8e5d7252d386b23
df7b041b0622c517ce16fbff0916c542e7da8174
/hindcast_ms/summarize_ms/annotated_histogram.R
46cd27e777036bd9d2499a1ee5311d6654cfeee8
[]
no_license
HeatherWelch/EcoCast_EcoROMS_comparison_ms
f56a4f2e9954f98068d349682c241d5fe9f02d47
9fdca2b69d946073a3d2783db8c2caed82ba3654
refs/heads/master
2021-05-09T09:13:13.924743
2019-10-31T21:54:59
2019-10-31T21:54:59
119,427,444
0
0
null
null
null
null
UTF-8
R
false
false
510
r
annotated_histogram.R
library(magick) plotdir_ms="~/Dropbox/EcoEast_EcoROMS_comparison_ms/EcoCast_EcoROMS_comparison_ms/hindcast_ms/summarize_ms/plots/"#;dir.create(plotdir_ms) file=paste0(plotdir_ms,"histograms3.png") template=paste0(plotdir_ms,"template.png") hist=image_read(file) template=image_read(template) template2=image_scale(template, "8100") #hist2=image_scale(hist, "970") hist2=image_crop(hist,"+50-0") a=image_composite(template2,hist2,offset = "+200+240") a image_write(a,path = paste0(plotdir_ms,"trial.png"))
55c86b20de9bfa54d8ddaad4e8f4e381bbed0777
7d615c4460b459d044213279490dc23f9aaa4c73
/NPV-IRR_indicators.R
c5aabf3375a16a0863a63847a501d39d8dcfeb11
[]
no_license
silvadaniels/BasicEconometrics
eaa5b65d5e53d3e4694503cb6fd83aefb7dff099
045871ec20dff2fd1264f4082667b1bb00120cf6
refs/heads/main
2023-06-09T20:18:10.477720
2021-07-09T00:00:07
2021-07-09T00:00:07
383,644,127
0
0
null
null
null
null
UTF-8
R
false
false
549
r
NPV-IRR_indicators.R
# Script to calculate economic indicators: Net Present Value and Internal Rate of Return # Net Present Value (NPV), i= interest rate, cf=net income vector (negative or positive), invest=initial investment (always negative) npv <- function(i, cf, invest, t=seq(along=cf)) sum(cf/(1+i)^t) +invest # Interest Rate of return (IRR): irr <- function(cf) { uniroot(npv, c(0,1), cf=values, inv)$root } # test inv=-12526 values=c(0,-4242,0,-2983,0,-2332,0,-2332,0,46192,-4886,-1190) rate= 0.1 npv(rate, values, inv) irr(cf)
545d36ea0a45f8c27046bb4c2761b947d0f23189
3486c479855d4a346c3fdd9fe30b4c02bc58c112
/tests/testthat/test-make_phrase.R
9a56d2fbe95a327bf9b4c4f2108ed0738af4025d
[ "MIT" ]
permissive
GenghisKhandybar/twelvedays
bb88eee7f26d0393d1316738c162e4ced5280625
5de8b4b8735f5a55ac4f25123223e652cb4679bd
refs/heads/master
2022-11-25T15:06:33.328574
2020-08-04T06:04:41
2020-08-04T06:04:41
284,625,823
0
0
MIT
2020-08-03T06:52:10
2020-08-03T06:52:09
null
UTF-8
R
false
false
278
r
test-make_phrase.R
context("Testing make_phrase function.") test_that("Random sentence construction works", expect_equal(make_phrase(3, "AGM-158 JASSM Missiles", "flying","silver","into civilians"), "three silver AGM-158 JASSM Missiles flying into civilians"))
c758f5781538548c2b7735134fc59076c4bc9f2e
2b13ed71bc7ff59ebe7a2bd4b1dd5ab77b1496d1
/R/plotMSD.r
eb27f55f923af8e74dad8ef8000c3dbcf8a2823f
[]
no_license
jgodet/trackR
5336444a1fd7ce67454c8ef27dc2f840c33babf5
231482b1c34232e77d5c68d7ed4a439fda4296a5
refs/heads/master
2021-01-06T19:59:47.018135
2020-05-19T14:13:05
2020-05-19T14:13:05
241,470,839
3
1
null
null
null
null
UTF-8
R
false
false
1,356
r
plotMSD.r
# plotMSD.r # written by JuG # March 08 2020 #' Plot MSD #' @author JuG #' @description #' @param msdData MSD data (list) (output of calcMSD function) #' @param deltaT time elapsed between two consecutive frame #' @param fitMSD boolean #' @param printMSDfit boolean #' @param npoint4fit number of points to use for MSD fitting #' @details #' @examples #' #' #' @return #' @export plotMSD <- function(msdData, deltaT, fitMSD =TRUE,printMSDfit=TRUE,npoint4fit =4,...){ if(missing(msdData)){ return(cat('MSD data are missing')) } if(missing(deltaT)){ deltaT <- 1 xlab <- "Time (timesteps)" }else{ xlab <- "Time (ms)" } tmax <- dim(msdData)[1] tt <- (1:tmax) * deltaT xlim <- c(0,tmax) ylim <- c(0, max(msdData$mean + msdData$sd, na.rm=T)*1.05) plot(tt,msdData$mean, xlim = xlim, ylim=ylim, xlab=xlab, ylab = "MSD", las=1,pch=21, bg='lightgrey', ...) points(0,0) for(i in tt){ segments(x0 = i, y0 = msdData$mean[i] - msdData$sd[i], y1 = msdData$mean[i] + msdData$sd[i],... ) } if(fitMSD){ mod <- lm(mean~ tt[1:npoint4fit] - 1, weights = c(n), data=msdData[1:npoint4fit,]) abline(mod, col="red") if(printMSDfit){ print(summary(mod)) cat("Diffusion", round(coefficients(mod)/4, 5)," [", round(confint(mod)[[1]]/4, 4)," - ",round(confint(mod)[[2]]/4, 4), "] \n") } } }
1286d868514b428f283ab6e4a9c10404302fc064
ba892b7dda59a3024307699aaee1d42d4af30e03
/manipulation/3-data-cleaning.R
33f2fee53bd167fe087ff65274e6dad804a2099c
[]
no_license
IALSA/longitudinal-response-pattern
5b9d5b3ce292b27e1eaed7b8ed9d6ee8c616fa0d
d293812cb7f0cea8d8e9a4a447bb87843f2ff24f
refs/heads/master
2020-04-09T16:52:07.699990
2016-07-15T20:06:43
2016-07-15T20:06:43
61,064,550
0
0
null
null
null
null
UTF-8
R
false
false
6,755
r
3-data-cleaning.R
## Project title: MAP ## Created on: June 08 2016 ## Author: Jamie Knight ## Data: ds2 ## Summary: Data Cleaning ## ## ---------------------------------------------------------------------- ## options(width=160) rm(list=ls()) cat("\f") # ---- load_packages ---------------------------------- requireNamespace("dplyr") # ----- load-data ------ getwd() ds <-readRDS("./data/unshared/derived/ds2.rds") str(ds) names(ds) dplyr::n_distinct(ds$projid) #1803 # ----- constant-variables ------ ####help### # apoe needs to have the values come all the way down the column! # install.packages("zoo") # library(zoo) # ?na.locf #last observation carried forward # # any(is.na(ds$apoe_genotype)) # ds$apoe_genotype <- na.locf(ds$apoe_genotype) # ds$apoe <- na.locf(ds$apoe) #any other missing constant values? any(is.na(ds$race)) any(is.na(ds$sex)) #false any(is.na(ds$educ)) any(is.na(ds$apoe)) ####problem#### #omit's NA's for people who are missing the data, need to do it by person only. # ----- deletions ------ # ----- BSIT score ------ # In the MAP data, for the BSIT scores, they have assigned 0.25 to missing responses to a maximum of two; # if more than two response were missing, the entire test was treated as missing). ####potential replication issue here - all papers include the decimals #### #here we remove everything past the decimal to get an even number. 
n_distinct(ds$total_smell_test)#33, should be 12 table(ds$total_smell_test) ds$total_smell_test[ds$total_smell_test == 2.25] <- 2 ds$total_smell_test[ds$total_smell_test == 2.5] <- 2 table(ds$total_smell_test) ds$total_smell_test[ds$total_smell_test == 3.25] <- 3 ds$total_smell_test[ds$total_smell_test == 3.5] <- 3 ds$total_smell_test[ds$total_smell_test == 4.25] <- 4 ds$total_smell_test[ds$total_smell_test == 4.5] <- 4 ds$total_smell_test[ds$total_smell_test == 5.25] <- 5 ds$total_smell_test[ds$total_smell_test == 5.5] <- 5 ds$total_smell_test[ds$total_smell_test == 6.25] <- 6 ds$total_smell_test[ds$total_smell_test == 6.5] <- 6 ds$total_smell_test[ds$total_smell_test == 7.25] <- 7 ds$total_smell_test[ds$total_smell_test == 7.5] <- 7 ds$total_smell_test[ds$total_smell_test == 8.25] <- 8 ds$total_smell_test[ds$total_smell_test == 8.5] <- 8 ds$total_smell_test[ds$total_smell_test == 9.25] <- 9 ds$total_smell_test[ds$total_smell_test == 9.5] <- 9 ds$total_smell_test[ds$total_smell_test == 10.25] <- 10 ds$total_smell_test[ds$total_smell_test == 10.5] <- 10 ds$total_smell_test[ds$total_smell_test == 11.25] <- 11 table(ds$total_smell_test) # 0 1 2 3 4 5 6 7 8 9 10 11 12 # 4 18 50 100 167 193 305 374 478 723 909 836 365 n_distinct(ds$total_smell_test)#14 - should be 13 + NAs glimpse(ds) str(ds$total_smell_test) class(ds$total_smell_test) # ---- explore --------- str(ds) glimpse(ds) summary(ds) plot(ds$fu_year, ds$age_at_visit) # ---- variable-types --------- #setting the propper variable types to the variables names(ds) class(ds$age_bl) levels(ds$group_smell) #total smell test as intger. ds$total_smell_test <- as.integer(ds$total_smell_test) n_distinct(ds$total_smell_test) #14 unique.default(sapply(ds$total_smell_test, unique)) # 8 9 10 NA 11 6 7 4 12 2 5 1 3 #should these be ordered? 
is.ordered(ds$total_smell_test)#no ds$BSIT <- as.ordered(ds$total_smell_test) levels(ds$BSIT) is.factor(ds$BSIT) #true unique.default(sapply(ds$BSIT, unique)) # [1] 8 9 10 <NA> 11 6 7 4 12 2 5 1 3 # Levels: 1 2 3 4 5 6 7 8 9 10 11 12 n_distinct(ds$BSIT) #14 # apoe_genotype as factor with 3 levels: summary(ds$apoe_genotype) n_distinct(ds$apoe_genotype) #7 unique.default(sapply(ds$apoe_genotype, unique)) # 34 33 NA 23 24 44 22 is.ordered(ds$apoe_genotype) ds$apoe<- as.ordered(ds$apoe_genotype) levels(ds$apoe) is.factor(ds$apoe) #true unique.default(sapply(ds$apoe, unique)) # 34 33 <NA> 23 24 44 22 # Levels: 22 23 24 33 34 44 n_distinct(ds$apoe) #7 #good #binomial variables as integers or numbers? #this can be changed at 2-add-variables #vital status n_distinct(ds$vital_status)#2 ds$vital_status <- as.integer(ds$vital_status) n_distinct(ds$vital_status) unique.default(sapply(ds$vital_status, unique)) #0 or 1 #dementia status n_distinct(ds$dementia_status)#3 unique.default(sapply(ds$dementia_status, unique)) ds$dementia_status <- as.integer(ds$dementia_status) n_distinct(ds$dementia_status) #13 unique.default(sapply(ds$dementia_status, unique)) #0, 1, NA #stroke status n_distinct(ds$stroke_status)#3 unique.default(sapply(ds$stroke_status, unique)) ds$stroke_status <- as.integer(ds$stroke_status) n_distinct(ds$stroke_status) #3 unique.default(sapply(ds$stroke_status, unique)) #0, 1, NA #path status n_distinct(ds$path_status)#3 unique.default(sapply(ds$path_status, unique)) ds$path_status <- as.integer(ds$path_status) n_distinct(ds$path_status) #13 unique.default(sapply(ds$path_status, unique)) #0, 1, NA # ---- outliers --------- glimpse(ds) str(ds) summary(ds) hist(ds$total_smell_test) #neg skew hist(ds$mmse) #neg skew, one outlier boxplot(ds$mmse) #many outliers at the low end ## Graph 1 library(ggplot2) source("./scripts/common-functions.R") source("./scripts/graph-presets.R") ids <- sample(unique(as.numeric(ds$projid)),100) ds %>% dplyr::mutate( id = 
as.integer(projid), stroke_status = factor(stroke_status), niareagansc = factor(niareagansc) ) %>% # dplyr::filter(id %in% ids) %>% ggplot(aes(x=age_at_visit, y=mmse)) + # ggplot(aes(x=age_at_visit, y=niareagansc)) + # geom_line(aes(group=id, color=stroke_status), size=1.1, alpha=.5)+ geom_line(aes(group=id, color=niareagansc), size=1.1, alpha=.5)+ # scale_color_manual(values = c("0"="grey60", "1"="red") )+ facet_grid(apoe~group_smell)+ main_theme ## Graph 2 ids <- sample(unique(as.numeric(ds$projid)),100) ds %>% dplyr::mutate( id = as.integer(projid), stroke_status = factor(stroke_status), niareagansc = factor(niareagansc) ) %>% # dplyr::filter(id %in% ids) %>% ggplot(aes(x=age_at_visit, y=mmse)) + # ggplot(aes(x=age_at_visit, y=niareagansc)) + # geom_line(aes(group=id, color=stroke_status), size=1.1, alpha=.5)+ geom_line(aes(group=id, color=apoe), size=.7, alpha=.7)+ scale_color_manual(values = c("0"="grey60", "1"="red") )+ facet_grid(niareagansc~group_smell)+ main_theme # ---- save --------- #save subset data as ds3 ds3<-ds saveRDS(ds3, "./data/unshared/derived/ds3.rds") #continue on to 4-apply-codebook # #code examples # mtcars$mpg[mtcars$cyl == 4] <- NA # #code example using dplyr # mtcars %>% mutate(mpg=replace(mpg, cyl==4, NA)) %>% as.data.frame()
2b672fead507bfacacd8ced70d26fa1a1d79e44c
0d9078891cd9b470be33514d1b2f1e7fbf77af3e
/man/get_quiz_attr.Rd
e415d51076a06a66b5a50782ceb8211cfad0b827
[ "MIT" ]
permissive
Lightbridge-KS/moodleQuiz
0acdc9f78951e0fd25b318d32378461b769845e6
d32edcec4769b93210a5f5736a1173694846630d
refs/heads/main
2023-04-09T09:02:24.671795
2022-09-23T13:30:49
2022-09-23T13:30:49
368,576,033
0
0
NOASSERTION
2022-06-02T12:26:37
2021-05-18T15:14:40
R
UTF-8
R
false
true
1,936
rd
get_quiz_attr.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quiz_attr.R \name{get_quiz_attr} \alias{get_quiz_attr} \title{Get Moodle Quiz Attributes} \usage{ get_quiz_attr(data) } \arguments{ \item{data}{A data.frame of \href{https://docs.moodle.org/311/en/Quiz_reports}{Moodle Quiz report}.} } \value{ A List containing Quiz attributes such as: \itemize{ \item \strong{\code{report_type}}: (character) "Grades" for Moodle Grade report or "Responses" for Moodle Responses report. \item \strong{\code{some_nyg}}: (logical) \code{TRUE}: If "Grade/xx" column of the Moodle Quiz report contained some grades that are "Not yet graded". \code{FALSE}: If "Grade/xx" column contained all numeric grades. \item \strong{\code{grade_max}}: (numeric) Maximum grade of the Quiz \item \strong{\code{q_no}}: (numeric) Only if \code{report_type} is "Grades", then \code{q_no} shows questions number presented in the Moodle Grades report. \item \strong{\code{q_max}}: (numeric) Only if \code{report_type} is "Grades", then \code{q_max} shows maximum scores of corresponding \code{q_no}. \item \strong{\code{resp_no}}: (numeric) Only if \code{report_type} is "Responses", then \code{resp_no} shows responses number of the Moodle Responses report. \item \strong{\code{cloze_cols}}: (character) Only if \code{report_type} is "Responses", then \code{cloze_cols} shows names of the "Responses" column that contained embedded answer (Cloze). If no Cloze column is presented, return \code{NULL}. } } \description{ Get attributes or meta-information from \href{https://docs.moodle.org/311/en/Quiz_reports}{Moodle Quiz report} (i.e., Grades or Responses report) such as type of Moodle Quiz report, maximum grade of each quiz, question's number, question's maximum score, or embedded answer (Cloze) column names (if present). } \examples{ # Grades Report get_quiz_attr(grades_ls$Quiz_1) # Responses Report get_quiz_attr(responses_ls$Quiz_1) }
465b24897bdd3feb9e4d69ee8fa639bfad2f4a35
32f251147606d865a04834ba8f08f8be75410738
/R/data.R
a1bed5d0d9c315c62b221d11fd9803756773899c
[]
no_license
cdv04/ACTR
4e17aaab32d319b1b609b6c1c0c553a0f7e41317
d1762dc8884eb37b023cf146a71c05a96508cc08
refs/heads/master
2021-01-01T05:11:47.528297
2017-04-07T10:16:40
2017-04-07T10:16:40
59,212,181
0
0
null
null
null
null
UTF-8
R
false
false
3,584
r
data.R
#' EDR10 and ED50 selected from FREDERICA modelling for the ACT :Acute to #' Chronic SSD extrapolation for project for ionising radiation. #' #' #' A dataset containing ED (Effective Dose) derived from dose response #' modelling of selected FREDERICA tests #' #' @format A data frame with 786 rows and 22 variables: #' \itemize{ #' \item Ecosystem : ecosystem of the studied species #' \item Kingdom : kinddom of the studied species #' \item Phylum: phylum of the studied species #' \item Class: class of the studied species #' \item Order: order of the studied species #' \item Family: family of the studied species #' \item Genus: genus of the studied species: #' \item SpeciesComp: Combination of Genus and Species variables in a unique #' variable #' \item Sp_common : name of species according common designation #' \item Sp_latin : name of species according latin designation #' \item DoseType : type of exposition #' \item ID :Reference ID number of the data (automatically generated in #' FREDERICA) #' \item subID : Reference subID number of the dose-response modeled data from #' which the ED value is derived #' \item RadType : radiation type of the exposition #' \item Umbrella : umbrella type of the studied effect #' \item Effect description : free description of the studied effect #' \item Model : model used for building the dose-response curve #' \item ED : effective dose redived from the dose-reponse curve. ED is ED50 #' when DoseType is "Acute", and ED is EDR10 when DoseType is "Chronic" #' EDR10 is expressed in µGy/h and ED50 in Gy #' \item SE : standard eror of the ED #' #' } #' @source All_v120316.csv dataset "cipr" #' EDR10 and ED50 selected from FREDERICA modelling for the ACT :Acute to #' Chronic SSD extrapolation for project for ionising radiation. 
#' #' #' A dataset containing ED (Effective Dose) derived from dose response #' modelling of selected FREDERICA tests #' #' @format A data frame with 786 rows and 22 variables: #' \itemize{ #' \item Ecosystem : ecosystem of the studied species #' \item Kingdom : kinddom of the studied species #' \item Phylum: phylum of the studied species #' \item Class: class of the studied species #' \item Order: order of the studied species #' \item Family: family of the studied species #' \item Genus: genus of the studied species: #' \item SpeciesComp: Combination of Genus and Species variables in a unique #' variable #' \item Sp_common : name of species according common designation #' \item Sp_latin : name of species according latin designation #' \item DoseType : type of exposition #' \item ID :Reference ID number of the data (automatically generated in #' FREDERICA) #' \item subID : Reference subID number of the dose-response modeled data from #' which the ED value is derived #' \item RadType : radiation type of the exposition #' \item Umbrella : umbrella type of the studied effect #' \item Effect description : free description of the studied effect #' \item Model : model used for building the dose-response curve #' \item ED : effective dose redived from the dose-reponse curve. ED is ED50 #' when DoseType is "Acute", and ED is EDR10 when DoseType is "Chronic" #' EDR10 is expressed in µGy/h and ED50 in Gy #' \item SE : standard eror of the ED #' #' } #' @source All_v140217.csv dataset "actr17"
c8e78b31a04a85af7ab69bb3e1e4138bba257e4c
e14fc97a2bde8a4f303a7109ab5694d332e806d4
/inst/doc/milr-intro.R
5785138dcb7eec29098fa492b625bbbb7af26ccd
[]
no_license
cran/milr
d321f9cec7f9a60c57b617268c68a198ba8def0f
55e4ab3cfa6aac436a61bb6dc4b7564485b45327
refs/heads/master
2021-01-20T18:36:15.259947
2020-10-31T06:30:02
2020-10-31T06:30:02
63,359,470
0
0
null
null
null
null
UTF-8
R
false
false
7,875
r
milr-intro.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(cache = 2, cache.lazy = FALSE, tidy = FALSE, warning = FALSE) ## ---- eval=FALSE-------------------------------------------------------------- # milr(y, x, bag, lambda, numLambda, lambdaCriterion, nfold, maxit) # softmax(y, x, bag, alpha, ...) ## ---- eval=FALSE-------------------------------------------------------------- # fitted(object, type) # predict(object, newdata, bag_newdata, type) ## ----DGP1--------------------------------------------------------------------- library(milr) library(pipeR) set.seed(99) # set the size of dataset numOfBag <- 30 numOfInstsInBag <- 3 # set true coefficients: beta_0, beta_1, beta_2, beta_3 trueCoefs <- c(-2, -1, 2, 0.5) trainData <- DGP(numOfBag, numOfInstsInBag, trueCoefs) colnames(trainData$X) <- paste0("X", 1:ncol(trainData$X)) (instanceResponse <- as.numeric(with(trainData, tapply(Z, ID, any)))) ## ----EST2--------------------------------------------------------------------- # fit milr model milrFit_EST <- milr(trainData$Z, trainData$X, trainData$ID, lambda = 1e-7) # call the Wald test result summary(milrFit_EST) # call the regression coefficients coef(milrFit_EST) ## ----EST---------------------------------------------------------------------- fitted(milrFit_EST, type = "bag") # fitted(milrFit_EST, type = "instance") # instance-level fitted labels table(DATA = instanceResponse, FITTED = fitted(milrFit_EST, type = "bag")) # predict for testing data testData <- DGP(numOfBag, numOfInstsInBag, trueCoefs) colnames(testData$X) <- paste0("X", 1:ncol(testData$X)) (instanceResponseTest <- as.numeric(with(trainData, tapply(Z, ID, any)))) pred_EST <- with(testData, predict(milrFit_EST, X, ID, type = "bag")) # predict(milrFit_EST, testData$X, testData$ID, # type = "instance") # instance-level prediction table(DATA = instanceResponseTest, PRED = pred_EST) ## ----VS, message=FALSE-------------------------------------------------------- 
set.seed(99) # Set the new coefficienct vector (large p) trueCoefs_Lp <- c(-2, -2, -1, 1, 2, 0.5, rep(0, 45)) # Generate the new training data with large p trainData_Lp <- DGP(numOfBag, numOfInstsInBag, trueCoefs_Lp) colnames(trainData_Lp$X) <- paste0("X", 1:ncol(trainData_Lp$X)) # variable selection by user-defined tuning set lambdaSet <- exp(seq(log(0.01), log(20), length = 20)) milrFit_VS <- with(trainData_Lp, milr(Z, X, ID, lambda = lambdaSet)) # grep the active factors and their corresponding coefficients coef(milrFit_VS) %>>% `[`(abs(.) > 0) ## ----AUTOVS,message=FALSE----------------------------------------------------- # variable selection using auto-tuning milrFit_auto_VS <- milr(trainData_Lp$Z, trainData_Lp$X, trainData_Lp$ID, lambda = -1, numLambda = 5) # the auto-selected lambda values milrFit_auto_VS$lambda # the values of BIC under each lambda value milrFit_auto_VS$BIC # grep the active factors and their corresponding coefficients coef(milrFit_auto_VS) %>>% `[`(abs(.) > 0) ## ----CV,message=FALSE--------------------------------------------------------- # variable selection using auto-tuning with cross validation milrFit_auto_CV <- milr(trainData_Lp$Z, trainData_Lp$X, trainData_Lp$ID, lambda = -1, numLambda = 5, lambdaCriterion = "deviance", nfold = 3) # the values of predictive deviance under each lambda value milrFit_auto_CV$cv # grep the active factors and their corresponding coefficients coef(milrFit_auto_CV) %>>% `[`(abs(.) 
> 0) ## ----DLMUSK1------------------------------------------------------------------ dataName <- "MIL-Data-2002-Musk-Corel-Trec9.tgz" dataUrl <- "http://www.cs.columbia.edu/~andrews/mil/data/" ## ----READMUSK1---------------------------------------------------------------- filePath <- file.path(getwd(), dataName) # Download MIL data sets from the url (not run) # if (!file.exists(filePath)) # download.file(paste0(dataUrl, dataName), filePath) # Extract MUSK1 data file (not run) # if (!dir.exists("MilData")) # untar(filePath, files = "musk1norm.svm") # Read and Preprocess MUSK1 library(data.table) MUSK1 <- fread("musk1norm.svm", header = FALSE) %>>% `[`(j = lapply(.SD, function(x) gsub("\\d+:(.*)", "\\1", x))) %>>% `[`(j = c("bag", "label") := tstrsplit(V1, ":")) %>>% `[`(j = V1 := NULL) %>>% `[`(j = lapply(.SD, as.numeric)) %>>% `[`(j = `:=`(bag = bag + 1, label = (label + 1)/2)) %>>% setnames(paste0("V", 2:(ncol(.)-1)), paste0("V", 1:(ncol(.)-2))) %>>% `[`(j = paste0("V", 1:(ncol(.)-2)) := lapply(.SD, scale), .SDcols = paste0("V", 1:(ncol(.)-2))) X <- paste0("V", 1:(ncol(MUSK1) - 2), collapse = "+") %>>% (paste("~", .)) %>>% as.formula %>>% model.matrix(MUSK1) %>>% `[`( , -1L) Y <- as.numeric(with(MUSK1, tapply(label, bag, function(x) sum(x) > 0))) ## ----MIFIT,message=FALSE,results="hide"--------------------------------------- # softmax with alpha = 0 softmaxFit_0 <- softmax(MUSK1$label, X, MUSK1$bag, alpha = 0, control = list(maxit = 5000)) # softmax with alpha = 3 softmaxFit_3 <- softmax(MUSK1$label, X, MUSK1$bag, alpha = 3, control = list(maxit = 5000)) # use a very small lambda so that milr do the estimation # without evaluating the Hessian matrix milrFit <- milr(MUSK1$label, X, MUSK1$bag, lambda = 1e-7, maxit = 5000) ## ----MILRVS, cache=TRUE,cache.lazy=FALSE,message=FALSE,warning=FALSE,tidy=FALSE---- # MILR-LASSO milrSV <- milr(MUSK1$label, X, MUSK1$bag, lambda = -1, numLambda = 20, nfold = 3, lambdaCriterion = "deviance", maxit = 5000) # show the detected 
active covariates sv_ind <- names(which(coef(milrSV)[-1L] != 0)) %>>% (~ print(.)) %>>% match(colnames(X)) # use a very small lambda so that milr do the estimation # without evaluating the Hessian matrix milrREFit <- milr(MUSK1$label, X[ , sv_ind], MUSK1$bag, lambda = 1e-7, maxit = 5000) # Confusion matrix of the fitted model table(DATA = Y, FIT_MILR = fitted(milrREFit, type = "bag")) ## ----MUSK1PRED2,message=FALSE------------------------------------------------- set.seed(99) predY <- matrix(0, length(Y), 4L) %>>% `colnames<-`(c("s0","s3","milr","milr_sv")) folds <- 3 foldBag <- rep(1:folds, floor(length(Y) / folds) + 1, length = length(Y)) %>>% sample(length(.)) foldIns <- rep(foldBag, table(MUSK1$bag)) for (i in 1:folds) { # prepare training and testing sets ind <- which(foldIns == i) # train models fit_s0 <- softmax(MUSK1[-ind, ]$label, X[-ind, ], MUSK1[-ind, ]$bag, alpha = 0, control = list(maxit = 5000)) fit_s3 <- softmax(MUSK1[-ind, ]$label, X[-ind, ], MUSK1[-ind, ]$bag, alpha = 3, control = list(maxit = 5000)) # milr, use a very small lambda so that milr do the estimation # without evaluating the Hessian matrix fit_milr <- milr(MUSK1[-ind, ]$label, X[-ind, ], MUSK1[-ind, ]$bag, lambda = 1e-7, maxit = 5000) fit_milr_sv <- milr(MUSK1[-ind, ]$label, X[-ind, sv_ind], MUSK1[-ind, ]$bag, lambda = 1e-7, maxit = 5000) # store the predicted labels ind2 <- which(foldBag == i) # predict function returns bag response in default predY[ind2, 1L] <- predict(fit_s0, X[ind, ], MUSK1[ind, ]$bag) predY[ind2, 2L] <- predict(fit_s3, X[ind, ], MUSK1[ind, ]$bag) predY[ind2, 3L] <- predict(fit_milr, X[ind, ], MUSK1[ind, ]$bag) predY[ind2, 4L] <- predict(fit_milr_sv, X[ind, sv_ind], MUSK1[ind, ]$bag) } table(DATA = Y, PRED_s0 = predY[ , 1L]) table(DATA = Y, PRED_s3 = predY[ , 2L]) table(DATA = Y, PRED_MILR = predY[ , 3L]) table(DATA = Y, PRED_MILR_SV = predY[ , 4L])
6779693ca9ecebb57bb74b96119138d1f3496dfe
6e8395e00701e58af37bfa5a21365fa68ec1e9ad
/aftica.R
2719fef7643f279ccd61dd0cf6dd108d514661a1
[]
no_license
RobertoRW/EspacialR
47fe17d5c2cff1793f648b71a45b5c86a0be84cf
fda2bb93c8e02f05a11e7cac428677e0332695c3
refs/heads/master
2020-04-10T12:18:10.355748
2018-12-09T09:07:10
2018-12-09T09:07:10
161,018,032
0
0
null
null
null
null
UTF-8
R
false
false
4,756
r
aftica.R
# Point-pattern analysis of armed-conflict events in East Africa (ACLED-style
# data in africa.csv): G/F/K functions, kernel intensity, and Poisson point
# process models for the years 1997 and 2015.
library(readr)
library(ggmap)
library(dplyr)
library(spatstat)
library(maptools)
library(rgdal)
library(sp)
library(splancs)
library(RColorBrewer)

# Fixed seed so the simulation envelopes below are reproducible
set.seed(798487)

conflict.data <- read_csv("africa.csv")
# NOTE(review): the variables are named "algeria" and several plot titles say
# "Ruanda", but the data are filtered to Uganda — confirm which country was
# actually intended before publishing the figures.
conflict.algeria = subset(conflict.data, conflict.data$COUNTRY == "Uganda")
conflict.algeria = subset(conflict.algeria,
                          conflict.algeria$YEAR %in% c(1997, 2015))

# Background map around the events, with 20% padding
bbox <- make_bbox(LONGITUDE, LATITUDE, data = conflict.algeria, f = 0.2)
conflict.algeria$anio = as.factor(conflict.algeria$YEAR)
anio = conflict.algeria$anio
africa <- get_map(bbox, source = "stamen")
# Events by year on the basemap (shape encodes year).
# NOTE(review): scale_color_gradient has no effect here because no colour
# aesthetic is mapped — presumably left over from an earlier version.
ggmap(africa) +
  geom_point(aes(x = LONGITUDE, y = LATITUDE, shape = anio),
             data = conflict.algeria, alpha = 0.5) +
  xlim(29, 35.5) + ylim(-2, 4.5) +
  scale_color_gradient(limits = c(1997, 2015),
                       low = "orangered1", high = "red4")

# Build a SpatialPointsDataFrame (WGS84 lon/lat) marked with the event year,
# then project to a metric CRS so distances are in meters
africa.marks = conflict.algeria[, "YEAR"]
spatial_df <- SpatialPointsDataFrame(
  coords = conflict.algeria[, c("LONGITUDE", "LATITUDE")],
  proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"),
  data = africa.marks)
africa.transed <- spTransform(spatial_df, CRS("+init=epsg:3839"))
africa.transed$Easting <- coordinates(africa.transed)[, 1]
africa.transed$Northing <- coordinates(africa.transed)[, 2]
plot(x = africa.transed$Easting, y = africa.transed$Northing,
     ylab = "Norte", xlab = "Este",
     main = "Ubicación (transformada) de Incidentes violentos en Ruanda")

# Observation window = convex hull of the projected points
africa.chull <- convexhull.xy(x = africa.transed$Easting,
                              y = africa.transed$Northing)
africa.ppp <- ppp(x = africa.transed$Easting, y = africa.transed$Northing,
                  africa.chull, marks = as.factor(africa.transed$YEAR))
unitname(africa.ppp) <- c("meter", "meters")
# By default, plot.ppp plots the first mark that was assigned
p1 <- plot.ppp(africa.ppp,
               main = "Distribucion de Acontecimientos Violentos en Ruanda")
legend(max(coords(africa.ppp))[1] + 1000, mean(coords(africa.ppp))[2],
       pch = p1, legend = names(p1), cex = 0.5)

# Split by year and rescale each pattern to the unit square so the
# envelope-based tests for 1997 and 2015 are comparable
anios = split.ppp(africa.ppp)
africa.1997 = anios$`1997`
africa.1997.sp <- as(africa.1997, "SpatialPoints")
africa.1997.spu <- elide(africa.1997.sp, scale = TRUE, unitsq = TRUE)
africa.1997.pppu <- as(africa.1997.spu, "ppp")
# G function (nearest-neighbour distances) with simulation envelopes
r = seq(0, sqrt(2)/6, by = 0.005)
env97 <- envelope(africa.1997.pppu, fun = Gest, r = r, nrank = 2, nsim = 99)
plot(env97, main = "Funcion G para conflictos durante 1997")

anios = split.ppp(africa.ppp)
africa.2015 = anios$`2015`
africa.2015.sp <- as(africa.2015, "SpatialPoints")
africa.2015.spu <- elide(africa.2015.sp, scale = TRUE, unitsq = TRUE)
africa.2015.pppu <- as(africa.2015.spu, "ppp")
env2015 <- envelope(africa.2015.pppu, fun = Gest, r = r, nrank = 2, nsim = 99)
plot(env2015, main = "Funcion G para conflictos durante 2015")

# F function (empty-space distances) on a finer grid
rf = seq(0, sqrt(2)/6, by = 0.0005)
Fenv97 = envelope(africa.1997.pppu, fun = Fest, r = rf, nrank = 2, nsim = 999)
plot(Fenv97, main = "Funcion F para conflictos durante 1997")
Fenv2015 = envelope(africa.2015.pppu, fun = Fest, r = rf, nrank = 2, nsim = 999)
plot(Fenv2015, main = "Funcion F para conflictos durante 2015")

# Ripley's K function
Kenv97 = envelope(africa.1997.pppu, fun = Kest, r = rf, nrank = 2, nsim = 999)
plot(Kenv97, main = "Funcion K para conflictos durante 1997")
Kenv2015 = envelope(africa.2015.pppu, fun = Kest, r = rf, nrank = 2, nsim = 999)
plot(Kenv2015, main = "Funcion K para conflictos durante 2015")

# Intensity estimation: Diggle's criterion picks the kernel bandwidth
mserw = bw.diggle(africa.1997.pppu)
bw = as.numeric(mserw)
bw
plot(density(africa.1997.pppu, bw = bw, kernel = 'gaussian'),
     main = "Densidad con kernel Gaussiano para 1997")
mserw = bw.diggle(africa.2015.pppu)
bw = as.numeric(mserw)
bw
plot(density(africa.2015.pppu, bw = bw, kernel = 'gaussian'),
     main = "Densidad con kernel Gaussiano para 2015")

# Modelling: inhomogeneous Poisson models with linear and degree-2
# polynomial trends in the coordinates
fit1997 = ppm(africa.1997.pppu, ~ x + y)
fit1997poly = ppm(africa.1997.pppu, ~polynom(x, y, 2))
fit2015 = ppm(africa.2015.pppu, ~ x + y)
fit2015poly = ppm(africa.2015.pppu, ~polynom(x, y, 2))
plot(fit1997, main = "Tendencia de eventos violentos en 1997, Modelo Lineal",
     pause = FALSE, se = FALSE)
# FIX: title previously said "2017"; this is the 1997 degree-2 model
plot(fit1997poly,
     main = "Tendencia de eventos violentos en 1997, Modelo Polinomial de grado 2",
     se = FALSE)
# FIX: compare linear vs polynomial within 1997 (the original repeated
# AIC(fit2015) here, which duplicated the comparison done below)
AIC(fit1997)
AIC(fit1997poly)
plot(fit2015, main = "Tendencia de eventos violentos en 2015, Modelo Lineal",
     se = FALSE)
plot(fit2015poly,
     main = "Tendencia de eventos violentos en 2015, Modelo Polinomial de grado 2",
     se = FALSE)
AIC(fit2015)
AIC(fit2015poly)
# Degree-5 polynomial trend for 2015
fit2015poly5 = ppm(africa.2015.pppu, ~polynom(x, y, 5))
# FIX: plot the degree-5 fit (the original plotted fit2015poly under a
# "grado 5" title)
plot(fit2015poly5,
     main = "Tendencia de eventos violentos en 2015, Modelo Polinomial de grado 5",
     se = FALSE)
AIC(fit2015poly5)
3cceb458669522245628bde5fa1eb8ef5c8bb13a
192acf57bf90aa3892bddd088a52c9a40e24a241
/data-preparation.R
3303006eb5a68f6c62eaaf171d1d3fea38d5a5ae
[]
no_license
szigony/ceu-mtpods
0c31668acd5a317f622328dbda281cb345d8761f
bbe89dd9a1769897f893bf254fa8ed2afb44dc7f
refs/heads/master
2020-05-26T20:20:22.545155
2019-06-09T21:46:19
2019-06-09T21:46:19
188,361,318
0
0
null
null
null
null
UTF-8
R
false
false
3,679
r
data-preparation.R
# Data preparation: builds three analysis-ready tibbles (drinks, lifetime,
# countries) from raw CSV/XLS files under data/.

# Libraries
library(dplyr)
library(tibble)
library(stringr)
library(readxl)

## Drinks.csv
# Calculate total liters of pure alcohol
# 1 ounce = 0.0295 liter
# Beer serving: 12 ounces, 5% alcohol
# Wine serving: 5 ounces, 12% alcohol
# Spirit serving: 1.5 ounces, 40% alcohol
# Omit the NA values so that it wouldn't skew the analysis
drinks <- as_tibble(read.csv("data/Drinks.csv")) %>%
  # "?" is the missing-value marker in the raw file
  mutate(beer_servings = na_if(beer_servings, "?"),
         wine_servings = na_if(wine_servings, "?"),
         spirit_servings = na_if(spirit_servings, "?")) %>%
  mutate_if(is.factor, as.character) %>%
  # mutate_each() is defunct (and took the function before the columns);
  # mutate_at() is the supported, equivalent scoped verb
  mutate_at(vars(ends_with("servings")), as.numeric) %>%
  mutate(alcohol_per_beer_servings = beer_servings * 12 * 0.05,
         alcohol_per_wine_servings = wine_servings * 5 * 0.12,
         alcohol_per_spirit_servings = spirit_servings * 1.5 * 0.4,
         total_ounces_of_pure_alcohol = alcohol_per_beer_servings +
           alcohol_per_wine_servings + alcohol_per_spirit_servings,
         total_liters_of_pure_alcohol = total_ounces_of_pure_alcohol * 0.0295) %>%
  select(country, beer_servings, wine_servings, spirit_servings,
         total_liters_of_pure_alcohol) %>%
  na.omit()

## LifeExpectancy.csv
# Omit the rows where the income_group is unknown
# Convert income_group, region and sex to factors
# Transform "Life expectancy at age 60 (years)" to life expectancy by adding 60
# Calculate the average life expectancy across the various metrics for both
# sexes and combined
lifetime <- as_tibble(read.csv("data/LifeExpectancy.csv")) %>%
  mutate(country = CountryDisplay,
         year = YearCode,
         metric = GhoDisplay,
         region = RegionDisplay,
         income_group = WorldBankIncomeGroupDisplay,
         sex = SexDisplay,
         value = Numeric) %>%
  select(country, year, metric, region, income_group, sex, value) %>%
  filter(income_group != "") %>%
  # e.g. "High_income" -> "High"; remaining "_" -> " "; empty strings -> NA
  mutate(income_group = as.factor(na_if(
    str_replace(str_replace(income_group, "_income", ""), "_", " "), ""))) %>%
  # replaces defunct mutate_each(list(as.character), country)
  mutate(country = as.character(country)) %>%
  # "at age 60" metrics report residual years; add 60 to get life expectancy
  mutate(value = ifelse(metric == "Life expectancy at age 60 (years)",
                        value + 60, value)) %>%
  group_by(country, year, region, income_group) %>%
  summarise(avg_life_expectancy = mean(value)) %>%
  ungroup()

## CountriesOfTheWorld.xls
# Read the specified range from the Excel file
# The headers flow through to the second line, merge them with the first
countries <- read_excel("data/CountriesOfTheWorld.xls", sheet = "Sheet1",
                        range = "A4:P232")
names(countries) <- paste(names(countries), countries[1, ], sep = " ")

# Convert all variables to the appropriate data type
# literacy, arable, crops and other are stored as percentages
countries <- countries %>%
  # drop the spilled-over header row that was merged into the names above
  slice(2:n()) %>%
  mutate(country = `Country NA`,
         population = `Population NA`,
         area = as.numeric(`Area sq. mi.`),
         population_density = as.numeric(`Pop. Density per sq. mi.`),
         coast_area_ratio = as.numeric(`Coastline coast/area ratio`),
         net_migration = `Net migration NA`,
         infant_mortality_rate = as.numeric(`Infant mortality per 1000 births`),
         gdp_per_capita = as.numeric(`GDP $ per capita`),
         literacy = as.numeric(`Literacy %`) / 100,
         phones = as.numeric(`Phones per 1000`),
         arable = as.numeric(`Arable %`) / 100,
         crops = as.numeric(`Crops %`) / 100,
         other = as.numeric(`Other %`) / 100,
         birthrate = `Birthrate NA`,
         deathrate = `Deathrate NA`) %>%
  select(country, population, area, population_density, coast_area_ratio,
         net_migration, infant_mortality_rate, gdp_per_capita, literacy,
         phones, arable, crops, other, birthrate, deathrate)
cd6130505da219a1708b2258fea9861b6fdfc8d7
5b2aa89a49b0414dff445fc7c1959ecd7180e0da
/plot2.R
cf5eb5a83602a00f7682b726d4822e4e36035945
[]
no_license
Pavit/ExData_Plotting1
0b40f8a60ea7a55064e2e3c32fe8f2b15d88d690
acd040acb27e7adfcf0575656096bdd3de0b2a2f
refs/heads/master
2020-03-28T00:52:42.930321
2018-10-09T01:18:48
2018-10-09T01:18:48
147,456,732
0
0
null
2018-09-05T03:52:29
2018-09-05T03:52:29
null
UTF-8
R
false
false
944
r
plot2.R
# This function reads data from household_power_consumption.txt # and, using a subset of power consumption data from 2/1/07 to 2/2/07, # it creates a line chart showing global active power by day in kilowatts plot2 <- function() { # subset the data to get the time period that we want data <- read.table("household_power_consumption.txt",stringsAsFactors = FALSE, sep = ";", header = TRUE) data$Date <- as.Date(data$Date, format = "%d/%m/%Y") data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02") # reformat Global active power to numeric and create datetime field dt dt <- paste(data$Date, data$Time) data$dt <- as.POSIXct(dt) data$Global_active_power <- as.numeric(data$Global_active_power) # create plot png("plot2.png", width = 480, height = 480) with(data, {plot(Global_active_power ~ dt, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "") }) dev.off() }
fca22855e98ab1c7110033c08aea3fc8b2ed827c
12121d441ca67952f281823499c7252a09df65b2
/R/suns.R
1dcd149be3233f21f46543e66259f0cb509ce420
[]
no_license
cran/StatDA
53022b17e897ecb5c0ad07d12ce5db1acefdd96b
828bc95cda80edd448192fea1239e5d5e61982fd
refs/heads/master
2023-06-08T14:10:18.890836
2023-06-02T11:20:05
2023-06-02T11:20:05
17,693,780
0
0
null
null
null
null
UTF-8
R
false
false
5,092
r
suns.R
# suns(): draw one "sun" glyph per row of x — a set of radial segments whose
# lengths encode the row's column values — at given (or grid) locations.
# Adapted from graphics::stars(), but drawing only the radii (no polygon
# outline).
#
# Arguments (mirroring stars()):
#   x           matrix or data frame; one glyph per row, one ray per column
#   full        TRUE: rays span the full circle; FALSE: upper half only
#   scale       rescale each column to [0, 1] before drawing
#   radius      draw the radial segments (both for glyphs and the key)
#   labels      per-glyph labels (default: row names)
#   locations   glyph centres; NULL = lay out on a grid, length-2 numeric =
#               draw all glyphs at one point, else a 2-column matrix
#   len         scale factor for ray length
#   key.loc     centre of the unit key; NULL = no key
#   key.labels  per-ray labels for the key (default: column names)
#   flip.labels alternate label heights on consecutive glyphs
#   add/plot    add to an existing plot / suppress drawing entirely
#   ...         passed on to plot() and box()
# Returns (invisibly) the matrix of glyph locations used.
suns <- function (x, full = TRUE, scale = TRUE, radius = TRUE,
    labels = dimnames(x)[[1]], locations = NULL, nrow = NULL, ncol = NULL,
    len = 1, key.loc = NULL, key.labels = dimnames(x)[[2]], key.xpd = TRUE,
    xlim = NULL, ylim = NULL, flip.labels = NULL, col.stars = NA,
    axes = FALSE, frame.plot = axes, main = NULL, sub = NULL, xlab = "",
    ylab = "", cex = 0.8, lwd = 0.25, lty = par("lty"), xpd = FALSE,
    mar = pmin(par("mar"), 1.1 + c(2 * axes + (xlab != ""),
        2 * axes + (ylab != ""), 1, 0)), add = FALSE, plot = TRUE, ...)
{
    # plot suns as multivariate graphics
    #
    # --- input validation: coerce to a numeric matrix ---
    if (is.data.frame(x))
        x <- data.matrix(x)
    else if (!is.matrix(x))
        stop("'x' must be a matrix or a data frame")
    if (!is.numeric(x))
        stop("data in 'x' must be numeric")
    n.loc <- nrow(x)   # number of glyphs
    n.seg <- ncol(x)   # number of rays per glyph
    # --- resolve glyph locations ---
    if (is.null(locations)) {
        # auto-layout on an nrow x ncol grid (roughly square by default)
        if (is.null(nrow))
            nrow <- ceiling(if (!is.numeric(ncol)) sqrt(n.loc) else n.loc/ncol)
        if (is.null(ncol))
            ncol <- ceiling(n.loc/nrow)
        if (nrow * ncol < n.loc)
            stop("nrow * ncol < number of observations")
        # cell pitch: slightly wider when labels need room below the glyphs
        ff <- if (!is.null(labels)) 2.3 else 2.1
        locations <- expand.grid(ff * 1:ncol, ff * nrow:1)[1:n.loc, ]
        # auto-enable alternating label heights for wide grids of long labels
        if (!is.null(labels) && (missing(flip.labels) || !is.logical(flip.labels)))
            flip.labels <- ncol * mean(nchar(labels, type = "c")) > 30
    }
    else {
        if (is.numeric(locations) && length(locations) == 2) {
            # single point: overlay all glyphs at the same centre
            locations <- cbind(rep.int(locations[1], n.loc),
                rep.int(locations[2], n.loc))
            if (!missing(labels) && n.loc > 1)
                warning("labels do not make sense for a single location")
            else labels <- NULL
        }
        else {
            # explicit per-glyph centres: must be an n.loc x 2 matrix
            if (is.data.frame(locations))
                locations <- data.matrix(locations)
            if (!is.matrix(locations) || ncol(locations) != 2)
                stop("'locations' must be a 2-column matrix.")
            if (n.loc != nrow(locations))
                stop("number of rows of 'locations' and 'x' must be equal.")
        }
        if (missing(flip.labels) || !is.logical(flip.labels))
            flip.labels <- FALSE
    }
    xloc <- locations[, 1]
    yloc <- locations[, 2]
    # one angle per column; full circle (endpoint dropped) or upper half
    angles <- if (full)
        seq(0, 2 * pi, length = n.seg + 1)[-(n.seg + 1)]
    else seq(0, pi, length = n.seg)
    if (length(angles) != n.seg)
        stop("length of 'angles' must equal 'ncol(x)'")
    # column-wise min-max rescaling to [0, 1]
    if (scale) {
        x <- apply(x, 2, function(x) (x - min(x, na.rm = TRUE))/diff(range(x,
            na.rm = TRUE)))
    }
    # missing values draw as zero-length rays
    x[is.na(x)] <- 0
    mx <- max(x <- x * len)   # longest ray, used to pad the axis limits
    if (is.null(xlim))
        xlim <- range(xloc) + c(-mx, mx)
    if (is.null(ylim))
        ylim <- range(yloc) + c(-mx, mx)
    deg <- pi/180
    # set margins/clipping; restored on exit
    op <- par(mar = mar, xpd = xpd)
    on.exit(par(op))
    if (!add)
        plot(0, type = "n", ..., xlim = xlim, ylim = ylim, main = main,
            sub = sub, xlab = xlab, ylab = ylab, asp = 1, axes = axes)
    if (!plot)
        return()
    # ray endpoints: n.loc x n.seg matrices (angles recycled column-wise)
    s.x <- xloc + x * rep.int(cos(angles), rep.int(n.loc, n.seg))
    s.y <- yloc + x * rep.int(sin(angles), rep.int(n.loc, n.seg))
    # draw each glyph's rays from its centre
    for (i in 1:n.loc) {
        if (radius)
            segments(rep.int(xloc[i], n.seg), rep.int(yloc[i], n.seg),
                s.x[i, ], s.y[i, ], lwd = lwd, lty = lty)
    }
    # glyph labels, centred below each glyph; optionally alternating height
    if (!is.null(labels)) {
        y.off <- mx * (if (full) 1 else 0.1)
        if (flip.labels)
            y.off <- y.off + cex * par("cxy")[2] * ((1:n.loc)%%2 -
                if (full) 0.4 else 0)
        text(xloc, yloc - y.off, labels, cex = cex, adj = c(0.5, 1))
    }
    # --- unit key: one full-length ray per column, with labels ---
    if (!is.null(key.loc)) {
        par(xpd = key.xpd)
        key.x <- len * cos(angles) + key.loc[1]
        key.y <- len * sin(angles) + key.loc[2]
        # polygon outline intentionally disabled (suns draw radii only):
        #polygon(key.x, key.y, lwd = lwd, lty = lty)
        if (radius)
            segments(rep.int(key.loc[1], n.seg), rep.int(key.loc[2], n.seg),
                key.x, key.y, lwd = lwd, lty = lty)
        lab.angl <- angles
        label.x <- 1.1 * len * cos(lab.angl) + key.loc[1]
        label.y <- 1.1 * len * sin(lab.angl) + key.loc[2]
        # per-label justification so text sits outside its ray regardless of
        # which quadrant the ray points into
        for (k in 1:n.seg) {
            text.adj <- c(if (lab.angl[k] < 90 * deg || lab.angl[k] > 270 *
                deg) 0 else if (lab.angl[k] > 90 * deg && lab.angl[k] <
                270 * deg) 1 else 0.5, if (lab.angl[k] <= 90 * deg) (1 -
                lab.angl[k]/(90 * deg))/2 else if (lab.angl[k] <= 270 *
                deg) (lab.angl[k] - 90 * deg)/(180 * deg) else 1 -
                (lab.angl[k] - 270 * deg)/(180 * deg))
            text(label.x[k], label.y[k], labels = key.labels[k], cex = cex,
                adj = text.adj)
        }
    }
    if (frame.plot)
        box(...)
    invisible(locations)
}
ae0f8c91caacbbcad0aec1f531bb67f50fa4b66e
7c7c3469bf92ecedae878a35ca6d609071c569fe
/src/data/descriptive_analysis.R
c249f80d0f232aea844e58549500de06592f484f
[ "MIT" ]
permissive
joaophellip/comp5904
9fe3309e616397d2135923ec2db3ca3cb49cbfb4
0f7bb443f615bd812eb64b88d251c155e5246fe6
refs/heads/master
2023-01-30T17:46:10.093590
2020-12-05T22:59:21
2020-12-05T22:59:21
298,909,593
0
0
null
null
null
null
UTF-8
R
false
false
24,216
r
descriptive_analysis.R
library(tidyverse) library(ggcorrplot) source("src/data/earthquake_damage.R") data <- Earthquake_data() data$generate() # Juntando as features com a variável resposta damage <- as_tibble(cbind(data$dataset, damage_grade = factor(data$class_response, labels = c("low", "medium", "severe")))) # Separando entre conjunto de treinamento e teste set.seed(135) index <- sample(1:260601, ceiling(0.6 * 260601)) damage_test <- damage[-index, ] damage <- damage[index, ] # Estrutura glimpse(damage) # Análise descritiva no conjunto de treinamento -------------------------------- ### Resposta graph <- damage %>% ggplot(aes(x = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar() + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + # geom_text(aes(y =..count.., # label = scales::percent(..count../sum(..count..), accuracy = 0.01)), # position = position_dodge(0.9), vjust = -0.9, size = 3.5, stat="count") + # coord_cartesian(ylim = c(0, 100000)) + labs(x = "Grau de dano", y = "Número de construções") print(graph) # Proporções round(100 * prop.table(table(damage$damage_grade)), 2) ### Correlação entre as variáveis quantitativas ggcorrplot(cor(damage %>% select(age, area_percentage, height_percentage, count_families, count_floors_pre_eq)), hc.order = TRUE, type = "upper", outline.col = "white", ggtheme = "theme_void", lab = TRUE ) ### Região geográfica mais geral -- geo_level_1_id graph <- damage %>% ggplot(aes(x = geo_level_1_id, fill = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade)) + scale_x_discrete(breaks = seq(0, 30, 5)) + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Região geográfica", y = "Número de construções") print(graph) # A grande maioria das regiões apresentam construções que sofreram # um dano médio; # As regiões em que a maioria das construções soferam 
danos mais severos # foram as regiões 8, 17, 18 e 21. Entre elas destaca-se a região 17 # Proporções dos danos dentro de cada região round(100 * prop.table(table(damage$geo_level_1_id, damage$damage_grade), 1), 2) ### Área -- area_percentage # Boxplot graph <- damage %>% ggplot(aes(x = damage_grade, y = area_percentage)) + theme_classic() + geom_boxplot() + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + labs(x = "Grau de dano", y = "Área normalizada") # Barplot damage %>% ggplot(aes(x = area_percentage, fill = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Área normalizada", y = "Frequência") ### Altura -- height_percentage # Boxplot graph <- damage %>% ggplot(aes(x = damage_grade, y = height_percentage)) + theme_classic() + geom_boxplot() + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + labs(x = "Grau de dano", y = "Altura normalizada") # Barplot graph <- damage %>% ggplot(aes(x = height_percentage, fill = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Altura normalizada", y = "Frequência") ### Número de andares antes do terremoto -- count_floors_pre_eq # Boxplot graph <- damage %>% ggplot(aes(x = damage_grade, y = count_floors_pre_eq)) + theme_classic() + geom_boxplot() + scale_y_continuous(breaks = 1:9) + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + labs(x = "Grau de dano", y = "Número de andares") # Barplot graph <- damage %>% ggplot(aes(x = count_floors_pre_eq, fill = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = 
"dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + scale_x_continuous(breaks = 1:9) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. ,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 1) + coord_cartesian(ylim = c(0, 60000)) + labs(fill = "Grau de dano", x = "Número de andares", y = "Número de construções") print(graph) # Summary summary(damage$count_floors_pre_eq); sd(damage$count_floors_pre_eq) # Tabela de frequências table(damage$count_floors_pre_eq) # A maioria das construções possuem 2 andares # 95% das construções possuem até 3 andares quantile(damage$count_floors_pre_eq, probs = 0.95) # Proporções de dano dentro de cada nível dos andares prop.table(table(damage$count_floors_pre_eq, damage$damage_grade), margin = 1) ### Idade -- age # Boxplot graph <- damage %>% ggplot(aes(x = damage_grade, y = age)) + theme_classic() + geom_boxplot() + scale_y_continuous(breaks = 1:9) + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + labs(x = "Grau de dano", y = "Idade") print(graph) # Retirando os outliers graph <- damage %>% dplyr::filter(age < 200) %>% ggplot(aes(x = damage_grade, y = age)) + theme_classic() + geom_boxplot() + scale_y_continuous(breaks = 1:9) + scale_x_discrete(labels = c("Baixo", "Médio", "Severo")) + labs(x = "Grau de dano", y = "Idade") print(graph) # Barplots graph <- damage %>% ggplot(aes(x = age, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Idade", y = "Número de construções") # Retirando o outlier graph <- damage %>% filter(age < 250) %>% ggplot(aes(x = age, group = damage_grade)) + theme_classic() + 
theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Idade", y = "Número de construções") # Note que o número de construções que sofreram danos leves decai # exponencialmente quando a idade aumenta # Summary summary(damage$age); sd(damage$age) # Tabela de frequências table(damage$age) # Distribuição das construções com idade igual a 995 prop.table(table(damage$damage_grade[which(damage$age == 995)])) ### Condição da superfície -- land_surface_condition # Distribuição geral prop.table(table(damage$land_surface_condition)) # Proporções de dano dentro de cada nível das condições prop.table(table(damage$land_surface_condition, damage$damage_grade), margin = 1) # Note que dentro de cada nível das condições as proporções de danos # são semelhantes sugerindo o tipo de dano independe da condição da # superfície graph <- damage %>% ggplot(aes(x = land_surface_condition, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. 
,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Condição da superfície", y = "Número de construções") print(graph) ### Tipo de fundação - foundation_type graph <- damage %>% ggplot(aes(x = foundation_type, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. ,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Tipo de fundação", y = "Número de construções") print(graph) # Proporções dos tipos de fundação round(100*prop.table(table(damage$foundation_type)), 2) # Proporções de dano dentro de cada nível dos tipos de fundação prop.table(table(damage$foundation_type, damage$damage_grade), margin = 1) # As proporções de dano dentro de cada nível do tipo de fundação sugerem # associação entre as variáveis. # As fundações "h" e "r" apresentaram maiores danos do tipo severo, # enquanto que cerca de 98% de construções com tipo de fundação # "i" sofreram danos de leves a médios. ### Tipo de andar térreo -- ground_floor_type graph <- damage %>% ggplot(aes(x = ground_floor_type, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. 
,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Tipo de andar térreo", y = "Número de construções") print(graph) # Proporções dos tipos de andar térreo round(100*prop.table(table(damage$ground_floor_type)), 2) # Proporções de dano dentro de cada nível do tipo de andar térreo prop.table(table(damage$ground_floor_type, damage$damage_grade), margin = 1) # As proporções sugerem associação. Consruções com o tipo de andar térreo # "f" e "x" sofreram danos semelhantes. ### Tipo de piso utilizado (exceto telhado e térreo) -- other_floor_type graph <- damage %>% ggplot(aes(x = other_floor_type, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. ,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Tipo de piso utilizado (exceto telhado e térreo)", y = "Número de construções") print(graph) # Proporções de dano dentro de cada nível do tipo de piso prop.table(table(damage$other_floor_type, damage$damage_grade), margin = 1) # Aqui é possível perceber que construções com tipo de piso "s" sofreram # menos danos severos em comparação com os outros tipos. As construções # com pisos "q" e "x" destacam-se pelos altos níveis de danos sofridos. 
### Tipo de telhado -- roof_type graph <- damage %>% ggplot(aes(x = roof_type, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. ,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Tipo de telhado", y = "Número de construções") print(graph) # Proporções de dano dentro de cada nível do tipo de telhado prop.table(table(damage$roof_type, damage$damage_grade), margin = 1) # As proporções de dano dentro de cada nível do tipo de telhado sugerem # associação entre as variáveis. # Construções com o tipo de telhado "x" sofreram menos dados severos do # que construções com outros tipos de telhados. ### Posição -- position graph <- damage %>% ggplot(aes(x = position, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. 
,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Posição", y = "Número de construções") print(graph) # As distribuições dos danos sofridos em cada nível da posição # são relativamente semelhantes entre si, indicando que o dano sofrido # independe da posição da construção # Configuração do plano de construção -- plan_configuration graph <- damage %>% ggplot(aes(x = plan_configuration, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Configuração do plano de construção", y = "Número de construções") print(graph) # Proporções de dano dentro de cada nível da configuração do plano prop.table(table(damage$plan_configuration, damage$damage_grade), margin = 1) # Número de observações em cada classe table(damage$plan_configuration) ### Status -- legal_ownership_status graph <- damage %>% ggplot(aes(x = legal_ownership_status, group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Status legal de propriedade do terreno", y = "Número de construções") print(graph) # Proporções de dano dentro de cada nível do status prop.table(table(damage$legal_ownership_status, damage$damage_grade), margin = 1) table(damage$legal_ownership_status) # As porporções sugerem que os danos sofridos possuem associação com # o status legal da construção. Note que construções com status "a" # tiveram mais danos leves e menos danos severos do que construções com # status "w". 
### Número de famílias -- count_families graph <- damage %>% filter(count_families <= 3) %>% ggplot(aes(x = as.factor(count_families), group = damage_grade)) + theme_classic() + theme(legend.position = "top") + geom_bar(aes(fill = damage_grade), position = "dodge") + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("low" = "#482677FF", "medium" = "#2D708EFF", "severe" = "#73D055FF")) + geom_text(aes(y =..count.., label = scales::percent(..count../tapply(..count.., ..x.. ,sum)[..x..]) ), stat="count", position = position_dodge(0.9), vjust = -0.9, size = 3) + coord_cartesian(ylim = c(0, 80000)) + labs(fill = "Grau de dano", x = "Número de famílias", y = "Número de construções") print(graph) # Proporções de dano dentro de cada nível do número de famílias prop.table(table(damage$count_families, damage$damage_grade), margin = 1) # Levando em conta que existem poucas construções com mais de 6 famílias, # não parece haver associação entre o grau de dano e o número de famílias # vivendo na construção. 
### Superstructure # ANA: library(tigerstats) library(reshape2) library(plyr) tab0 <- colPerc(xtabs(~damage_grade+has_superstructure_adobe_mud,data=damage)) tab1 <- colPerc(xtabs(~damage_grade+has_superstructure_mud_mortar_stone,data=damage)) tab2 <- colPerc(xtabs(~damage_grade+has_superstructure_stone_flag,data=damage)) tab3 <- colPerc(xtabs(~damage_grade+has_superstructure_cement_mortar_stone,data=damage)) tab4 <- colPerc(xtabs(~damage_grade+has_superstructure_mud_mortar_brick,data=damage)) tab5 <- colPerc(xtabs(~damage_grade+has_superstructure_cement_mortar_brick,data=damage)) tab6 <- colPerc(xtabs(~damage_grade+has_superstructure_timber,data=damage)) tab7 <- colPerc(xtabs(~damage_grade+has_superstructure_bamboo,data=damage)) tab8 <- colPerc(xtabs(~damage_grade+has_superstructure_rc_non_engineered,data=damage)) tab9 <- colPerc(xtabs(~damage_grade+has_superstructure_rc_engineered,data=damage)) tab10 <- colPerc(xtabs(~damage_grade+has_superstructure_other,data=damage)) tab <- cbind(tab0, tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8, tab9, tab10) names <- c("Adobe/barro", "Barro e pedra", "Pedra", "Cimento e pedra", "Barro e tijolo", "Cimento e tijolo", "Madeira", "Bamboo", "Concreto armado", "Concreto armado projetado", "Outro material" ) #names <- c("adobe_mud", "mud_mortar_stone", "stone_flag", # "cement_mortar_stone", "mud_mortar_brick", # "cement_mortar_brick", "timber", "bamboo", # "non_engineered", "engineered", "other" ) teste <- data.frame(tab) drop <- c("X0", "X0.1", "X0.2", "X0.3", "X0.4", "X0.5", "X0.6", "X0.7", "X0.8","X0.9", "X0.10") tab_f <- teste[,!(names(teste) %in% drop)] colnames(tab_f)<-names Cat <- seq(1, 4, 1) tab_f <- cbind(tab_f, Cat) plot_teste <- melt(tab_f, id.vars = "Cat") plot_teste_fim <- plot_teste[plot_teste$Cat < 4,] plot_teste_fim_sor <- plyr::arrange(plot_teste_fim, variable, desc(Cat)) plot_teste_fim2 <- ddply(plot_teste_fim_sor, "variable", transform, label_ypos = cumsum(value)) # A intenção aqui foi destacar como os materiais 
utilizados nas construções # influenciam o nível de dano. De repente cabe mencionar na análise quando # o percentual de 1 foi menor que o de 3 e quando não. # E destacar a distribuição da 'has_superstructure_rc_engineered'. y_pos <- matrix(plot_teste_fim2$label_ypos, nrow = 3) y_pos[2, ] <- apply(y_pos, 2, diff)[1,]/2 + y_pos[1, ] y_pos[1, ] <- 5.78 y_pos[3, ] <- 99.5 graph <- ggplot(plot_teste_fim2, aes(x = variable, y = value, fill = factor(Cat))) + geom_bar(stat = "identity") + theme_classic() + theme(legend.position = "top", axis.text.x = element_text(angle = 30, hjust = 1, size = 8)) + geom_text(aes(y = y_pos, label = value), vjust = 0.9, color = "white", size = 3.5) + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("1" = "#482677FF", "2" = "#2D708EFF", "3" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Superestrutura", y = "Distribuição do grau de dano") print(graph) ### Secondary use # ANA: # O próximo gráfico mostra cada uma das variáveis a partir da # has_secondary_use até a has_secondary_use_other. 
Peguei todas elas quando # valiam '1' e calculei a distribuição com relação a variável damage_grade tab0 <- colPerc(xtabs(~damage_grade+has_secondary_use,data=damage)) tab1 <- colPerc(xtabs(~damage_grade+has_secondary_use_agriculture,data=damage)) tab2 <- colPerc(xtabs(~damage_grade+has_secondary_use_hotel,data=damage)) tab3 <- colPerc(xtabs(~damage_grade+has_secondary_use_rental,data=damage)) tab4 <- colPerc(xtabs(~damage_grade+has_secondary_use_institution,data=damage)) tab5 <- colPerc(xtabs(~damage_grade+has_secondary_use_school,data=damage)) tab6 <- colPerc(xtabs(~damage_grade+has_secondary_use_industry,data=damage)) tab7 <- colPerc(xtabs(~damage_grade+has_secondary_use_health_post,data=damage)) tab8 <- colPerc(xtabs(~damage_grade+has_secondary_use_gov_office,data=damage)) tab9 <- colPerc(xtabs(~damage_grade+has_secondary_use_use_police,data=damage)) tab10 <-colPerc(xtabs(~damage_grade+has_secondary_use_other,data=damage)) tab <- cbind(tab0, tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8, tab9, tab10) names <- c("Possui algum", "Agricultura", "Hotel", "Aluguel", "Instituição", "Escola", "Indústria", "Posto de saúde", "Escritório de governo", "Polícia", "Outro") #names<-c("secondary_use", "agriculture", "hotel", "rental", "institution", "school", "industry", "health_post", "gov_office", "police", "other" ) teste<-data.frame(tab) drop <- c("X0", "X0.1", "X0.2", "X0.3", "X0.4", "X0.5", "X0.6", "X0.7", "X0.8","X0.9", "X0.10") tab_f <- teste[,!(names(teste) %in% drop)] colnames(tab_f)<-names Cat <- seq(1, 4, 1) tab_f<-cbind(tab_f, Cat) plot_teste<-melt(tab_f, id.vars = "Cat") plot_teste_fim<- plot_teste[plot_teste$Cat < 4,] plot_teste_fim_sor <- plyr::arrange(plot_teste_fim, variable, desc(Cat)) plot_teste_fim2 <- ddply(plot_teste_fim_sor, "variable", transform, label_ypos=cumsum(value)) # A intenção aqui foi destacar como estruturas utilizadas para determinados # fins (hoteis, postos de saúde, ...) sofreram danos menores. 
# De repente cabe mencionar na análise quando o percentual de 1 foi menor # que o de 3 e quando não. y_pos <- matrix(plot_teste_fim2$label_ypos, nrow = 3) y_pos[2, ] <- apply(y_pos, 2, diff)[1,]/2 + y_pos[1, ] y_pos[1, ] <- 5.78 y_pos[3, ] <- 99.5 graph <- ggplot(plot_teste_fim2, aes(x = variable, y = value, fill = factor(Cat))) + geom_bar(stat = "identity") + theme_classic() + theme(legend.position = "top", axis.text.x = element_text(angle = 30, hjust = 1, size = 8)) + geom_text(aes(y = y_pos, label = value), vjust = 0.9, color = "white", size = 3.5) + scale_fill_manual(labels = c("Baixo", "Médio", "Severo"), values = c("1" = "#482677FF", "2" = "#2D708EFF", "3" = "#73D055FF")) + labs(fill = "Grau de dano", x = "Uso secundário", y = "Distribuição do grau de dano") print(graph) remove(list=c("graph"))
5302f2a8b03b70ebda3fea369c74d7dbd0a7e443
093f4979b58388700d670906ddb4f9e839675299
/plotZoom.R
e1db2de48ab0af75a4b12f7a9059a1ef5916c843
[]
no_license
sgschneider01/R_code
6ac0c871ad619635f238a655dde7c5f6e5d1a106
39d1db3d91673655cf799343eb884ee4caded5ad
refs/heads/master
2020-06-20T12:44:19.672021
2016-11-27T02:50:22
2016-11-27T02:50:22
74,863,072
0
0
null
null
null
null
UTF-8
R
false
false
2,158
r
plotZoom.R
plotZoom <- function (egid,sp,ss=NULL) { # Plots mapped probe locations FOR EACH TRANSCRIPT SEPARATELY if (is.numeric(egid)) egid <- as.character(egid) txs <- parseExonTable(egid) # this is a list tx.list <- lapply(txs,genom2tx) tx.num <- length(tx.list) tx.nms <- gsub(" .+","",names(tx.list)) big.mat <- getTxLocs(egid,sp,tx.nms) ps.num <- nrow(big.mat)/tx.num if (tx.num>1) devAskNewPage(ask = TRUE) for (j in 1:tx.num) { mat2 <- big.mat[((j-1)*ps.num+1):(j*ps.num),] pretty <- rainbow(nrow(mat2)) if (!is.null(ss)) { mat2 <- mat2[ss,] pretty <- pretty[ss] } x.min <- round(min(mat2)/100,0)*100-100 if (max(mat2)>0) { x.max <- round(max(mat2)/100,0)*100+100 } else { x.max <- max(unlist(tx.list)) } sym <- switch(sp, "Mm" = get(egid,org.Mm.egSYMBOL), "mouse" = get(egid,org.Mm.egSYMBOL), "Hs" = get(egid,org.Hs.egSYMBOL), "human" = get(egid,org.Hs.egSYMBOL) ) id <-paste(sym," (",egid,")",sep='') # create the axes plot (x.min:x.max, rep(0,x.max-x.min+1),type="l",ylim=c(-.5,1), yaxp=c(0,0,1), las=3, ylab="", main=id, xlab="Transcript Position") legend("topright",rownames(mat2),lty=1,bty="n",col=pretty,cex=.5) #plot all the probes for (i in 1:nrow(mat2)) { ps <- as.numeric(mat2[i,]) ps <- ps[which(ps>0)] if (length(ps) > 0) { sap <- sapply(ps, function (x) lines(rep(x,2),c(0,.1),col=pretty[i],type="l") ) } } plotAltTxExons(tx.list[j]) } devAskNewPage(ask = FALSE) } plotAltTxExons <- function (tx) { #plot the exons exons <- tx[[1]]$exons alt.col<- rep(c("black","gray"),nrow(exons)) for (j in 1:nrow(exons)) { lines(exons[j,],rep(0,2),type="l",lwd=5,lend=1,col=alt.col[j]) } #plot the start and stop positions (coding sequence) cds<-tx[[1]]$cds points(cds,rep(-.02,2),pch=24,bg=c("green","red"),cex=.7) id.ex <- gsub(" .+exons: +([0-9]+) ?.*"," (\\1 exons)",names(tx)) legend("top",id.ex,lty=1,lwd=5,bty="n",col="black",cex=.7) }
efdc58134529f930ae813c588179da4323a70252
576fd3d9b972a46d6284c51baa3603811d482cc6
/source/prototypeNormalization.R
b2b7dff3dc8c57dce9b499b81e93e40e63e0619f
[]
no_license
NicolasHousset/RetentionTimeAnalysis
89bd5da47245a60465293d5899a38d34fc771f1d
ff4801ba8a1876dc5dcd234473f12e0ff57dca48
refs/heads/master
2020-05-29T14:05:10.382666
2013-10-16T16:52:37
2013-10-16T16:52:37
null
0
0
null
null
null
null
UTF-8
R
false
false
4,450
r
prototypeNormalization.R
# Something huge... # How can we bring closer the retention times of different set of lc-run ? I'll try stuff on two sets of lc-run I found with Spotfire library(data.table) library(ggplot2) projectPath <- "C:/Users/Nicolas Housset/Documents/RetentionTimeAnalysis" load(file = paste0(projectPath,"/data/identified.RData")) identified[, l_lcrunid := as.character(l_lcrunid)] setkey(identified, l_lcrunid) identified <- identified[c(as.character(87371:87376), as.character(88556:88562))] # The "most common peptide" notion is here lcrun-based. setkey(identified, l_lcrunid, modified_sequence) countsPerProject <- unique(identified)[, list(l_lcrunid,modified_sequence)] countsPerProject[, modified_sequence.f := factor(modified_sequence)] nbProjPerPeptide <- summary(countsPerProject[, modified_sequence.f], maxsum = 1000000) rm(countsPerProject) # 2774 peptides (22/08/2013) # Create an alphabetical-based index id_peptide <- 1:NROW(nbProjPerPeptide) dt <- data.table(id_peptide) dt[, modified_sequence := labels(nbProjPerPeptide)] dt[, nbProjPep := -nbProjPerPeptide] setkey(dt, nbProjPep) # Here, the index will depend of the number of projects in which each peptide appear dt[, rank_peptide := 1:NROW(nbProjPerPeptide)] dt[, nbProjPep := -nbProjPep] setkey(dt, modified_sequence) setkey(identified, modified_sequence) identified <- identified[dt] # We repeat this part on the protein level setkey(identified, l_lcrunid, accession) protsPerProject <- unique(identified)[, list(l_lcrunid, accession)] protsPerProject[, accession.f := factor(accession)] nbProjPerProtein <- summary(protsPerProject[, accession.f], maxsum = 500000) rm(protsPerProject) # 54402 proteins (21/08/2013) # Create an alphabetical-based index id_protein <- 1:NROW(nbProjPerProtein) dt <- data.table(id_protein) dt[, accession := labels(nbProjPerProtein)] dt[, nbProjProt := -nbProjPerProtein] setkey(dt, nbProjProt) # Here, the index will depend of the number of projects in which each protein appear dt[, rank_protein := 
1:NROW(nbProjPerProtein)] dt[, nbProjProt := -nbProjProt] setkey(dt, accession) setkey(identified, accession) identified <- identified[dt] setkey(identified, l_lcrunid, modified_sequence, rtsec) # To remove rt that have been identified more than once (otherwise, index are altered) identified <- unique(identified) convenient_vector <- 1:4000 # Add an index : 1 for the first time a peptide is encountered in a LC-run, 2 the second time, etc... # convenient_vector is automatically shrinked to the appropriate size : that is very convenient :) identified[, index_rt1 := convenient_vector, by = c("l_lcrunid","modified_sequence")] # Slightly different index : number of times the peptide is identified in the LC-run. identified[, size_rt := .N, by = c("l_lcrunid", "modified_sequence")] identified[,total_spectrum_intensity := -total_spectrum_intensity] setkey(identified, l_lcrunid, modified_sequence, total_spectrum_intensity) identified[, index_rt2 := convenient_vector, by = c("l_lcrunid","modified_sequence")] identified[,total_spectrum_intensity := -total_spectrum_intensity] identified[, grpLC := (as.numeric(l_lcrunid) > 88555)] table(identified[, grpLC]) test <- identified[, nbId := .N * (index_rt2 == 1), by = c("grpLC", "modified_sequence")] test <- identified[, nbId := .N, by = c("grpLC", "modified_sequence")] table(test[, nbId, by = grpLC]) part1 <- identified[grpLC == FALSE] part2 <- identified[grpLC == TRUE] part1[, rt1 := quantile(rtsec, probs = 0.5), by = modified_sequence] part2[, rt2 := quantile(rtsec, probs = 0.5), by = modified_sequence] setkey(part1, modified_sequence) setkey(part2, modified_sequence) fusion1 <- unique(part1)[, j = list(sequence, rt1, modified_sequence)] fusion2 <- unique(part2)[, j = list(sequence, rt2, modified_sequence)] setkey(fusion1, sequence, modified_sequence) setkey(fusion2, sequence, modified_sequence) fusion <- merge(fusion1, fusion2) plot_fusion <- ggplot(fusion, aes(x=rt1, y=rt2)) plot_fusion + geom_point() fusion[, rt1_adjust := rt1 
- 800] fusion[, rt2_adjust := rt2 - 800] fusion[,rtdiff := rt2 - rt1] setkey(fusion, rtdiff) plot_diff <- ggplot(fusion, aes(x=rt1,y=rtdiff)) plot_diff + geom_point() fusion_2 <- fusion[rtdiff > 40] plot_diff <- ggplot(fusion_2, aes(x=rt1,y=rtdiff)) plot_diff + geom_point() fusion_2[,rt2trans := 0.951223 * rt2 - 56.760450] plot_diff <- ggplot(fusion_2, aes(x=rt1,y=rt2trans)) plot_diff + geom_point()
8fef13deb71d9f0c21f72280fae6d60692624056
65688298753e6ad992da18a864ebcce6359a2c92
/HM_ref_check.R
4651a35ee379487954f21809a7483b111276551a
[]
no_license
tleung2/HM
3d71ed1bf17fb974c5f0a7555871a64bba76aec9
de32aa20a465bf4af0fdd9eafb6460c573c96673
refs/heads/main
2023-01-27T20:42:07.993476
2020-12-11T07:32:32
2020-12-11T07:32:32
319,709,290
0
1
null
null
null
null
UTF-8
R
false
false
30,765
r
HM_ref_check.R
## SET WORKING DIRECTORY ## Install Phyloseq if needed (for R ver 4.0+) if(!requireNamespace("BiocManager")){ install.packages("BiocManager") } BiocManager::install("phyloseq") ## Alternative for installing phyloseq (from Phyloseq website) ## I always have problem going this route source('http://bioconductor.org/biocLite.R') biocLite('phyloseq') ## ------- LOAD LIBRARIES ----------- #install.packages(c("vegan","tidyverse","scales","gridGraphics", "reshape2")) library(tidyverse) ## Package includes ggplot and dplyr library(scales) library(gridGraphics) library(reshape2) library(vegan) library(phyloseq) ############################################################################################ ## ------- ASSIGNING VARIABLES FOR PHYLOSEQ----------- ## Make sure that the tax and shared files are in the working directory ## Copy shared, tax, and map file names with extension to corresponding values ## Assign variables for data that will be imported sharedfile_gg = "HM_gg.trim.contigs.good.unique.good.filter.unique.precluster.pick.pick.opti_mcc.shared" taxfile_gg = "HM_gg.trim.contigs.good.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.taxonomy" ## Repeat for rdp ref database sharedfile_rdp = "HM_rdp.trim.contigs.good.unique.good.filter.unique.precluster.pick.pick.opti_mcc.shared" taxfile_rdp = "HM_rdp.trim.contigs.good.unique.good.filter.unique.precluster.pick.pick.opti_mcc.0.03.cons.taxonomy" ## ------- IMPORT MOTHUR DATA ----------- ## Combines the shared and taxonomy files gg_data<-import_mothur(mothur_shared_file = sharedfile_gg, mothur_constaxonomy_file = taxfile_gg) ## Repeat for rdp ref database rdp_data<-import_mothur(mothur_shared_file = sharedfile_rdp, mothur_constaxonomy_file = taxfile_rdp) ## ------- EXAMINING MOTHUR DATA------- ## check the tax names, which are organized as ranks in columns head(tax_table(rdp_data)) head(tax_table(gg_data)) ############################################################################################ ## 
------- IMPORT METADATA --------- ## Can also import as excel, does not have to be csv map<-read.csv("HM_mapfile.csv", stringsAsFactors = FALSE) head(map) ## check headings ## Convert the metadata into phyloseq format ## Make sre that rownames must match the ## sample names in your shared and taxonomy file map2 <- sample_data(map) ## Assign ID created by Mothur as rownames (NOT SAMPLE ID!) ## This will allow phyloseq to merge Mothur output with mapfile rownames(map2) <- map2$Mothur_ID map2$Depth <- factor(map2$Depth, levels=c('0','1', '2', '3', '4', '5', '5.1', '5.8','6', '6.8','7', '8', '9', '9.5','10')) ## Merge mothurdata with sample metadata ## Phyloseq will merge both datasets using the Mothur_ID gg_merge <- merge_phyloseq(gg_data, map2) rdp_merge <- merge_phyloseq(rdp_data, map2) colnames(tax_table(gg_merge)) ## Check to see how many tax ranks there are colnames(tax_table(gg_merge)) <- c("Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species") ## assigns names ## Repeat for rdp ref database colnames(tax_table(rdp_merge)) ## Check to see how many tax ranks there are colnames(tax_table(rdp_merge)) <- c("Kingdom", "Phylum", "Class", "Order", "Family", "Genus") ## assigns names ############################################################################################ ## ------- PRUNING THE DATASET ------------- ## filter out samples we don't want to include in our analysis ## such as OTU with 0 counts ## Note that some OTU have 0.0001 abundance gg.prune.data <- gg_merge %>% prune_taxa(taxa_sums(.) > 1e-5, .) ## Value can be changed accordingly gg.prune.data@tax_table ## lists the taxonomy ## Repeat for rdp ref database rdp.prune.data <- rdp_merge %>% prune_taxa(taxa_sums(.) > 1e-5, .) 
## Value can be changed accordingly rdp.prune.data@tax_table ## lists the taxonomy ############################################################################################ ## ------- CHECKING COMMUNITY IN DATA -------------------- ## Use %>% to pass from left to right operator (chain functions together) ## epa.data %>% tax_glom() = tax_glom(epa.data) ## Change the rank to something you want ## --- What organisms are present? --- ## First, we need to reshape the data in a table format to see what ## organisms were present gg.species <- gg.prune.data %>% tax_glom(taxrank = "Phylum") %>% # agglomerate at Genus level transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance psmelt() %>% # Melt to long format arrange(Phylum) # Sort data frame alphabetically by phylum ## Write a csv file of the gg table write.table(gg.species, "HM_gg_abundance.csv", row.names = FALSE, sep = ";") ## Repeat for rdp ref database rdp.species <- rdp.prune.data %>% tax_glom(taxrank = "Genus") %>% # agglomerate at Genus level transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance psmelt() %>% # Melt to long format arrange(Phylum) # Sort data frame alphabetically by phylum ## Write a csv file of the rdp table write.table(rdp.species, "HM_rdp_species.csv", row.names = FALSE, sep = ";") ############################################################################################ ## ------- SUBSETTING IN DATA IN PHYLOSEQ ----------- ## Use %in% and subset_taxa ## Species table shows both non-photosynthetic and photosynthetic eukaryotes ## We wanted to include only photosynthetic aquatic eukaryotes and exclude all others ## *Note that some reference database use Dinophyta whereas others use Dinoflagellata gg.cyano.data <- subset_taxa(gg.prune.data, Phylum == "p__Cyanobacteria") cyano.species <- gg.cyano.data %>% tax_glom(taxrank = "Order") %>% # agglomerate at Genus level transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. 
abundance psmelt() %>% # Melt to long format arrange(Phylum) # Sort data frame alphabetically by phylum ## Repeat for rdp ref database rdp.cyano.data <- subset_taxa(rdp.prune.data, Phylum == "Cyanobacteria") ############################################################################################ ## ------- LOOK AT DISTRIBUTION OF READ COUNTS ------------- ## Make a data frame with a column for the read counts of each sample sample_sum_df <- data.frame(sum = sample_sums(gg.cyano.data)) sample_sum_df2 <- data.frame(sum = sample_sums(rdp.cyano.data)) ## Histogram of sample read counts ggplot(sample_sum_df, aes(x = sum)) + geom_histogram(color = "black", fill = "indianred", binwidth = 2500) + ggtitle("Distribution of sample sequencing depth") + xlab("Read counts") + theme(axis.title.y = element_blank()) ############################################################################################ #####----------------------- CHECKING DIVERSITY ------------------######### ## This function estimates a number of alpha-diversity metrics and returns ## a ggplot plotting object ## You must use untrimmed, non-normalized count data for meaningful results, ## as many of these estimates are highly dependent on the number of ## singletons (single reads that were not stitched). ## In the 'measures()' you can add different types of metrics. ## See the R help for what kinds of Alpha diversity metrics can be used ## --- How does diversity vary with depth over time? 
--- ## Plot Depth as x-axis, and identify Month by different shapes P1 = plot_richness(gg.cyano.data, x="Depth", shape="Month", measures=c("Shannon")) + geom_point(size=5) + labs(x = "Depth (m)") + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), panel.border = element_rect(color="black", fill=NA), text = element_text(size = 16), axis.line = element_line(colour = "black"), axis.text=element_text(size=16), axis.title = element_text(size = 16), legend.text = element_text(size = 16), legend.key=element_rect(fill='white')) P1 ######################################################################################### ######------------- RAREFRACTION CURVE: CYANOBACTERIA -----------------###### ## The rarecurve function is part of "vegan" package, not phyloseq ## Make rarefaction curve rarecurve(t(otu_table(gg.prune.data)), step=50, cex=0.5) ## rarefy without replacement ps.rarefied = rarefy_even_depth(gg.cyano.data, rngseed=1, sample.size=0.9*min(sample_sums(gg.cyano.data)), replace=F) ps.rarefied rarecurve(t(otu_table(ps.rarefied)), step=50, cex=0.5) ## rarefied dataset plot_bar(ps.rarefied, fill="Order") #rarefied dataset by month plot_bar(ps.rarefied, fill="Order") + facet_wrap(Month~., scales="free_x", nrow=1) ps.phylum = tax_glom(ps.rarefied, taxrank="Rank2", NArm=FALSE) ps.phylum plot_bar(ps.phylum, fill="Rank2") + facet_wrap(~Month, scales= "free_x", nrow=1) ######################################################################################### #######------------------- BARPLOTS USING PHYLOSEQ -------------------######## p <- plot_bar(gg.prune.data, fill="Order") + facet_wrap(~Month, scales= "free_x", ncol=2) p + geom_bar(aes(color=Order, fill=Order), stat="identity", position="stack") + theme(axis.title.x = element_text(size = 14), axis.title.y = element_text(size = 14), panel.background = element_blank(), plot.title = element_text(size = 22), panel.grid.major = element_blank(), ## Change this to 
element_line if you want lines axis.text = element_text(size = 14, color = "black"), panel.grid.major.x = element_blank(), panel.border = element_rect(color="black", fill=NA), strip.text = element_text(size = 14), legend.text = element_text(size = 14), legend.title = element_text(size = 22)) ############################################################################################ ########----------- MAKE BARPLOT USING GGPLOT (RAW COUNTS) -----------####### ## Phyloseq data needs to be reshaped for ggplot ## This will produce relative abundance ## We break it down to "Species" tax but this can be changed gg.cyano.RA <- gg.cyano.data %>% tax_glom(taxrank = "Order") %>% # agglomerate at Order level transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance psmelt() %>% # Melt to long format #filter(Abundance > 0.02) %>% # Filter out low abundance taxa arrange(Phylum) # Sort data frame alphabetically by phylum ## Check Order (or any other taxa) levels(gg.cyano.RA$Order) ## If show NULL, go to next step ## If the command: levels(epa_order$Order or any tax column) shows NULL ## Need to convert taxonomy columns to factor, R ver. 
4.0.2 identifies these as character ## SKip this if your R ver identifies taxonomy as factor gg.cyano.RA[,c(9:12)] <- lapply(gg.cyano.RA[,c(9:12)], as.factor) #convert phyla to factor levels(gg.cyano.RA$Phylum) # should see levels (taxa), check that haptophyte exists gg.species[,c(9:10)] <- lapply(gg.species[,c(9:10)], as.factor) #convert phyla to factor levels(gg.species$Phylum) ## --- MAKING BARPLOTS --- ## ## Using stacked bar plots to look at community composition ## Set colors for plotting ## For this project, interested in 5 phyla (subgroups) ## Assign more colors if you want more than 5 subgroups phylum_colors <- c("#669900", "#CCCFFF", "#CC9933","#663300", "#FFCC66") ## Plot stacked bargraph p2<-ggplot(data = gg.species, aes(y = Abundance, x = reorder(Depth, desc(Depth)), fill = Phylum)) + geom_bar(stat = "identity") + #scale_fill_manual(values = phylum_colors) + guides(fill = guide_legend(reverse = TRUE, keywidth = 1, keyheight = 1)) + labs(y = "Relative Abundance", x = "Depth(m)") + #ggtitle("Phylum Composition of Photosynthetic Eukaryotic Community") + theme(axis.title.x = element_text(size = 16), axis.title.y = element_text(size = 16), panel.background = element_blank(), plot.title = element_text(size = 22), panel.grid.major = element_blank(), ## Change this to element_line if you want lines axis.text = element_text(size = 16, color = "black"), panel.grid.major.x = element_blank(), panel.border = element_rect(color="black", fill=NA), strip.text = element_text(size = 16), legend.text = element_text(size = 16), legend.title = element_text(size = 22)) + facet_wrap(~Month, ncol = 1, scales = "free") ## ncol = # columns p2 ############################################################################################ ## ------- RESHAPE and TRANSFORM DATA FOR BARPLOT (RELATIVE ABUNDANCE)--------- ## Organizes, formats, and transforms subsetted dataframe ## This will produce relative abundance, using ggplot ## We break it down to "Species" tax but this can be 
changed gg.cyano.trans <- gg.cyano.data %>% tax_glom(taxrank = "Species") %>% # agglomerate at Order level transform_sample_counts(function(x) {x/sum(x)} ) %>% # Transform to rel. abundance psmelt() %>% # Melt to long format #filter(Abundance > 0.02) %>% # Filter out low abundance taxa arrange(Phylum) # Sort data frame alphabetically by phylum ## Check Order (or any other taxa) levels(gg.cyano.trans$Order) ## If the command: levels(epa_order$Order or any tax column) shows NULL ## Need to convert taxonomy columns to factor, R ver. 4.0.2 identifies these as character ## SKip this if your R ver identifies taxonomy as factor gg.cyano.trans[,c(8:15)] <- lapply(gg.cyano.trans[,c(8:15)], as.factor) #convert phyla to factor levels(gg.cyano.counts$Phylum) # should see levels (taxa), check that haptophyte exists ## ------- EXPORT SUBSETTED DATA ---- skip if not needed--------- ## Exports the transformed subset dataframe if needed write.table(photo.trans, "name_file.csv", row.names = FALSE, sep = ";") ############################################################################################ ## ------- COMMUNITY COMPOSITION : BARPLOTS (RELATIVE ABUNDANCE)------------ ## Using stacked bar plots to look at community composition ## Set colors for plotting ## For this project, interested in 5 phyla (subgroups) ## Assign more colors if you want more than 5 subgroups phylum_colors <- c("#669900", "#CCCFFF", "#CC9933","#663300", "#FFCC66") ## Plot stacked bargraph p3<-ggplot(data = gg.cyano.trans, aes(x = Depth, y = Abundance, fill = Phylum)) + geom_bar(stat = "identity") + scale_fill_manual(values = phylum_colors) + guides(fill = guide_legend(reverse = TRUE, keywidth = 1, keyheight = 1)) + labs(x = "Depth(m)", y = "Relative Abundance") + #ggtitle("Phylum Composition of Photosynthetic Eukaryotic Community") + theme(axis.title.x = element_text(size = 14), axis.title.y = element_text(size = 14), panel.background = element_blank(), plot.title = element_text(size = 22), 
panel.grid.major = element_line(color = "black"), axis.text = element_text(size = 14, color = "black"), panel.grid.major.x = element_blank(), panel.border = element_rect(color="black", fill=NA), strip.text = element_text(size = 14), legend.text = element_text(size = 14), legend.title = element_text(size = 22)) + facet_wrap(~Month, ncol = 3, scales = "free") p3 ############################################################################################# ## ------- PREPARING DATA FOR ORDINATION------- ## ** Note that ordination in phyloseq uses only phyloseq objects ## ** Therefore, all dataframe should have "phyloseq" ## R sees "Month" in the phyloseq data as characters not factor or levels ## Convernt month to factor and assign levels using sample_data() sample_data(photo.data)$Month <- factor( sample_data(photo.data)$Month, levels = c("February", "May", "September") ) ## Converts Year to factor type and assign levels sample_data(photo.data)$Year <- factor( sample_data(photo.data)$Year, levels = c("2017", "2018")) ## Converts Depth to factor type and assign levels sample_data(photo.data)$Depth <- factor( sample_data(photo.data)$Depth, levels = c("1", "2", "3", "5", "7", "11", "12", "13","13Dp","14","15")) ## Normalize number of reads in each sample using median sequencing depth. 
## Nick's tutorial first prunes the data and then rarefies it ## but literature advises using rarefy with caution because data is lost total = median(sample_sums(photo.data)) standf = function(x, t=total) round(t * (x / sum(x))) ## assigned as photo.data2 to keep orignial intact photo.data2 = transform_sample_counts(photo.data, standf) ############################################################################################## ## ------- PLOT ALL ORDINATIONS ---------------- ## Setting pipline for plotting all ordinations ## Need to load plyr and ape package for this library(plyr) library(ape) ## First need to make a raondon phylum tree random_tree = rtree(ntaxa(photo.data), rooted=TRUE, tip.label=taxa_names(photo.data)) plot(random_tree) ## Then add tree to photo.data photo.data2 = merge_phyloseq(photo.data, random_tree) photo.data2 ## Set R pipeline for distance type and orindation methods dist = "bray" ord_meths = c("DCA", "CCA", "RDA", "DPCoA", "NMDS", "MDS", "PCoA") ## Loops through different method parameter options to the plot_ordination function, ## and stores the plot results in a list, and then plot these results in a combined ## graphic using ggplot2. 
## You will not see any plots, it is a function and takes a hot moment to run ## ** Note: Can change "taxa" to "samples" to look at sample, see photo.data2 dataframe ## for options, however color must variable must correspond with corresponding selection plist = llply(as.list(ord_meths), function(i, photo.data2, dist){ ordi = ordinate(photo.data2, method=i, distance=dist) plot_ordination(photo.data2, ordi, "taxa", color="Phylum") }, photo.data2, dist) ## Assigns ordination method types to plist names(plist) <- ord_meths ## extract the data from each of those individual plots from plist, ## and put it back together in one big data.frame pdataframe = ldply(plist, function(x){ df = x$data[, 1:2] colnames(df) = c("Axis_1", "Axis_2") return(cbind(df, x$data)) }) names(pdataframe)[1] = "method" ## Plot the faceted scatterplot ## Change the color and shape according to "taxa" or "samples" in the plist p4 = ggplot(pdataframe, aes(Axis_1, Axis_2, color=Phylum, shape=Phylum)) + geom_point(size=4) + facet_wrap(~method, scales="free") + scale_fill_brewer(type="qual", palette="Set1") + scale_colour_brewer(type="qual", palette="Set1") + theme_classic() + theme(axis.text.y.left = element_text(size=12, color = "black"), axis.text.x.bottom = element_text(size=12, color = "black"), legend.text = element_text(size = 12), legend.title = element_text(size=12), text = element_text(size = 12), axis.title.x = element_text(size=15, face="bold"), axis.title.y = element_text(size=15, face="bold")) p4 ########################################################################################### ## ------- UNCONSTRAINED ORDINATIONS-------- ## Reminder: the dataset needs to be a phyloseq object ## the Environment panel will contain datasets that have the word "phyloseq" ## Run the PCoA --Note:can change method to NMDS,CCA,RDA,DCA,CAP, see R documentaion photo_pcoa <- ordinate( physeq = photo.data, method = "NMDS", distance = "bray" ) ## ------- Plot the PCoA ------------ ## Note: "split" type 
result in a combined plot with both taxa and samples ## supported options are "samples", "sites", "taxa", "biplot", "split", "scree" p5<-plot_ordination( physeq = photo.data, ordination = photo_pcoa, type = "Split", # change this to taxa to see just taxa graph or sample graph shape = "Depth", #v ariables are with repect to type, eg. "taxa" type has phylum while "Sample" type has others (see object in environment) color = "Phylum", # same comment as above title = "NMDS of Photosynthetic Eukaryote Communities") + geom_point(size =3) + geom_text(mapping = aes(label = Month), size = 3, vjust = 1.5) + ## Labels sample scale_shape_manual(values=c(13, 16, 17, 18, 19, 0, 2, 4, 3, 8, 11, 7)) + ## Assign symbols for shape #scale_color_manual(values = c("#A65628", "red", "#FFAE19", "#4DAF4A", "#1919FF", #"darkorchid3", "magenta", "#FF9900", "#00CC33", "#FF3366", #"#CC00CC", "#6633CC")) + ## Assign colors theme_classic() + theme(axis.text.y.left = element_text(size=12, color = "black"), axis.text.x.bottom = element_text(size=12, color = "black"), legend.text = element_text(size = 12), legend.title = element_text(size=12), text = element_text(size = 12), axis.title.x = element_text(size=15, face="bold"), axis.title.y = element_text(size=15, face="bold")) p5 ## Facet if needed but keep in mind that "sample" and "Taxa" are separate column in the ## phyloseq object so it may look confusing ## Reminder: Facet by will depend on "type" chosen p6<-p4+facet_wrap(~Phylum, scales = "free") ## can removed scales to keep axes the same p6 ############################################################################################# ## ------- SUBSET BY MONTH AND ORDINATE PCoA -------- ## Regardless of the ordinate method, it seems that there is a temporal effect ## September samples cluster together and more so for May samples ## 1) Subset photo.data into months : May and September (February ony had 2 samples) photo.may <- subset_samples(photo.data, Month == "May") ## Run the PCoA --Note:can 
change method to NMDS,CCA,RDA,DCA,CAP, see R documentaion ## We have unsufficient data but running it anyway to see if there is anything worthwhile may_nmds <- ordinate( physeq = photo.may, method = "NMDS", distance = "bray" ) ## ------- Plot the PCoA -------------- ## Note: "split" type result in a combined plot with both taxa and samples ## supported options are "samples", "sites", "taxa", "biplot", "split", "scree" p7<-plot_ordination( physeq = photo.may, ordination = may_nmds, type = "Split", # change this to taxa to see just taxa graph or sample graph shape = "Depth", #v ariables are with repect to type, eg. "taxa" type has phylum while "Sample" type has others (see object in environment) color = "Phylum", # same comment as above title = "NMDS of Photosynthetic Eukaryote Communities in May") + geom_point(size =3) + geom_text(mapping = aes(label = Month), size = 3, vjust = 1.5) + ## Labels sample scale_shape_manual(values=c(13, 16, 17, 18, 19, 0, 3)) + ## Assign symbols for shape #scale_color_manual(values = c("#A65628", "red", "#FFAE19", "#4DAF4A", "#1919FF", #"darkorchid3", "magenta", "#FF9900", "#00CC33", "#FF3366", #"#CC00CC", "#6633CC")) + ## Assign colors theme_classic() + theme(axis.text.y.left = element_text(size=12, color = "black"), axis.text.x.bottom = element_text(size=12, color = "black"), legend.text = element_text(size = 12), legend.title = element_text(size=12), text = element_text(size = 12), axis.title.x = element_text(size=15, face="bold"), axis.title.y = element_text(size=15, face="bold")) p7 ## Facet if needed but keep in mind that "sample" and "Taxa" are separate column in the ## phyloseq object so it may look confusing ## Reminder: Facet by will depend on "type" chosen ## Can set scales free by scales = "free", "free_x" or "free_y" p8<-p7+facet_wrap(~Phylum) p8 ############################################################################################# ## ------- RESHAPE PHYLOSEQ DATA FOR VEGAN ------------ ## CCA compares variables 
within datasets and between 2 matrices/datasets ## Convert the phyloseq data into a dataframe if you haven't done so ## For this pipline, we have converted it in the bar plot sections ## You can use raw or relative counts, change the data accordingly ## Turn on the vegan package to do this library(vegan) ## ------- Create a new dataframe and reshaping it ------------ ## It is less messy to create a new dataframe then reshape it because there are too many ## variables (too many taxa) and the table will be very wide ## For this study, we are interested in eukaryotic phyla photo.trans2 <- photo.data %>% tax_glom(taxrank = "Phylum") %>% # agglomerate at Phylum level transform_sample_counts(function(x) {x/sum(x)} ) %>% psmelt() # Melt to long format ## To simplify things, include only columns of interest ## For this study, we are removing columns Doman and Kingdom photo.trans3 <- subset(photo.trans2, select = -c(8,9)) ## Next, pivot the table so that levels in Phyla becomes it's own column ## Check the output in the environment, you should seea dataframe ##**Note that the number of observations will increase if agglomerating at Genus or Species level photo.trans4 <- photo.trans3 %>% pivot_wider(names_from = Phylum, values_from = Abundance, id_cols = Mothur_ID) ## Add the metadata (in csv format) to the new dataframe ## Now we have a dataframe that consists of both environment and response variables photo.trans4 <- merge(photo.trans4, map, by = "Mothur_ID") ############################################################################################## ## ------- TESTING DATASET FOR CCA ----------- ## CCA has 2 assumptions: ## 1) Some linearity between independent and response variables ## 2) Response variables are unimodal (even one!) 
## Will take a while of the dataframe is large ## Test your data by using GGally package library(GGally) ggpairs(photo.trans4[,c(2:6)]) ## Select columns 2 to 6 for this study ## ------- RUN THE CCA USING VEGAN ------------ ## Define the environmental and response variables ## code explanation: cca(species data,environmental data) ccamodel1<-cca(photo.trans4[,c(2:6)],photo.trans4[,c(9:10)]) ccamodel1 ## Axis 1 and 2 explain 53% of the variation in OTU ccamodel1$CCA ## This gives all the data that will be used for plotting ############################################################################################## ## ------- PREPARING FOR PLOT WITH GGPLOT ---------------- ## ------- Installing ggvegan package----------- ## Need ggvegan package to do this ## This worked for R version 4.0.2 install.packages("remotes") remotes::install_github("gavinsimpson/ggvegan", force = TRUE) library(ggvegan) ## ------- Convert the CCA results into a readable format----------- ## Convert the CCA results into a readable format ## The fortify command will recognize the CCA cca.res<-fortify(ccamodel1, axes = 1:2) ## CCA results in a dataframe format, see environment ## ------- Preparing for plotting ------------- ## subset sites/samples site.data<-subset(cca.res, Score == "sites") species.data<-subset(cca.res, Score == "species") ## Add Depth, sample ID, Month to species/site subset data: ## binds_col() is from dplyr package (within tidyverse) site.data<-bind_cols(site.data, map[,c(1:5)]) ## column 1 to 5 ## Scale environmental arrows to the plot ## subset environmental variables -- these will plot as arrows later arrows<-subset(cca.res, Score == "biplot") ## multiply the environmental variables (arrows) to scale it to the plot scores<-c('CCA1','CCA2') mul<-ggvegan:::arrowMul(arrows[,scores], subset(cca.res, select = scores, Score == "sites")) ## Scales the biplot arrows arrows[,scores] <-arrows[,scores] * mul ## ------- Plot CCA using ggplot--------- p9<-ggplot() + 
geom_point(site.data, mapping = aes(x = CCA1, y = CCA2, color = Month, shape = Depth), size = 3) + #leave out color not wanted geom_segment(arrows, mapping = aes(x = 0, xend = CCA1, y = 0, yend = CCA2), arrow = arrow(length = unit(0.03, "npc")), # unit = arrow end color = "blue", size = 1.5) + geom_text(arrows, mapping = aes(label = Label, x = CCA1*1.1, y = CCA2*1.1), size = 5) + geom_text(species.data, mapping = aes(label = Label, x = CCA1*1.1, y = CCA2*1.1), size = 5) + scale_shape_manual(values=c(13, 16, 17, 18, 19, 0, 2, 4, 3, 8, 11, 7)) + coord_fixed() + theme(legend.background = element_rect(fill="white", size=0.3, linetype="solid", colour="black"), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.text=element_text(size=16), axis.title = element_text(size = 16), legend.position ="right", legend.key = element_rect(fill = "white"), legend.title = element_text(size = 16), legend.text = element_text(size = 16)) p9
b6f405e3959930fd5969079c70db9cf6be2e2a82
62f46e39c6dcde7265ab9f1079fa73e2e454b258
/Plot3.R
71efa2b29630b929f035d863717af26a73a0645a
[]
no_license
Ignacio-dAR/ExData_Plotting1
bc00e1a824e4f696691ea2617b864311592c2e1e
10393eaa7a738023110bb2369b46873003365cdd
refs/heads/master
2021-01-17T08:03:47.031713
2015-02-06T13:55:09
2015-02-06T13:55:09
30,404,945
0
0
null
2015-02-06T09:29:05
2015-02-06T09:29:02
null
UTF-8
R
false
false
763
r
Plot3.R
## Plot 3: household energy sub-metering over a two-day window, saved as plot3.png.
##
## Bug fixes vs. the original script:
##  - the first subset() referenced `d2` (and `DateLimit`) before either was
##    defined; the raw table is `d`, so define the limits and subset from `d`
##  - dates were compared before the as.Date() conversion, i.e. as character
##    strings; convert first so >= / <= compare chronologically
##  - the first weekday axis label was misspelled "Thru" (2007-02-01 is a Thursday)

## Read the full dataset; "?" marks missing values in this file.
d <- read.table("household_power_consumption.txt",
                header = TRUE, sep = ";", dec = ".", na.strings = "?")

## Two-day window of interest.
DateLimit <- as.Date(c("2007-02-01", "2007-02-02"))

## Convert before filtering so the range comparison uses real dates.
d$Date <- as.Date(d$Date, format = "%d/%m/%Y")
d2 <- subset(d, Date >= DateLimit[1] & Date <= DateLimit[2])

## Draw the three sub-metering series on one panel (axes added manually below).
plot(d2$Sub_metering_1, main = "", type = "l", xlab = "", axes = FALSE, ylab = "")
lines(d2$Sub_metering_2, col = "red")
lines(d2$Sub_metering_3, col = "blue")
box(col = "black")

## 1440 minutes per day: ticks mark the start of each day in the window.
axis(side = 1, col = "black", at = c(0, 1440, 2880),
     labels = c("Thu", "Fri", "Sat"), cex.axis = 0.8)
axis(2, col = "black", at = c(0, 10, 20, 30), cex.axis = 0.8)
mtext("Energy sub metering", side = 2, line = 3, col = "black", cex = 0.8)
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       cex = 0.8, pch = "-", pt.cex = 2, col = c("black", "red", "blue"))

## Copy the screen device to a PNG file and close the PNG device.
dev.copy(png, file = "plot3.png")
dev.off()
e849e8cdaf4cd0de41670241ff4299ccbbe23740
19bda1321417205a4b2859814291a805a3ecfc17
/man/testSvdReg.Rd
6cf367bab1e23289fdff453a8828b1ff24d3da3c
[]
no_license
driegert/transfer2
15e885fd67db828e00db6afe9c6ecd3bc59e51a4
ef09863f3bf8cdffdcffc8976f120fa0a05fc1ca
refs/heads/master
2021-05-23T06:06:02.312396
2018-04-16T14:51:22
2018-04-16T14:51:22
94,790,567
0
0
null
null
null
null
UTF-8
R
false
true
274
rd
testSvdReg.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/testing.R \name{testSvdReg} \alias{testSvdReg} \title{Test out to see if the regression stuff works...} \usage{ testSvdReg(Y, X) } \description{ Test out to see if the regression stuff works... }
bf8065ba63402e9e0cfe24a70077f178e639ce6a
b31fef59aa6842007c4a12ce09be413ac7a39bf9
/app.R
bcd77544498850e878e8b9ac17b50a882406f0ef
[]
no_license
nvelden/table-race
2c669c6b967fbf57d438171e7e9b9f0abfcb053b
d6ea29f40545d2008ac9479c105458b403569f93
refs/heads/master
2023-01-23T11:40:54.844562
2020-11-14T10:16:56
2020-11-14T10:16:56
311,434,367
0
0
null
null
null
null
UTF-8
R
false
false
5,504
r
app.R
# Shiny app: "Covid-19 Table Race" — an animated, D3-rendered table (via r2d3)
# that ranks countries by a chosen COVID-19 metric as a date slider plays.
#
# The data helper `covidData()` and the `countryNames` lookup are defined in
# functions.R (sourced below) — presumably built on the COVID19 package
# (COVID-19 Data Hub); confirm in functions.R.
library(shiny)
#devtools::install_github("rstudio/r2d3") # install latest version from Github for join() function
library(r2d3)
library(COVID19)
library(tidyr)
library(plyr)
library(dplyr)
library(shinythemes)
library(zoo)

# Disable the r2d3 shadow DOM so the CSS files under www/ can style the output.
options(r2d3.shadow = FALSE)

# Brings covidData() and countryNames into the app's environment.
source("functions.R", local = TRUE)

ui <- fluidPage(
  #Theme
  theme = shinytheme("superhero"),
  #Stylesheet
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "styles.css")
  ),
  # Application title
  fluidRow(width=12, id="headerContainer", style="display: flex; margin: 0; padding-left: 19px; padding-right: 19px;",
           div(id="titleIcon", style="padding-top: 10px; padding-right: 10px; margin-bottom: 20px;",
               icon("virus", "fa-6x"), lib="font-awesome"),
           div(id="titleContainer",
               div(id="title", "Covid-19 Table Race", style="margin-bottom: 0px; font-size: 48px;"),
               # External links to the author's GitHub and LinkedIn profiles.
               div(id="iconContainer",
                   actionLink('GitHub', label=NULL, icon = icon("github-square", "fa-2x"), onclick="window.open('https://github.com/nvelden', '_blank')"),
                   actionLink('LinkedIN', label=NULL, icon = icon("linkedin-square","fa-2x"), onclick="window.open('https://linkedin.com/in/nielsva', '_blank')")),
           )),
  # Country picker (up to 30 countries) plus the ranking metric.
  fluidRow(width=12, class="inputContainer",
           div(id="countryInputContainer", style="width: 100%;",
               selectizeInput("countrySelection", "Country",
                              choices = c(countryNames$country),
                              width="100%",
                              selected = c("United States", "China", "Germany", "Iran", "Netherlands", "France", "Italy", "Spain", "United Kingdom", "Switzerland"),
                              multiple = TRUE,
                              options = list(maxItems = 30)),
               # The metric values are column names expected from covidData().
               selectInput("SortSelection", "Sort by",
                           choices = c("Deaths" = "deaths",
                                       "Deaths [7 day]" = "deaths_new_07da",
                                       "Confirmed" = "confirmed",
                                       "Confirmed [7 day]" = "confirmed_new_07da",
                                       "Tests" = "tests",
                                       "Tests [7 day]" = "tests_new_07da"),
                           width="20%",
                           multiple = FALSE,
               ),
               actionButton("submit", "Submit", class = "btn-success"))
  ),
  # Animated date slider with a large "current day" label beside it.
  fluidRow(width=12, class="inputContainer", style="justify-content: space-between;",
           div(id="dateRange", style="flex-grow: 1; margin-left: 40px;",
               uiOutput("dateRange")),
           uiOutput("dayOutput")
  ),
  # The D3 table itself, followed by data-source footnotes.
  fluidRow(width=12, class="inputContainer", style="display: flex; flex-direction: column;",
           div(id="tableOutput", style="width: 100%;",
               d3Output("table"),
           ),
           div(id="citation", 'Source: Guidotti E, Ardia D (2020). COVID-19 Data Hub.', style="color: #ebebeb; font-size: 12px;"),
           div(id="citation", '7 day: Rolling 7-day average.', style="color: #ebebeb; font-size: 12px;")
  )
)

server <- function(input, output) {

  # Data is (re)fetched only when "Submit" is pressed; the country and sort
  # selections are isolate()d so changing them alone does not trigger a refresh.
  dataInput <- reactive({
    input$submit
    req(isolate(input$countrySelection))
    data <- covidData(isolate(input$countrySelection), isolate(input$SortSelection))
    return(data)
  })

  # Date slider spanning the first to last date in the fetched data; the
  # animate option steps through dates (150 ms/frame) to produce the "race".
  output$dateRange <- renderUI({
    sliderInput("dateSlider",
                label = NULL,
                min=as.Date(dataInput()$date[1]),
                max=as.Date(dataInput()$date[nrow(dataInput())]),
                value=as.Date(dataInput()$date[1]),
                animate = animationOptions(interval = 150, loop = FALSE),
                timeFormat="%b %Y"
    )
  })

  # Large "day month" label reflecting the slider's current position.
  output$dayOutput <- renderUI({
    div(id="dayOutput", style="text-align: center; padding-left: 25px;",
        h3(format(input$dateSlider, "%d %b"))
    )
  })

  # Render the table race; the current slider date and sort metric are passed
  # to the JavaScript side (www/tableD3.js) through the `options` list.
  output$table <- renderD3({
    if(is.null(dataInput()))
      return(NULL)
    r2d3(
      data = dataInput(),
      options = list(
        selColumns = c("rank", "iso_alpha_2", "country", "population", "confirmed", "confirmed_new_07da", "deaths", "deaths_new_07da", "tests", "tests_new_07da"),
        colNames = c("#", "Flag", "Country", "Population", "Confirmed", "Confirmed [7 Day]", "Deaths", "Deaths [7 day]", "Tests", "Tests [7 day]"),
        colType = c("text", "text", "text", "text", "bar", "bar", "bar", "bar", "bar"),
        dateInput = input$dateSlider,
        sortSel = input$SortSelection
      ),
      d3_version = c("5"),
      container = 'div',
      css = "www/chartstyles.css",
      script = "www/tableD3.js",
      dependencies = c(
      )
    )
  })
}

#
# Run the application
shinyApp(ui = ui, server = server)
4d92943f5bae1d5365cca5c3d1bfb3cc347d12dc
b43a6be2e74edb64bf29a13943e46d98e8ef2b83
/AML_sim/3_generate_plots/main_clustering_performance/plots_AML_sim_diffcyt_methods_main_clustering_performance.R
af39744a6e865e511ef8350ca3a0b6e43da8d55e
[ "MIT" ]
permissive
lmweber/diffcyt-evaluations
982ddb3cfb44c896fef7aa0b8ae58a80f715aa2c
74974b9167b8f38ce860090d7d888e3989a214e0
refs/heads/master
2021-03-19T16:09:30.412168
2019-05-02T16:12:39
2019-05-02T16:12:39
74,908,643
7
0
null
null
null
null
UTF-8
R
false
false
11,999
r
plots_AML_sim_diffcyt_methods_main_clustering_performance.R
##########################################################################################
# Generate plots
#
# - data set: AML-sim
# - plot type: clustering performance
# - method: diffcyt methods
#
# - main results
#
# Lukas Weber, May 2018
#
# Review fixes:
# - the cumulative-recall loop assigned panel widths to `plot_widths` instead of
#   `plot_widths_cumulative`, so those panels were saved with width = NA
# - added the missing "_" separator in the cumulative panel file names (matches
#   the non-cumulative naming pattern)
##########################################################################################

# note: clustering step is the same for all 'diffcyt' methods
# (diffcyt-DA-edgeR, diffcyt-DA-voom, diffcyt-DA-GLMM)


library(SummarizedExperiment)
library(reshape2)
library(ggplot2)
library(cowplot)


# load saved results
DIR_RDATA <- "../../../../RData/AML_sim/main"

# note: only need to load one set of results, since clustering step is the same
# for all 'diffcyt' methods
load(file.path(DIR_RDATA, "outputs_AML_sim_diffcyt_DA_edgeR_main.RData"))
load(file.path(DIR_RDATA, "out_clusters_AML_sim_diffcyt_DA_edgeR_main.RData"))
load(file.path(DIR_RDATA, "out_objects_AML_sim_diffcyt_DA_edgeR_main.RData"))

# path to save plots
DIR_PLOTS <- "../../../../plots/AML_sim/main_clustering_performance"


##################################
# Calculate clustering performance
##################################

# loop over thresholds (th) and conditions (j)

# spike-in thresholds
thresholds <- c("5pc", "1pc", "0.1pc")

# condition names
cond_names <- c("CN", "CBF")
cond_names_all <- c("healthy", cond_names)

# lists to store clustering performance results, indexed [[threshold]][[condition]]
clustering_pr <- clustering_re <- clustering_F1 <- labels <- vector("list", length(thresholds))
names(clustering_pr) <- names(clustering_re) <- names(clustering_F1) <- names(labels) <- thresholds

for (th in seq_along(thresholds)) {

  clustering_pr[[th]] <- clustering_re[[th]] <- clustering_F1[[th]] <- labels[[th]] <- vector("list", length(cond_names))
  names(clustering_pr[[th]]) <- names(clustering_re[[th]]) <- names(clustering_F1[[th]]) <- names(labels[[th]]) <- cond_names

  for (j in seq_along(cond_names)) {

    # ------------------------------------------
    # load data objects and true spike-in status
    # ------------------------------------------

    # load data objects
    # note: clustering is performed once on all samples from both conditions together
    d_se <- out_objects_diffcyt_DA_edgeR_main[[th]]$d_se

    # load spike-in status at cell level (for condition j)
    spikein <- out_diffcyt_DA_edgeR_main[[th]][[j]]$spikein

    # add spike-in status to data object (for condition j); cells from other
    # conditions keep spikein = 0
    rowData(d_se)$spikein <- 0
    rowData(d_se)$spikein[rowData(d_se)$group_id %in% c("healthy", cond_names[j])] <- spikein

    # --------------------------------------------------------------------------------
    # calculate clustering performance for all clusters containing true spike-in cells
    # --------------------------------------------------------------------------------

    # find matching clusters (clusters containing true spike-in cells)

    # check no missing clusters
    stopifnot(all(names(table(rowData(d_se)[rowData(d_se)$spikein == 1, ]$cluster_id)) == levels(rowData(d_se)$cluster_id)))

    labels_matched <- unname(which(table(rowData(d_se)[rowData(d_se)$spikein == 1, ]$cluster_id) > 0))

    # total number of cells in each matching cluster
    n_matched <- sapply(labels_matched, function(l) sum(rowData(d_se)$cluster_id == l))

    # number of true spike-in cells in each matching cluster
    n_correct <- sapply(labels_matched, function(l) sum(rowData(d_se)$cluster_id == l & rowData(d_se)$spikein == 1))

    # total number of true spike-in cells
    n_spikein <- sum(rowData(d_se)$spikein == 1)

    # calculate precision, recall, F1 score for each matching cluster
    stopifnot(length(n_matched) == length(n_correct), length(n_spikein) == 1)

    pr <- n_correct / n_matched
    re <- n_correct / n_spikein
    F1 <- 2 * (pr * re) / (pr + re)

    # store results
    labels[[th]][[j]] <- labels_matched
    clustering_pr[[th]][[j]] <- pr
    clustering_re[[th]][[j]] <- re
    clustering_F1[[th]][[j]] <- F1
  }
}


################
# Generate plots
################

# ----------------------------------------------------
# Plots showing individual scores (sorted by F1 score)
# ----------------------------------------------------

# loop over thresholds (th) and conditions (j)

# store plots in list
plots_clustering <- vector("list", length(thresholds) * length(cond_names))
plot_widths <- rep(NA, length(thresholds) * length(cond_names))

for (th in seq_along(thresholds)) {
  for (j in seq_along(cond_names)) {

    # index to store plots sequentially in list (row-major: one row per condition)
    ix <- (j * length(thresholds)) - (length(thresholds) - th)

    # create data frame for plotting
    d_plot <- data.frame(
      cluster = labels[[th]][[j]],
      precision = clustering_pr[[th]][[j]],
      recall = clustering_re[[th]][[j]],
      F1_score = clustering_F1[[th]][[j]]
    )

    # panel width scales with the number of matched clusters
    plot_widths[ix] <- 2 + nrow(d_plot) / 7

    # sort by F1 score
    d_plot <- d_plot[rev(order(d_plot$F1_score)), ]

    d_plot$cluster <- factor(d_plot$cluster, levels = as.character(d_plot$cluster))

    d_plot <- melt(d_plot, id.vars = "cluster", variable.name = "measure")
    d_plot$measure <- factor(d_plot$measure, levels = c("F1_score", "precision", "recall"))

    # create plot
    colors <- c("firebrick1", "forestgreen", "deepskyblue")

    p <- ggplot(d_plot, aes(x = cluster, y = value, color = measure)) +
      geom_point(shape = 1, stroke = 1) +
      scale_color_manual(values = colors) +
      ylim(c(-0.025, 1.025)) +
      ggtitle(paste0(cond_names[j], ", threshold ", gsub("pc$", "\\%", thresholds[th]))) +
      theme_bw() +
      theme(axis.text.x = element_text(angle = 90, vjust = 0.5, size = 8),
            axis.title.y = element_blank())

    plots_clustering[[ix]] <- p

    # save individual panel plot
    fn <- file.path(DIR_PLOTS, "panels",
                    paste0("results_AML_sim_diffcyt_main_clustering_performance_",
                           thresholds[th], "_", cond_names[j], ".pdf"))
    ggsave(fn, width = plot_widths[ix], height = 3)
  }
}


# --------------------------------------------------
# Plots showing cumulative recall (sorted by recall)
# --------------------------------------------------

# loop over thresholds (th) and conditions (j)

# store plots in list
plots_clustering_cumulative <- vector("list", length(thresholds) * length(cond_names))
plot_widths_cumulative <- rep(NA, length(thresholds) * length(cond_names))

for (th in seq_along(thresholds)) {
  for (j in seq_along(cond_names)) {

    # index to store plots sequentially in list
    ix <- (j * length(thresholds)) - (length(thresholds) - th)

    # create data frame for plotting
    d_plot <- data.frame(
      cluster = labels[[th]][[j]],
      precision = clustering_pr[[th]][[j]],
      recall = clustering_re[[th]][[j]],
      F1_score = clustering_F1[[th]][[j]]
    )

    # fix: was `plot_widths[ix]`, which left plot_widths_cumulative[ix] = NA
    # when saving the panel below
    plot_widths_cumulative[ix] <- 2 + nrow(d_plot) / 7

    # sort by recall
    d_plot <- d_plot[rev(order(d_plot$recall)), ]

    # add cumulative recall
    d_plot$recall_cumulative <- cumsum(d_plot$recall)

    # remove columns not needed for this plot
    d_plot <- d_plot[, -match(c("recall", "F1_score"), colnames(d_plot))]

    d_plot$cluster <- factor(d_plot$cluster, levels = as.character(d_plot$cluster))

    d_plot <- melt(d_plot, id.vars = "cluster", variable.name = "measure")
    d_plot$measure <- factor(d_plot$measure, levels = c("precision", "recall_cumulative"))

    # create plot
    colors <- c("forestgreen", "deepskyblue")

    p <- ggplot(d_plot, aes(x = cluster, y = value, color = measure, group = measure)) +
      geom_point(shape = 20, stroke = 1) +
      geom_line() +
      scale_color_manual(values = colors, labels = c("precision", "cumulative\nrecall")) +
      ylim(c(-0.025, 1.025)) +
      ggtitle(paste0(cond_names[j], ", threshold ", gsub("pc$", "\\%", thresholds[th]))) +
      theme_bw() +
      theme(axis.text.x = element_text(angle = 90, vjust = 0.5, size = 8),
            axis.title.y = element_blank())

    plots_clustering_cumulative[[ix]] <- p

    # save individual panel plot
    # fix: added missing "_" after "cumulative" to match the naming pattern above
    fn <- file.path(DIR_PLOTS, "panels",
                    paste0("results_AML_sim_diffcyt_main_clustering_performance_cumulative_",
                           thresholds[th], "_", cond_names[j], ".pdf"))
    ggsave(fn, width = plot_widths_cumulative[ix], height = 3)
  }
}


########################
# Save multi-panel plots
########################

# ----------------------------------------------------
# Plots showing individual scores (sorted by F1 score)
# ----------------------------------------------------

# modify plot elements: strip per-panel legends (a shared legend is added below)
plots_clustering <- lapply(plots_clustering, function(p) {
  p + theme(legend.position = "none")
})

# format into grid
plot_widths_avg <- c(7, 3.75, 2)
grid_clustering <- do.call(plot_grid, append(plots_clustering, list(
  nrow = 2, ncol = 3, align = "hv", axis = "bl", rel_widths = plot_widths_avg))
)

# add combined title
title_clustering <- ggdraw() + draw_label("AML-sim, diffcyt methods: clustering performance", fontface = "bold")
grid_clustering <- plot_grid(title_clustering, grid_clustering, ncol = 1, rel_heights = c(1, 25))

# add combined legend (one legend per row)
legend_clustering <- get_legend(plots_clustering[[1]] +
                                  theme(legend.position = "right",
                                        legend.title = element_text(size = 11, face = "bold"),
                                        legend.text = element_text(size = 11)))
legend_clustering <- plot_grid(legend_clustering, legend_clustering, ncol = 1)
grid_clustering <- plot_grid(grid_clustering, legend_clustering, nrow = 1, rel_widths = c(9, 1))

# save plots
fn_clustering <- file.path(DIR_PLOTS, paste0("results_AML_sim_diffcyt_main_clustering_performance.pdf"))
ggsave(fn_clustering, grid_clustering, width = 10, height = 5.5)


# --------------------------------------------------
# Plots showing cumulative recall (sorted by recall)
# --------------------------------------------------

# modify plot elements: strip per-panel legends (a shared legend is added below)
plots_clustering_cumulative <- lapply(plots_clustering_cumulative, function(p) {
  p + theme(legend.position = "none")
})

# format into grid
plot_widths_avg <- c(7, 3.75, 2)
grid_clustering_cumulative <- do.call(plot_grid, append(plots_clustering_cumulative, list(
  nrow = 2, ncol = 3, align = "hv", axis = "bl", rel_widths = plot_widths_avg))
)

# add combined title
title_clustering_cumulative <- ggdraw() + draw_label("AML-sim, diffcyt methods: clustering performance", fontface = "bold")
grid_clustering_cumulative <- plot_grid(title_clustering_cumulative, grid_clustering_cumulative, ncol = 1, rel_heights = c(1, 25))

# add combined legend (one legend per row)
legend_clustering_cumulative <- get_legend(plots_clustering_cumulative[[1]] +
                                             theme(legend.position = "right",
                                                   legend.title = element_text(size = 10, face = "bold"),
                                                   legend.text = element_text(size = 9)))
legend_clustering_cumulative <- plot_grid(legend_clustering_cumulative, legend_clustering_cumulative, ncol = 1)
grid_clustering_cumulative <- plot_grid(grid_clustering_cumulative, legend_clustering_cumulative, nrow = 1, rel_widths = c(9, 1))

# save plots
fn_clustering_cumulative <- file.path(DIR_PLOTS, paste0("results_AML_sim_diffcyt_main_clustering_performance_cumulative.pdf"))
ggsave(fn_clustering_cumulative, grid_clustering_cumulative, width = 10, height = 5.5)


###################################
# Save timestamp file for Makefiles
###################################

# Write the current time to a timestamp file; the build system checks this
# file's modification time to decide whether the plots are up to date.
file_timestamp <- file.path(DIR_PLOTS, "timestamp.txt")

sink(file_timestamp)
Sys.time()
sink()
c6fd023f338f70d4dfde40d820bbbbbfe759afc9
08c48f2627281810fe2a4a37bb1e9bc5c03eeb68
/Huan_link_all_script/R/x86_64-pc-linux-gnu-library/3.4/IRanges/unitTests/test_IRanges-class.R
67402fe2241c7441055423dc88a7291255542a2f
[]
no_license
Lhhuan/drug_repurposing
48e7ee9a10ef6735ffcdda88b0f2d73d54f3b36c
4dd42b35e47976cf1e82ba308b8c89fe78f2699f
refs/heads/master
2020-04-08T11:00:30.392445
2019-08-07T08:58:25
2019-08-07T08:58:25
159,290,095
6
1
null
null
null
null
UTF-8
R
false
false
2,055
r
test_IRanges-class.R
## RUnit-style unit tests (Bioconductor convention) for the IRanges class:
## names<- replacement, isDisjoint(), combining/splitting, and metadata columns.
## check*() helpers come from the RUnit framework used by the package test runner.

## Setting, clearing, padding and over-long names on an IRanges object.
test_IRanges_names <- function() {
    range1 <- IRanges(start=c(1,2,3), end=c(5,2,8))
    checkIdentical(names(range1), NULL)
    nms <- c("a", NA, "b")
    names(range1) <- nms
    checkIdentical(names(range1), nms)
    ## NOTE(review): this validates `nms` (a plain character vector), not
    ## `range1` -- possibly intended validObject(range1); confirm upstream.
    checkTrue(validObject(nms))
    names(range1) <- NULL
    checkTrue(validObject(nms))
    checkIdentical(names(range1), NULL)
    ## Assigning a too-short names vector pads the remainder with NA.
    names(range1) <- "a"
    checkTrue(validObject(range1))
    checkIdentical(names(range1), c("a", NA, NA))
    ## A names vector longer than the object is an error.
    checkException(names(range1) <- c("a", "b", "c", "d"), silent = TRUE)
}

## isDisjoint() on overlapping, non-overlapping, and single-range inputs.
test_Ranges_isDisjoint <- function() {
    ir1 <- IRanges(c(2,5,1), c(3,7,3))
    ir2 <- IRanges(c(2,9,5), c(3,9,6))
    ir3 <- IRanges(1, 5)
    checkIdentical(isDisjoint(ir1), FALSE)
    checkIdentical(isDisjoint(ir2), TRUE)
    checkIdentical(isDisjoint(ir3), TRUE)
    ## Handling of zero-width ranges
    current <- sapply(11:17, function(i) isDisjoint(IRanges(c(12, i), width=c(4, 0))))
    target <- rep(c(TRUE, FALSE, TRUE), c(2, 3, 2))
    checkIdentical(target, current)
}

## split()/c() round-trip and metadata-column (mcols) behavior when combining.
test_IRanges_combine <- function() {
    range <- IRanges(start=c(1,2,3,1), end=c(5,2,8,3))
    srange <- split(range, start(range) == 1)
    checkIdentical(srange,
                   as(RangesList(`FALSE` = range[2:3], `TRUE` = range[c(1,4)]),
                      "CompressedIRangesList"))
    checkIdentical(do.call(c, unname(as.list(srange))),
                   IRanges(c(2,3,1,1), c(2,8,5,3)))

    ## Combining fills missing mcols with NA when only one object carries them.
    ir1 <- IRanges(1, 10)
    ir2 <- IRanges(c(1, 15), width=5)
    mcols(ir2) <- DataFrame(score=1:2)
    checkIdentical(mcols(c(ir1, ir2)), DataFrame(score = c(NA, 1L, 2L)))

    ## Combining multiple IRanges object with varying mcols
    mcols(ir1) <- DataFrame(gc=0.78)
    checkException(c(ir1, ir2), silent=TRUE)
    checkIdentical(mcols(c(ir1, ir2, ignore.mcols=TRUE)), NULL)
}

## mcols are kept aligned under subsetting, reordering and concatenation.
test_IRanges_annotation <- function() {
    range <- IRanges(c(1, 4), c(5, 7))
    mcols(range) <- DataFrame(a = 1:2)
    checkIdentical(mcols(range)[,1], 1:2)
    checkIdentical(mcols(range[2:1])[,1], 2:1)
    checkIdentical(mcols(c(range,range))[,1], rep(1:2,2))
}
db3a5c1cc5c67cfe7502db5a299e39bc9bdc6023
f2a71819963dd21c9c7fdc6c1c30192229abe881
/man/MxVersionType-class.Rd
f9092801dd40ae56a41a95fff0882d8164d61ed6
[]
no_license
cran/OpenMx
03f5f1fdd734899547c33e0bc93f8451b19083f9
28fb64add0857e35466f52f8cfa4dccf8920955d
refs/heads/master
2023-04-13T18:01:28.853095
2023-04-05T19:43:20
2023-04-05T19:43:20
35,870,105
0
0
null
null
null
null
UTF-8
R
false
true
233
rd
MxVersionType-class.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/0ClassUnion.R \name{MxVersionType-class} \alias{MxVersionType-class} \title{A package_version or character} \description{ A package_version or character }
1f31a44b42151850b61bb99f60bcf2eea4772837
7dc7c5b7d6faa41e52788b1dd6f5b7316a845e24
/DATRAS/man/indexSubset.Rd
93fcf4ec7b0ef6d115999638eecccaba3c5af006
[]
no_license
alko989/DATRAS
7f969cc2295ede8b9d81e7f24aa1e0fe4518d5b3
26bd3f3d9c8460d2f82b3e73525c809973e8261d
refs/heads/master
2021-08-28T03:48:00.471214
2020-11-03T15:05:47
2020-11-03T15:59:01
212,617,500
1
0
null
2019-10-03T15:45:54
2019-10-03T15:45:54
null
UTF-8
R
false
true
755
rd
indexSubset.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_datras.R \name{[.DATRASraw} \alias{[.DATRASraw} \title{Sample unit subset.} \usage{ \method{[}{DATRASraw}(x, i) } \arguments{ \item{x}{DATRASraw object} \item{i}{Integer vector} } \value{ DATRASraw object } \description{ Sample unit subset for DATRASraw object. } \details{ Extract all information related to a given set of hauls. For instance x[1:2] extracts the first two hauls. Duplicated integer values result in automatic renaming of haul ids. This can be useful for sampling with replacement. } \examples{ \dontshow{ file1 <- system.file("exchange","Exchange1.zip",package="DATRAS") x <- readExchange(file1) } x[1:2] x[sample(3,replace=TRUE)] split(x,x$Country) }
4807b73ee2c6a9f1e491ddad3fd0844e6f6ad376
0f2206cefc893004b5626511bbf2b32d89fb2c0b
/Analise/tokenizers.R
983ca64ce22b00fec88eac591b7295d159ba89d1
[ "MIT" ]
permissive
jnthmota/Analise_dados_R
ba9a0120f2150f08a14169d8de9de53bdacfe54c
528e4546caf037e70339f04a102325ffdf980682
refs/heads/main
2023-05-10T07:54:11.205751
2021-06-16T04:53:41
2021-06-16T04:53:41
376,166,252
1
0
null
null
null
null
UTF-8
R
false
false
97
r
tokenizers.R
# Load the tokenizers package for word-level tokenization helpers.
library(tokenizers)

# Split the (Portuguese) sentence into word tokens; tokenize_words() is the
# tokenizers package's word tokenizer.
tokenize_words("recuperação recuperam alunos que precisam ser recuperados")