blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
556ba8e5292c8aaa4a0c72a2e1de62cecbb1c25b | 61a6a31eeeea04ce48311b9f760ba760953a870b | /man/ConvertEvonetToIgraphWithNodeNumbers.Rd | df1b430a17fba38f4e8387e4f388e0e0e370d090 | [] | no_license | bomeara/BMhyb | 24e511479e59d29f08c8b9fa42958425800e49b2 | 22709ce2792695c1bf34e1a4a224c973eeb889e8 | refs/heads/master | 2021-11-08T21:52:36.907935 | 2021-11-08T16:09:12 | 2021-11-08T16:09:12 | 64,484,186 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 796 | rd | ConvertEvonetToIgraphWithNodeNumbers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmhyb.r
\name{ConvertEvonetToIgraphWithNodeNumbers}
\alias{ConvertEvonetToIgraphWithNodeNumbers}
\title{Convert an evonet object into igraph}
\usage{
ConvertEvonetToIgraphWithNodeNumbers(phy.graph)
}
\arguments{
\item{phy.graph}{An ape::evonet object (a phylogeny stored in phylo format that also includes a reticulation matrix)}
}
\value{
An igraph network
}
\description{
ape can already convert from evonet to igraph; the advantage of this function is that it uses the node ids from the evonet object for labels in igraph.
}
\examples{
phy <- ape::rcoal(5)
phy.evo <- ape::evonet(phy, from=1, to=2)
plot(phy.evo) # this is the ape plot
phy.igraph <- ConvertEvonetToIgraphWithNodeNumbers(phy.evo)
plot(phy.igraph)
}
|
5f38f589793240f0d7d1e236c8b37ae21db1d013 | eb675fa765a38b4ef43ad48eed5cabf0cd92e4c3 | /inst/example.R | 0fa5ba9af9b6673c97edb6039ec124e2b246424c | [
"MIT"
] | permissive | ir-sfsu/covizerate | 3925bdca40ff4be1a04b1c031dda8f38b6b03516 | 1fb046565822a8ca193bf22dbdf8e702a1166de5 | refs/heads/main | 2023-01-18T16:45:49.061721 | 2020-11-09T04:02:52 | 2020-11-09T04:02:52 | 308,476,719 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 309 | r | example.R | library(covizerate)
# Example usage for the covizerate package (semantics of covizerate() and
# cvz_options() live in that package -- not visible here).
# Toy cohort data: one row per year since entry, with the proportion of the
# cohort still continuing and the cumulative proportion graduated.
df <- data.frame(
year = 0:7,
continuation = c(1, .84, .75, .70, .50, .19, .07, .03),
graduation = c(0, 0, 0, .01, .19, .47, .59, .63)
)
# Basic call with default styling.
covizerate(df, "Fall 2010 Cohort")
# Piped call, customizing fill/stroke colors; assumes covizerate() returns an
# object that cvz_options() can modify -- TODO confirm against the package docs.
df %>%
covizerate("Fall 2010 Cohort") %>%
cvz_options(grad_fill = "forestgreen", vline_stroke = "gold")
|
e024e2f3e9f4982b78727c1d7b46ac46c628916c | 2510c3bcd764a71c504d1d0c8281103457732832 | /Problem Set 1.R | c81933bb5e28d37a7f9f0bb5e0425125a4044d18 | [] | no_license | tylerleigh94/Stats-500-PS-1 | b46e04ba3dc7e3a60e19231839e9be7853316a33 | f4ac0ff8bdb254637fde3e9b4784bed8ee9ae6c0 | refs/heads/master | 2020-08-10T00:48:19.429825 | 2019-10-10T17:49:44 | 2019-10-10T17:49:44 | 214,213,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 683 | r | Problem Set 1.R | attach(painter)
# NOTE(review): this chunk relies on a preceding attach(painter), so bare
# names (age, mn, years, group, smoking, drinking) resolve to columns of the
# attached data frame.  attach() is generally discouraged; prefer explicit
# painter$age etc. or with(painter, ...).
# Exploratory scatterplots of the outcome `mn` against predictors.
plot(age, mn)
plot(years, mn)
plot(age, years)
# Logical mask for the "painter" group; used to color the two groups.
pt<-group=="painter"
plot(age[pt], mn[pt], pch=16, col='blue')
points(age[!pt], mn[!pt], pch=16, col='red')
# Full linear model of mn on all four predictors, with confidence intervals.
m.1<-lm(mn~years+age+smoking+drinking)
confint(m.1)
# Check normality of residuals (Shapiro-Wilk).
res<-m.1$residuals
shapiro.test(res)
# Predicted mean response (with CI) for a 50-year-old non-smoker/non-drinker
# at 20 vs. 0 years of exposure.
predict(m.1, data.frame(years=20, age=50, smoking=0, drinking=0), interval='confidence')
predict(m.1, data.frame(years=0, age=50, smoking=0, drinking=0), interval='confidence')
# Nested-model F test: does adding age/smoking/drinking improve on years alone?
m.0<-lm(mn~years)
anova(m.0, m.1)
anova(m.1)
summary(m.1)
# Correlation of age and mn within the control group only.
cor.test(age[group=='control'], mn[group=='control'])
# Second data set: femur bone mineral density vs. SSRI exposure.
attach(bonessri)
lm.1<-lm(femurbmd~ssriyears+age+female)
lm.0<-lm(femurbmd~ssriyears)
anova(lm.0, lm.1)
anova(lm.0)
detach(bonessri)
|
a16951a2e4e73019df531743be5088ab587eecb2 | bfdbeed0ae3ab77ab654f49be2d2aaddb3776806 | /Scripting_in_Multiple_Languages/modifyDataStore.R | 90d04b36706abcf31811fb39726390c8f293d2af | [] | no_license | antonycode/presentations | 2d198fe0d7fbcaa3550f5eece0e728909dffd24f | 3dc8705f5a0a23f123b86e996cc34c45605222ff | refs/heads/master | 2023-06-06T03:30:22.707076 | 2021-06-25T16:08:38 | 2021-06-25T16:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,091 | r | modifyDataStore.R | #!/usr/bin/env Rscript
# modifyDataStore.R, an R script by Jason Pickering loosely based
# on a script by Ben Guaraldi (and lightly edited by same)
require(httr)
require(jsonlite)
require(magrittr)
require(assertthat)
# Customize for you
yourname <- ''
if (nchar(yourname) == 0) {
stop('Please enter your name in the code')
}
# Get parameters from dish.json
config <- fromJSON('dish.json')
url <- paste0(config$dhis$baseurl, '/api/dataStore/assignments/organisationUnitLevels.json')
# Get the current JSON from the organisationUnitLevels key of the
# assignments namespace in the data store
json <- GET(url, authenticate(config$dhis$username, config$dhis$password)) %>%
content(., "text") %>%
fromJSON(.)
# Construct a new JSON from the current JSON, adding a new key and resorting it
key <- paste('Pirate_', yourname, sep='')
json[[key]] <- json$Zimbabwe
json[[key]]$name3 <- key
json <- json[order(names(json))]
# Replace the old JSON with the new JSON
r <- PUT(url, body = toJSON(json, auto_unbox = TRUE), content_type_json())
# Report completion
message('Exiting normally')
|
291211ab460eb45759f4794b23e613838d0ff49e | 7d9db020576580627e2842c6685d2809189b4662 | /MM_HW3/HW3_tg1478.R | 0bf41e648e47895641dbb61b3f9e89f543d51d42 | [] | no_license | tashay/Mobility_Modeling | f6ca4e14426732e5e4e40d3356e8a93a1bae1e14 | 416414d36375b22623ea59ed9bad51d7eb454964 | refs/heads/master | 2021-01-19T09:41:13.705746 | 2017-03-30T15:55:33 | 2017-03-30T15:55:33 | 82,136,369 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,643 | r | HW3_tg1478.R | # Question 1
library(data.table)
DT = data.table("Procedure1"=c(22.6,27.4,32.2,37.6,42.8,37.1,32.4,27.3,32.2,24.4,42.5,37.6,22.9,47.6),
"Procedure2"=c(22.6,32.8,30.9,42.3,37.4,32.6,37.6,22.7,37.1,22.4,47.5,32.3,27.0,42.1))
#a) Calculate the mean and standard deviation of speeds for the two data sets separately.
m1<-mean(DT$Procedure1)
m2<-mean(DT$Procedure2)
sd1<-sd(DT$Procedure1)
sd2<-sd(DT$Procedure2)
#b) Are the speeds calculated using procedure 1 higher than 25 mph at a level of significance 0.05?
t.test(DT$Procedure1, mu=25, alternative = 'greater')
#Results: Yes, with a p-value less than 0.05, we can conclude that the speeds
#are higher than 25mph at a level of significance 0.05.
#c) In order to find whether the data from both procedures are the same,
# conduct tests on the mean at a level of significance of 0.05.
# State your hypotheses and assume that the variances are equal.
# Do the necessary calculations and present your results.
#Null: There is no significant difference in the speeds calculated using
#procedures 1 & 2.
#Alternate: There is a significant difference in the speeds calculate using
#procedures 1 & 2.
t.test(DT$Procedure1, DT$Procedure2, paired=TRUE, var.equal=TRUE)
#Results: Cannot reject the Null. Since the p-value is larger than 0.05,
#we cannot conclude that a significant difference exists between the two
#procedures.
#d) Would it have been more appropriate to use the paired-t test in (b) rather than a
# simple t-test and why?
#A paired t test would be more appropriate since the speed observations for procedure 1
#and procedure 2 are paired and we are interested in comparing the results of the
#two procedures.
#e) Regress speeds measured by procedure 1 against speeds measured by procedure 2.
# Show the regression results and discuss on the hypothesis tests used in the regression model.
plot(DT$Procedure1, DT$Procedure2, xlab='Procedure1 Speed', ylab='Procedure2 Speed', title(main = 'Procedure1 vs Procedure2 Speeds'))
mod <- lm(DT$Procedure1~ DT$Procedure2)
abline(mod)
summary(mod)
#Null: There is a significant difference in the speeds calculated using
#procedures 1 & 2.
#Alternate: There is no significant difference in the speeds calculate using
#procedures 1 & 2.
#Results: The linear regression accounts for ~66% of the variance in speeds and fits the
#data relatively well. With a p-value less than 0.05, we can reject the Null that
#there is significant variance in the speeds of procedure 1 and procedure 2.
#Question 2
# Get current working directory
setwd('/Users/tashaygreen/Downloads')
# Read in data
SpeedData <- read.table("speed.csv", header = T, sep = ',')
SpeedData <- read.csv("speed.csv")
SpeedData <- read.delim("speed.csv", header = T, sep = ',')
#a) Use the dataset “speed.csv” to validate the relationship between space mean speed and time mean speed.
SpeedData['validation']= SpeedData$time.mean.speed - ((var(SpeedData$time.mean.speed))/ SpeedData$time.mean.speed)
SpeedData
#Null: The calculated space mean speed is equal to the calculated
#space mean speed at a level of confidence 0.05.
#Alternate: The calculated space mean speed is not equal to the calculated
#space mean speed at a level of confidence 0.05.
t.test(SpeedData$validation, SpeedData$space.mean.speed)
#Results: With a p value greater than 0.05, we cannot reject the Null hypothesis.
#Therefore, we can conclude that the calculated and observed values for space mean speed
#are the same at level of significance 0.05.
#We are able to validate the relationship between space mean speed and time mean speed.
|
5a1c8294561a0eacd40f847330be0e5663d06644 | 1ff5948cc363d8a195697c5ea3ae3e8505c7898d | /R/TBCellGrowth.R | 628f8c1f082ababd193b161b530f567ac0642087 | [] | no_license | MazamaScience/TBCellGrowth | 35399ef9918343145144c4330915f403d135a0b8 | 92e258ea7414361ad65136f3493e9ab9fdf16495 | refs/heads/master | 2020-12-11T03:32:19.119655 | 2016-04-19T22:00:30 | 2016-04-19T22:00:30 | 38,266,687 | 0 | 0 | null | 2015-06-29T19:25:26 | 2015-06-29T19:25:26 | null | UTF-8 | R | false | false | 5,119 | r | TBCellGrowth.R | #' @docType package
#' @name TBCellGrowth
#' @title Cell Microscopy Feature Extraction and Growth Tracking
#' @description A suite of functions for processing cell microscopy data,
#' specifically developed for tuberculosis. Features include GLCM based
#' feature extraction and blob labeling as well as tracking of colony growth
#' over many frames. Cell growth output is available as CSV as well as a
#' variety of image overlays and animations.
NULL
#' @docType data
#' @keywords datasets
#' @name nameList
#' @title First Names
#' @format A character vector of length 4368.
#' @description A vector of 4368 unique first names obtained from (...). These names
#' are used to identify individual cell colonies being tracked. First names are much
#' easier to remember than IDs generated from a hashing algorithm or numeric XY
#' coordinates.
NULL
# ----- Internal Package State -------------------------------------------------
# Hidden
# Package-private environment holding mutable state shared by the set/get
# option functions and the profiling timers below.  Created with an empty
# parent so lookups cannot leak into the global environment.
TBCellGrowthEnv <- new.env(parent = emptyenv())
TBCellGrowthEnv$RunOptions <- list()      # command-line flags (see setRunOptions)
TBCellGrowthEnv$ProfileStart <- NULL      # wall-clock time of profileStart()
TBCellGrowthEnv$ProfileTimepoint <- NULL  # time of the most recent profilePoint()
TBCellGrowthEnv$ProfileSecs <- list()     # named cumulative-seconds counters
#' @keywords environment
#' @export
#' @title Set Run Options
#' @param opt list of options; after this call the logical flags
#'   \code{verbose}, \code{profile} and \code{debug_images} are guaranteed to
#'   exist, defaulting to \code{FALSE} when missing or non-logical
#' @description Store command line options such as \code{verbose} and \code{profile}
#' so that they are available internally.
#' @return None
setRunOptions <- function(opt) {
  # Reduce an option to a single logical flag.  The previous
  # ifelse(is.logical(x), x, FALSE) misused the vectorized ifelse() for a
  # scalar test: it returned logical(0)-derived NA for empty logicals and
  # silently dropped elements of longer vectors.  A plain if/else is the
  # correct scalar construct.
  asFlag <- function(x) {
    if (is.logical(x) && length(x) >= 1) x[[1]] else FALSE
  }
  TBCellGrowthEnv$RunOptions <- opt
  # Guarantee that some options exist
  TBCellGrowthEnv$RunOptions[['verbose']] <- asFlag(opt$verbose)
  TBCellGrowthEnv$RunOptions[['profile']] <- asFlag(opt$profile)
  TBCellGrowthEnv$RunOptions[['debug_images']] <- asFlag(opt$debug_images)
}
#' @keywords environment
#' @export
#' @title Get Run Options
#' @param option name of a specific option
#' @description Look up the stored command line options.  With no argument the
#' whole option list is returned; with \code{option} given, only that entry
#' (or \code{NULL} when it does not exist).
#' @return List of command line flags, or a single option value.
getRunOptions <- function(option=NULL) {
  opts <- TBCellGrowthEnv$RunOptions
  # No specific option requested: hand back the whole list.
  if (is.null(option)) {
    return(opts)
  }
  return(opts[[option]])
}
#' @keywords environment
#' @export
#' @title Get Profile Secs
#' @description Returns a named list containing the cumulative seconds associated
#' with a variety of tasks as determined by calls to profilePoint().
#' @return List of profiling stats.
#' @seealso profilePoint
getProfileSecs <- function() {
  return(TBCellGrowthEnv$ProfileSecs)
}
#' @keywords environment
#' @export
#' @title Start Profiling Timers
#' @description Resets the per-task counters and stamps the internal *start*
#' and *timepoint* clocks, from which profilePoint() and profileEnd() compute
#' elapsed 'clock' time.
#' @return None
#' @seealso profilePoint
#' @seealso profileEnd
profileStart <- function() {
  # Clear any counters left over from a previous profiling run, then record
  # the reference times for subsequent elapsed-time calculations.
  TBCellGrowthEnv$ProfileSecs <- list()
  TBCellGrowthEnv$ProfileStart <- Sys.time()
  TBCellGrowthEnv$ProfileTimepoint <- Sys.time()
}
#' @keywords environment
#' @export
#' @title Print Partial Elapsed Time
#' @param name named counter which will be incremented with secs since last timepoint
#' @param message character message
#' @description Returns the elapsed time since the
#' last time profilePoint() was called or since profileStart().
#' If \code{message} is not \code{NULL} and profiling is enabled, a message
#' with this information is printed out.
#' @return Elapsed time in seconds (invisibly).
#' @seealso profileStart
#' @seealso profileEnd
profilePoint <- function(name='unknown', message=NULL) {
  now <- Sys.time()
  elapsed <- as.numeric( difftime(now, TBCellGrowthEnv$ProfileTimepoint, units='secs') )
  TBCellGrowthEnv$ProfileTimepoint <- now
  # Increment profile counter
  if (is.null(TBCellGrowthEnv$ProfileSecs[[name]])) {
    TBCellGrowthEnv$ProfileSecs[[name]] <- elapsed
  } else {
    TBCellGrowthEnv$ProfileSecs[[name]] <- TBCellGrowthEnv$ProfileSecs[[name]] + elapsed
  }
  # Print out a message if desired.  isTRUE() guards against the 'profile'
  # option being NULL (setRunOptions() never called), which would previously
  # raise "argument is of length zero"; getRunOptions() is used for
  # consistency with profileEnd().
  if (isTRUE(getRunOptions('profile')) && !is.null(message)) {
    cat(paste(sprintf("%7.1f",elapsed),message,'\n'))
  }
  return(invisible(elapsed))
}
#' @keywords environment
#' @export
#' @title Print Total Elapsed Time
#' @param message character message
#' @description Returns the elapsed time since profileStart().
#' If \code{message} is not \code{NULL} and profiling is enabled, a message
#' with this information is printed out.  With the \code{verbose} option set,
#' all accumulated per-task counters are printed as well.
#' @return Elapsed time in seconds (invisibly).
#' @seealso profileStart
#' @seealso profilePoint
profileEnd <- function(message=NULL) {
  now <- Sys.time()
  elapsed <- as.numeric( difftime(now, TBCellGrowthEnv$ProfileStart, units='secs') )
  # Print out a message if desired.  isTRUE() guards against a NULL option
  # when setRunOptions() was never called (previously an error).
  if (isTRUE(getRunOptions('profile')) && !is.null(message)) {
    cat(paste(sprintf("%7.1f",elapsed),message,'\n'))
  }
  # Print out all timers if desired.  The loop uses its own variable so that
  # the total elapsed time is returned; the original reused `elapsed` and so
  # returned the last counter's value instead of the total.
  if (isTRUE(getRunOptions('verbose'))) {
    for (name in names(TBCellGrowthEnv$ProfileSecs)) {
      secs <- TBCellGrowthEnv$ProfileSecs[[name]]
      cat(paste('\t',sprintf("%7.1f",secs),'seconds on',name,'\n'))
    }
  }
  return(invisible(elapsed))
}
|
4700a68c7808b98a822d80693fa93b1fa36bd777 | 11da18c39ead68ee6cdfb0e97c1a73fd7ebecf39 | /read_events1.R | ab3dfc164d4f31059f52ccf76880520d38ee1822 | [] | no_license | lsloan/Repilac | a631e77f28f55b2547a8c96ecfe25f2e14d92f6b | 94481cc18d8b1ec74c4183b0b9dcb5f82f352e9f | refs/heads/master | 2020-04-03T09:53:39.702033 | 2016-08-12T08:42:02 | 2016-08-12T08:42:02 | 64,956,571 | 1 | 1 | null | 2016-08-09T14:48:21 | 2016-08-04T18:04:31 | R | UTF-8 | R | false | false | 1,018 | r | read_events1.R | # @author Lance E Sloan <lsloan at umich dot edu>
library(RJSONIO)
# R doesn't provide a function to get the path to the current program
# in a way that works in every environment. (Insert fantasies here
# about PHP's __FILE__ magic variable.) This path will probably need
# to be adjusted before this program will run.
pathToCaliperEventJSONFile <- './sample_caliper_events.json'
allEvents <- fromJSON(pathToCaliperEventJSONFile)
event <- allEvents[7] # get the whole seventh event, all contents
event # print event object's attributes
event$actor # print actor (the user) sub-object's attributes
event$actor$name # print user's full name or username
event$actor$`@id` # eg: https://example.edu/#profile:user_name
summary(event)
# set actors to list of generic objects containing actor data:
actors<-sapply(allEvents, function(x) x[['actor']])
# alternate method: set actors to list of actor objects:
#actors<-sapply(allEvents, function(x) x['actor'])
actors # prints all actors (CAUTION: May be MANY LINES!)
|
296608b4eaf217414741bdfce30aad5b7876c0da | 4f10c7d4b95a142c8f27d57f42ef4cc1f00c9845 | /man/plotlogm.resp.Rd | b022ef076681181f45437e87dd63013004ec1078 | [] | no_license | cran/hbim | f1cf926f43d1a36e5f90f62595493168773cdf2f | e35d7cf8cb0e84dbf62cb0480b47249648628001 | refs/heads/master | 2022-05-13T00:06:01.900605 | 2022-05-09T13:40:02 | 2022-05-09T13:40:02 | 17,696,602 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,971 | rd | plotlogm.resp.Rd | \name{plotlogm.resp}
\alias{plotlogm.resp}
\alias{plotresp.equiv}
\alias{plotresp.mix}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot Hill/Bliss Independence Model Data.}
\description{
These functions take data output calculated from the data generating functions (see details)
and plot either: the mean of the log transformed antibody doses by the response (\code{plotlogm.resp}),
equivalent increase in antibody plots (\code{plotresp.equiv}), or response
of one component versus a mixture (for details see \code{vignette("hbimdetails")}).
}
\usage{
plotlogm.resp(D, YLAB = "Efficacy", YLIM = c(0, 1),
XLIM = c(-2, 2),TITLE="")
plotresp.equiv(D, XLIM = c(0, 1), YLIM = c(1, 100),
RLAB = "Efficacy of", bounds= XLIM,TITLE="")
plotresp.mix(D, RLAB = "Efficacy of", XYLIM = c(0, 1),TITLE="")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{D}{data, see details}
\item{YLAB}{y label}
\item{YLIM}{range of y axis}
\item{XLIM}{range of x axis}
\item{RLAB}{response label, currently use only either "Efficacy of" or "\% Protected by"}
\item{bounds}{bounds on response of second antibody curve, see \code{vignette("hbimdetails")} }
\item{XYLIM}{range of both x and y axes}
\item{TITLE}{title of plot}
}
\details{
The following functions create data sets for plotting:
\code{\link{eff.sigma}},
\code{\link{eff.mu}},
\code{\link{eff.rho}},
\code{\link{pp.sigma}},
\code{\link{pp.mu}},
\code{\link{pp.rho}}.
These functions plot that data. For details see \code{vignette("hbimdetails")}.
}
\value{
Plots
}
%\references{ ~put references to the literature/web site here ~ }
%\author{ ~~who you are~~ }
%\note{ ~~further notes~~
%
% ~Make other sections like Warning with \section{Warning }{....} ~
%}
%\seealso{ ~~objects to See Also as \code{\link{help}}, ~~~ }
%\examples{ }
\keyword{hplot}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
56727dde62403d82a9334aea02c7e32529261226 | 678d8033de32840da422e1b8759bd5899aa7f94f | /code/fig6_PhytoToxins.R | 65f06aa8b2d2bb00f6af0ddb93456558cf7af84d | [
"CC-BY-4.0"
] | permissive | biogeochem/formbloom_conestogo_2017HAB | eb2349b7efa16aed43b0e2ea07ca6eb74dee6833 | 21130ee4a0c06d8967c3dfebaaa62bc008d7c39d | refs/heads/master | 2021-06-26T08:45:33.386021 | 2020-12-08T19:22:07 | 2020-12-08T19:22:07 | 174,236,733 | 1 | 3 | CC-BY-4.0 | 2020-12-08T19:22:08 | 2019-03-06T23:28:19 | R | UTF-8 | R | false | false | 4,879 | r | fig6_PhytoToxins.R | ####
#
#
#
####
# Phytoplankton ----
## Load data
phyto.dat <- read.csv("./data/Conestogo_species_clean.csv") %>%
mutate(date = as.Date(date, format = "%Y-%m-%d"), DOY = yday(date))
phyto.tax <- read.csv("./data/Conestogo_speciestaxonomy_clean.csv") %>%
select(Access.Code, Phylum, Family, Genus, Species, Description.Code)
# data frames
phyto.merge <- phyto.dat %>%
left_join(phyto.tax, by = c("species.code"="Access.Code")) %>% # merge taxonomy with counts
mutate(station = recode_factor(station, "center" = "clc", "east arm" = "cle"), #fix issues with site factoring
common.name = paste(Genus, Species, sep = " ")) %>% # create a common name
filter(date <=as.Date("2017-12-31"), # subset 2017 data
Description.Code == "PHYT",# remove heterocyst counts
depth == 2) # only want epi samples
phyto.com <- phyto.merge %>%
filter(Description.Code == "PHYT") %>%
group_by(station, date, DOY, Phylum, group) %>%
dplyr::summarise(total.biomass = sum(biomass.mg.m3),
total.density = sum(density.cells.l))
## Phytoplankton trends
site_labels = c(cle = "Transitional", clc = "Lacustrine")
plot.phyto.com <- phyto.com %>%
filter(station != "clw", group != "Euglenophyte")
plot.phyto.com <- as.data.frame(plot.phyto.com)
plot.phyto.com$group <- droplevels(plot.phyto.com$group)
p <- ggplot(data = plot.phyto.com,
aes(x = yday(date), y = total.biomass, fill = group)) +
annotate("rect", xmin = 173, xmax = 174, ymin = -Inf, ymax = Inf, alpha = 0.6, fill = "grey40")+
geom_vline(xintercept = yday(as.Date("2017-07-23")), col = "grey40", size = 0.5) +
geom_area(na.rm = TRUE, position = "stack", col = "white") +
labs(x = "", y = expression("Biomass (mg m"^-3*")")) +
theme(legend.position = "left", legend.background = element_rect(fill = "white"),
legend.key.size = unit(0.15,"in"), legend.text = element_text(size = 10),
plot.margin = unit(c(0, 0, 0, 0), "cm")) +
facet_grid(~ station, labeller = labeller(station = site_labels)) +
#scale_fill_viridis(discrete = TRUE, option = "D",direction = -1)+
#scale_color_viridis(discrete = TRUE, option = "D",direction = -1) +
scale_fill_brewer(palette = "Dark2", guide_legend(title = "")) +
lims(x = c(170,230)) +
scale_y_continuous(labels = comma)
p$data$station <- factor(p$data$station, levels = c("cle", "clc"))
p.com <- p
reposition_legend(p, "top left", panel = "panel-1-1", offset = c(0.025, 0.025), plot = T)
print(p.com)
# Toxins ----
# Toxin concentrations (below-LOQ values pre-modified in the input file).
tox.raw <- read.csv("./data/Conestogo_toxins-LOQ-MOD.csv") %>%
mutate(Sampling.date = as.Date(Sampling.date, format = "%Y-%m-%d"),
DOY = yday(Sampling.date))
#tox.raw %>% group_by(site) %>% filter(MC.LR == max(MC.LR))
#tox.raw %>% filter(site == "cle", DOY == 200)
# Long format (one row per toxin), converting ng/L to ug/L.
# NOTE(review): gather() is superseded; pivot_longer() is the modern equivalent.
tox.dat <- tox.raw %>%
select(site, Sampling.date, DOY, Depth..m., MC.LR, MC.RR, MC.YR, AP.A) %>%
gather(key = toxin, value = concentration, MC.LR:AP.A) %>%
mutate(concentration = concentration/1000)
# Treat missing measurements as zero concentration for the stacked plot.
tox.dat$concentration[is.na(tox.dat$concentration)] <-0
site_labels <- c(clc = "Lacustrine", cle = "Transitional")
# Stacked-area plot of toxin concentration by DOY, faceted by site; event
# markers mirror the phytoplankton panel (NOTE(review): vline here uses
# 2017-07-22 vs. 2017-07-23 above -- confirm which date is intended).
p <- ggplot(data = tox.dat,
aes(x = DOY, y = concentration, fill = toxin)) +
annotate("rect", xmin = 173, xmax = 174, ymin = -Inf, ymax = Inf, alpha = 0.6, fill = "grey40")+
geom_vline(xintercept = yday(as.Date("2017-07-22")), col = "grey40", size = 0.5) +
geom_area(na.rm = TRUE, position = "stack", col = "white") +
scale_x_continuous(lim = c(170,230), breaks = c(180,200,220), labels = c(180,200,220)) +
labs(x = "Day of Year (DOY)",
y = expression(Toxin~concentration~(mu*g~L^-1))) +
theme(legend.position = "bottom", strip.text.x = element_blank(),
legend.key.size = unit(0.15,"in"), legend.text = element_text(size = 10),
plot.margin = unit(c(0, 0, 0, 0), "cm")) +
facet_wrap(~ site, labeller = labeller(site = site_labels)) +
#facet_wrap(~ site) +
scale_fill_viridis(discrete = TRUE, option = "C",direction = 1,
guide = guide_legend(title = NULL, ncol = 1))
#scale_color_viridis(discrete = TRUE, option = "C",direction = 1)
#scale_fill_grey(start = 0.2, end = 0.9, aesthetics = "fill")
#scale_x_date(limits= c(as.Date("2017-07-05"), as.Date("2017-08-17")),
# date_breaks = "2 weeks", date_labels = "%d %b")
# Match facet order with the phytoplankton panel.
p$data$site<- factor(p$data$site,
levels = c("cle", "clc"))
p.tox <- p
# Combine the two panels (p.com from the phytoplankton section) into one
# labeled figure and save it.
p <- plot_grid(reposition_legend(p.com, 'top left', panel = "panel-1-1", offset = c(0.025,0.01), plot = F),
reposition_legend(p.tox, 'top left', panel = "panel-1-1", offset = c(0.025,0.025), plot = F),
ncol = 1, nrow = 2, align = "hv", axis = c("lr"), labels = "auto")
print(p)
ggsave(plot = p,
filename = "./supporting-files/Fig6_PhytoToxinTrends.tiff",
height = 6, width = 7, dpi = 500, device = "tiff", scale = 1.25)
|
ab4cba0b649dbd0fd74356c94a21d9ec2e6bfb51 | 623f1ec7fbc38078e59a7ede0e5f42893f93f319 | /principalComponents/pcaIntro.R | c623332dbaba629c5f4106ddf964fcca906f44b1 | [] | no_license | elijahverdoorn/MATH384 | 17f78d94708c400e540598d91f7ba01c19d393ca | 16609554d6736d952da13ac9d3b73d3885badb25 | refs/heads/master | 2021-01-15T09:38:22.457488 | 2016-12-20T17:26:53 | 2016-12-20T17:26:53 | 68,355,565 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,761 | r | pcaIntro.R | library(MASS)
# NOTE(review): this line runs before `n` is defined below, so it errors if
# the script is sourced top to bottom; it appears to be an interactive
# scratch call demonstrating MASS::mvrnorm.
mvrnorm(n,c(1,2),diag(2,3))
# Simulate y = b0 + b1*X1 + b2*X2 + noise, with X1 having much larger
# variance (10) than X2 (1).
b0<-3
b1<-1
b2<-2
n<-100
X<-mvrnorm(n,c(0,0),diag(c(10,1),2))
dim(X)
y<-b0+X%*% c(b1,b2)+rnorm(n,0,1)
dat.df<-data.frame(X,y)
names(dat.df)
ggplot(dat.df,aes(X1,X2))+geom_point()+
coord_fixed()
### Can see that X1 and X2 have very different variances
ggplot(dat.df)+
geom_density(aes(X1),color="black",fill="red",alpha=.5)+
geom_density(aes(X2),color="black",fill="blue",alpha=.5)
##########################
##Principal Component Model
## The plan is to identify orthogonal directions which successively
## maximize the variance after projecting onto the directions
mod.pca1<-prcomp(X)
summary(mod.pca1)
##The rotation to optimize the variances is the identity (approximately)!
rot<-mod.pca1$rotation
rot
#############################################
## Rotate the data
## define a rotation matrix with angle theta
theta=pi/3
rotMat<-matrix( c(cos(theta),-sin(theta),sin(theta),cos(theta)),nrow=2)
rotMat
### Rotate the data
Xrot<-X %*% rotMat
datRot.df<-data.frame(Xrot,y)
names(datRot.df)
ggplot(datRot.df,aes(X1,X2))+geom_point()+
coord_fixed()
##Now X1, X2 have about the same variance
ggplot(datRot.df)+
geom_density(aes(X1),color="black",fill="red",alpha=.5)+
geom_density(aes(X2),color="black",fill="blue",alpha=.5)
######################
## Principal components model on the rotated data
mod.pca<-prcomp(Xrot)
## Now the rotation to optimize the variances is the inverse of rot
rot2<-mod.pca$rotation
rot2
###Note...this rotates back to the identity (approximately)
rot2%*% rotMat
# Applying the PCA rotation recovers (approximately) the original X.
X2<-Xrot %*% rot2
dat2.df<-data.frame(X2)
names(dat2.df)<-c("X1","X2")
##look familiar???
ggplot(dat2.df)+
geom_density(aes(X1),color="black",fill="red",alpha=.5)+
geom_density(aes(X2),color="black",fill="blue",alpha=.5)
|
3e1a542fcdb0a2611f25e2f2636160a34ae40525 | f2643256c6611d7de0db96d162f594388c2c2c50 | /analyses/Trial_1_Outcomes/Attendance.R | 500762413416cdbbac2295f4c20f390d50937d8b | [] | no_license | raubreywhite/trial_dofiles | e06a5b3b39e9195eda79dd33856d67c918ec4053 | eface3b83b107cf7e621b3c654e65b5cbd45b711 | refs/heads/master | 2022-06-14T03:26:17.492945 | 2022-06-02T07:27:04 | 2022-06-02T07:27:04 | 114,857,557 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,877 | r | Attendance.R | ################################# Attendance ################################
# making vars
smallD[,refHRhosp:= FALSE]
smallD[(TrialOne_manRef_HR_00_00==T|
TrialOne_manRef_HR_01_01==T|
TrialOne_manRef_HR_02_02==T|
TrialOne_manRef_HR_03_03==T|
TrialOne_manRef_HR_04_04==T|
TrialOne_manRef_HR_05_05==T|
TrialOne_manRef_HR_06_06==T|
TrialOne_manRef_HR_07_07==T|
TrialOne_manRef_HR_08_08==T|
TrialOne_manRef_HR_09_09==T|
TrialOne_manRef_HR_10_10==T|
TrialOne_manRef_HR_11_11==T|
TrialOne_manRef_HR_12_12==T|
TrialOne_manRef_HR_13_13==T|
TrialOne_manRef_HR_14_14==T)|
(TrialOne_manRef_Hosp_00_00==T|
TrialOne_manRef_Hosp_01_01==T|
TrialOne_manRef_Hosp_02_02==T|
TrialOne_manRef_Hosp_03_03==T|
TrialOne_manRef_Hosp_04_04==T|
TrialOne_manRef_Hosp_05_05==T|
TrialOne_manRef_Hosp_06_06==T|
TrialOne_manRef_Hosp_07_07==T|
TrialOne_manRef_Hosp_08_08==T|
TrialOne_manRef_Hosp_09_09==T|
TrialOne_manRef_Hosp_10_10==T|
TrialOne_manRef_Hosp_11_11==T|
TrialOne_manRef_Hosp_12_12==T|
TrialOne_manRef_Hosp_13_13==T|
TrialOne_manRef_Hosp_14_14==T),refHRhosp:=TRUE]
xtabs(~smallD$refHRhosp, addNA=T)
## Define Opportunities
# Opp_k = 1 when the woman had the opportunity to attend ANC visit k (based
# on her booking gestational age, or on having had the previous opportunity),
# and is set back to 0 / decremented when a referral removed that opportunity.
# oppt 16 week visit
smallD[,Opp_1:= as.numeric(NA)]
smallD[bookgestagedays_cats %in% c("(0,104]"),Opp_1:=1]
# Women referred to HR care or hospital in weeks 0-14 lose this opportunity.
smallD[bookgestagedays_cats %in% c("(0,104]") &
refHRhosp==T,Opp_1:=0]
xtabs(~smallD$Opp_1, addNA=T)
# oppt 18-22 visit
smallD[,Opp_2:=as.numeric(NA)]
smallD[bookgestagedays_cats %in% c("(104,125]")| Opp_1==1, Opp_2:=1]
xtabs(~smallD$Opp_2, addNA=T)
#removing opportunities
# FIX: the referral alternatives are now wrapped in one set of parentheses so
# the Opp_2==1 guard applies to all of them.  Previously `Opp_2==1 & (wk15)|
# (wk16)| (wk17)` guarded only the week-15 term, because & binds tighter than
# | in R -- unlike the Opp_3/Opp_4/Opp_5 steps below, which all parenthesize
# the alternatives.
smallD[Opp_2==1 &
((TrialOne_manRef_HR_15_15==T|TrialOne_manRef_Hosp_15_15==T)|
(TrialOne_manRef_HR_16_16==T|TrialOne_manRef_Hosp_16_16==T)|
(TrialOne_manRef_HR_17_17==T|TrialOne_manRef_Hosp_17_17==T)),
Opp_2:=Opp_2-1]
xtabs(~smallD$Opp_2, addNA=T)
# 24-28 week visit
smallD[,Opp_3:=as.numeric(NA)]
smallD[bookgestagedays_cats %in% c("(125,160]",
"(160,167]") | Opp_2==1, Opp_3:=1]
xtabs(~smallD$Opp_3, addNA=T)
# removing opportunities
smallD[Opp_3==1 & ((TrialOne_manRef_HR_18_18==T|TrialOne_manRef_Hosp_18_18==T)|
(TrialOne_manRef_HR_19_19==T|TrialOne_manRef_Hosp_19_19==T)|
(TrialOne_manRef_HR_20_20==T|TrialOne_manRef_Hosp_20_20==T)|
(TrialOne_manRef_HR_21_21==T |TrialOne_manRef_Hosp_21_21==T)|
(TrialOne_manRef_HR_22_22==T|TrialOne_manRef_Hosp_22_22==T)|
(TrialOne_manRef_HR_23_23==T|TrialOne_manRef_Hosp_23_23==T)),
Opp_3:=Opp_3-1]
xtabs(~smallD$Opp_3, addNA=T)
# 31-33 week visit
smallD[,Opp_4:=as.numeric(NA)]
smallD[bookgestagedays_cats %in% c("(160,167]",
"(167,202]",
"(202,216]")|Opp_3== 1, Opp_4:=1]
xtabs(~smallD$Opp_4, addNA=T)
# removing opportunities
smallD[Opp_4==1 &
((TrialOne_manRef_HR_24_24==T|TrialOne_manRef_Hosp_24_24==T)|
(TrialOne_manRef_HR_25_25==T|TrialOne_manRef_Hosp_25_25==T)|
(TrialOne_manRef_HR_26_26==T|TrialOne_manRef_Hosp_26_26==T)|
(TrialOne_manRef_HR_27_27==T|TrialOne_manRef_Hosp_27_27==T)|
(TrialOne_manRef_HR_28_28==T|TrialOne_manRef_Hosp_28_28==T)|
(TrialOne_manRef_HR_29_29==T|TrialOne_manRef_Hosp_29_29==T)|
(TrialOne_manRef_HR_30_30==T|TrialOne_manRef_Hosp_30_30==T)),
Opp_4:=Opp_4-1]
xtabs(~smallD$Opp_4, addNA=T)
# 35-37 week visit
smallD[,Opp_5:=as.numeric(NA)]
smallD[bookgestagedays_cats %in% c("(216,237]",
"(237,244]") | Opp_4==1, Opp_5:=1]
xtabs(~smallD$Opp_5, addNA=T)
smallD[Opp_5==1 &
((TrialOne_manRef_HR_31_31==T|TrialOne_manRef_Hosp_31_31==T)|
(TrialOne_manRef_HR_32_32==T|TrialOne_manRef_Hosp_32_32==T)|
(TrialOne_manRef_HR_33_33==T|TrialOne_manRef_Hosp_33_33==T)|
(TrialOne_manRef_HR_34_34==T|TrialOne_manRef_Hosp_34_34==T)),
Opp_5:=Opp_5-1]
xtabs(~smallD$Opp_5, addNA=T)
################ successes ##########
# Succ_k: NA when there was no opportunity; FALSE when the opportunity
# existed; upgraded to TRUE when the corresponding ANC visit was recorded.
# 15-17 week visit
smallD[,Succ_1:=as.logical(NA)]
smallD[Opp_1==1, Succ_1:=FALSE]
smallD[Succ_1==F &
TrialOne_anvisitnew_15_17==T, Succ_1:=TRUE]
xtabs(~smallD$Succ_1, addNA=T)
# 18-22 week visit
smallD[,Succ_2:=as.logical(NA)]
smallD[Opp_2==1, Succ_2:=FALSE]
smallD[Succ_2==F & TrialOne_anvisitnew_18_22==T, Succ_2:=TRUE]
xtabs(~smallD$Succ_2, addNA=T)
# 24-28 week visit
smallD[,Succ_3:=as.logical(NA)]
smallD[Opp_3==1, Succ_3:=as.logical(FALSE)]
smallD[Succ_3==F & TrialOne_anvisitnew_24_28==T, Succ_3:=TRUE]
xtabs(~smallD$Succ_3, addNA=T)
# 31-33 week visit
smallD[,Succ_4:=as.logical(NA)]
smallD[Opp_4==1, Succ_4:=FALSE]
smallD[Succ_4==F & TrialOne_anvisitnew_31_33==T, Succ_4:=TRUE]
xtabs(~smallD$Succ_4, addNA=T)
# 35-37
smallD[,Succ_5:=as.logical(NA)]
smallD[Opp_5==1, Succ_5:=FALSE]
smallD[Succ_5==F & TrialOne_anvisitnew_35_37==T, Succ_5:=TRUE]
xtabs(~smallD$Succ_5, addNA=T)
# Preliminary attendance summary per trial arm: counts of bookings per
# gestational-age category, opportunities, and successes/failures per visit
# window, written to an Excel sheet.
# NOTE(review): ANC2428Opps uses sum(!is.na(Opp_3)) (count of non-missing)
# while every other *Opps column uses sum(Opp_x, na.rm=T) -- confirm this
# difference is intentional.
prelimAtt <- smallD[,.(N=.N,
bookedb414=sum(bookgestagedays_cats=="(0,104]", na.rm = T),
ANC15_17Opps=sum(Opp_1,na.rm=T),
ANC15_17=sum(Succ_1, na.rm=T),
ANC15_17FALSE=sum(Succ_1==F, na.rm=T),
booked1515=sum(bookgestagedays_cats=="(104,125]", na.rm = T),
ANC18_22Opps=sum(Opp_2, na.rm=T),
ANC18_22=sum(Succ_2, na.rm=T),
ANC18_22FALSE=sum(Succ_2==F, na.rm=T),
booked1822=sum(bookgestagedays_cats=="(125,160]", na.rm = T),
booked2323=sum(bookgestagedays_cats=="(160,167]", na.rm = T),
ANC2428Opps=sum(!is.na(Opp_3), na.rm=T),
ANC24_28TRUE=sum(Succ_3, na.rm=T),
ANC24_28FALSE=sum(Succ_3==F, na.rm=T),
booked2428=sum(bookgestagedays_cats=="(167,202]", na.rm = T),
booked2930=sum(bookgestagedays_cats=="(202,216]", na.rm = T),
ANC31_33Opps=sum(Opp_4, na.rm=T),
ANC31_33=sum(Succ_4, na.rm=T),
ANC31_33FALSE=sum(Succ_4==F, na.rm=T),
Booked31_33=sum(bookgestagedays_cats=="(216,237]", na.rm = T),
Booked34_34=sum(bookgestagedays_cats=="(237,244]", na.rm = T),
ANC3537Opps=sum(Opp_5, na.rm=T),
ANC3537=sum(Succ_5, na.rm=T),
Booked35_37=sum(bookgestagedays_cats=="(244,265]", na.rm = T)),
keyby=.(ident_dhis2_control)]
# Dated export, e.g. "<today>_prelim_Attendance.xlsx" under <results>/T1/.
openxlsx::write.xlsx(prelimAtt,file.path(FOLDER_DATA_RESULTS,
"T1",
sprintf("%s_prelim_Attendance.xlsx",
lubridate::today())))
###### Attendance data set ######
# Collect the generated Succ_* / Opp_* column names by prefix.
AttSucc <- names(smallD)[stringr::str_detect(names(smallD),"^Succ_")]
AttOpp <- names(smallD)[stringr::str_detect(names(smallD),"^Opp_")]
# prettyExposure: "E" for the intervention arm (control==FALSE), "F" for the
# control arm (control==TRUE) -- blinded labels for reporting.
smallD[ident_dhis2_control==F, prettyExposure:="E"]
smallD[ident_dhis2_control==T, prettyExposure:="F"]
# Keep the shared identifier/covariate columns (varskeepAll, defined
# elsewhere) plus all opportunity and success columns, and export.
varskeep <- c(varskeepAll,AttOpp,AttSucc)
attendance <-smallD[,varskeep,with=F]
openxlsx::write.xlsx(attendance,file.path(FOLDER_DATA_RESULTS,
"T1",
sprintf("%s_Attendance_outcomes.xlsx",
lubridate::today())))
################################## OLD CODE ##################################
# NOTE(review): superseded by the Opp_* logic above; kept for reference only.
# It decrements a cumulative OpportunityofVisits counter per referral window,
# separately for the control and intervention arms (the original intent,
# per the comments, was to also add trialmanperf for the intervention arm).
# NOTE(review): the second disjunct in each condition (e.g. TrialOne_refHR_35_37)
# has no ==T, relying on the column already being logical.
#id women referred at some point in time to remove the opportunities she may have
#need to separate control and intervention separately
#for intervention add the trialmanperf
#control
smallD[ident_dhis2_control==T & (TrialOne_refHosp_35_37==T | TrialOne_refHR_35_37),
OpportunityofVisits:=OpportunityofVisits-0]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_34_34==T | TrialOne_refHR_34_34),
OpportunityofVisits:=OpportunityofVisits-1]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_31_33==T | TrialOne_refHR_31_33),
OpportunityofVisits:=OpportunityofVisits-1]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_24_28==T | TrialOne_refHR_24_28),
OpportunityofVisits:=OpportunityofVisits-2]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_23_23==T | TrialOne_refHR_23_23),
OpportunityofVisits:=OpportunityofVisits-3]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_18_22==T | TrialOne_refHR_18_22),
OpportunityofVisits:=OpportunityofVisits-3]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_15_17==T | TrialOne_refHR_15_17),
OpportunityofVisits:=OpportunityofVisits-4]
smallD[ident_dhis2_control==T & (TrialOne_refHosp_00_14==T | TrialOne_refHR_00_14),
OpportunityofVisits:=OpportunityofVisits-5]
# Intervention
smallD[ident_dhis2_control==F & (TrialOne_refHosp_35_37==T | TrialOne_refHR_35_37),
OpportunityofVisits:=OpportunityofVisits-0]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_34_34==T | TrialOne_refHR_34_34),
OpportunityofVisits:=OpportunityofVisits-1]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_31_33==T | TrialOne_refHR_31_33),
OpportunityofVisits:=OpportunityofVisits-1]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_24_28==T | TrialOne_refHR_24_28),
OpportunityofVisits:=OpportunityofVisits-2]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_23_23==T | TrialOne_refHR_23_23),
OpportunityofVisits:=OpportunityofVisits-3]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_18_22==T | TrialOne_refHR_18_22),
OpportunityofVisits:=OpportunityofVisits-3]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_15_17==T | TrialOne_refHR_15_17),
OpportunityofVisits:=OpportunityofVisits-4]
smallD[ident_dhis2_control==F & (TrialOne_refHosp_00_14==T | TrialOne_refHR_00_14),
OpportunityofVisits:=OpportunityofVisits-5]
#check
xtabs(~smallD$OpportunityofVisits, addNA = T)
##-1 opportunity for visits - need to check this out
#intervention
#Attendance (Success)
# Number of routine windows actually attended on time (0..5), one
# increment per attended window.
smallD[,AttendedonTime:=0]
smallD[TrialOne_anvisitnew_15_17==T, AttendedonTime:=AttendedonTime+1]
smallD[TrialOne_anvisitnew_18_22==T, AttendedonTime:=AttendedonTime+1]
smallD[TrialOne_anvisitnew_24_28==T, AttendedonTime:=AttendedonTime+1]
smallD[TrialOne_anvisitnew_31_33==T, AttendedonTime:=AttendedonTime+1]
smallD[TrialOne_anvisitnew_35_37==T, AttendedonTime:=AttendedonTime+1]
xtabs(~smallD$AttendedonTime, addNA = T)
#save data set in clean folder
#will use it for hypertension
2c0eee0420adddaac3f240a01a00c3fc0c234ca9 | 91abf315dcd9fb08d0d070929750d50546d55ad4 | /Source Code for Functions in PEEGOAlgorithm Package/randomrow.R | cef1987f4da645a495c53da932b50d375e9fc0e8 | [
"MIT"
] | permissive | sustainable-processes/PEEGO | 0fbd8814d781173cc20f1f99f4501734630012f6 | b7bf627f7d505e4970220484e1add02810cc7bbc | refs/heads/main | 2023-04-13T11:00:30.820429 | 2022-11-15T16:46:58 | 2022-11-15T16:46:58 | 378,897,501 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,537 | r | randomrow.R | #' Function to generate random rows
#'
#' Function used to generate the random candidate rows considered in the
#' EGO algorithm. Only the surfactant columns selected by \code{s.index}
#' and the three process-variable columns 6:8 receive uniform draws; all
#' other columns stay 0.
#'
#' @param nrow number of random rows
#' @param s.index index of active (non-zero) surfactants for random starting rows
#' @param lb lower bounds for controllable variables, on 0 to 1 scale
#' @param ub upper bounds for controllable variables, on 0 to 1 scale
#'
#' @return A \code{nrow} x \code{length(lb)} matrix of random rows which are
#' considered for swapping for a certain row in the design. When
#' \code{nrow = 0} an empty (0-row) matrix is returned.
#' @export
#'
#' @examples set.seed(123) # Set random seed for reproducible example
#' randomrow(nrow=5, s.index=c(1,0,1,0,1), lb=c(rep(0, 6), rep(0.05, 2)), ub=rep(1,8))
#' @seealso EGOAlg()
randomrow<-function(nrow, s.index, lb, ub) {
  random.row<-matrix(0, nrow=nrow, ncol=length(lb)) # Initialising the set of unique rows; inactive columns stay 0
  # Columns receiving random values: active surfactants plus process
  # variables 6:8. NOTE(review): zeros in s.index are silently dropped by
  # R's zero-index rule, so runif(6) lines up with the surviving columns;
  # confirm this is the intended encoding of "inactive".
  active.cols <- c(s.index, 6:8)
  # seq_len() rather than 1:nrow(random.row): the original 1:... form
  # produced c(1, 0) for nrow = 0 and raised a subscript error instead of
  # returning an empty matrix.
  for(i in seq_len(nrow(random.row))) { # Loop over all added rows
    random.row[i, active.cols]<-runif(6, lb[active.cols], ub[active.cols]) # Random row within the bounds
  }
  return(random.row) # Returning these random rows
}
|
fe6979579af85859c2f94aebbc492815c4f00957 | 961f1a2de9dd6875fb6a86b08dfc917a78f0933d | /16_function.R | ce979db927b1bbab2be8a3f427a7a1b2d2a6b99f | [] | no_license | alex7777777/my_funktion | c9ba83405f4e37438565d559b749ee2980cfca16 | fb1741a910a975f5c2052b6de3ccfa1be39e4a4f | refs/heads/master | 2020-05-29T13:48:15.868690 | 2020-01-09T22:10:13 | 2020-01-09T22:10:13 | 189,173,623 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,815 | r | 16_function.R | #######################################################
# 2018-01-29 - by Alex Gorbach
# Datenbasis: DB Carsharing - Flinkster
# Buchungen Carsharing
# http://data.deutschebahn.com/dataset/data-flinkster
#######################################################
# Function: Substring DF function for the sankey input
# SUPPORT_OR_TOP=T: Default "support"
# Build the sankey input data.frame from an event-sequence data.frame.
# data_seq: first two columns are id/header columns, the remaining
# columns are the event sequence (NA-padded). Frequent sub-sequences are
# detected and one occurrence-count column per retained pattern is
# appended to the two header columns.
# SUPPORT_OR_TOP=T: Default "support" (keep patterns with relative
# frequency >= SUPPORT2er); otherwise keep the TOP_n_2er most frequent.
my_pattern_df_for_sankey <- function(data_seq,
SUPPORT2er=0.0025,
TOP_n_2er=80,
SUPPORT_OR_TOP=T) {
data_seq_head <- data_seq[,1:2]
data_seq <- data_seq[,-(1:2)]
# event-to-string helper: drop NAs and join remaining events with " "
events_to_string <- function(events){
events <- events[is.na(events) == F]
t <- paste(as.character(events))
return(paste(t, collapse = " "))
}
# whole sequence per row collapsed into a single space-separated string
data_string <- data.frame(events = as.character(apply(data_seq, 1, events_to_string)))
if(class(data_string$events) != "character") {
data_string$events <- as.character(data_string$events)
}
# sub-sequence ("2er") analysis: slide a window across the event columns
# and collect the sub-strings of all rows.
# NOTE(review): the window k:(k+2) spans THREE columns although the
# comments describe pairwise (2er) events -- presumably the columns
# alternate event/attribute; confirm against the input layout.
for(i in 0:((ncol(data_seq)/2)-2)){
k <- floor(i*2) + 1
print(k)
temp <- data_seq[,k:(k+2)]
temp <- na.omit(temp)
summary(temp)
sequenzes <- as.character(apply(temp, 1, events_to_string))
i_seq <- (sequenzes)
if (i == 0) seq <- i_seq else seq <- c(seq, i_seq)
}
# relative frequency of each sub-sequence, most frequent first
events_big <- (sort(table(seq), decreasing = T)/length(seq))
# print(paste0("Support of 2er events: ", SUPPORT2er))
# print(paste0("Number of 2er events: " , TOP_n_2er))
if(SUPPORT_OR_TOP) {
events_small <- events_big[events_big >= SUPPORT2er]
} else {
events_small <- events_big[1:TOP_n_2er]
}
# print(paste0("Check: Number of 2er events: " , length(events_small)))
events_names <- row.names(events_small)
unique_seq_2 <- sort(unique(events_names))
# event frequencies per row
library(stringr)
# initialise one zero-filled count column per retained pattern
data_frequenz_2 <- data.frame(t = 1:nrow(data_string))
for(i in 1:length(unique_seq_2)){
data_frequenz_2[,i] <- 0
names(data_frequenz_2)[i] <- as.character(unique_seq_2[i])
}
# count pattern occurrences in each row's full event string.
# NOTE(review): the pattern is interpreted by str_count() as a regex, so
# event names containing regex metacharacters would be miscounted.
for(i in 1:length(unique_seq_2)){
event_pattern <- as.character(unique_seq_2[i])
print(event_pattern)
count <- str_count(data_string$events, event_pattern)
data_frequenz_2[,i] <- count
}
df_to_smartdata <- cbind(data_seq_head, data_frequenz_2)
# drop count columns that are zero everywhere (always keep the two
# header columns, positions 1 and 2)
not_null_vector <- c(1,2)
for(i in 3:ncol(df_to_smartdata)) {
if(sum(df_to_smartdata[ , i]) > 0) {
not_null_vector <- c(not_null_vector, i)
}
}
df_to_smartdata <- df_to_smartdata[ , not_null_vector]
return(df_to_smartdata)
}
|
6028adcbb121a3fa35b60a5c4a2a8e526646961f | a8f95eddeadf28fac9258620682fe897ab0c979a | /cachematrix.R | 0dbe04fa56612c797e359828f0a7bbe22a090409 | [] | no_license | sebgriffiths/ProgrammingAssignment2 | 922d13b7a66549451ffbf6408458f619ad4b181e | 2c043adb7f32248773e0bea69a8b99a2863e58ce | refs/heads/master | 2021-01-18T00:55:24.367388 | 2015-06-20T02:57:02 | 2015-06-20T02:57:02 | 37,720,292 | 0 | 0 | null | 2015-06-19T12:23:29 | 2015-06-19T12:23:27 | null | UTF-8 | R | false | false | 1,526 | r | cachematrix.R |
## This function takes a numeric matrix as input and
## returns a special vector for use in cacheSolve function
## The special vector is basically a list of solve functions
## stored into variables which get saved into the parent
## environment (e.g. x <<- y)
## Constructor for a matrix wrapper that can cache its inverse.
## Returns a list of four closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setsolve(solve) -- store a computed inverse in the cache
##   getsolve()      -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = numeric()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the data invalidates any previously cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setsolve = function(solve) inv <<- solve,
    getsolve = function() inv
  )
}
## This function takes makeCacheMatrix as input and
## first checks the parent environment for an existing
## cached result, if found return the cached value,
## else compute the inverse (solve) function
## Then store the computed value into the parent environment
## for future calls
##
## An example of how to use this:
## z <- matrix(c(1,0,5,2,1,6,3,4,0),nrow = 3)
## z
## [,1] [,2] [,3]
## [1,] 1 2 3
## [2,] 0 1 4
## [3,] 5 6 0
## y <- makeCacheMatrix(z)
## cacheSolve(y)
## [,1] [,2] [,3]
## [1,] -24 18 5
## [2,] 20 -15 -4
## [3,] -5 4 1
##
## NB: this example was taken for verification of functionality
## from http://www.purplemath.com/modules/mtrxinvr2.htm
## Return the inverse of the special "cache matrix" object produced by
## makeCacheMatrix(). The first call computes the inverse with solve()
## and stores it in the object; subsequent calls return the stored value
## without recomputing. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
|
53a947e9755d7620b10e1888cba15fc647946fb8 | 8b61baaf434ac01887c7de451078d4d618db77e2 | /R/whichN.R | 8b7d998199e53742eb22e2d50759b4e2e5fd4973 | [] | no_license | drmjc/mjcbase | d5c6100b6f2586f179ad3fc0acb07e2f26f5f517 | 96f707d07c0a473f97fd70ff1ff8053f34fa6488 | refs/heads/master | 2020-05-29T19:36:53.961692 | 2017-01-17T10:54:00 | 2017-01-17T10:54:00 | 12,447,080 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,316 | r | whichN.R | #' whichN
#' whichN
#'
#' @description \code{which.minN}: Get the indices of the N smallest values in x
#'
#' @param x a vector of numbers
#' @param N how many values are to be returned
#' @param return.ties logical: if there are multiple Nth smallest values, return all the indices?
#'
#' @return \code{which.minN}: the indices of the \code{N} smallest values in \code{x}. possibly more than \code{N} if
#' \code{return.ties=TRUE} and the \code{N}th smallest value appears more than once.
#'
#' @author Mark Cowley, 24 May 2005
#'
#' @export
#' @rdname whichN
which.minN <- function(x, N=5, return.ties=FALSE) {
	# Index vector, labelled with the names of x when present.
	res <- seq_along(x)
	if( !is.null(names(x)) )
		names(res) <- names(x)
	# Reorder so the first N entries index the N smallest values.
	res <- res[ order(x, decreasing=FALSE, na.last=TRUE) ]
	if( return.ties ) {
		## remove the 1:N min elements from x, then see if there are any more elements == the Nth element.
		## If there are, increment N so that they will all be returned.
		## na.rm=TRUE so an NA in x cannot propagate into N (the sibling
		## which.maxN already guarded this; the original which.minN did not
		## and errored on res[1:NA]).
		N <- N + sum(x[ -res[1:N] ] == x[ res[N] ], na.rm=TRUE)
	}
	return( res[1:N] )
}
#' @description \code{minN}: Get the N smallest values from x
#'
#' @param sort if \code{FALSE}: return the N values in order of appearance;
#' else return the N values in increasing order.
#'
#' @return \code{minN}: return the N smallest values from \code{x} (possibly sorted), possibly
#' more than N if return.ties=TRUE and the Nth smallest value appears more than
#' once.
#'
#' @export
#' @rdname whichN
minN <- function(x, N=5, sort=TRUE, return.ties=FALSE) {
	if( sort ) {
		x <- sort(x, decreasing=FALSE, na.last=TRUE)
		if( return.ties )
			# After sorting, the Nth smallest value is x[N]. The original
			# compared against x[ x[N] ] -- indexing by *value* -- which
			# miscounted ties whenever x[N] happened to be a valid index
			# of a different element.
			N <- N + sum(x[-c(1:N)] == x[N], na.rm=TRUE)
		return(x[1:N])
	}
	else
		## although counterintuitive, the sort call in the following sorts the INDICES which
		## makes the returned values unsorted in reference to the values in x.
		return( x[ sort(which.minN(x, N, return.ties)) ] )
}
#' @description \code{which.maxN}: Get the indices of the N largest values in x
#'
#' @return \code{which.maxN}: return the indices of the N largest values in x. possibly more than
#' N if return.ties=TRUE and the Nth largest value appears more than once.
#'
#' @export
#' @rdname whichN
which.maxN <- function(x, N=5, return.ties=FALSE) {
	# Index vector, labelled with the names of x when present.
	idx <- seq_along(x)
	if( !is.null(names(x)) )
		names(idx) <- names(x)
	# Reorder so the first N entries index the N largest values.
	idx <- idx[ order(x, decreasing=TRUE, na.last=TRUE) ]
	if( return.ties ) {
		# Count the remaining elements equal in value to the Nth largest
		# and widen N so all of those tied indices are included too.
		extra <- sum(x[ -idx[1:N] ] == x[ idx[N] ], na.rm=TRUE)
		N <- N + extra
	}
	return( idx[1:N] )
}
#' @description \code{maxN}: Get the N largest values from x
#'
#' @return \code{maxN}: return the N largest values from x (possibly sorted), possibly more
#' than N if return.ties=TRUE and the Nth largest value appears more than once.
#' @export
#' @rdname whichN
maxN <- function(x, N=5, sort=TRUE, return.ties=FALSE) {
	if(sort) {
		x <- sort(x, decreasing=TRUE, na.last=TRUE)
		if( return.ties )
			# After the decreasing sort, the Nth largest value is x[N].
			# The original compared against x[ x[N] ] -- indexing by
			# *value* -- which miscounted ties whenever x[N] happened to
			# be a valid index of a different element.
			N <- N + sum(x[-c(1:N)] == x[N], na.rm=TRUE)
		return(x[1:N])
	}
	else
		## although counterintuitive, the sort call in the following sorts the INDICES which
		## makes the returned values unsorted in reference to the values in x.
		## (The original passed na.rm=TRUE to sort(), which sort.int()
		## rejects as an unused argument; sort() takes na.last, not na.rm.)
		return( x[ sort(which.maxN(x, N, return.ties)) ] )
}
|
061e74db1075f72aa866546c60de942117862b62 | f64a2fb6f51c98124012bd222f6ec6bcd663de1d | /R/sig_report.R | a933cf5b100c052208273e1982cf969e347623ae | [] | no_license | cran/sig | 36c3db4775febd21e1dee05033147f57244bf411 | 50104d019c5b3e0171a4822943fbae1ed77993f8 | refs/heads/master | 2022-04-29T14:19:36.590746 | 2022-04-21T11:50:02 | 2022-04-21T11:50:02 | 17,699,659 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,963 | r | sig_report.R | #' Summarise function complexity of a file or environment
#'
#' @param x A path to an R file or an environment.
#' @param too_many_args Upper bound for a sensible number of args.
#' @param too_many_lines Upper bound for a sensible number of lines.
#' @param length_metric Either \code{"deparse"} or \code{"body"}. See note.
#' @param ... Passed to \code{sig_report.environment}.
#' @return An object of class ``sigreport'' with the elements:
#' \itemize{
#' \item{n_vars}{Number of variables.}
#' \item{n_fns}{Number of functions.}
#' \item{n_args}{Table of the number of args of each function.}
#' \item{too_many_args}{Upper bound for a sensible number of args.}
#' \item{fns_with_many_args}{Names of each function with more args
#' than \code{too_many_args}.}
#' \item{n_lines}{Table of the number of lines of each function body.}
#' \item{too_many_lines}{Upper bound for a sensible number of lines.}
#' \item{long_fns}{Names of each function with more lines than
#' \code{too_many_lines}.}
#' }
#' @details
#' \code{sig_report} summarises the number of input arguments and the
#' number of lines of each function in an environment of file, and
#' identifies problem files, in order to help you refactor your code.
#' If the input is a path to an R file, then that file is sourced into
#' a new environment and and the report is generated from that.
#' The number of lines of code that a function takes up is subjective
#' in R; this function gives you a choice of \code{length(deparse(fn))} or
#' \code{length(body(fn))}, depending upon the value of \code{length_metric}.
#' The \code{body} metric tends to give smaller values than \code{deparse}, so
#' you may want to reduce the \code{too_many_lines} argument.
#' @examples
#' #Summarize function complexity in an environment
#' sig_report(pkg2env(stats))
#' #Summarize function complexity in a file
#' \donttest{
#' # From a file
#' tmp <- tempfile(fileext = ".R")
#' dump("scan", tmp)
#' sig_report(tmp)
#' }
#' # From an environment, adjusting the cutoff for reporting
#' sig_report(
#' baseenv(),
#' too_many_args = 20,
#' too_many_lines = 100
#' )
#' # Alternate length metric
#' sig_report(baseenv(), length_metric = "body")
#' @export
# S3 generic: dispatches on class(x) to sig_report.character (a file
# path) or sig_report.environment; other inputs fall through to
# sig_report.default.
sig_report <- function(x, ...)
{
  UseMethod("sig_report")
}
#' @rdname sig_report
#' @method sig_report default
#' @export
sig_report.default <- function(x, ...)
{
  # Fallback method: coerce the input to an environment (e.g. a list or
  # a search-path position) before continuing.
  x <- as.environment(x)
  # NOTE(review): calling NextMethod() from a .default method is unusual
  # -- dispatch does not restart on the new class of x; verify this
  # actually reaches sig_report.environment.
  NextMethod("sig_report")
}
#' @rdname sig_report
#' @method sig_report environment
#' @export
sig_report.environment <- function(x, too_many_args = 10, too_many_lines = 50,
  length_metric = c("deparse", "body"), ...)
{
  # Resolve the chosen length metric ("deparse" or "body") to a function.
  metric_fn <- match.fun(match.arg(length_metric))
  # All variables in the environment (including hidden ones), and the
  # subset of them that are functions.
  vars <- mget(ls(envir = x, all.names = TRUE), envir = x)
  fns <- Filter(is.function, vars)
  # Per-function complexity measures: number of formal arguments and
  # number of lines under the selected metric.
  arg_counts <- vapply(fns, function(f) length(formals(f)), integer(1))
  line_counts <- vapply(fns, function(f) length(metric_fn(f)), integer(1))
  report <- list(
    n_vars = length(vars),
    n_fns = length(fns),
    n_args = table(unname(arg_counts)),
    too_many_args = too_many_args,
    fns_with_many_args = names(fns)[arg_counts > too_many_args],
    n_lines = table(exponential_cut(line_counts)),
    too_many_lines = too_many_lines,
    long_fns = names(fns)[line_counts > too_many_lines]
  )
  structure(report, class = c("sigreport", "list"))
}
#' @rdname sig_report
#' @method sig_report character
#' @export
sig_report.character <- function(x, ...)
{
  ## Treat x as a path to an R file: source it into a fresh environment
  ## and report on that environment.
  sig_report(source_to_new_env(x), ...)
}
#' @rdname sig_report
#' @method print sigreport
#' @export
print.sigreport <- function(x, ...)
{
  # Header: variable and function counts, with singular/plural text
  # handled by ngettext().
  with(
    x,
    cat(
      "The environment contains",
      n_vars,
      ngettext(n_vars, "variable", "variables"),
      "of which",
      n_fns,
      ngettext(n_fns, "is a function.", "are functions."),
      "\nDistribution of the number of input arguments to the functions:"
    )
  )
  print(x$n_args, ...)
  # Functions exceeding the argument-count threshold, if any.
  if(length(x$fns_with_many_args) > 0)
  {
    cat(
      "These functions have more than",
      x$too_many_args,
      "input args:\n"
    )
    print(noquote(x$fns_with_many_args), ...)
  } else
  {
    cat(
      "There are no functions with more than",
      x$too_many_args,
      "input args.\n"
    )
  }
  # Distribution of function lengths, then functions exceeding the
  # line-count threshold, if any.
  cat("Distribution of the number of lines of the functions:")
  print(x$n_lines, ...)
  if(length(x$long_fns) > 0)
  {
    cat(
      "These functions have more than",
      x$too_many_lines,
      "lines:\n"
    )
    print(noquote(x$long_fns), ...)
  } else
  {
    cat(
      "There are no functions with more than",
      x$too_many_lines,
      "lines.\n"
    )
  }
}
|
d49be507fb9ea4610dbe60ef87639943e9a058a9 | 4549dd0a02b0d432de9ec78ba1b4e5e779b07e29 | /plot3.R | 3e3316f53ffdbc4aa631ca72a650b43968f10fe1 | [] | no_license | vr1090/ExData_Plotting1 | 4c060fe5471115b32095107f566f674572a6ee5a | ae9e382271455c86917272b873f712bc6f914c1b | refs/heads/master | 2020-12-25T19:58:48.313616 | 2016-05-06T06:38:50 | 2016-05-06T06:38:50 | 56,213,373 | 0 | 0 | null | 2016-04-14T06:37:36 | 2016-04-14T06:37:36 | null | UTF-8 | R | false | false | 496 | r | plot3.R | #loading rhe data
# Load the pre-cleaned power consumption data; expects an object named
# `dataClean` with columns dateConvert and Sub_metering_1..3.
filename <- "dataClean.rdata"
load(filename)
# Plot 3: the three energy sub-metering series over time, one colour
# each, written to plot3.png.
png(filename="plot3.png")
with(dataClean, plot(dateConvert,Sub_metering_1, type="S", ylab="Energy sub metering",xlab=""))
with(dataClean, points(dateConvert,Sub_metering_2, col="red", type="S"))
with(dataClean, points(dateConvert,Sub_metering_3, col="blue", type="S"))
legend("topright",col=c("black","red","blue"),lty=1,lwd=3,legend=c("energy sub metering 1","energy sub metering 2","energy sub metering 3"))
# Close the device so the PNG file is flushed to disk.
dev.off()
c1c0037d6db82da82fff59e660a264b4087c1122 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Statistics_For_Psychology_by_Arthur_Aron_Elliot_J_Coups_And_Elaine_N_Aron/CH12/EX12.3b/Ex12_3b.R | be9f42076d5f3d0d99ed3f76e2d43abb5f1ed48b | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 1,090 | r | Ex12_3b.R | # Page no. : 504 - 505
# The least squared error principle: compare four candidate prediction
# rules by their sum of squared errors on the same six observations.
# Rule 1 ==> y_cap <- 8 - (.18)*X
# Rule 2 ==> y_cap <- 4 + (0)*X
# Rule 3 ==> y_cap <- -2.5 + (1)*X
# Rule 4 ==> y_cap <- -3 + (1)*X
Hours_slept <- c(5, 6, 6, 7, 8, 10)
Actual_mood <- c(2, 2, 3, 4, 7, 6)
# Rule 1
y_cap1 <- 8 - (.18) * Hours_slept
error1 <- Actual_mood - y_cap1
error1_sq <- round(error1 ** 2, 2)
# Rule 2
y_cap2 <- 4 + (0) * Hours_slept
error2 <- Actual_mood - y_cap2
error2_sq <- error2 ** 2
# Rule 3
y_cap3 <- -2.5 + (1) * Hours_slept
error3 <- Actual_mood - y_cap3
error3_sq <- error3 ** 2
# Rule 4
y_cap4 <- -3 + (1) * Hours_slept
error4 <- Actual_mood - y_cap4
error4_sq <- error4 ** 2
# Working table (only rules 1 and 4 are displayed; rules 2 and 3 are
# computed above and summarised below).
DF <- data.frame(Hours_slept, Actual_mood, y_cap1, error1, error1_sq, y_cap4, error4, error4_sq)
View(DF)
# Sum of squared errors for each rule; the smallest sum wins under the
# least-squares criterion.
s1 <- sum(DF$error1_sq)
cat("Rule 1 sum of squared errors is", s1)
s2 <- sum(error2_sq)
cat("Rule 2 sum of squared errors is", s2)
s3 <- sum(error3_sq)
cat("Rule 3 sum of squared errors is", s3)
s4 <- sum(DF$error4_sq)
cat("Rule 4 sum of squared errors is", s4)
6dc5b7ff7e0dfc3d9fe7f003ac3ff38318d092ef | fa7a30285e4870f68f04a8d1defd94ec2c9199ab | /man/km.mrl.Rd | d56f6e1b6668076d914b0333596db6547dc4d31c | [] | no_license | cran/locfit | 1b499f021a7f8853b1a077823af60c31e5d42f4c | 71cf38ab0427d5173886e91f2e1367bec0a17e5f | refs/heads/master | 2023-06-23T08:07:59.689473 | 2023-06-11T17:37:26 | 2023-06-11T17:37:26 | 17,697,138 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,687 | rd | km.mrl.Rd | \name{km.mrl}
\alias{km.mrl}
\title{
Mean Residual Life using Kaplan-Meier estimate
}
\usage{
km.mrl(times, cens)
}
\description{
This function computes the mean residual life for censored data
using the Kaplan-Meier estimate of the survival function. If
\eqn{S(t)} is the K-M estimate, the MRL for a censored observation
is computed as \eqn{(\int_t^{\infty} S(u)du)/S(t)}. We take
\eqn{S(t)=0} when \eqn{t} is greater than the largest observation,
regardless of whether that observation was censored.
When there are ties between censored and uncensored observations,
for definiteness our ordering places the censored observations
before uncensored.
This function is used by \code{\link{locfit.censor}} to compute
censored regression estimates.
}
\arguments{
\item{times}{
Observed survival times.
}
\item{cens}{
Logical variable indicating censoring. The coding is \code{1}
or \code{TRUE} for censored; \code{0} or \code{FALSE} for uncensored.
}
}
\value{
A vector of the estimated mean residual life. For uncensored observations,
the corresponding estimate is 0.
}
\examples{
# censored regression using the Kaplan-Meier estimate.
data(heart, package="locfit")
fit <- locfit.censor(log10(surv+0.5)~age, cens=cens, data=heart, km=TRUE)
plotbyfactor(heart$age, 0.5+heart$surv, heart$cens, ylim=c(0.5,16000), log="y")
lines(fit, tr=function(x)10^x)
}
\references{
Buckley, J. and James, I. (1979). Linear Regression with censored data.
Biometrika 66, 429-436.
Loader, C. (1999). Local Regression and Likelihood. Springer, NY (Section 7.2).
}
\seealso{
\code{\link{locfit.censor}}
}
%\keyword{locfit}
\keyword{smooth}
% Converted by Sd2Rd version 0.2-a5.
|
501b18bc333b44f86fbca1e5e441bf976a20c011 | d696c4e5321295cebc0c1eb67efc8f3b89d2d0e4 | /man/reverse_yes_no.Rd | 2dc6dacb09d4ae0a067db1b55141082a02ce8084 | [] | no_license | uk-gov-mirror/jncc.article12 | e2c12cff05837c12cef66d1c02c23fe0e6dd1780 | 59d75bf52bdce23864d5ca219a7409c084680381 | refs/heads/master | 2022-03-09T04:41:14.157734 | 2019-10-10T13:50:28 | 2019-10-10T13:50:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 739 | rd | reverse_yes_no.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recode.R
\name{reverse_yes_no}
\alias{reverse_yes_no}
\title{Reverse yes and no}
\usage{
reverse_yes_no(yes_no_blank)
}
\arguments{
\item{yes_no_blank}{character, yes, no or NA_character_}
}
\value{
character, yes and no are swapped around, blank converted to yes
}
\description{
This function reverses yes and no because species categories 5.11, 6.16
and 11.7 in the data capture spreadsheet ask for change (yes or no),
whereas in the reporting tool the same categories ask for no change (yes or no)
}
\details{
Blanks are converted to yes (no change) in the reporting tool as the
reporting tool only stores two states yes or no
}
\examples{
reverse_yes_no(c("Yes", "No", NA))
}
|
0cbf84f3a6877e3687ce3d9ece19814a5df88472 | d9724585527cd1185f57caf0d1c137e1eef77f6d | /lab_3/continuous.R | 0b7dbdad4d2e81adb1539df79cf0b8e54f376747 | [] | no_license | NoahMcCullah/MATH32 | 55fbc3361b73a8af857324d4e88d91aaca6b95c7 | 561fe6826b54f87a62656d040a58dc61366d0304 | refs/heads/master | 2022-12-11T08:15:17.644382 | 2020-08-27T03:36:15 | 2020-08-27T03:36:15 | 290,666,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 100 | r | continuous.R | rm(list=ls(all=TRUE))
# Draw 10,000 samples from the continuous Uniform(-5, 10) distribution.
val = runif(n = 10000, min = -5, max = 10)
# Sample mean -- should be close to the theoretical value printed below.
print(mean(val))
# Theoretical mean of Uniform(a, b) is (a + b) / 2 = 2.5.
print((-5+10)/2)
bc2dd3144f36db3d72a2accd74865b54dbb1c646 | d4e152a8e34ae2602a7ba03d488f59e966508b99 | /man/gbmFns.Rd | f5d2a226d46f51a464eadc9c87a1b243d4cfc83f | [] | no_license | roywilsonLinc/gbmFns | 45a6315fb16d5f5e108ff08194d5b3f5e5402a00 | c0ebae0118bec77afb5aa3bbfdbdc17bf35b6c15 | refs/heads/master | 2021-01-19T08:58:09.816469 | 2016-01-14T22:19:49 | 2016-01-14T22:19:49 | 44,174,100 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 206 | rd | gbmFns.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gbmFns-package.r
\docType{package}
\name{gbmFns}
\alias{gbmFns}
\alias{gbmFns-package}
\title{gbmFns.}
\description{
gbmFns.
}
|
e6366eef2f2117c000ed25a15111c04edee88198 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nonmemica/examples/definitions.Rd.R | 345e9f220620417baf4787b397f977cfee7b40e8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 284 | r | definitions.Rd.R | library(nonmemica)
### Name: definitions
### Title: Harvest Model Item Definitions
### Aliases: definitions definitions.definitions definitions.character
### ** Examples
# magrittr provides the %>% pipe used below.
library(magrittr)
# Point nonmemica's 'project' option at the example model shipped with the package.
options(project = system.file('project/model',package='nonmemica'))
# Harvest the item definitions for model 1001.
1001 %>% definitions
|
8eaab47ff620eca3813c748fc7f20843e05034d2 | 4d0c894a4a648b1219767a63523aad113b93047a | /Completos.R | e9c0198e19dfc1b0c45315f5580731b2f3ce08a3 | [] | no_license | AdVaEs/Software-Actuarial-lll | 68ef06baee544af82d75525b979d418cdc1223b5 | d43d279d8988b2ef090b0152a8febae5161b5702 | refs/heads/master | 2021-09-14T23:21:43.253659 | 2018-05-22T04:41:45 | 2018-05-22T04:41:45 | 119,708,906 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 592 | r | Completos.R |
direct = "C:/Users/ave-2/Documents/specdata"
completos <- function(direc = direct, id = 1:332)
{
musubi<- numeric()
for(s in id)
{
x <- paste(direc,"/",formatC(s,width = 3,flag="0"),".csv", sep = "")
y <-read.csv(x, header = TRUE)
suma <- 0
for (a in 1:nrow(y))
{
r <- complete.cases(y[a,])
if (r == TRUE)
{
suma <- suma + 1
}
}
musubi <- c(musubi,suma)
}
ID = id
tabla <- data.frame(id=ID, nobs= musubi)
print(tabla)
}
completos(id= 1:5) |
bfe596bba7bf91473fa2aa0252411c40125b89b6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/spatial.tools/examples/create_blank_raster.Rd.R | 5498686d3ec3c1e0c2671247e823c73e3c59b72f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 512 | r | create_blank_raster.Rd.R | library(spatial.tools)
### Name: create_blank_raster
### Title: Create an empty raster and header.
### Aliases: create_blank_raster
### ** Examples
## Not run:
##D
##D tahoe_highrez <- brick(system.file("external/tahoe_highrez.tif", package="spatial.tools"))
##D test_blank_file <- create_blank_raster(reference_raster=tahoe_highrez)
##D file.info(test_blank_file)
##D test_blank_raster <- create_blank_raster(reference_raster=tahoe_highrez,return_filename=FALSE)
##D test_blank_raster
## End(Not run)
|
4c8ee589f6705887dd117dc8f0e90131c1d52cfd | 9a9fb52073dd66199f7b3f4f6902ef0890a428a6 | /HR_DATA_Shiny/app.R | 56914815514854483faec6b318c3298b3e3dea6f | [] | no_license | mvsell/HR_Data_Shiny1 | 02378b6ef261c7746594ed19bea67163beb44bb1 | d487e278d53cc077e348790db4adeb6f647dd30c | refs/heads/master | 2021-08-15T20:45:27.076647 | 2017-11-18T07:59:01 | 2017-11-18T07:59:01 | 111,186,715 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,688 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(stats)
library(caret)
library(lattice)
library(ggplot2)
library(gbm)
library(survival)
library(plyr)
library(e1071)
# library(stats)
# library(mlbench)
# library(gbm)
# library(rpart)
# library(ggfortify)
# library(plotly)
# library(leaflet)
# hr<-read.csv("./Data/HR_comma_sep_ed.csv")
# Pre-saved HR data set; `left` is the attrition flag being predicted.
hr<-readRDS("./Data/hr.RDS")
# Make sure computations can be reproducible.
set.seed(123)
# Partition data- this will increase performance not to take the entire data
inTrainSet <- createDataPartition(y=hr$left, p=0.5, list=FALSE)
training <- hr[inTrainSet,]
# Train learning model
# gbm classifier on five numeric features, 5-fold CV repeated 3 times.
# NOTE: training runs at app start-up, so launching the app is slow.
fitControl<-trainControl(method = "repeatedcv", number = 5, repeats = 3)
fit <- train(as.factor(left)~satisfaction_level+number_project+time_spend_company+average_montly_hours+last_evaluation,
             data = training, trControl=fitControl, method="gbm")
# Define UI: one input per model feature plus a Submit button.
# NOTE(review): input ids must match the feature names the model was
# trained on -- "average_montly_hours" is intentionally misspelled to
# match the data set; do not "fix" it here without retraining.
ui <- fluidPage(
  titlePanel("Predict which employee is likely to leave the Company next"),
  sidebarLayout(
    sidebarPanel(
      textInput("satisfaction_level", "Satisfaction Level", 0.11),
      sliderInput("number_project","Number of Projects", min = 2, max = 10, value = 4),
      sliderInput("time_spend_company","Time Spent at the Company", min = 1, max = 15, value = 3),
      textInput("average_montly_hours", "Average Monthly Hours Worked", 160),
      textInput("last_evaluation", "Last Evaluation", 0.60),
      # submitButton defers all reactive updates until Submit is pressed.
      submitButton("Submit")
    ),
    mainPanel(
      h4("Predicted result:"),
      textOutput("out")
    )
  )
)
# Define server logic: translate the UI inputs into a one-row feature
# data frame and report the model's class prediction.
server <- function(input, output) {
  # Reactive expression called whenever inputs change.
  model1pred<-reactive({
    # One-row data frame whose column names exactly match the training
    # features (including the "average_montly_hours" misspelling).
    new_data=as.data.frame(matrix(0, nrow=1, ncol=5))
    colnames(new_data)=c("satisfaction_level", "last_evaluation", "number_project",
                         "average_montly_hours", "time_spend_company")
    new_data[1,1]=as.numeric(input$satisfaction_level)
    new_data[1,2]=as.numeric(input$last_evaluation)
    new_data[1,3]=as.numeric(input$number_project)
    new_data[1,4]=as.numeric(input$average_montly_hours)
    new_data[1,5]=as.numeric(input$time_spend_company)
    # Class prediction from the model trained at start-up.
    predict(fit, newdata=new_data)
  })
  # Fill-in the tabs with output from caret; "1" means the employee is
  # predicted to leave.
  output$out <- renderPrint({
    if(model1pred()=="1") print("Leaves the Company")
    else print("Stays in the Company")
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
e089a53932bb61d74cf1774b173fffeb3ce4704c | 489720630fee2fc856ff099f18c96acafb3077b3 | /RebalancePortfolio.R | 3cc85c7822313b32feff73dd0f3d4e65c71ad2c8 | [] | no_license | Cren12/Models | a72715bfb7fbd7182eb8c829361466c37ab239cf | 94f295fa36d82d356ffdae0ea2bf3ae337e9ee10 | refs/heads/master | 2021-06-02T13:05:55.560828 | 2021-01-15T11:56:55 | 2021-01-15T11:56:55 | 129,648,321 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 8,667 | r | RebalancePortfolio.R | packages <- c('blotter',
'foreach',
'doFuture')
# +------------------------------------------------------------------
# | library and require load and attach add-on packages. Download and
# | install packages from CRAN-like repositories.
# +------------------------------------------------------------------
# Install-if-missing loader for the packages listed above.
# NOTE(review): require() inside if() both tests for and attaches the
# package, so the library() call in the else branch is redundant (but
# harmless); after a fresh install the library() in the if branch does
# the attaching.
lapply(X = packages,
       FUN = function(package){
         if (!require(package = package,
                      character.only = TRUE))
         {
           install.packages(pkgs = package,
                            repos = "https://cloud.r-project.org")
           library(package = package,
                   character.only = TRUE)
         } else {
           library(package = package,
                   character.only = TRUE)
         }
       })
# +------------------------------------------------------------------
# | Sys.setenv sets environment variables.
# +------------------------------------------------------------------
# Use UTC so xts/blotter timestamps are unambiguous across sessions.
Sys.setenv(TZ = 'UTC')
# +------------------------------------------------------------------
# | source() causes R to accept its input from the named file or URL
# | or connection or expressions directly.
# +------------------------------------------------------------------
# source()
# +------------------------------------------------------------------
# +------------------------------------------------------------------
# | RebalancePortfolio: walk the price series bar by bar and, at each
# | rebalance.on period boundary, trade a single instrument toward the
# | dollar target implied by theor.weights, booking transactions,
# | dividends and daily P&L into the blotter portfolio/account objects.
# |
# |   acct.name     - name of an existing blotter account.
# |   portf.name    - name of an existing blotter portfolio.
# |   prices        - xts price series of the traded instrument.
# |   width         - number of leading bars to skip before trading.
# |   dividends     - optional xts of per-share cash dividends.
# |   theor.weights - xts of target weights (fraction of account equity).
# |   rebalance.on  - period boundary for rebalancing ('months', ...).
# |   TxnFees       - fee applied to each transaction.
# |
# | Side effects: mutates the blotter objects held in .blotter, draws
# | charts on the active graphics device, and returns the value of
# | table.AnnualizedReturns() for the account's per-instrument returns.
# |
# | NOTE(review): the body reads a free variable `symbol` that is not a
# | parameter of this function -- it must be defined in the calling
# | environment; confirm this is intentional.
# +------------------------------------------------------------------
RebalancePortfolio <- function(
  acct.name,
  portf.name,
  prices,
  width,
  dividends = NULL,
  theor.weights,
  rebalance.on = 'months',
  TxnFees = 0)
{
  # +------------------------------------------------------------------
  # | Extract index values of a given xts object corresponding to the
  # | last observations given a period specified by on. A numeric
  # | vector of endpoints beginning with 0 and ending with the a value
  # | equal to the length of the x argument is returned.
  # +------------------------------------------------------------------
  end.of.periods <- index(prices)[endpoints(x = prices,
                                            on = rebalance.on)]
  # Dividend payment dates (NULL when no dividend series was supplied).
  if (!is.null(dividends))
  {
    dividend.dates <- index(dividends)
  } else {
    dividend.dates <- NULL
  }
  # Main simulation loop: one iteration per bar, starting after `width`
  # warm-up bars. %do% runs sequentially; the loop's value is discarded.
  foreach(i = width:nrow(prices)) %do%
  {
    today <- as.Date(index(prices[i, ]))
    # Redraw the chart of prices up to the current bar on every pass.
    chartSeries(prices[paste0('/', today)],
                name = '')
    # +------------------------------------------------------------------
    # | Get a portfolio object consisting of a nested list.
    # +------------------------------------------------------------------
    portfolio <- blotter::getPortfolio(portf.name)
    # +------------------------------------------------------------------
    # | Retrieves an account object from the .blotter environment.
    # +------------------------------------------------------------------
    account <- getAccount(acct.name)
    # Account equity as of today; fall back to the first recorded value
    # when today has no summary row yet (e.g. on the first iteration).
    equity <- as.numeric(account$summary$End.Eq[as.character(today), ])
    if (length(equity) == 0)
    {
      equity <- as.numeric(account$summary$End.Eq[1, ])
    }
    # Available liquidity = equity minus gross position value; defaults
    # to full equity before any summary rows exist.
    avail.liq <- as.numeric(account$summary$End.Eq[as.character(today), ]) - as.numeric(portfolio$summary$Gross.Value[as.character(today), ])
    if (length(avail.liq) == 0)
    {
      avail.liq <- equity
    }
    # Rebalance only on period boundaries, and skip dividend dates so a
    # dividend booking and a trade never share the same day.
    if (today %in% end.of.periods && !(today %in% dividend.dates))
    {
      # Dollar target = lagged theoretical weight times current equity
      # (0 when the lagged weight is not available).
      theor.value <- ifelse(is.na(lag(theor.weights)[today]), 0, lag(theor.weights)[today] * equity)
      pos.qty <- max(c(0, as.numeric(portfolio$symbols[[symbol]]$posPL$Pos.Qty[as.character(today), ])))
      pos.avg.cost <- max(c(0, as.numeric(portfolio$symbols[[symbol]]$posPL$Pos.Avg.Cost[as.character(today), ])))
      print(paste(today, 'Current position:', pos.qty, '@', pos.avg.cost))
      pos.value <- pos.qty * pos.avg.cost
      # Trade the difference between target and current position value,
      # capping buys at the available liquidity.
      to.trade.value <- theor.value - pos.value
      to.trade.value <- ifelse(to.trade.value > 0, min(c(avail.liq, to.trade.value)), to.trade.value)
      # NOTE(review): both branches of this ifelse are identical (floor
      # in each) -- presumably a leftover from distinct buy/sell
      # rounding rules; confirm the intended rounding for sells.
      to.trade.shares <- ifelse(to.trade.value >= 0, floor(to.trade.value / prices[today, ]), floor(to.trade.value / prices[today, ]))
      if (as.numeric(to.trade.shares) != 0)
      {
        # +------------------------------------------------------------------
        # | When a trade or adjustment is made to the Portfolio, the addTxn
        # | function calculates the value and average cost of the
        # | transaction, the change in position, the resulting positions
        # | average cost, and any realized profit or loss (net of fees) from
        # | the transaction. Then it stores the transaction and calculations
        # | in the Portfolio object.
        # +------------------------------------------------------------------
        addTxn(Portfolio = portf.name,
               Symbol = symbol,
               TxnDate = today,
               TxnQty = as.numeric(to.trade.shares),
               TxnPrice = as.numeric(prices[today, ]),
               TxnFees = TxnFees)
      }
    }
    # Overlay the theoretical weights and, when a position exists, its
    # average cost on the chart drawn above.
    print(addTA(ta = theor.weights))
    try(expr = print(addLines(h = pos.avg.cost, col = 'gray50')),
        silent = TRUE)
    if (!is.null(dividends))
    {
      div.per.share <- as.numeric(dividends[today])
      if (!is.na(div.per.share) && length(div.per.share) > 0)
      {
        # +------------------------------------------------------------------
        # | Adding a cash dividend does not affect position quantity, like a
        # | split would.
        # +------------------------------------------------------------------
        # The '.div' suffix is stripped so the dividend is booked against
        # the price symbol -- assumes `symbol` carries that suffix when a
        # dividend series is used; confirm with the caller.
        addDiv(Portfolio = portf.name,
               Symbol = gsub(pattern = '.div',
                             replacement = '',
                             x = symbol),
               TxnDate = today,
               DivPerShare = div.per.share)
      }
    }
    # +------------------------------------------------------------------
    # | The updatePortf function goes through each symbol and calculates
    # | the PL for each period prices are available.
    # +------------------------------------------------------------------
    updatePortf(Portfolio = portf.name)
    # +------------------------------------------------------------------
    # | Constructs the equity account calculations from the portfolio
    # | data and corresponding close prices.
    # +------------------------------------------------------------------
    updateAcct(name = acct.name)
    # +------------------------------------------------------------------
    # | Calculates End.Eq and Net.Performance.
    # +------------------------------------------------------------------
    updateEndEq(Account = acct.name)
  }
  # NOTE(review): `account` and `portfolio` below are the bindings left
  # over from the last foreach iteration -- confirm they survive the
  # %do% evaluation environment as intended.
  # NOTE(review): `index(prices) + width` shifts every timestamp by
  # `width` days before first() is taken -- possibly intended as
  # `index(prices)[width]`; confirm the subsetting start date.
  par(mfrow = c(2, 2))
  print(plot(x = cumsum(account$summary$Realized.PL[paste0(first(index(prices) + width),'/'), ]),
             main = 'Realized PL'))
  print(plot(x = cumsum(account$summary$Unrealized.PL[paste0(first(index(prices) + width),'/'), ]),
             main = 'Unrealized PL'))
  print(plot(x = (account$summary$End.Eq - portfolio$summary$Gross.Value)[paste0(first(index(prices) + width),'/'), ],
             main = 'Liquidity'))
  print(plot(x = account$summary$End.Eq[paste0(first(index(prices) + width),'/'), ],
             main = 'End Eq'))
  par(mfrow = c(1, 1))
  # +------------------------------------------------------------------
  # | This function (for now) calculates return on initial equity for
  # | each instrument in the portfolio or portfolios that make up an
  # | account. These columns will be additive to return on capital of
  # | each portfolio, or of the entire account.
  # +------------------------------------------------------------------
  R <- PortfReturns(Account = acct.name)
  R.core <- coredata(x = R)
  R.core <- xts(x = R.core,
                order.by = index(R))
  # +------------------------------------------------------------------
  # | For a set of returns, create a wealth index chart, bars for
  # | per-period performance, and underwater chart for drawdown.
  # +------------------------------------------------------------------
  charts.PerformanceSummary(R = R.core,
                            geometric = TRUE,
                            main = paste0(portf.name, ' Portfolio'))
  # +------------------------------------------------------------------
  # | Table of Annualized Return, Annualized Std Dev, and Annualized
  # | Sharpe. This table is the function's return value.
  # +------------------------------------------------------------------
  table.AnnualizedReturns(R = R.core)
}
|
9316ffd2e5ebc2bba2c4c25fd945f5cfb2668e44 | 1f9a964c779f440be2b88fbe7226ceef8cb5b335 | /man/Integration-Algorithm-SIMPLS.Rd | 8482302d84cfb56066d34f651670cd750cf1af04 | [] | no_license | phycomlab/RISC | e7b1bc07fdc22d2656bcf62ab08142d539b1e545 | 94fcf7f6e82f7089285a8086b4e001202420c48d | refs/heads/master | 2023-06-08T18:11:16.582266 | 2021-06-27T17:53:39 | 2021-06-27T17:53:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,316 | rd | Integration-Algorithm-SIMPLS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Integrating.R
\name{SIMPLS}
\alias{SIMPLS}
\title{Integration Algorithm SIMPLS}
\usage{
SIMPLS(X, Y, npcs = 10, seed = 123)
}
\arguments{
\item{X}{The reference matrix, row for genes and column for cells.}
\item{Y}{The target matrix, row for genes and column for cells.}
\item{npcs}{The number of the PCs used for data integration.}
\item{seed}{The random seed to keep consistent result.}
}
\description{
The partial least square (PLS) with SIMPLS algorithm is an extension of the
multiple linear regression model and considered as bilinear factor models.
Instead of embedding the reference and target matrices into a hyperplane
of maximum variance, the PLS utilizes a linear regression to project the
reference and target matrices into a new place. The SIMPLS algorithm provides
the regularization procedure for PLS. The matrices need to be centered before
SIMPLS integration.
}
\examples{
## SIMPLS with two matrices
mat0 = as.matrix(raw.mat[[1]])
coldata0 = as.data.frame(raw.mat[[2]])
coldata1 = coldata0[coldata0$Batch0 == 'Batch1',]
coldata2 = coldata0[coldata0$Batch0 == 'Batch4',]
mat1 = mat0[,rownames(coldata1)]
mat2 = mat0[,rownames(coldata2)]
SIM0 = SIMPLS(mat1, mat2, npcs = 4)
}
\references{
De-Jong et al. (1993)
}
|
c7f3eaaa88f0468e3dfe5ae7c3fbb4fb9a0fe97d | 7a9a38029df5efbf9eff3703bdd3ec18c66083ee | /tasks2/corr1.R | 93716b9b4412796e957450451fb0435f0ab877e7 | [] | no_license | jaspreetkaur96/Winter-Workshop-ML-DS- | 9ad8d712f5955a920966297fef0bce39630c9142 | 6a07f0c3f898fb1c3574c575c1cf405fdec041be | refs/heads/master | 2020-03-06T21:38:20.703564 | 2018-03-28T04:19:55 | 2018-03-28T04:19:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 500 | r | corr1.R | data <- read.csv("@SrBachchan.csv")
# Shorthand handles for the numeric tweet features under analysis.
# (Kept as individual top-level bindings for backward compatibility
# with any later code that refers to a..g directly.)
a = data$length
b = data$no_hashtags
c = data$no_men
d = data$likes
e = data$retweets
f = data$sentiment_polarity
g = data$time

# Print the Pearson correlation of every unordered pair of features,
# in the same order as the original hand-written list:
# (a,b), (a,c), ..., (a,g), (b,c), ..., (f,g) -- 21 pairs in total.
# Replaces 21 copy-pasted print(cor(...)) calls with a single loop.
features = list(a, b, c, d, e, f, g)
for (i in seq_len(length(features) - 1)) {
  for (j in seq(i + 1, length(features))) {
    print(cor(features[[i]], features[[j]]))
  }
}
|
4d36f4fbd5d8bc8abe3c2f844d8cdf996742de82 | fccf1f1b6dfb2e72053ec5b59e785132d414095b | /man/simulate_rel_change.Rd | d33466df64ec7db0903b87be3421aaae814fcf55 | [
"Apache-2.0"
] | permissive | ssokolen/metcourse | a0a4e4f9f8d571ab4e005427d6916fcbbde62617 | b4f2ba62b9192f12896bfda9117be0d5ae2c8526 | refs/heads/master | 2021-04-12T05:04:26.404699 | 2015-11-15T21:23:14 | 2015-11-15T21:23:14 | 40,250,953 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,028 | rd | simulate_rel_change.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/simulation.R
\name{simulate_rel_change}
\alias{simulate_rel_change}
\title{Simulate concentration changes}
\usage{
simulate_rel_change(n, par1, par2 = NULL, p = 0.5, con = NULL)
}
\arguments{
\item{n}{Number of samples to draw.}
\item{par1}{Parameters (alpha, beta) for first distribution.}
\item{par2}{Optional parameters (alpha, beta) for second distribution.}
\item{p}{Proportion of samples to draw using \code{par1} vs. \code{par2}.
Note, \code{p} is ignored if \code{par2} is not specified.}
\item{con}{Optional vector of lower and upper constraints on generated
samples.}
}
\value{
A vector of relative concentration values.
}
\description{
Simulates relative (fractional) changes in metabolite concentrations using
a mixture of beta distributions.
}
\examples{
out <- simulate_rel_change(10000, c(2, 5), c(0.5, 0.5), 0.7, c(0.1, 1))
hist(out, 20, probability = TRUE,
main = '', xlab = 'Fractional change in concentration')
}
|
65c051cb867ca42d9bcc0b7b5360e44464fc75e0 | 59796a108849297fa2ce1cbf9aade2097694c354 | /rgsvn/gtd/src/rgenetics/shortread_qc.r | d5a99c5478c5711ec22c3a6ce3dc1d4235309ce6 | [] | no_license | ro6ert/Channing-Galaxy-Tools | 5548ebde414925e616a708bdd5dda49053a3cd64 | ab49b58fac0246f313cc06ceefc98102c3302f25 | refs/heads/master | 2020-06-15T06:56:29.030908 | 2012-03-12T14:48:10 | 2012-03-12T14:48:10 | 3,586,157 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 105 | r | shortread_qc.r | library('ShortRead')
ca = commandArgs(T)
qa_data = qa('/tmp', ca[1], ca[2])
report(qa_data, dest=ca[3])
|
ee1df9cb63ae873f39aa84fca70d92f388c8a7b9 | 273e68a7608d20da07288085583983979146fde7 | /man/newMRexperiment.Rd | d3bbb832c45a6ae6f4362d4303f62d48159f4d6c | [] | no_license | HCBravoLab/metagenomeSeq | cd3a36e82a508dc3ac44f37d20b713f9ea4c7abc | df8a28214fa9cb25870dee0e5cc909c160ce8da2 | refs/heads/master | 2023-04-08T19:09:04.362067 | 2020-06-10T13:41:35 | 2020-06-10T13:41:35 | 8,764,233 | 48 | 23 | null | 2023-03-27T18:39:38 | 2013-03-13T23:55:45 | R | UTF-8 | R | false | true | 1,320 | rd | newMRexperiment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allClasses.R
\name{newMRexperiment}
\alias{newMRexperiment}
\title{Create a MRexperiment object}
\usage{
newMRexperiment(
counts,
phenoData = NULL,
featureData = NULL,
libSize = NULL,
normFactors = NULL
)
}
\arguments{
\item{counts}{A matrix or data frame of count data. The count data is
representative of the number of reads annotated for a feature (be it gene,
OTU, species, etc). Rows should correspond to features and columns to
samples.}
\item{phenoData}{An AnnotatedDataFrame with pertinent sample information.}
\item{featureData}{An AnnotatedDataFrame with pertinent feature information.}
\item{libSize}{libSize, library size, is the total number of reads for a
particular sample.}
\item{normFactors}{normFactors, the normalization factors used in either the
model or as scaling factors of sample counts for each particular sample.}
}
\value{
an object of class MRexperiment
}
\description{
This function creates a MRexperiment object from a matrix or data frame of
count data.
}
\details{
See \code{\link{MRexperiment-class}} and \code{eSet} (from the Biobase
package) for the meaning of the various slots.
}
\examples{
cnts = matrix(abs(rnorm(1000)),nc=10)
obj <- newMRexperiment(cnts)
}
\author{
Joseph N Paulson
}
|
6e155937d5aa480f9505b048e8aa16d3e782f03b | ee0689132c92cf0ea3e82c65b20f85a2d6127bb8 | /35-tidyverse/26c-tidyr-DSR1.R | 3ce0fa2f1c166b2804de3019c664e17b81d77309 | [] | no_license | DUanalytics/rAnalytics | f98d34d324e1611c8c0924fbd499a5fdac0e0911 | 07242250a702631c0d6a31d3ad8568daf9256099 | refs/heads/master | 2023-08-08T14:48:13.210501 | 2023-07-30T12:27:26 | 2023-07-30T12:27:26 | 201,704,509 | 203 | 29 | null | null | null | null | UTF-8 | R | false | false | 908 | r | 26c-tidyr-DSR1.R | # tidyr
library(tidyr)
#devtools::install_github("garrettgman/DSR")
library(DSR)
# Tour of the DSR example tables: the same TB case data stored in six
# different (tidy and untidy) layouts.
head(table1)
head(table2)
head(table3)
head(table4)
head(table5)
head(table6)
table1$cases

# spread: long -> wide. Each value of `type` becomes its own column
# filled from `count`. ----
library(tidyr)
head(table2)
spread(table2, type, count)
?spread

# gather: wide -> long. The year columns (positions 2:3) collapse into
# a key column ("year") and a value column. ----
head(table4)
gather(table4, "year", "cases", 2:3)
head(table4)
gather(table4, "year", "population", 2:3)
# Negative selection: gather everything except column 1 -- same result.
gather(table4, "year", "population", -1)
?gather

# separate ----
# turns a single character column into multiple columns by splitting the values of the column wherever a separator character appears.
?separate
head(table3)
separate(table3, rate, into = c("cases", "population"))
# specify character
separate(table3, rate, into = c("cases", "population"), sep = "/")
# integer position: split after the first 2 characters
head(table3)
separate(table3, year, into = c("century", "year"), sep = 2)

# unite: the inverse of separate -- paste columns together. ----
head(table6)
unite(table6, "new", century, year, sep = "")
?unite
22fb3b1f50dbe24c57a095c39aa843c71286186f | bf34afc9a02e3c3546b2d5cf7e6ea55bee023bee | /Serial/Serial.R | 977741a3724af69fe0fd27a63e57e98cbb272aa2 | [] | no_license | RobHarrand/kaggle | fd7b401391b3c0f41cc5164ab58d0c6c711c1d30 | 9f859da7c185ca97299a18457748a7ab5caa62dc | refs/heads/master | 2020-03-11T09:59:19.640256 | 2018-04-17T15:50:40 | 2018-04-17T15:50:40 | 129,928,554 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,867 | r | Serial.R | codebook = read.csv('Codebook.csv', stringsAsFactors = F)
motives = read.csv('Motive Codes.csv', stringsAsFactors = F)
killers = read.csv('Serial Killers Data.csv', stringsAsFactors = F)
victims = read.csv('Victim Codes.csv', stringsAsFactors = F)
str(killers, list.len = length(killers))
#Aspects...
aspects = data.frame(colnames(killers))
aspects$Min = 0
aspects$Max = 0
i=1
while (i <= dim(killers)[2]) {
aspects[i,2] = min(killers[,i], na.rm = T)
aspects[i,3] = max(killers[,i], na.rm = T)
i=i+1
}
#Where are the NAs?
nas = data.frame(sapply(killers, function(x) sum(is.na(x) | x == "")))
aspects = cbind(aspects, nas)
rownames(aspects) = seq(1, length(aspects$colnames.killers.),1)
aspects$NA_pc = round(aspects$sapply.killers..function.x..sum.is.na.x... / length(killers$Name) * 100, 2)
i=1
while (i <= dim(killers)[2]) {
aspects[i,6] = length(unique(killers[,i]))
i=i+1
}
aspects = aspects[order(-aspects$`No. of NAs`),] #Get them in order of scale
colnames(aspects) = c('Feature', 'Min', 'Max', 'No. of NAs', '% NAs', 'No. of unique values')
library(lubridate)
library(ggplot2)
library(dplyr)
#Do motives fluctuate over a given period of time?
# Derive the year of each offender's first kill for the time series.
killers$Type = as.factor(killers$Type)
killers$DateFirst = mdy(killers$DateFirst)
killers$Year = year(killers$DateFirst)

# Total victims per (motive type, year), sorted descending, rows with
# an unparseable year dropped.
by_type_year = group_by(killers, Type, Year)
NumVicsByType = data.frame(summarise(by_type_year, Total = sum(NumVics, na.rm = T)))
NumVicsByType = NumVicsByType[order(-NumVicsByType$Total),]
ex = is.na(NumVicsByType$Year)
NumVicsByType = NumVicsByType[!ex,]
NumVicsByType$Type

# Collapse the detailed Type labels into broad motive buckets via
# substring matching; anything unmatched stays "Other". The commented
# lines are buckets deliberately left out of the plot.
NumVicsByType$Motive = "Other"
NumVicsByType$Motive[grep("Anger", NumVicsByType$Type)] = "Anger"
NumVicsByType$Motive[grep("Convenience", NumVicsByType$Type)] = "Convenience"
#NumVicsByType$Motive[grep("Criminal", NumVicsByType$Type)] = "Criminal"
NumVicsByType$Motive[grep("Enjoyment", NumVicsByType$Type)] = "Enjoyment"
NumVicsByType$Motive[grep("FinancialGain", NumVicsByType$Type)] = "FinancialGain"
#NumVicsByType$Motive[grep("Mentalillness", NumVicsByType$Type)] = "Mentalillness"
#NumVicsByType$Motive[grep("Multiplemotives", NumVicsByType$Type)] = "Multiplemotives"
NumVicsByType$Motive[grep("Other", NumVicsByType$Type)] = "Other"

# Colour-blind-friendly palette.
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")

ggplot(NumVicsByType, aes(x=Year, y=Total, col = Motive, group = Motive)) +
  geom_line(size=1) +
  scale_colour_manual(values=cbbPalette) +
  theme_bw()

#What about methods (weapon usage)?
killers$MethodDescription = as.factor(killers$MethodDescription)

# Total victims per (method, year), same cleaning as above.
by_method_year = group_by(killers, MethodDescription, Year)
NumVicsByMethod = data.frame(summarise(by_method_year, Total = sum(NumVics, na.rm = T)))
NumVicsByMethod = NumVicsByMethod[order(-NumVicsByMethod$Total),]
ex = is.na(NumVicsByMethod$Year)
NumVicsByMethod = NumVicsByMethod[!ex,]

# Bucket single-method descriptions by exact match; any description
# containing a comma is treated as multiple methods.
# NOTE(review): the "^Strangle$" line appears twice -- the second one
# is redundant (or a "^Drug$"-style bucket was intended); confirm.
NumVicsByMethod$Method = "Other"
NumVicsByMethod$Method[grep("^Strangle$", NumVicsByMethod$MethodDescription)] = "Strangle"
NumVicsByMethod$Method[grep("^Pills$", NumVicsByMethod$MethodDescription)] = "Pills"
NumVicsByMethod$Method[grep("^Poison$", NumVicsByMethod$MethodDescription)] = "Poison"
NumVicsByMethod$Method[grep("^Shoot$", NumVicsByMethod$MethodDescription)] = "Shoot"
NumVicsByMethod$Method[grep("^Strangle$", NumVicsByMethod$MethodDescription)] = "Strangle"
NumVicsByMethod$Method[grep("^Stab$", NumVicsByMethod$MethodDescription)] = "Stab"
NumVicsByMethod$Method[grep(",", NumVicsByMethod$MethodDescription)] = "Multiple"

cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")

ggplot(NumVicsByMethod, aes(x=Year, y=Total, col = Method, group = Method)) +
  geom_line(size=1) +
  scale_colour_manual(values=cbbPalette) +
  theme_bw()

#Do offenders with certain motives prefer certain types of weapons?
# Total victims per (motive, method) pair; blank labels dropped.
by_motive_method = group_by(killers, Type, MethodDescription)
NumVicsByMotiveMethod = data.frame(summarise(by_motive_method, Total = sum(NumVics, na.rm = T)))
NumVicsByMotiveMethod = NumVicsByMotiveMethod[order(-NumVicsByMotiveMethod$Total),]
ex = (NumVicsByMotiveMethod$MethodDescription == "" | NumVicsByMotiveMethod$Type == "")
NumVicsByMotiveMethod = NumVicsByMotiveMethod[!ex,]
#What factors impact an offender’s decision to use multiple methods (weapons) over their series?
# Flag offenders recorded with multiple motives (proxy used below to
# split the sample into "single" vs "multiple" groups).
killers$Multi = 0
killers$Multi[killers$Type == 'Multiplemotives'] = 1
l = length(killers$Name)
len_s = length(killers$Sex[killers$Multi == 0])
len_m = length(killers$Sex[killers$Multi == 1])
# Group shares of the whole sample (printed when run interactively).
len_s / l * 100
len_m / l * 100
# Sex distribution (%) within each group, side by side.
sex_0 = data.frame(table(killers$Sex[killers$Multi == 0]) / len_s*100)
sex_1 = data.frame(table(killers$Sex[killers$Multi == 1]) / len_m*100)
sex = data.frame(cbind(sex_0$Freq, sex_1$Freq))
rownames(sex) = c('Male', 'Female')
colnames(sex) = c('% single method', '% multiple methods')
sex$`% single method` = round(sex$`% single method`, 2)
sex$`% multiple methods` = round(sex$`% multiple methods`, 2)
sex
# Race distribution (%) within each group. The multi-method group is
# missing race code 6, so a zero row is appended; the rbind of a
# character vector coerces Freq to character, hence the
# as.numeric(as.character(...)) round-trip below.
race_0 = data.frame(table(killers$Race[killers$Multi == 0]) / len_s*100)
race_1 = data.frame(table(killers$Race[killers$Multi == 1]) / len_m*100)
race_1$Var1 = as.character(race_1$Var1)
race_1 = rbind(race_1, c("6",0))
race = data.frame(cbind(race_0$Freq, race_1$Freq))
race$X1 = as.numeric(as.character(race$X1))
race$X2 = as.numeric(as.character(race$X2))
# Assumes race codes 1..6 map to these labels in this order -- taken
# from the codebook; confirm against Codebook.csv.
rownames(race) = c('White', 'Black', 'Hispanic', 'Asian', 'Native American', 'Aboriginal')
colnames(race) = c('% single method', '% multiple methods')
race$`% single method` = round(race$`% single method`, 2)
race$`% multiple methods` = round(race$`% multiple methods`, 2)
race
# Group means for several numeric traits (index 1 = single, 2 = multi).
m_iq1 = mean(killers$IQ1[killers$Multi == 0], na.rm = T)
m_iq2 = mean(killers$IQ1[killers$Multi == 1], na.rm = T)
m_iqs = data.frame(c(m_iq1,m_iq2))
m_age1 = mean(killers$Age1stKill[killers$Multi == 0], na.rm = T)
m_age2 = mean(killers$Age1stKill[killers$Multi == 1], na.rm = T)
m_ages = data.frame(c(m_age1,m_age2))
m_age1b = mean(killers$AgeLastKill[killers$Multi == 0], na.rm = T)
m_age2b = mean(killers$AgeLastKill[killers$Multi == 1], na.rm = T)
m_agesb = data.frame(c(m_age1b,m_age2b))
m_height1 = mean(killers$Height[killers$Multi == 0], na.rm = T)
m_height2 = mean(killers$Height[killers$Multi == 1], na.rm = T)
m_heights = data.frame(c(m_height1,m_height2))
m_numvic1 = mean(killers$NumVics[killers$Multi == 0], na.rm = T)
m_numvic2 = mean(killers$NumVics[killers$Multi == 1], na.rm = T)
m_numvics = data.frame(c(m_numvic1,m_numvic2))
#Merge plots,
my_plots = data.frame(m_iqs, m_ages, m_agesb, m_heights, m_numvics)
#Plot all,
# Grouped bar chart: each pair of bars compares the two groups.
barplot(as.matrix(my_plots), names.arg=c("IQ", "Age (1st kill)", "Age (last kill)", "Heights", "No. victims"),
        beside=TRUE, col=terrain.colors(2))
#Set the legend,
legend(6, 80, c("Single method", "Multiple methods"), cex=0.8, fill=terrain.colors(2))
#What is the most common victim type for killers with expressive motives such as anger, revenge and lust?
# Victims per (motive, victim code), sorted descending, missing victim
# codes dropped; then keep only motives matching anger/revenge/lust and
# take the top ten rows.
by_victim_method = group_by(killers, Type, VictimCode)
VictimByType= data.frame(summarise(by_victim_method, Total = sum(NumVics, na.rm = T)))
VictimByType = VictimByType[order(-VictimByType$Total),]
ex = is.na(VictimByType$VictimCode)
VictimByType = VictimByType[!ex,]
anger = grep("anger|revenge|lust", VictimByType$Type, ignore.case = T)
VictimByType = VictimByType[anger,]
VictimByType = VictimByType[1:10,]
# Human-readable labels for the victim codes that appear in the top 10
# (labels presumably come from Victim Codes.csv -- confirm).
# NOTE(review): only 7 of the 10 retained codes get a label here; the
# rest stay "".
VictimByType$Type_name = ""
VictimByType$Type_name[VictimByType$VictimCode == 11.00] = 'Multiple victim types'
VictimByType$Type_name[VictimByType$VictimCode == 8.30] = 'Street - General public'
VictimByType$Type_name[VictimByType$VictimCode == 5.00] = 'Family'
VictimByType$Type_name[VictimByType$VictimCode == 8.50] = 'Street - Acquaintences'
VictimByType$Type_name[VictimByType$VictimCode == 5.70] = 'Family - Girl/Boy friends'
VictimByType$Type_name[VictimByType$VictimCode == 8.32] = 'Street - Men'
VictimByType$Type_name[VictimByType$VictimCode == 4.10] = 'Patients/Wards - Hospital patients'

#What is the most common victim type for killers with instrumental motives such as financial gain?
# Same pipeline as above, filtered to financially-motivated offenders.
by_victim_method = group_by(killers, Type, VictimCode)
VictimByType2= data.frame(summarise(by_victim_method, Total = sum(NumVics, na.rm = T)))
VictimByType2 = VictimByType2[order(-VictimByType2$Total),]
ex = is.na(VictimByType2$VictimCode)
VictimByType2 = VictimByType2[!ex,]
financial = grep("financial", VictimByType2$Type, ignore.case = T)
VictimByType2 = VictimByType2[financial,]
VictimByType2 = VictimByType2[1:10,]
VictimByType2$Type_name = ""
VictimByType2$Type_name[VictimByType2$VictimCode == 8.33] = 'Street - Adults - men & women'
VictimByType2$Type_name[VictimByType2$VictimCode == 8.30] = 'Street - General public'
VictimByType2$Type_name[VictimByType2$VictimCode == 4.30] = 'Patients/Wards - Child care'
VictimByType2$Type_name[VictimByType2$VictimCode == 6.00] = 'Employees/Customers'
VictimByType2$Type_name[VictimByType2$VictimCode == 7.10] = 'Home invasion - Men & Women'
VictimByType2$Type_name[VictimByType2$VictimCode == 6.10] = 'Employees/Customers - Employees'
VictimByType2$Type_name[VictimByType2$VictimCode == 11.00] = 'Multiple victim types'
VictimByType2$Type_name[VictimByType2$VictimCode == 4.20] = 'Patients/Wards - Wards'
VictimByType2$Type_name[VictimByType2$VictimCode == 8.31] = 'Street - Women'
#Do the characteristics of the victims interact with those of the offenders in some unforeseen way? How are offenders and victims similar and different?
#Do men that kill exclusively women vary from those that kill men and women?
#What are the observable differences between offenders that kill for sexual reasons and those with financial motives?
#Are there stark differences between offenders that kill two victims as opposed to three and above?
#What role does the age of the offender play in impacting the other variables? Do older offenders kill more victims over time? Are older offenders able to remain unapprehended for longer timespans?
#What does the offender’s choice of location tell us? Do region based offenders have different motives and methods than multistate killers? Do any particular counties experience more series than others?
#Is there an association between the offender's chosen state of operation and the weapon they use, such as a handgun?
#What are some variables unique to partnerships?
library(caret)
# Victim-count distribution, with and without outliers.
boxplot(killers$NumVics)
boxplot(killers$NumVics, outline = F)

# Discretize victim counts into a 3-level severity factor
# (1: up to 2 victims, 2: 3-12, 3: more than 12).
# NOTE(review): Severity is built but not used by the model below --
# possibly intended as the response; confirm.
killers$Severity = 1
killers$Severity[killers$NumVics > 2] = 2
killers$Severity[killers$NumVics > 12] = 3
table(killers$Severity)

killers$Severity = as.factor(killers$Severity)
killers$Sex = as.factor(killers$Sex)
killers$Race = as.factor(killers$Race)
killers$WhiteMale = as.factor(killers$WhiteMale)

# Conditional inference tree predicting victim count from demographics.
model = train(NumVics ~ WhiteMale + AgeLastKill, method = 'ctree', data = killers)
plot(model$finalModel)
|
e6c9fded833b348f926ff34133050a2e1bbc45a1 | 1ae00cb0ae3d7911e49594d3a0be4325eb659239 | /analiza/analiza.r | 320be3c2960f9fe4ea865751723401d0eb1a3b3f | [
"MIT"
] | permissive | Klarapenko/APPR-2019-20 | f1aacba52c703fbb0d62f95f743fe797ccf23d08 | b8ce902add0bb9facb7637635694c7c7590199da | refs/heads/master | 2021-12-12T12:55:09.879867 | 2021-09-02T22:13:57 | 2021-09-02T22:13:57 | 221,639,997 | 0 | 0 | null | 2019-11-14T07:44:49 | 2019-11-14T07:44:48 | null | UTF-8 | R | false | false | 2,133 | r | analiza.r | # 4. faza: Analiza podatkov
library(ggplot2)
library(GGally)
library(mgcv)
#NAPOVEDI Z KVADRATNO REGRESIJSKO FUNKCIJO - za LITVO
##NAPOVED TEDENSKIH DELOVNIH UR ZA LITVO
tedenske_delovne_ure <- URE %>% filter(drzava == "Litva") %>% select(leto, vrednost)
prilagajanje <- lm(data = tedenske_delovne_ure, vrednost~I(leto^2) +leto)
graf <- data.frame(leto = seq(2019, 2021, 1))
napoved <- mutate(graf, napovedano.st.tedenskih.ur=predict(prilagajanje, graf))
graf_regresije <- ggplot(tedenske_delovne_ure, aes(x=leto, y=vrednost))+
geom_point() + geom_smooth(method = lm, formula =y~ x + I(x^2), fullrange = TRUE, color = 'blue')+
geom_point(data = napoved, aes(x= leto, y=napovedano.st.tedenskih.ur), color='red', size = 2) +
scale_x_continuous(breaks=seq(2008, 2021, 1)) +
ggtitle('Napoved tedenskih delovnih ur za Litvo')
##NAPOVED DOLGOV ZA LITVO
dolgovi1 <- DOLGOVI %>% filter(drzava == "Litva") %>% select(leto, vrednost)
prilagajanje1 <- lm(data = dolgovi1, vrednost~I(leto^2) +leto)
graf1 <- data.frame(leto = seq(2019, 2021, 1))
napoved1 <- mutate(graf1, napovedano.dolgov=predict(prilagajanje1, graf1))
graf_regresije1 <- ggplot(dolgovi1, aes(x=leto, y=vrednost))+
geom_point() + geom_smooth(method = lm, formula =y~ x + I(x^2), fullrange = TRUE, color = 'green')+
geom_point(data = napoved1, aes(x= leto, y=napovedano.dolgov), color='red', size = 2) +
scale_x_continuous(breaks=seq(2008, 2021, 1)) +
ggtitle('Napoved dolžnikov za Litvo')
##NAPOVED LOČITEV ZA LITVO
locitve_litva <- LOCITVE %>% filter(država == "Litva") %>% select(leto, locitve)
prilagajanje2 <- lm(data = locitve_litva, locitve~I(leto^2) +leto)
graf2 <- data.frame(leto = seq(2019, 2021, 1))
napoved2 <- mutate(graf2, napovedane.locitve=predict(prilagajanje2, graf2))
graf_regresije2 <- ggplot(locitve_litva, aes(x=leto, y=locitve))+
geom_point() + geom_smooth(method = lm, formula =y~ x + I(x^2), fullrange = TRUE, color = 'orange')+
geom_point(data = napoved2, aes(x= leto, y=napovedane.locitve), color='red', size = 2) +
scale_x_continuous(breaks=seq(2008, 2021, 1)) +
ggtitle('Napoved ločitev za Litvo')
|
238345b7ed717076031b1bbb8730bfc3eea374aa | b83cc4f7697608ea4a665d88c946211da4bfdf39 | /formattable.R | db058167d90b26c51d86a66f3f49e14d4bfd2317 | [] | no_license | kwbonds/SOT_OTS-1 | 736399bce04417d94d220bbc1ac177ffe27178ae | 1692dd567aa42529abb54f455dded8336f357b69 | refs/heads/master | 2020-04-14T13:35:59.514841 | 2018-10-23T18:22:23 | 2018-10-23T18:22:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,564 | r | formattable.R | library(formattable)
library(DT)
# library(plotly)
# Map a score to a text colour: black for missing values, red for
# scores up to 20, dark orange for 20-80, dark green above 80.
color.picker <- function(z){
  if (is.na(z)) {
    "black"
  } else if (z <= 20) {
    "red"
  } else if (z <= 80) {
    "darkorange"
  } else {
    "darkgreen"
  }
}
# Companion background colour for color.picker: black for missing
# values, pink up to 20, yellow for 20-80, light green above 80.
bg.picker <- function(z){
  if (is.na(z)) {
    "black"
  } else if (z <= 20) {
    "pink"
  } else if (z <= 80) {
    "yellow"
  } else {
    "lightgreen"
  }
}
# Colour positive values dark green and negative values dark red.
sign_formatter <- formatter("span",
                            style = x ~ style(color = ifelse(x > 0, "DarkGreen",
                                                             ifelse(x < 0, "DarkRed", "black"))))
# Same colouring as sign_formatter (background variant apparently
# intended, but only the text colour is set).
sign__bg_formatter <- formatter("span",
                                style = x ~ style(color = ifelse(x > 0, "DarkGreen",
                                                                 ifelse(x < 0, "DarkRed", "black"))))

# NOTE(review): the next expression's value is discarded and it uses
# `...` outside any function definition, so it errors when this script
# is sourced. It looks like an abandoned draft of a gradient tile
# formatter -- confirm whether it can be removed.
formatter(.tag = "span", style = function(x) style(display = "block",
                                                   padding = "0 4px",
                                                   `border-radius` = "4px",
                                                   `background-color` = csscolor(gradient(as.numeric(x), ...))))

# NOTE(review): also broken -- the ifelse() is missing its `no` branch
# and `...` is again used outside a function; another abandoned draft.
SOT_tile <- formatter(.tag = "span", style = function(x) style(display = "block",
                                                               padding = "0 4px",
                                                               `border-radius` = "4px",
                                                               `background-color` = ifelse(x > 0 ,
                                                                                           csscolor(gradient(as.numeric(x), ...)))))

# Quick visual check of sign_formatter on a sample vector.
sign_formatter(c(-1, 0, 1))

# Bold values above the 95% SOT target.
SOT_formatter <- formatter("span",
                           style = x ~ style("font-weight" = ifelse(x > .95, "bold", NA)))
# Bold values above the column mean.
above_avg_bold <- formatter("span",
                            style = x ~ style("font-weight" = ifelse(x > mean(x), "bold", NA)))
# Standard display names for the transportation summary columns.
# Returns the names as a character vector; callers assign it with
# names(df) <- change_names(df). The `x` parameter is kept only for
# backward compatibility with existing call sites -- the original
# relied on the value of `names(x) <- ...` (a replacement assignment)
# as the implicit return and errored unless x had exactly 8 elements,
# even though x never influenced the result.
change_names <- function(x) {
  c("Brand", "Shipped On Time to Contract %", "% Variance from Target (95%)", "Transportation Impact", "Vendor Impact (Air)", "Vendor Impact (non-Air)", "Unmeasured Impact", "Total Impact")
}
#
# change_units <- function(x){
#
# }
names(Trans_output) <- change_names(Trans_output)
Trans_output$`Shipped On Time to Contract %` <- percent(Trans_output$`Shipped On Time to Contract %`, 1)
Trans_output$`% Variance from Target (95%)` <- percent(Trans_output$`% Variance from Target (95%)`, 1)
Trans_output$`Transportation Impact` <- percent(Trans_output$`Transportation Impact`, 1)
Trans_output$`Vendor Impact (Air)` <- percent(Trans_output$`Vendor Impact (Air)`, 1)
Trans_output$`Vendor Impact (non-Air)` <- percent(Trans_output$`Vendor Impact (non-Air)`, 1)
Trans_output$`Unmeasured Impact` <- percent(Trans_output$`Unmeasured Impact`, 1)
Trans_output$`Total Impact` <- percent(Trans_output$`Total Impact`, 1)
by_brand <- formattable(Trans_output, list( `Shipped On Time to Contract %` = SOT_formatter,
`% Variance from Target (95%)` = sign_formatter,
`Transportation Impact` = color_tile("transparent", "lightpink"),
`Vendor Impact (Air)` = color_tile("transparent", "lightpink"),
`Unmeasured Impact` = color_tile("transparent", "lightpink"),
`Vendor Impact (non-Air)` = color_tile("transparent", "lightpink")))
formattable(Trans_output)
|
b225f320cdef9c585371d049293154918476d769 | f7cb5ffe2d36c1a529a9b74ce6408dc0e0ae2413 | /analysis/misc/parallel_setup.R | 7e09f235e88fc593a10bc11f103fb9cda5942bc7 | [] | no_license | alaindanet/fishcom | aa5b03f49c5e698d37003b6ffa75b88f3f18b1f4 | 76e30a7bf13cccabc1cd8fc230bb469276d7f846 | refs/heads/master | 2023-06-18T20:29:30.291089 | 2021-07-12T16:34:30 | 2021-07-12T16:34:30 | 160,175,823 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | parallel_setup.R | ####################
# Setup parallel #
####################
library(future)
# Number of worker processes available on this machine.
nb_cores <- future::availableCores()
#Crap hack to determine if in local or on the PBS cluster:
node_name <- Sys.info()["nodename"]
if (node_name %in% c("Andy", "BigAndy", "BrickAndy")) {
  # Known local machines: use future's multiprocess backend directly.
  future::plan(multiprocess)
#} else if (node_name %in% c("mesu0", "migale")) {
} else {
  # Any other host (assumed to be a cluster node): start an explicit
  # PSOCK cluster and register it as the future backend.
  cl <- parallel::makeCluster(nb_cores)
  future::plan(cluster, workers = cl)
  #warning("Unknow location: parallel session have not been set up")
}
# Compute the p-value concentration point of a `hommel` object at level alpha.
#
# Same computation as before: determine h(alpha), look up the matching Simes
# factor, and return the sorted p-value at the concentration index reported by
# the internal findConcentration() routine.
concentration <- function(hommel, alpha = 0.05) {
  m <- length(hommel@p)                      # number of hypotheses
  halpha <- findHalpha(hommel@jumpalpha, alpha, m)
  p_sorted <- hommel@p[hommel@sorter]        # p-values in ascending order
  idx <- findConcentration(p_sorted, hommel@simesfactor[halpha + 1], halpha, alpha, m)
  p_sorted[idx]
}
|
0975080c18f8631afab41b7d48a5c63477f2a892 | 7a866c6c5fa70556c8d5e6662e2ae249c682ef18 | /R/calcNDVI.R | 6a0fbe289f8f84afa9d411fb1a1d0384c03b2279 | [] | no_license | jdroesen92/Intro_raster | 53035b990c5b8392c5da96f58c84668e22574bb3 | 045e97a2bb6724b60d97c429242dca8dbd775c8d | refs/heads/master | 2021-01-10T08:58:09.525046 | 2016-01-11T08:44:19 | 2016-01-11T08:44:19 | 49,411,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 244 | r | calcNDVI.R | #CalculateNDVI
# Compute the Normalized Difference Vegetation Index from a red band (x) and a
# near-infrared band (y):  NDVI = (NIR - Red) / (NIR + Red).
# Works elementwise, so x and y may be numeric vectors or raster layers.
# Band mapping (from the original notes): Landsat 8 (2014): Red = band 4
# (layer 5), NIR = band 5 (layer 6); Landsat 5 (1990): Red = band 3 (layer 6),
# NIR = band 4 (layer 7).
ndvOver <- function(x, y) {
  (y - x) / (y + x)
}
7f3182ff4c72af0ddba82abd1276c40f1e085d5a | 3a122a9a64fcc715269a50115af193ffe680ba70 | /MR/Non_UKBB_CRP_univariable.R | c6a9fafc271ecc8bc6cb665ce8a27ae9d24a43e4 | [] | no_license | Williamreay/Pneumonia_meta_GWAS | 24bce229d577113ceeb89f23a2e1ee51d978cb21 | 765721fbf329c9bd88d5149d38859b20b571321d | refs/heads/master | 2023-04-16T23:57:36.913539 | 2022-08-11T04:35:23 | 2022-08-11T04:35:23 | 319,832,839 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,302 | r | Non_UKBB_CRP_univariable.R | ########################################
## CRP ==> pneumonia susceptibility (2022)
## Non-UKBB CRP GWAS
## Univariable MR - total effect
## William Reay (2022)
########################################
library(data.table)
library(dplyr)
library(TwoSampleMR)
library(ggplot2)
library(MRPRESSO)
# NOTE(review): setwd() hard-codes a user-specific path; the script will only
# run on the author's machine unless this is changed.
setwd("~/Desktop/23andMe_pneumonia/2022_FinnGen_r6_meta_analysis/Common_var_results/LCV/MR_CRP_follow_up/")
## Get non-UKBB CRP IVs
# Clumped instruments fetched from the IEU OpenGWAS database ("ieu-b-35" is
# the CRP GWAS referenced in the header above); requires network access.
CRP_IVs <- extract_instruments("ieu-b-35", clump = TRUE)
# Outcome: pneumonia susceptibility GWAS meta-analysis summary statistics.
Pneumonia_raw <- fread("../../../Meta_results/FINAL_Common_IVW_FinnGen_23andMe_2022.txt.gz")
Pneumonia_raw$Phenotype <- "Pneumonia_susceptibility"
Pneumonia_outcome <- format_data(dat = Pneumonia_raw,
                                 type = "outcome",
                                 beta_col = "Effect",
                                 se_col = "StdErr",
                                 snp_col = "MarkerName",
                                 effect_allele_col = "Allele1",
                                 other_allele_col = "Allele2",
                                 phenotype_col = "Phenotype")
## Harmonise data and exclude potential palindromes (option 3)
CRP_pneu_harm <- harmonise_data(CRP_IVs, Pneumonia_outcome, action = 3)
# Five univariable MR estimators on the harmonised exposure/outcome data.
Univariable_CRP <- mr(CRP_pneu_harm,
                      method_list = c("mr_ivw_mre",
                                      "mr_ivw_fe",
                                      "mr_weighted_median",
                                      "mr_weighted_mode",
                                      "mr_egger_regression"))
OR_CRP_to_pneumonia <- generate_odds_ratios(Univariable_CRP)
write.table(OR_CRP_to_pneumonia, file="Non_UKBB_Univariable_CRP_to_pneumonia.txt",
            sep = "\t", row.names = F, quote = F)
## Heterogeneity test via Cochran's Q
CRP_pneumonia_het <- mr_heterogeneity(CRP_pneu_harm)
## Test if MR-Egger intercept is significantly different from zero
CRP_pneumonia_Egger_intercept <- mr_pleiotropy_test(CRP_pneu_harm)
# Leave-one-out sensitivity analysis under the multiplicative random-effects IVW.
CRP_pneumonia_LOO_IVW <- mr_leaveoneout(CRP_pneu_harm, method = mr_ivw_mre)
CRP_pneumonia_LOO_IVW %>% filter(p > 0.05)
CRP_single_SNP <- mr_singlesnp(CRP_pneu_harm)
## Derive top three single SNPs
CRP_single_SNP <- CRP_single_SNP[order(CRP_single_SNP$p),]
mr_forest_plot(CRP_single_SNP, exponentiate = F)
|
2e7ba7dcee433becbf278cf41b4b496c62b20336 | b1ee7b4bac461beae977051f426358783a693f8f | /plot3.R | 9155e66f2db5a696b48c88ad5bb7dc73247e30a8 | [] | no_license | mgelvin/ExData_Plotting1 | acef0ac3c9a2b97ddaeb2d3db6c9e67f65c22ada | 4c2a7d89acdb5cc97efcceb3923fa03eec67cfb0 | refs/heads/master | 2021-01-20T05:07:44.887329 | 2015-04-12T21:10:05 | 2015-04-12T21:10:05 | 33,800,876 | 0 | 0 | null | 2015-04-12T02:56:13 | 2015-04-12T02:56:11 | null | UTF-8 | R | false | false | 779 | r | plot3.R | temp<-tempfile()
# Download the zipped household power data into the tempfile created above.
download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip',temp,'curl')
# NOTE(review): `stringsAsFactor` is a misspelling of `stringsAsFactors`; it
# only works because R partially matches named arguments in read.table().
data<-read.csv2(unz(temp,'household_power_consumption.txt'),na.strings='?',stringsAsFactor=F)
unlink(temp,recursive=T)
# Keep only the two target days (dates are day/month/year strings here).
data<-data[data$Date=='1/2/2007'|data$Date=='2/2/2007',]
# Combine the character Date and Time columns into a single POSIXlt datetime.
data$Date_Time=paste(data$Date,data$Time,sep=' ')
data$Date_Time<-strptime(data$Date_Time,format='%d/%m/%Y %H:%M:%S')
# Render the three sub-metering series to plot3.png with a legend.
png(filename='plot3.png')
plot(data$Date_Time,data$Sub_metering_1,'l',xlab='',ylab='Energy sub metering')
lines(data$Date_Time,data$Sub_metering_2,'l',col='red')
lines(data$Date_Time,data$Sub_metering_3,'l',col='blue')
legend('topright',col=c('black','red','blue'),legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),lty=c(1,1,1))
dev.off()
|
3e0e7732cf708afe277a457a92a8be029e0db813 | af77cc9ccadb9cf4d451831fdd07abe13503a879 | /yelp/wekafiles/packages/RPlugin/mlr/mlr/R/performance.resample.r | 8241d9159d74f9a9a72620838db81a11e3ba5e93 | [] | no_license | tummykung/yelp-dataset-challenge | 7eed6a4d38b6c9c90011fd09317c5fa40f9bc75c | 84f12682cba75fa4f10b5b3484ce9f6b6c8dad4a | refs/heads/master | 2021-01-18T14:10:55.722349 | 2013-05-21T09:30:37 | 2013-05-21T09:30:37 | 9,527,545 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,265 | r | performance.resample.r | #' @include prediction.resample.r
roxygen()
#' @rdname performance
# performance() for resampled predictions: evaluates the requested measures on
# every resampling iteration, then aggregates across iterations with the
# functions in `aggr`.  Returns a list with elements: measures (one row per
# iteration), optionally aggr.group (per-iteration group aggregates when the
# resample instance has groups), aggr (final aggregates) and losses.
setMethod(
  f = "performance",
  signature = c(pred="resample.prediction", measures="list", losses="list", aggr="list"),
  def = function(pred, measures, aggr, losses, task) {
    rin = pred["instance"]
    preds = as.list(pred)
    # NOTE(review): `is` shadows base::is and 1:n misbehaves if iters == 0.
    is = 1:rin["iters"]
    # Per-iteration performance; group-level aggregation is delegated down.
    perfs = lapply(preds, function(p) performance(p, measures=measures, aggr=rin["aggr.group"], losses=losses, task=task))
    # add iter index to measure data.frame
    ms.list = lapply(is, function (i) {
      m=perfs[[i]]$measures
      if(is.numeric(m))
        m = as.matrix(t(m))   # single measure vector -> 1-row matrix
      cbind(iter=i, m)
    })
    # put together without aggregation
    ms.all = Reduce(rbind, ms.list)
    if (rin["has.groups"]) {
      # Stack each iteration's group aggregates, then aggregate across iters.
      ms.aggr.group = Reduce(rbind, lapply(perfs, function(x) x$aggr))
      # if (!is.matrix(ms.aggr.group))
      # ms.aggr.group = as.matrix(t(ms.aggr.group))
      # colnames(ms.aggr.group) = names(measures)
      rownames(ms.aggr.group) = NULL
      ms.aggr.group = as.data.frame(ms.aggr.group)
      ms.aggr = lapply(aggr, function(f) apply(ms.aggr.group, 2, f))
      ms.aggr.group = cbind(iter=is, ms.aggr.group)
    } else {
      # Aggregate the raw per-iteration measures (the iter column is dropped).
      ms.aggr = lapply(aggr, function(f) apply(ms.all[,-1,drop=FALSE], 2, f))
    }
    # The special "combine" aggregation pools all predictions and evaluates once.
    j = which(names(aggr) == "combine")
    if (length(j) > 0) {
      # downcast
      if (rin["has.groups"]) {
        pred = as(pred, "grouped.prediction")
        ms.aggr[[j]] = performance(pred=pred, measures=measures, losses=list(), aggr=rin["aggr.group"], task=task)$aggr
      } else {
        pred = as(pred, "prediction")
        ms.aggr[[j]] = performance(pred=pred, measures=measures, losses=list(), aggr=list(), task=task)$measures
      }
    }
    ms.aggr = Reduce(rbind, ms.aggr)
    if (!is.matrix(ms.aggr))
      ms.aggr = as.matrix(t(ms.aggr))   # single aggregation -> 1-row matrix
    colnames(ms.aggr) = names(measures)
    rownames(ms.all) = NULL
    rownames(ms.aggr) = names(aggr)
    ms.all = as.data.frame(ms.all)
    ms.aggr = as.data.frame(ms.aggr)
    # Per-iteration losses, stacked with their iteration index.
    ls = lapply(is, function (i) cbind(iter=i, perfs[[i]]$losses))
    ls = as.data.frame(Reduce(rbind, ls))
    result = list(measures=ms.all)
    if (rin["has.groups"])
      result$aggr.group = ms.aggr.group
    result$aggr = ms.aggr
    if (length(losses) > 0)
      result$losses = ls
    return(result)
  }
)
|
3dc26d2b0f1d9d1f6cfe03c3a2ee1ec9e42cf0af | e3f72b7db34995bd1488020a7fe2f40b5641df62 | /doc/eurlexpkg.R | 488fba172ada475ed04bb4a7493bac0746cb8591 | [] | no_license | local-maxima/eurlex | 11c3cbc773d6dfa60c398bf182d1479ac091ab83 | 183d2bc01a6ddf4fc7735400c07698baecb95ccc | refs/heads/master | 2022-12-01T21:22:14.068136 | 2020-08-13T21:39:06 | 2020-08-13T21:39:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,816 | r | eurlexpkg.R | ## ---- echo = FALSE, message = FALSE, warning=FALSE, error=FALSE---------------
# Code extracted from the eurlex package vignette; the `## ----` lines below
# are the original knitr chunk headers.  Most chunks query the EUR-Lex SPARQL
# endpoint via eurlex, so running them requires network access.
knitr::opts_chunk$set(collapse = T, comment = "#>")
options(tibble.print_min = 4, tibble.print_max = 4)
## ----makequery, message = FALSE, warning=FALSE, error=FALSE-------------------
library(eurlex)
library(dplyr) # my preference, not needed for the package
# Build (but do not yet run) a SPARQL query for all directives.
query_dir <- elx_make_query(resource_type = "directive")
## ----precompute, include=FALSE------------------------------------------------
dirs <- elx_make_query(resource_type = "directive", include_date = TRUE, include_force = TRUE) %>%
  elx_run_query() %>%
  rename(date = `callret-3`)
results <- dirs %>% select(-force,-date)
## -----------------------------------------------------------------------------
query_dir %>%
  glue::as_glue() # for nicer printing
elx_make_query(resource_type = "caselaw") %>%
  glue::as_glue()
elx_make_query(resource_type = "manual", manual_type = "SWD") %>%
  glue::as_glue()
## -----------------------------------------------------------------------------
elx_make_query(resource_type = "directive", include_date = TRUE, include_force = TRUE) %>%
  glue::as_glue()
# minimal query: elx_make_query(resource_type = "directive")
elx_make_query(resource_type = "recommendation", include_date = TRUE, include_lbs = TRUE) %>%
  glue::as_glue()
# minimal query: elx_make_query(resource_type = "recommendation")
## ----runquery, eval=FALSE-----------------------------------------------------
# results <- elx_run_query(query = query_dir)
#
# # the functions are compatible with piping
# #
# # elx_make_query("directive") %>%
# #   elx_run_query()
## -----------------------------------------------------------------------------
as_tibble(results)
## -----------------------------------------------------------------------------
head(results$type,5)
results %>%
  distinct(type)
## ----eurovoc------------------------------------------------------------------
# Attach EuroVoc concept URIs to a small sample of recommendations.
rec_eurovoc <- elx_make_query("recommendation", include_eurovoc = TRUE, limit = 10) %>%
  elx_run_query() # truncated results for sake of the example
rec_eurovoc %>%
  select(celex, eurovoc)
## ----eurovoctable-------------------------------------------------------------
# Resolve EuroVoc URIs to human-readable labels.
eurovoc_lookup <- elx_label_eurovoc(uri_eurovoc = rec_eurovoc$eurovoc)
print(eurovoc_lookup)
## ----appendlabs---------------------------------------------------------------
rec_eurovoc %>%
  left_join(eurovoc_lookup)
## -----------------------------------------------------------------------------
# Same lookup but with alternative labels in Slovak.
eurovoc_lookup <- elx_label_eurovoc(uri_eurovoc = rec_eurovoc$eurovoc,
                                    alt_labels = TRUE,
                                    language = "sk")
rec_eurovoc %>%
  left_join(eurovoc_lookup) %>%
  select(celex, eurovoc, labels)
## ----getdatapur, message = FALSE, warning=FALSE, error=FALSE------------------
# the function is not vectorized by default
elx_fetch_data(results$work[1],"title")
# we can use purrr::map() to play that role
library(purrr)
dir_titles <- results[1:10,] %>% # take the first 10 directives only to save time
  mutate(title = map_chr(work,elx_fetch_data, "title")) %>%
  as_tibble() %>%
  select(celex, title)
print(dir_titles)
## ---- eval=FALSE--------------------------------------------------------------
# dirs <- elx_make_query(resource_type = "directive", include_date = TRUE, include_force = TRUE) %>%
# elx_run_query() %>%
# rename(date = `callret-3`)
## ----firstplot, message = FALSE, warning=FALSE, error=FALSE-------------------
library(ggplot2)
# How many directives are still in force?
dirs %>%
  count(force) %>%
  ggplot(aes(x = force, y = n)) +
  geom_col()
## -----------------------------------------------------------------------------
dirs %>%
  ggplot(aes(x = as.Date(date), y = celex)) +
  geom_point(aes(color = force), alpha = 0.1) +
  theme(axis.text.y = element_blank(),
        axis.line.y = element_blank(),
        axis.ticks.y = element_blank())
## -----------------------------------------------------------------------------
# Directives adopted in the 1970s that are still in force, with titles fetched.
dirs_1970_title <- dirs %>%
  filter(between(as.Date(date), as.Date("1970-01-01"), as.Date("1980-01-01")),
         force == "true") %>%
  mutate(title = map_chr(work,elx_fetch_data,"title")) %>%
  as_tibble()
print(dirs_1970_title)
## ----wordcloud, message = FALSE, warning=FALSE, error=FALSE-------------------
library(tidytext)
library(wordcloud)
# tf-idf weighted word cloud of the 1970s directive titles (digits removed).
dirs_1970_title %>%
  select(celex,title) %>%
  unnest_tokens(word, title) %>%
  count(celex, word, sort = TRUE) %>%
  filter(!grepl("\\d", word)) %>%
  bind_tf_idf(word, celex, n) %>%
  with(wordcloud(word, tf_idf, max.words = 40, scale = c(1.8,0.1)))
|
b0b51b2a62dbc48a9e3170e14040590f8235ed12 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/RWDataPlyr/R/yearmon_helpers.R | 5ab57b324be1ea3e96f3284ad2d521abbeb26012 | [
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,197 | r | yearmon_helpers.R |
#' Get the water year from a year-month (yearmon) value
#'
#' `ym_get_wateryear()` returns the water year (assumed to be October -
#' September) from a [zoo::yearmon] object.
#'
#' If the argument is not already a yearmon object, it will attempt to convert
#' it to a [zoo::yearmon]. This may result in unexpected results. For example,
#' the string `"12-1-1906"` can be converted to a [zoo::yearmon], however, it
#' will not convert to `"Dec 1906"` as you might desire. It will convert to
#' `"Jan 0012"` since it is not a format expected by [zoo::as.yearmon()].
#' Therefore, a warning is posted when the function attempts to convert to
#' [zoo::yearmon], and it is safer to ensure `ym` is already a [zoo::yearmon].
#'
#' @param ym An object of class [zoo::yearmon], or something that can be
#'   successfully converted to [zoo::yearmon].
#'
#' @return The water year as a numeric.
#'
#' @examples
#' ym_get_wateryear(zoo::as.yearmon(c("Dec 1906", "Oct 1945", "Jul 1955")))
#' ym_get_wateryear("2000-11")
#'
#' @export
ym_get_wateryear <- function(ym)
{
  if (!methods::is(ym, "yearmon")) {
    warning("ym, is not a yearmon object. attempting to convert to yearmon...")
    ym <- zoo::as.yearmon(ym)
    # `ym` may be a vector; the original tested `is.na(ym)` directly, which is
    # a length > 1 condition (an error since R 4.2) whenever several values
    # are supplied.  Fail if *any* element could not be converted.
    if (any(is.na(ym)))
      stop("could not convert ym to yearmon")
  }
  mm <- as.numeric(format(ym, '%m'))
  yy <- ym_get_year(ym)
  # if OND then increase year by one for water year, else leave it the same
  yy[mm > 9] <- yy[mm > 9] + 1
  yy
}
#' @export
#' @rdname ym_get_wateryear
getWYFromYearmon <- function(ym) {
  # Deprecated alias kept for backward compatibility: emit the standard
  # deprecation notice, then delegate to the current implementation.
  .Deprecated("ym_get_wateryear()")
  ym_get_wateryear(ym)
}
#' Get the year as a numeric from a yearmon object
#'
#' Could use lubridate::year(), but for now we are not depending on lubridate
#' @noRd
ym_get_year <- function(ym)
{
  # inherits() is the robust class test: `class(ym) == "yearmon"` yields a
  # logical vector (and an invalid if() condition) when the object carries
  # more than one class.
  if (!inherits(ym, "yearmon"))
    stop("ym in ym_get_year(ym) is not a yearmon object.")
  as.numeric(format(ym, "%Y"))
}
#' Get the full month name from a yearmon object
#' @noRd
ym_get_month_str <- function(ym)
{
  # inherits() rather than `class(ym) == "yearmon"`: the latter breaks for
  # objects with more than one class (see ym_get_year()).
  if (!inherits(ym, "yearmon"))
    stop("ym in ym_get_month_str(ym) is not a yearmon object.")
  format(ym, "%B")
}
|
922a1948ca611ca60194ab8afe55c8b9aeee0305 | 20a5e7d5029472281d875d7d9c5861ea3b25b9b6 | /Weekly_assignments/week08/Tuttle_in_class_assignment.R | 3568895a56b6660dc26c23e6973eb0df4c9ed73f | [] | no_license | mattjtuttle/biometry | 56f6ee8e03399ce6fc88947d8f6f83240635534e | 20d4321a9ed4ae4939c9543fbcf713c2b4d7f18e | refs/heads/master | 2021-03-27T08:48:58.953978 | 2017-12-13T00:40:53 | 2017-12-13T00:40:53 | 101,313,528 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,794 | r | Tuttle_in_class_assignment.R | # Name: Matthew Tuttle
# Week08 - In class assignment
# Imports data as a dataframe
dataset <- read.csv(file = "Weekly_assignments/week08/InClassData.csv", header = TRUE)
# Tests the assumptions of the linear model
# (visual checks followed by D'Agostino normality tests from fBasics)
hist(dataset$gap.area.m2)
hist(dataset$Nitrate.ppm)
library(fBasics)
normalTest(dataset$gap.area.m2, method = c("da")) # Is right skewed
normalTest(dataset$Nitrate.ppm, method = c("da")) # Is normal
# Square-root transform to correct the right skew in gap area.
gap.area.root <- sqrt(dataset$gap.area.m2)
hist(gap.area.root)
normalTest(gap.area.root, method = c("da")) # Is normal
# Adds transformed data to dataframe
dataset$gap.area.root <- gap.area.root
# Creates a linear model
model <- lm(formula = Nitrate.ppm ~ gap.area.root, data = dataset)
summary(model) # Shows slope statistically different from zero
# Calculates 95% confidence interval around the slope (for fitted data)
pred.frame <- data.frame(gap.area.root = seq(from = 0, to = 100, length = 200))
pred.conf <- predict(model, interval = "confidence", newdata = pred.frame)
cbind(pred.frame, pred.conf)
# Calculates 95% confidence interval around predicted values
pred.pred <- predict(model, interval = "prediction", newdata = pred.frame)
cbind(pred.frame, pred.pred)
# Graphs data with linear model, confidence interval around the slope, and estimated range of predicted values
plot(dataset$Nitrate.ppm ~ dataset$gap.area.root, xlab = "Square root gap area (m)", ylab = "Nitrogen (ppm)")
abline(model)
matlines(pred.frame, pred.conf, col = c("black", "red", "red"))
matlines(pred.frame, pred.pred, col = c("black", "blue", "blue"))
#####
# Given the above model, we can conclude that the slope of the model is statistically different from zero (p = 0.0392). This means that we can predict that larger gap sizes lead to lower concentrations of nitrogen in the soil.
|
755a98ff6acd6fc00240810360f50fcea80c784a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/eggCounts/R/fecr_stanSimple.R | 2c68e8d15fb982e13f24bf26d3b7cf12729a8cbf | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,669 | r | fecr_stanSimple.R | ###########################################################################
# Modelling the reduction in faecal egg count data (two-sample case) using Stan
###########################################################################

# main function -----------------------------------------------------------
# Fit the simple (non-zero-inflated) Bayesian model for the faecal egg count
# reduction in a paired (pre/post treatment) design.
#
# Arguments (interface unchanged):
#   preFEC, postFEC : egg counts before/after treatment, one value per animal
#   rawCounts       : if TRUE, counts are raw and no dilution is applied
#   preCF, postCF   : correction factors (a scalar or one value per animal)
#   muPrior, deltaPrior : prior specifications passed to fecr_setPrior()
#   nsamples, nburnin, thinning, nchain, ncore, adaptDelta : Stan controls
#   saveAll         : keep all parameters (TRUE) or only mu and delta
#   verbose         : show Stan progress/warnings
# Returns (invisibly) a list with the stanfit object and a posterior summary.
fecr_stanSimple <-function(preFEC, postFEC, rawCounts = FALSE, preCF = 50, postCF = preCF,
                           muPrior, deltaPrior, nsamples = 2000, nburnin=1000, thinning=1, nchain=2, ncore=1, adaptDelta=0.95,
                           saveAll = FALSE, verbose=FALSE){
  # checks from FECR_PoGa.R -------------------------------------------------
  # if (sys.parent() == 0) env <- asNamespace("eggCounts") else env <- parent.frame()
  # assign(".verboselevel", verbose*.verboselevel, envir = env)
  checkData(preFEC, postFEC, rawCounts, preCF, postCF)
  preN <- length(preFEC)
  postN <- length(postFEC)
  # Paired design: fail early on unequal group sizes.  (The original ran this
  # check only after the counts had already been rescaled, and recomputed
  # preN/postN and the correction-factor recycling a second time; those
  # duplicated statements have been removed.)
  if(preN != postN){
    stop("post sample size different to pre sample size\n")
  }
  # recycle scalar correction factors to one value per animal
  if (length(preCF)==1) preCF<-rep(preCF,preN)
  if (length(postCF)==1) postCF<-rep(postCF,postN)
  preDilution <- preCF; postDilution <- postCF
  if(rawCounts){
    preDilution <- postDilution <- 1
  }
  # divide data by correction factor
  preFEC <- preFEC/preDilution
  postFEC <- postFEC/postDilution
  # check function arguments
  checkpars(nburnin, nsamples, thinning, nchain, ncore, rawCounts, adaptDelta, verbose)
  # set default values
  priors <- fecr_setPrior(muPrior = muPrior, deltaPrior = deltaPrior)
  # set update functions
  code<-simple_paired_stan(priors);model<-"Simple Bayesian model without zero-inflation for paired design"
  # create data list for stan use
  epg_data <- list(J=preN, ystarbraw = preFEC, ystararaw = postFEC, fpre = preCF, fpost = postCF)
  if (saveAll){ savePars <- NA} else { savePars <- c("mu","delta")}
  # reuse the precompiled model when the default priors are in effect,
  # otherwise compile the generated Stan code
  if (length(setdiff(priors,fecr_setPrior()))==0){
    stanModel<-stanmodels$simple
  } else {
    stanModel <- stan_model(model_name=paste(model),model_code=code)
  }
  # whether or not to suppress progress information and errors
  if (verbose){
    samples <- sampling(stanModel, data = epg_data, pars = savePars, iter = nsamples, warmup=nburnin, chains=nchain,
                        thin=thinning,control = list(adapt_delta = adaptDelta),cores=ncore)
  } else {
    samples <- suppressMessages(
      suppressWarnings(
        sampling(stanModel, data = epg_data, pars = savePars, iter = nsamples,
                 warmup = nburnin, chains = nchain, thin = thinning,
                 control = list(adapt_delta = adaptDelta),cores = ncore, refresh = nsamples/4)))}
  checkDivergence(samples, adaptDelta)
  # posterior draws: mean epg before (mu), after (mu * delta), reduction (1 - delta)
  meanEPG.untreated<-extract(samples,"mu")[[1]]
  meanEPG.treated<-extract(samples,"mu")[[1]]*extract(samples,"delta")$delta
  FECR<-1-extract(samples,"delta")[[1]]
  result<-cbind(FECR,meanEPG.untreated,meanEPG.treated)
  cat("Model: ", model,"\n","Number of Samples: ",nsamples, "\n","Warm-up samples: ",nburnin,"\n","Thinning: ",thinning,"\n","Number of Chains",nchain,"\n")
  summarys<-as.data.frame(printSummary(result))
  checkConvergence(samples)
  # The original wrapped cat() in warning(), which prints the text to stdout
  # but raises an *empty* warning; raise a proper warning with the same text.
  if (preN + postN < 20) warning("your sample size is less than 10, consider using getPrior_mu() and getPrior_delta() to find a more informative prior for the true mean epg and reduction parameter.")
  return(invisible(list(stan.samples = samples, posterior.summary = summarys)))
}
# Draw the ellipse x^2/a^2 + y^2/b^2 = 1 on a fresh, margin-free plot window,
# outlining it in `line` and (optionally) filling it with `colour`.
plot_ellipse <- function(a, b, colour = NA, line = "black") {
  plot.new()
  plot.window(xlim = c(-a, a), ylim = c(-b, b), asp = 1)
  par(mar = rep(0, 4))
  xs <- seq(-a, a, length = 200)
  upper <- sqrt(b^2 - (b^2 / a^2) * xs^2)   # upper half of the ellipse
  lower <- -upper                           # mirrored lower half
  lines(xs, upper, col = line)
  lines(xs, lower, col = line)
  polygon(xs, upper, col = colour, border = NA)
  polygon(xs, lower, col = colour, border = NA)
}
# Reflect the incoming laser segment `coords` (a 2-row data.frame: previous
# point, current impact point) off the inside of the ellipse
# x^2/a^2 + y^2/b^2 = 1 and return the next segment as a data.frame with
# columns x and y.
# NOTE(review): reads the semi-axes `a` and `b` from the enclosing (global)
# environment, exactly as the original did.
bounce <- function(coords) {
  x <- coords$x
  y <- coords$y
  ## Tangent to ellipse at the impact point (x[2], y[2])
  # renamed from `t` to stop shadowing base::t()
  slope_tangent <- -(b^2 / a^2) * (x[2] / y[2])
  ## Deflection on sloping mirror y = mx + c
  dydx <- diff(y) / diff(x)
  m <- tan(pi - atan(dydx) + 2 * atan(slope_tangent))
  # renamed from `c` to stop shadowing base::c()
  intercept <- y[2] - m * x[2]
  ## Determine intersection point
  ## Source: http://www.ambrsoft.com/TrigoCalc/Circles2/Ellipse/EllipseLine.htm
  x[1] <- x[2]
  y[1] <- y[2]
  roots <- (-a^2 * m * intercept + c(-1, 1) * (a * b * sqrt(a^2 * m^2 + b^2 - intercept^2))) /
    (a^2 * m^2 + b^2)
  ## Keep the root that is not (to 6 decimals) the current impact point;
  ## a scalar if/else replaces the original's vectorized ifelse().
  x[2] <- if (round(x[1] / roots[1], 6) == 1) roots[2] else roots[1]
  y[2] <- m * x[2] + intercept
  data.frame(x, y)
}
# Initial conditions: "white cell" ellipse 4x^2 + y^2 = 100 (Project Euler 144),
# laser entering at (0, 10.1) and first striking the wall at (1.4, -9.6).
a <- 5
b <- 10
x1 <- 0
y1 <- 10.1
x2 <- 1.4
y2 <- -9.6
answer <- 0
plot_ellipse(a, b)
# Mark the foci at (0, +/- sqrt(b^2 - a^2)).  The original called
# `points(c(0,0), c(-c, c))`, but no variable `c` exists at top level, so
# `-c` tried to negate the base function c() and errored at runtime.
foc <- sqrt(b^2 - a^2)
points(c(0, 0), c(-foc, foc), pch = 19)
## Bounce laser beams until the ray exits through the gap at the top
laser <- data.frame(x = c(x1, x2), y = c(y1, y2))
while((laser$x[2] < -0.01 | laser$x[2] > 0.01) | laser$y[2] < 0) { ## Escape?
  lines(laser$x, laser$y, col = "red", lwd = .5)
  laser <- bounce(laser)
  answer <- answer + 1
}
answer
|
b88575b3cc89a1f17feade7f2d4697957a1a5e00 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/provParseR/tests/test_provParse4.R | cd3473c785fd9834aca9a1b72704ce91e232f761 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,875 | r | test_provParse4.R | library(provParseR)
library(testthat)
## Loading test data
# Parse the bundled prov3.json provenance file once; every accessor below is
# exercised against this single parsed object, and all expected counts/values
# are specific to testdata/prov3.json.
test.data <- system.file("testdata", "prov3.json", package = "provParseR")
prov <- prov.parse(test.data)
context("Environment access function")
envi.df <- get.environment(prov)
expect_match(class(envi.df), "data.frame")
expect_match(typeof(envi.df$value), "character")
expect_equal (envi.df$label, c("name", "architecture", "operatingSystem", "language", "langVersion",
                               "script", "scriptTimeStamp", "workingDirectory", "ddgDirectory", "ddgTimeStamp",
                               "hashAlgorithm"))
context ("Tool information")
tool.df <- get.tool.info(prov)
expect_match(class(tool.df), "data.frame")
expect_match(typeof(tool.df$tool.name), "character")
expect_match(typeof(tool.df$tool.version), "character")
expect_match(typeof(tool.df$json.version), "character")
expect_equal(nrow(tool.df), 1)
expect_equal(ncol(tool.df), 3)
expect_equal (tool.df$tool.name, "provR")
expect_equal (tool.df$tool.version, "1.0.1")
expect_equal (tool.df$json.version, "2.1")
context("Procedure nodes access function")
# Column types and the 5x9 shape expected for the procedure-node table.
proc.df <- get.proc.nodes(prov)
expect_match(class(proc.df), "data.frame")
expect_match(typeof(proc.df$name), "character")
expect_match(typeof(proc.df$type), "character")
expect_match(typeof(proc.df$elapsedTime), "double")
expect_match(typeof(proc.df$scriptNum), "integer")
expect_match(typeof(proc.df$startLine), "integer")
expect_match(typeof(proc.df$startCol), "integer")
expect_match(typeof(proc.df$endLine), "integer")
expect_match(typeof(proc.df$endCol), "integer")
expect_equal(nrow(proc.df), 5)
expect_equal(ncol(proc.df), 9)
context("Data nodes access function")
data.df <- get.data.nodes(prov)
expect_match(class(data.df), "data.frame")
expect_match(typeof(data.df$name), "character")
expect_match(typeof(data.df$value), "character")
expect_match(typeof(data.df$valType), "character")
expect_match(typeof(data.df$type), "character")
expect_match(typeof(data.df$scope), "character")
expect_match(typeof(data.df$fromEnv), "logical")
expect_match(typeof(data.df$hash), "character")
expect_match(typeof(data.df$timestamp), "character")
expect_match(typeof(data.df$location), "character")
expect_equal(nrow(data.df), 3)
expect_equal(ncol(data.df), 10)
context("Function nodes access function")
# This test file records no function nodes, so the table is empty.
func.df <- get.func.nodes(prov)
expect_match(class(func.df), "data.frame")
expect_equal(nrow(func.df), 0)
context("Procedure-to-procedure edges access function")
proc.proc.df <- get.proc.proc(prov)
expect_match(class(proc.proc.df), "data.frame")
expect_match(typeof(proc.proc.df$informant), "character")
expect_match(typeof(proc.proc.df$informed), "character")
expect_equal(nrow(proc.proc.df), 4)
expect_equal(ncol(proc.proc.df), 3)
context("Data-to-procedure edges access function")
data.proc.df <- get.data.proc(prov)
expect_match(class(data.proc.df), "data.frame")
expect_match(typeof(data.proc.df$entity), "character")
expect_match(typeof(data.proc.df$activity), "character")
expect_equal(nrow(data.proc.df), 2)
expect_equal(ncol(data.proc.df), 3)
context("Procedure-to-data edges access function")
proc.data.df <- get.proc.data(prov)
expect_match(class(proc.data.df), "data.frame")
expect_match(typeof(proc.data.df$entity), "character")
expect_match(typeof(proc.data.df$activity), "character")
expect_equal(nrow(proc.data.df), 3)
expect_equal(ncol(proc.data.df), 3)
context("Function-to-procedure edges access function")
func.proc.df <- get.func.proc(prov)
expect_match(class(func.proc.df), "data.frame")
expect_equal(nrow(func.proc.df), 0)
expect_equal(ncol(func.proc.df), 3)
context("Function-library group nodes access function")
func.lib.df <- get.func.lib(prov)
expect_match(class(func.lib.df), "data.frame")
expect_equal(nrow(func.lib.df), 0)
expect_equal(ncol(func.lib.df), 3)
context("Library nodes access function")
libs.df <- get.libs(prov)
expect_match(class(libs.df), "data.frame")
expect_match(typeof(libs.df$name), "character")
expect_match(typeof(libs.df$version), "character")
expect_equal(nrow(libs.df), 9)
expect_equal(ncol(libs.df), 3)
# Order-independent comparison of the recorded library names.
expect_setequal (libs.df$name, c("base", "datasets", "ggplot2", "graphics", "grDevices",
                                 "methods", "provR", "stats", "utils"))
context("Scripts access function")
scripts.df <- get.scripts(prov)
expect_match(class(scripts.df), "data.frame")
expect_equal (nrow (scripts.df), 1)
expect_match(typeof(scripts.df$script), "character")
expect_match(typeof(scripts.df$timestamp), "character")
expect_equal(ncol(scripts.df), 2)
context ("Input files")
input.files <- get.input.files (prov)
expect_equal (nrow (input.files), 0)
context ("Output files")
output.files <- get.output.files (prov)
expect_equal (nrow (output.files), 0)
context ("Variables set")
variables.set <- get.variables.set (prov)
expect_equal (nrow (variables.set), 3)
context ("Variables used")
variables.used <- get.variables.used (prov)
expect_equal (nrow (variables.used), 2)
|
efbb8722117b44b420ba3ef66661f6deff71e829 | d583df3fb1c43be89633d25a83c1c822af3f881f | /plot3.R | d17ca576c3acb30a936a76cd7cbfd8f45ac47090 | [] | no_license | nanchangcj6/ExData_Plotting1 | ac89924861b8a1c4757ff59b73c791e3b42d8323 | d0e4037f82dd2c4b3da33832bf396f49f483e5af | refs/heads/master | 2021-01-22T01:18:23.518897 | 2015-12-12T00:30:10 | 2015-12-12T00:30:10 | 47,676,470 | 0 | 0 | null | 2015-12-09T07:44:34 | 2015-12-09T07:44:34 | null | UTF-8 | R | false | false | 2,144 | r | plot3.R | # Exploratory Data Analysis course project one
# Recreating four plots based on a data subset of household energy use
# for 2 days in 2007
# The dataset must be read, subsetted and character dates/times changed
# into POSIXct datetime values and the plots are drawn.
# The data subsetting code is common to each R file so that any one can be run
# in isolation.
# The source data must be extracted from the zip file and saved at the location
# specified by the "file" variable
# This R script creates plot 3
# Use the sqldf library to subset a large file without having to read it all
# into memory and then subset
library(sqldf)
# source data file
file <- "~/datasciencecoursera/household_power_consumption.txt"
# Read and subset the data using sql type syntax from sqldf
# (the SQL WHERE clause filters to the two target days before anything is
# loaded into R memory)
df_data_subset <- read.csv.sql(file,
sql = "select * from file
where Date = '1/2/2007'
or Date = '2/2/2007'",
header = TRUE,
sep=";")
# tidy up to avoid warnings
closeAllConnections()
# Combine the character Date and Time fields into a new POSIXct column
df_data_subset$DateTime <- as.POSIXct(
paste(df_data_subset$Date, df_data_subset$Time),
format = "%d/%m/%Y %H:%M:%S")
# set the png graphics device with appropriate settings
png("~/datasciencecoursera/exploratory analysis/project_1/plot3.png",
width = 480,
height = 480,
units = 'px')
# Now draw the plot
plot(df_data_subset$DateTime,
df_data_subset$Sub_metering_1,
type = "l",
col = "black",
ylab = "Energy sub metering",
xlab = " "
)
# add the second variable to the y axis
lines(df_data_subset$DateTime,
df_data_subset$Sub_metering_2,
type = "l",
col = "red"
)
# and the third
lines(df_data_subset$DateTime,
df_data_subset$Sub_metering_3,
type = "l",
col = "blue"
)
# lastly the legend
legend("topright",
lwd = "1",
col = c("black","red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
)
# and close the graphics device
dev.off()
|
c51e455221601faa426016e2a0cbff3fc7e29e18 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TrackReconstruction/examples/gpsdata95.Rd.R | 2fba74cef550d0e3470273596c9a11317e7e2c87 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 250 | r | gpsdata95.Rd.R | library(TrackReconstruction)
### Name: gpsdata95
### Title: GPS raw data
### Aliases: gpsdata95
### Keywords: datasets

### ** Examples

# Auto-generated example extracted from the TrackReconstruction package's
# gpsdata95 help page: load the bundled GPS dataset, inspect its head,
# tail and structure, then plot the recorded track (longitude vs latitude).
data(gpsdata95)
head(gpsdata95);tail(gpsdata95)
str(gpsdata95)
plot(gpsdata95$Longitude,gpsdata95$Latitude)
|
f3f818333b8012dc8dfc7e8aef79d350db255811 | 97ae72e2f16ea8c9b8139f75e91a9beddfcbada8 | /man/Text.Highlight.Rd | 16824f44417da56a7949afe6b8b9353db86e4ab7 | [] | no_license | dCraigJones/pumpR | 0b2d485d684da04303ae8836a864ac35db077c67 | 92f5aae86a6e04136fd2a245e93a476b4b921277 | refs/heads/master | 2020-04-22T07:32:03.145258 | 2020-02-15T21:14:23 | 2020-02-15T21:14:23 | 170,219,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 553 | rd | Text.Highlight.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pumpR.R
\name{Text.Highlight}
\alias{Text.Highlight}
\title{Draws Text to Screen}
\usage{
Text.Highlight(Q, H, text, highlight_color = "white",
text_color = "red", TextSize = 0.75)
}
\arguments{
\item{Q}{Flow, GPM}
\item{H}{Head, FT}
\item{text}{Text to display}
\item{highlight_color}{Colour drawn behind the text (optional; defaults to \code{"white"})}
\item{text_color}{Colour of the text itself (optional; defaults to \code{"red"})}
\item{TextSize}{Relative text size (optional; defaults to \code{0.75})}
}
\value{
Plot
}
\description{
Draws Text to Screen
}
\examples{
Draw.Graph()
Text.Highlight(3000,50, "Test Point")
}
|
2b6ee6a0c7fec3656eb44267a75f296121fb06be | 52ab41eb2880f6fb1aa6e8d9bfe37bb024a7f687 | /sentiment_anal_predictions_galaxy.R | c44cc1ee3d3a06423d4a098e1b45ca25f2f7a287 | [] | no_license | gabordun/Sentiment_analyses_mobile_phones | 73dbfd77929a9e3ad75315533315b30e008283c3 | 033b5e3f422e70903a5b065de782d84a7abe1609 | refs/heads/master | 2020-12-06T03:18:49.048376 | 2020-05-18T21:09:46 | 2020-05-18T21:09:46 | 232,325,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,773 | r | sentiment_anal_predictions_galaxy.R | ##########################################################################################
##########################################################################################
##                                                                                      ##
#############         Sentiment analyses toward Samsung Galaxy                          ##
##                               predictions                                    ########
##  Author: Gabor Dunai                                                                 ##
##  Version: 1.0                                                                        ##
##  Date: 01.2020                                                                      ##
##                                                                                      ##
##########################################################################################
# Scores a large prediction matrix of web-mined documents with four
# previously trained classifiers (C5.0, SVM, random forest, weighted kNN)
# and prints the distribution of predicted sentiment classes.
####################    Part 0: directory, libraries, dataset     ################
####################    set directory, call libraries  ###########################
setwd("A:/B/Ubiqum/module4/sentiment_analyses")
library(dplyr)
library(ggplot2)
library(caret)
library(C50)
library(e1071)
library(gbm)
library(randomForest)
library(kknn)
library(mlbench)
library(export)
####################    optional: load saved environment  ########################
# NOTE(review): the loaded .RData must provide `nzv` (used below) — confirm
# it contains the saved near-zero-variance column indices from training.
load("A:/B/Ubiqum/module4/sentiment_analyses/sentiment_anal_galaxy.RData")
####################     optional: load models ##########################
# Deserialize the four trained models saved by the training script.
C50galaxy<-readRDS("A:/B/Ubiqum/module4/sentiment_analyses/C50galaxy.RDS")
SVMgalaxy<-readRDS("A:/B/Ubiqum/module4/sentiment_analyses/SVMgalaxy.RDS")
RFgalaxy<-readRDS("A:/B/Ubiqum/module4/sentiment_analyses/RFgalaxy.RDS")
kknngalaxy<-readRDS("A:/B/Ubiqum/module4/sentiment_analyses/kknngalaxy.RDS")
####################    import data   ############################
# Semicolon-separated feature matrix of documents to score.
Predictiongalaxy<-read.csv('A:/B/Ubiqum/module4/sentiment_analyses/largematrixgalaxy.csv',
                           header=TRUE, sep=";",fill=TRUE)
####################    preprocessing  ############################
# filter out irrelevant WAPs and unnecessary features
# NOTE(review): `nzv` is not defined in this script; it comes from the
# .RData loaded above — the script errors if that load is skipped.
PredictiongalaxyR<-Predictiongalaxy[,-nzv]
####################    predictions   ############################
# Score the new data with each model.
Predict_C50galaxy<-predict(C50galaxy,PredictiongalaxyR)
Predict_SVMgalaxy<-predict(SVMgalaxy,PredictiongalaxyR)
Predict_RFgalaxy<-predict(RFgalaxy,PredictiongalaxyR)
Predict_kknngalaxy<-predict(kknngalaxy,PredictiongalaxyR)
####################    results   ############################
# Class distribution of the predicted labels, per model.
summary(Predict_C50galaxy)
summary(Predict_SVMgalaxy)
summary(Predict_RFgalaxy)
summary(Predict_kknngalaxy)
####################    end of script   ##############################
49ad992b6f399d4281a425935485e900c32dfbfb | 77ef0a8557aac9450bc70eb5673d5b14e003e4bc | /inst/GENT/server.R | 8a2cf6626023b279110f88a6bfede71cc85edc85 | [] | no_license | SAFE-ICU/GeneExpressionNetworkToolkit | d124495e61e119fae21a3b688951f9b42cc68f30 | 23038aa10335a44fa2906df5ee7f0aaf8c7515e3 | refs/heads/master | 2020-03-30T14:48:30.313547 | 2018-11-03T10:25:06 | 2018-11-03T10:25:06 | 151,336,857 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 19,194 | r | server.R | #setwd("C://Networks//code//")
# Make sure GEOmetadb.sqlite is present in the working directory. Also update the GEOsqlite
# file to the latest version
# other libraries install pbapply, limma, shinyBS,shinythemes, shiny, networkd3,
# change working directory in all code files
## For first time running:
# Install libraries
# source("http://bioconductor.org/biocLite.R")
# biocLite("BiocUpgrade")
# biocLite("GEOmetadb")
# biocLite("Biobase")
# biocLite("limma")
# biocLite("edgeR")
# getSQLiteFile(destdir = getwd(), destfile = "GEOmetadb.sqlite.gz")
# install.packages("ggplot2")
# install.packages("philentropy")
# install.packages("minerva")
# install.packages("BNN")
# install.packages("data.table")
# install.packages("plotly")
# install.packages("pbapply")
# install.packages("reshape2")
# install.packages("igraph")
library(GEOmetadb)
library(Biobase)
# Shiny server for the Gene Expression Network Toolkit (GENT).
#
# Pipeline implemented as a chain of eventReactives: query GEOmetadb for
# datasets matching an organism/disease keyword -> fetch expression +
# metadata for a chosen GDS id -> normalize -> differential expression ->
# association matrix -> edgelist / network -> community detection ->
# random-forest evaluation and feature selection.  Several steps delegate
# to external scripts via sys.source() into a fresh environment.
server <- function(input, output) {
  # Takes an action every time button is pressed;
  # printing a message to the console for log history
  observeEvent(input$goButton, {
    cat("Showing max genes", input$geneids, "rows\n")
  })
  # Take a reactive dependency on input$button, but
  # not on any of the stuff inside the function
  library(GEOmetadb)
  library(Biobase)
  library(ggplot2)
  library(philentropy)
  library(minerva)
  library(MEGENA)
  library(BNN)
  #---download the latest version of GEOdb---#
  #getSQLiteFile(destdir = getwd(), destfile = "GEOmetadb.sqlite.gz")
  # Connection to the local GEOmetadb SQLite snapshot (must exist in wd).
  con <- dbConnect(SQLite(),'GEOmetadb.sqlite')
  # Fragments of the GDS search query; organism and disease keywords are
  # pasted between them at query time.
  query_str1 <- paste("SELECT * FROM gds WHERE sample_organism LIKE '%")
  query_str2 <- paste("%' AND sample_type LIKE '%RNA")
  query_str3 <- paste("%' AND description LIKE '%")
  query_str4 <- paste("%'")
  # view all datasets (GDSIDs) for input keyword
  df <- eventReactive(input$goButton, {
    dio <- paste(input$organism)
    dis <- paste(input$disease)
    query <- paste(query_str1, dio, query_str2, query_str3, dis, query_str4,sep="")
    gds_subset <- dbGetQuery(con, query)
    g <- subset(gds_subset,gds_subset$feature_count >= input$geneids)
    library(data.table)
    # Exclude time-course studies (matched against the description column).
    a <- c("timecourse","time series","time-series","timeseries","timepoint","time point","time-point","time-course","time course")
    g[- grep(paste(a,collapse="|"),g[,3]), ]
  })
  # NOTE(review): `query` is only defined inside the reactives above, so this
  # top-level print references an undefined name when the server function is
  # evaluated — it only "works" if a global `query` happens to exist.
  print(dbGetQuery(con, query))
  output$download_df <- renderDataTable({df()})
  #view expression data
  expressiondata <- eventReactive(input$expbutton, {
    #options('download.file.method.GEOquery'='auto')
    # Rebuild the same GDS query as df() (duplicated logic).
    dio <- paste(input$organism)
    dis <- paste(input$disease)
    query <- paste(query_str1, dio, query_str2, query_str3, dis, query_str4,sep="")
    print(query)
    gds_subset <- dbGetQuery(con, query)
    gds_subset <- subset(gds_subset,gds_subset$feature_count >= input$geneids)
    if (interactive()) {
      #download data -- ////
      # Handler to export the raw expression table as CSV.
      output$downloadData <- downloadHandler(
        filename = function() {
          paste('data-', Sys.Date(), '.csv', sep='')
        },
        content = function(con) {
          gds_id<-gds_subset[,2]
          gds <- getGEO(gds_id[which(gds_id == input$gdsid)])
          mdata <- Columns(gds)
          edata <- Table(gds)
          #write.csv(mdata, con)
          write.csv(edata, con)
        })
    }
    # Fetch the selected GDS record and return its expression table.
    gds_id<-gds_subset[,2]
    gds <- getGEO(gds_id[which(gds_id == input$gdsid)])
    Table(gds)
  })
  output$expdata <- renderDataTable({expressiondata()})
  #view metadata
  metadata <- eventReactive(input$metbutton, {
    dio <- paste(input$organism)
    dis <- paste(input$disease)
    query <- paste(query_str1, dio, query_str2, query_str3, dis, query_str4,sep="")
    print(query)
    gds_subset <- dbGetQuery(con, query)
    gds_subset <- subset(gds_subset,gds_subset$feature_count >= input$geneids)
    gds_id<-gds_subset[,2]
    gds <- getGEO(gds_id[which(gds_id == input$gdsid)])
    Columns(gds)})
  output$metdata <- renderDataTable({metadata()})
  #normalization
  # Applies the user-chosen normalization to the expression matrix
  # (columns 3..n are sample values; columns 1-2 are ID_REF/IDENTIFIER).
  # NOTE(review): this re-downloads the GDS instead of reusing
  # expressiondata()/metadata().
  norm_data <- eventReactive(input$normbutton, {
    dio <- paste(input$organism)
    dis <- paste(input$disease)
    query <- paste(query_str1, dio, query_str2, query_str3, dis, query_str4,sep="")
    print(query)
    gds_subset <- dbGetQuery(con, query)
    gds_subset <- subset(gds_subset,gds_subset$feature_count >= input$geneids)
    gds_id<-gds_subset[,2]
    gds <- getGEO(gds_id[which(gds_id == input$gdsid)])
    mdata <- Columns(gds)
    edata <- Table(gds)
    resul <- NULL
    params <- NULL
    # log2 transform
    if(input$normmethods == 'log') {
      transform_data<- log(edata[,(3:ncol(edata))],base=2)}
    # quantile normalization (limma)
    if(input$normmethods == 'qtl'){
      library(limma)
      transform_data <- normalizeQuantiles(edata[,3:ncol(edata)])}
    # z-score scaling
    if(input$normmethods == 'zs'){
      transform_data <- as.data.frame(scale(edata[,3:ncol(edata)]))}
    # linear (min-max style) scaling around the mean, per sample column
    if(input$normmethods == 'lw'){
      x <- edata[,3:ncol(edata)]
      for (i in 1:ncol(x)) {
        resul <- cbind(resul, (x[, i] - mean(x[, i]))/(max(x[, i]) - min(x[, i])))}
      transform_data <- as.data.frame(resul)}
    # robust scaling: (x - median) / MAD, per sample column
    if(input$normmethods == 'qsp'){
      x <- edata[,3:ncol(edata)]
      for (i in 1:ncol(x)) {
        resul <- cbind(resul, (x[, i] - median(x[, i]))/(mad(x[, i])))}
      transform_data <- as.data.frame(resul)}
    # log2 followed by quantile normalization
    if(input$normmethods == 'lqt'){
      log_transform<- log(edata[,(3:ncol(edata))],base=2)
      library(limma)
      transform_data <- normalizeQuantiles(log_transform)}
    # Re-attach probe identifiers and use them as row names.
    transform_data["ID_REF"] = edata$ID_REF
    transform_data["IDENTIFIER"] = edata$IDENTIFIER
    rownames(transform_data) <- transform_data$ID_REF
    a <- expressiondata()[,-(1:2)]
    transform_data
  })
  output$norm_df <- renderDataTable({norm_data()})
  library(plotly)
  #View plots
  # Side-by-side boxplots of raw vs normalized expression (last two columns
  # of norm_data() are the re-attached ID columns, hence the trimming).
  output$plot1 <-
    #renderPlot({ggplotly(norm_data()[,-((ncol(norm_data())-1): (ncol(norm_data())))], aes(x=cond, y=rating))+geom_boxplot()},height = 400,width = 600)
    renderPlot({boxplot(expressiondata()[,-(1:2)], main = "Raw expression data")})
  output$plot2 <- renderPlot({boxplot(norm_data()[,-((ncol(norm_data())-1): (ncol(norm_data())))], main = "Normalized data")})
  # download button for raw vs norm plots ////
  plotInput <- function(){boxplot(norm_data())}
  output$downloadPlot <- downloadHandler(
    filename = function() { paste('Shiny', '.pdf', sep='') },
    content = function(file) {
      pdf(file)
      boxplot(expressiondata()[,-(1:2)], main = "Raw expression data")
      boxplot(norm_data()[,-((ncol(norm_data())-1): (ncol(norm_data())))], main = "Normalized data")
      #print(plotInput())
      #plotInput2()
      dev.off()
      #ggsave(png(file), plot = plotInput())
    })
  #Minimal gene List
  # Runs the external minimal_gl.R script in a fresh environment seeded with
  # the normalized data, metadata and a 0/1 disease indicator derived from
  # the metadata's second column (factor level index - 1).
  min_gl <- eventReactive(input$minimal, {
    e <- new.env()
    e$edata <- norm_data()
    e$idf <- input$gdsid
    #read.csv("exprsdata.csv")   ##expressiondata()
    e$mdata <- metadata()
    diseased <- matrix(metadata()[,2])
    s <- as.data.frame(levels(metadata()[,2]))
    for(i in (1:nrow(metadata())))
    {
      diseased[i] <- which(grepl(metadata()[i,2], s$`levels(metadata()[, 2])`)) - 1
    }
    e$diseased <- diseased
    #read.csv("metadata.csv")    ##metadata()
    sys.source("minimal_gl.R", e)
    #feature_selection_output()
    e$fetch_gene_expression_df
  })
  output$mini_gl <- renderDataTable({min_gl()})
  ##-----------------------CUSTOM ANALYSIS TAB------------------------------------##
  #differentially expressed genes
  # Tests each probe across the groups in metadata column 2 using the chosen
  # method (one-way ANOVA, limma eBayes, or edgeR LRT) and keeps probes whose
  # (optionally FDR-adjusted) p-value is below input$pval.
  diffexp_df <- eventReactive(input$diffexpbutton, {
    edata <- expressiondata()
    mdata <- metadata()
    if(input$raw_df == TRUE){ test <- edata[,-(1:2)]}
    if(input$norm_df == TRUE){test <- norm_data()[,-((ncol(norm_data())-1): (ncol(norm_data())))]}
    #rownames(test) <- edata[,2]
    #test <- test[(1:500),]
    test[is.na(test)] <- as.numeric(0)
    new<- t(test)
    splitted_groups <- mdata[,2]
    #give option to users to split by disease and cell type
    required_format<- cbind(splitted_groups, new)
    tempdf <- as.data.frame(required_format)
    tempdf$splitted_groups <- as.factor(tempdf$splitted_groups)
    splitted_groups <- split(tempdf,tempdf$splitted_groups)
    if(input$diffexmethods == 'ano') {
      require(pbapply)
      pv <- pbapply(tempdf[,-1], 2, function(x){
        oneway.test(x ~ tempdf$splitted_groups,data=tempdf[,-1])$p.value
      })
      pvalue <- data.frame(pv)}
    if(input$diffexmethods == 'eb'){
      m <- as.data.frame(tempdf[,-1])
      colnames(m) <- NULL
      rownames(m) <- NULL
      sapply(m, class)
      design <- as.numeric(tempdf[,1])
      fit <- lmFit(t(m),design = design)
      options(scipen = 999)
      pvalue <- eBayes(fit)$p.value
      pvalue <- data.frame(pvalue)}
    if(input$diffexmethods == 'edr') {
      require(edgeR)
      f <- matrix(1, nrow = nrow(tempdf), ncol = 2)
      x <- 0:(nrow(tempdf)-1)
      f[,2]=x
      d <- DGEList(t(tempdf[,-1]))
      # Fit the NB GLMs
      disp <- estimateDisp(d, f)$common.dispersion
      fit <- glmQLFit(d ,f,dispersion = disp)
      # Likelihood ratio tests for trend
      pv <- glmLRT(fit)$table$PValue
      pvalue <- data.frame(as.numeric(pv))}
    # Treat probes with missing p-values as non-significant.
    pvalue[is.na(pvalue)] <- as.numeric(1)
    #colnames(pvalue)
    #false discovery rate:-
    if(input$checkbox_fdr==TRUE){
      fdr <- p.adjust(pvalue[,1], method="fdr")
      testdata <- cbind(fdr,test)
      deg<-subset(testdata,testdata$fdr < input$pval)
    }
    else{
      testdata <- cbind(pvalue,test)
      colnames(testdata)[1] <- "pv"
      deg<-subset(testdata,testdata$pv < input$pval)
      #deg[,-1]
    }
    #deg[,-1]
    deg["ID_REF"] = rownames(deg)
    deg
  })
  output$diff_exp <- renderDataTable({diffexp_df()})
  ##get diff exp dataset
  ## get association matrix
  #Edgelist
  # Builds a gene-gene association matrix over the differentially expressed
  # probes using the selected similarity/distance index.
  assoc_matrix <- eventReactive(input$assoc_but, {
    deg <- diffexp_df()
    drop <- c("fdr", "ID_REF")
    deg_droped = deg[,!(names(deg) %in% drop)]
    if(input$assindex == 'pcor') {
      assoc <- cor(t(deg_droped), method = "pearson")
    }
    if(input$assindex == 'mine') {
      assoc <- mine(t(deg_droped))$MIC
      colnames(assoc) <- colnames(t(deg_droped))
    }
    if(input$assindex == 'jac') {
      assoc <- distance(deg_droped, method = "jaccard")
      colnames(assoc) <- colnames(t(deg_droped))
    }
    if(input$assindex == 'cos') {
      assoc <- distance(deg_droped, method = "cosine")
      colnames(assoc) <- colnames(t(deg_droped))
    }
    if(input$assindex == 'simp') {
      assoc <- distance(deg_droped, method = "sorensen")
      colnames(assoc) <- colnames(t(deg_droped))
    }
    if(input$assindex == 'geo') {
      assoc <- distance(deg_droped, method = "minkowski")
      colnames(assoc) <- colnames(t(deg_droped))
    }
    if(input$assindex == 'hgeo') {
      assoc <- cor(t(deg_droped), method = "spearman")
    }
    assoc})
  output$ass_matrix <- renderDataTable({assoc_matrix()})
  #correlation
  #wgcna
  #adjacency = adjacency(datExpr, power = softPower)
  #-----------------------------------------------------------------------------------------#
  #Edgelist
  # Melts the association matrix into an edgelist, builds a planar filtered
  # network (MEGENA::calculate.PFN) and keeps the top input$frac_genes
  # percent of edges.
  edgelist_table <- eventReactive(input$edglist_but, {
    #mi_nonzero = adjacency(assoc_matrix(), power = softPower)
    ab <- input$frac_genes
    mi_nonzero <- assoc_matrix()
    diag(mi_nonzero) <- 0
    require(reshape2)
    edgelist_mi_nonzero <- melt(mi_nonzero)
    edglist <- calculate.PFN(as.data.frame(edgelist_mi_nonzero))
    ac <- nrow(edglist)
    ad <- as.numeric(ab)*as.numeric(ac)/100
    edglist <- edglist[1:ad,]
    #require(reshape2)
    #edgelist_mi_nonzero <- melt(mi_nonzero)
    #dec_order <- edgelist_mi_nonzero[order(edgelist_mi_nonzero$value , decreasing = TRUE),]
    #dec_order <- subset(dec_order,dec_order$value!=0)
    #edglist <- calculate.correlation(dec_order,method = "pearson",FDR.cutoff = 0.05)
    #per = as.numeric(input$frac_genes)*nrow(dec_order))
    #el <- dec_order[(1:((as.numeric(input$frac_genes))/100*(nrow(dec_order)))),]
  })
  output$edgelist <- renderDataTable({edgelist_table()})
  #-------------------------------------------------------------------------------#
  # Persists the current state (disease indicator, edgelist, metadata,
  # expression data, DE table) to CSVs consumed by the external scripts.
  # NOTE(review): the `data` argument is never used — the function writes
  # the reactives directly.
  saveData <- function(data) {
    diseased <- matrix(metadata()[,2])
    s <- as.data.frame(levels(metadata()[,2]))
    for(i in (1:nrow(metadata())))
    {
      diseased[i] <- which(grepl(metadata()[i,2], s$`levels(metadata()[, 2])`)) - 1
    }
    write.csv(diseased,row.names = FALSE, "Diseased.csv")
    write.csv(edgelist_table(),row.names = FALSE, "Edgelist_top_10prcnt_genes.csv")
    write.csv(metadata(),row.names = FALSE, "metadata.csv")
    write.csv(expressiondata(),row.names = FALSE, "exprsdata.csv")
    write.csv(diffexp_df(),row.names = FALSE, "Diff_Exp_Data.csv")
  }
  #Gene Network Plot
  # NOTE(review): passes the reactive *function* (not its value) to
  # saveData; harmless only because saveData ignores its argument.
  getNetwork<-function() {
    saveData(edgelist_table)
    return(source("networkd3.R"))
  }
  output$network_graph<-renderUI({getNetwork()})
  #------------------------------------------------------------------------------#
  #View modules
  #infomap <- function(x){
  #return(source("imap_comm_detect.R"))
  # return(source("imap_comm_detect.R", local = TRUE))
  #}
  # Runs infomap community detection (external script) on the edgelist and
  # shows the members of the module selected by input$module_num.
  comm_det_modules <- eventReactive(input$map_but, {
    require(igraph)
    e <- new.env() ## new.env so exchange variables btw source file and main file
    e$edgelist_el <- edgelist_table()
    sys.source("imap_comm_detect.R", e)
    #infomap(edgelist_el)
    module_view <- as.data.frame(e$imap_comm_det[input$module_num])
    module_view
  })
  output$map <- renderDataTable({comm_det_modules()})
  #-------------------------------------------------------------------------#
  #Network characteristics
  # Computes summary network statistics via an external script.
  net_chars <- eventReactive(input$net_chars_but, {
    require(igraph)
    e <- new.env() ## new.env so exchange variables btw source file and main file
    e$edgelist_el <- edgelist_table()
    sys.source("imap_characteristics.R", e)
    #infomap(edgelist_el)
    aj <- as.data.frame(e$comm_list)
    aj
  })
  output$cec <- renderDataTable({net_chars()})
  #------------------------------------------------------------------------#
  ##RF error rate between modules
  #rf_modules <- function(){return(source("random_forest_modules.R"))}
  # Random-forest error rate per module (external script).
  rf_modules_df <- eventReactive(input$mod_rf_but, {
    e <- new.env()
    e$edata <- norm_data()
    #read.csv("exprsdata.csv")   ##expressiondata()
    e$mdata <- metadata()
    #read.csv("metadata.csv")    ##metadata()
    e$edgelist <- edgelist_table()
    sys.source("random_forest_modules.R", e)
    e$rf_csv
    #rf_modules()
    #rf_csv
  })
  output$rf_module_plot <- renderPlot({
    input$mod_rf_but
    boxplot(rf_modules_df(), xlab = "Modules", ylab = "Random forest error rate")
  })
  # download button for RF b/w modules plot- ////
  plotInput <- function(){
    boxplot(rf_modules_df())}
  output$download_rf_Plot <- downloadHandler(
    filename = function() { paste('RF_btw_modules', '.pdf', sep='') },
    content = function(file) {
      png(file)
      boxplot(rf_modules_df())
      dev.off()})
  #------------------------------------------------------------------------#
  #RF error rate vs network parameters
  #degree centrality
  #rf_degree_centrality <- function(){
  #return(source("random_forest_network_params.R"))
  #}
  # RF error rate against a chosen network parameter (degree centrality or
  # edge betweenness), delegated to an external script via `net_param`.
  rf_deg_cent_res <- eventReactive(input$plot_degcen_but, {
    e <- new.env()
    e$edata <- norm_data()
    #read.csv("exprsdata.csv")   ##expressiondata()
    e$mdata <- metadata()
    #read.csv("metadata.csv")    ##metadata()
    e$edgelist <- edgelist_table()
    if(input$netpars == 'dcen'){
      e$net_param = 'degree_cent'
    }
    else if(input$netpars == 'bet'){
      e$net_param = "edge_bet"
    }
    #e <- new.env()
    #e$edata <- read.csv("exprsdata.csv")  ##expressiondata()
    #e$mdata <- read.csv("metadata.csv")   ##metadata()
    #e$edgelist <- edgelist_table()
    sys.source("random_forest_network_params.R", e)
    e$rf_result
    #rf_degree_centrality()
    #rf_result
  })
  output$rf_degcen_plot <- renderPlot({
    input$plot_degcen_but
    # Fit and overlay a linear trend; its slope is shown in the title.
    rf_result_df <- as.data.frame(rf_deg_cent_res())
    rf_result_reqd <- cbind(rownames(rf_result_df),rf_result_df)
    rf_result_numeric <- apply(rf_result_reqd, 2 , as.numeric)
    m <- lm(rf_result_numeric[,2] ~ rf_result_numeric[,1])
    plot(rf_deg_cent_res(), type="p",main= bquote("Slope"== .(m$coefficients[2])),
         xlab="Models with probes of high degree to low", ylab="Error rate of RF model")
    abline(m, col="red")
  })
  # download button for RF degree centrality plot- ////
  plotInput <- function(){
    plot(rf_deg_cent_res(), type="p",main= "Out Of Bag Error",
         xlab="Models with probes of high degree to low", ylab="Error rate of RF model")
  }
  output$download_rf_degcen <- downloadHandler(
    filename = "RF_deg_cent.png",
    content = function(file) {
      png(file)
      plot(rf_deg_cent_res(), type="p",main= "Out Of Bag Error",
           xlab="Models with probes of high degree to low", ylab="Error rate of RF model")
      dev.off()})
  #-----------------------------------------------------------------------------------#
  #Feature selection
  #feature_selection_output <- function(){
  #return(source("feature_selection.R"))
  #}
  #feature selection output df with conf, tent and rejected gene exp data info
  # Bayesian-neural-network feature selection via external script.
  fs_genes_df <- eventReactive(input$fs_but, {
    e <- new.env()
    e$edata <- norm_data()
    e$idf <- input$gdsid
    #read.csv("exprsdata.csv")   ##expressiondata()
    e$mdata <- metadata()
    #read.csv("metadata.csv")    ##metadata()
    e$edgelist <- edgelist_table()
    sys.source("feature_selection_bnn.R", e)
    #feature_selection_output()
    e$fetch_gene_expression_df
  })
  output$fs_bor_df <- renderDataTable({fs_genes_df()})
  #-----------------------------------------------------------------------------------#
  #Final confirmed gene list(with gene symbols) from feature selection bor
  #taken selected genes from each module and combined
  final_gene_df <- eventReactive(input$gene_list_but, {
    fs_genes_df()[,(1:2)]
    #gene_df
  })
  output$final_genes <- renderDataTable({final_gene_df()})
  #------------------------------------------------------------------------------------#
  #Boxplot for comparison of predictive accuracy of selected genes
  #vs differentially expressed genes
  # Runs RF on the gene sets selected by Boruta or BNN (external script).
  rf_modules_d <- eventReactive(input$plot_fs, {
    e <- new.env()
    e$edata <- norm_data()
    #read.csv("exprsdata.csv")   ##expressiondata()
    e$mdata <- metadata()
    #read.csv("metadata.csv")    ##metadata()
    e$edgelist <- edgelist_table()
    if(input$ppc == 'bor') {
      e$algo <- 'bor'
    }
    if(input$ppc == 'bnn') {
      e$algo <- 'bnn'
    }
    sys.source("random_forest_genes.R", e)
    e$rf_csv
  })
  output$fs_comp_plot <- renderPlot({
    input$plot_fs
    boxplot(rf_modules_d(), xlab = "Genes",names = c("Differentially expressed genes" ,"Feature selected genes"), ylab = "Random forest error rate")
  })
  # download button for RF b/w modules plot- ////
  plotInput <- function(){
    boxplot(rf_modules_d())}
  output$download_comparison <- downloadHandler(
    filename = "RF_Genes.png",
    content = function(file) {
      png(file)
      #plotInput()
      boxplot(rf_modules_d())
      dev.off()})
  #last closing bracket
}
|
28f3384c0509cee4c071ff8fbcf9525df16dd381 | cc925ab2d80219b6fa587717e19030ad9594b8d8 | /plot4.R | ec148f886675fc3abbd62f951cb4eae0cdb00026 | [] | no_license | fengtasy/ExData_Plotting1 | 52a793bda9c31373a73b74bc01b5856bf633c95f | 71c7c05775d8b0937ce018032ea890bb348dd905 | refs/heads/master | 2021-01-12T05:42:46.413514 | 2016-12-25T06:36:36 | 2016-12-25T06:36:36 | 77,173,890 | 0 | 0 | null | 2016-12-22T20:30:02 | 2016-12-22T20:30:01 | null | UTF-8 | R | false | false | 1,901 | r | plot4.R | ## 1.1) Download and Unzip Data Set ####
## Course project: plot 4 -- a 2x2 panel of household power consumption
## plots for 1-2 February 2007.
## 1.1) Download and Unzip Data Set ####
# setwd("./Week1")
getwd()
if(!file.exists("./data")){
  dir.create("./data")
}
zip_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zip_path <- "./data/household_power_consumption.zip"
# FIX: only download the (large) archive when it is not already on disk,
# instead of re-downloading on every run.
if (!file.exists(zip_path)) {
  download.file(zip_url, zip_path)
}
file <- unzip(zip_path, exdir = "data")

## 1.2) Read required rows of data into R ####
# grep the two target dates out of the raw text so read.table only parses
# the rows we need; "?" marks missing values in the source file.
hpc <- read.table(text = grep("^[1,2]/2/2007", readLines(file), value=TRUE)
                  , header = FALSE, sep = ";", na.strings="?"
                  , col.names = c("Date", "Time", "Global_active_power"
                                  , "Global_reactive_power", "Voltage"
                                  , "Global_intensity", "Sub_metering_1"
                                  , "Sub_metering_2", "Sub_metering_3"))

## 1.3) Convert date and time from factor class to date and date-time class ####
# (dropped the previous `tz = ""` argument: as.Date.character ignores it)
hpc$Date <- as.Date(hpc$Date, format = "%d/%m/%Y")
hpc$DateTime <- as.POSIXct(paste(hpc$Date, hpc$Time), format = "%Y-%m-%d %H:%M:%S")

# Plot 4 ####
png(filename = "plot4.png", width = 480, height = 480, units="px")

## Plot on 2x2 Canvas
par(mfrow = c(2,2))

## 1st Plot: global active power over time
plot(hpc$DateTime, hpc$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")

## 2nd Plot: voltage over time
plot(hpc$DateTime, hpc$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

## 3rd Plot: the three sub-metering series with a legend
plot(hpc$DateTime, hpc$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
points(hpc$DateTime, hpc$Sub_metering_2, type = "l", col = "red")
points(hpc$DateTime, hpc$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd = 1)

## 4th Plot: global reactive power over time
plot(hpc$DateTime, hpc$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
|
390a6fbf4ec31fdbd6d9de1b9f27d42929136ac4 | 7a7375245bc738fae50df9e8a950ee28e0e6ec00 | /R/SA4__Year_DwellingStructure_occupied_private_dwellings.R | d8d6e8927969d56e700ee720bad7307d97986d2e | [] | no_license | HughParsonage/Census2016.DataPack.TimeSeries | 63e6d35c15c20b881d5b337da2f756a86a0153b5 | 171d9911e405b914987a1ebe4ed5bd5e5422481f | refs/heads/master | 2021-09-02T11:42:27.015587 | 2018-01-02T09:01:39 | 2018-01-02T09:02:17 | 112,477,214 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 233 | r | SA4__Year_DwellingStructure_occupied_private_dwellings.R | #' @title DwellingStructure by SA4, Year
#' @description Number of occupied_private_dwellingsDwellingStructure by SA4, Year
#' @format 1,605 observations and 4 variables.
"SA4__Year_DwellingStructure_occupied_private_dwellings"
|
561299afb31f10ab569a1ce770f76d1db78e99a6 | 229a06eff625c59a3813050aec6d07b1b9041d96 | /OtherPackages/DAVIDQuery/R/formatList.R | 2a4a77b100b2c9765af07207f12ea27d93631c02 | [] | no_license | rikenbit/PubMedQuery | 75e161dec8cf792ef5d9be669bb8447c21e8bf3a | 964eeb30436ef93b8f1b34c216f15e8cbad51fef | refs/heads/master | 2021-01-10T20:07:15.630529 | 2014-08-07T08:31:54 | 2014-08-07T08:31:54 | 22,540,329 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 182 | r | formatList.R | formatList <- function(result){
names(result) <- result[1, ]
result <- result [-1, ]
#try(dimnames(result)[[1]] <- result[,1])
#dimnames(result)[[1]] <- 1:nrow(result)
result
}
|
9ab12d5b096e4c94ffd5399867b4dd1a7ffbd6d3 | e70e9c9a62d2876d961faf6e4e3f9ba6f69f5ea9 | /multiple regression mtcars.R | ac5ab7b084b1658a6d581d08d48502ea5d03b593 | [] | no_license | arunkumar561/analytics1 | 8e0d0572f3737565883d208ca809f5a720707de6 | 887f9572c5922cc22e11af0d91ee144b32347aa0 | refs/heads/master | 2020-04-02T16:33:16.615898 | 2018-10-28T12:35:46 | 2018-10-28T12:35:46 | 154,617,601 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 454 | r | multiple regression mtcars.R | #https://cran.r-project.org/web/packages/olsrr/olsrr.pdf
#install.packages('olsrr')
library(olsrr)
names(mtcars)
mtcars[,c('disp','hp','wt','qseu')]
model <- lm(mpg ~ disp + hp + wt + qsec, data = mtcars)
k <- ols_step_all_possible(model)
plot(k)
k
fit3 = lm(mpg~hp+wt+qsec, data=mtcars)
summary(fit3)
summary(lm(mpg ~ wt, data=mtcars))
summary(lm(mpg ~ wt+ hp, data=mtcars))
AIC(fit3)
fit4 = lm(mpg~.,data=mtcars)
summary(fit4)
AIC(fit4)
?R-square
|
657c2f9db9f80489c600a7232b2ab8b15d3e4d06 | 30e009eede387af1ffbe759145b47ccb55e74a2a | /Files/Vault Stats R Script/vault_stats.R | 826171f40e3c12e8b1a112cff7b5a37f2b3a0add | [
"MIT"
] | permissive | MichaelHettmer/FAQ | fd7525f9cba5612053c1a16b9f32bc3dbc8c380e | f67e7e0692edc70b6dc367bf0caff93ee57eca7e | refs/heads/master | 2023-04-11T12:24:01.060724 | 2021-02-25T19:12:31 | 2021-02-25T19:12:31 | 351,403,281 | 1 | 0 | MIT | 2023-04-04T01:40:21 | 2021-03-25T10:58:52 | null | UTF-8 | R | false | false | 4,391 | r | vault_stats.R | #!/usr/bin/env Rscript
# Install/Attach necessary packages to session
pkgs <- c('tidyverse','tidytext','tokenizers','DiagrammeR','glue')
xfun::pkg_attach2(pkgs)
#===============================================================#
#################### Environmental Variables ####################
vault_path <- '/Users/bryanjenks/Documents/Knowledge'
#===============================================================#
#===============================================================#
#################### Global Variables ####################
total_word_count <- 0
total_file_count <- 0
top_5_words <- 0
chart_string <- ""
aggregate_dataframe <- tibble(word = character())
#===============================================================#
# The actual processing of the text
analyze <- function(files) {
# For each file in the vault
for (file in fileNames) {
words <- read_file(file) %>%
# remove `[[` and `]]` so the link words are recognized as text too
gsub("\\[\\[", "", .) %>%
gsub("\\]\\]", "", .) %>%
gsub("\\n", " ", .) %>%
gsub("\\_+", "", .) %>%
# tokenize the words
tokenize_words(strip_punct = TRUE,
strip_numeric = TRUE,
simplify = TRUE,
lowercase = TRUE) %>%
# Make the words into a data frame
unlist() %>%
as_tibble()
# Append result data frame to global data frame i.e. data_frame += 1
aggregate_dataframe <<- rbind(aggregate_dataframe, words)
}
}
# Load files from vault
setwd(vault_path) # Sorry Jenny Bryan!
fileNames <-list.files(vault_path, recursive = TRUE, pattern = "*.md")
# fileNames <- Sys.glob('*.md')
# Lets start analyzing!
analyze(fileNames)
# Get a data frame of the aggregation of words and their counts
distinct_words <- aggregate_dataframe %>%
count(word = value,name = 'count',sort = TRUE)
# Remove the stop words
stop_words_removed <- distinct_words %>%
anti_join(stop_words, by = c('word' = 'word'))
# 'bing' is better for binary sentiment
# but 'nrc' produces nicer visuals 🤷
sentiments <- get_sentiments("nrc")
# Take initial raw aggregate column of words (with dupes)
word_sentiments <- aggregate_dataframe %>%
# Add sentiments to the data frame
left_join(sentiments, by = c('value' = 'word')) %>%
# Any missing sentiment words are removed
filter(!is.na(sentiment)) %>%
# Select only the columns we need
select(sentiment) %>%
# count frequency of sentiments
count(sentiment)
# word_sentiments <- summarise_at(group_by(word_sentiments,sentiment),vars(counts),funs(sum(.,na.rm=TRUE)))
# Get the values for each sentiment into vector for subset assignment
nums <- word_sentiments[[2]]
# Assign each sentiment its value count
anger <- nums[1]
anticipation <- nums[2]
disgust <- nums[3]
fear <- nums[4]
joy <- nums[5]
negative <- nums[6]
positive <- nums[7]
sadness <- nums[8]
surprise <- nums[9]
trust <- nums[10]
#======================================================#
# GLOBAL VAR -- Assign total word count
total_word_count <- length(aggregate_dataframe[[1]])
# GLOBAL VAR -- Assign top 5 words minus stop words
top_5_words <- stop_words_removed %>%
slice_max(count, n = 5)
word_5 <- top_5_words[[1]]
count_5 <- top_5_words[[2]]
# GLOBAL VAR -- Count of files
total_file_count <- length(fileNames)
# GLOBAL VAR --Result Chart
chart_string <- glue("```mermaid
pie title Vault Sentiment
\"anger\": {anger}
\"anticipation\": {anticipation}
\"disgust\": {disgust}
\"fear\": {fear}
\"joy\": {joy}
\"negative\": {negative}
\"positive\": {positive}
\"sadness\": {sadness}
\"surprise\": {surprise}
\"trust\": {trust}
```")
#======================================================#
# Create the presentation string of what actually gets written to the file
# and displayed in Obsidian
output_string <- glue("
# Vault Analysis
## Stats
**File Count:** {total_file_count}
**Word Count:** {total_word_count}
**Top 5 Words:**
| Word | Frequency |
|:----|:-----------|
| {word_5[1]} | {count_5[1]} |
| {word_5[2]} | {count_5[2]} |
| {word_5[3]} | {count_5[3]} |
| {word_5[4]} | {count_5[4]} |
| {word_5[5]} | {count_5[5]} |
## Visualization
{chart_string}
")
fileConn<-file("Vault Stats.md")
writeLines(output_string, fileConn)
close(fileConn)
|
235b2ee9c7c9b668fe571f24c452d69de9e8b1fb | 6570d9082e247c7f91d7c23970cc674989641417 | /simulation_fix_throat/summary/all_figure.R | 09d8cf29a1de12945a4fe37bd29a8009b7103cc5 | [] | no_license | fionarhuang/treeclimbR_article | d936e6426f4d9cbe46fd36581403703d93842b97 | af24899e4157b9de6b2c3129ad01a0218fb86e83 | refs/heads/master | 2023-05-10T14:53:42.421063 | 2021-04-11T10:37:18 | 2021-04-11T10:37:18 | 257,983,356 | 1 | 0 | null | 2021-04-11T10:08:02 | 2020-04-22T18:15:19 | PostScript | UTF-8 | R | false | false | 2,010 | r | all_figure.R |
# Assembles the main and supplementary figures for the fixed-throat
# microbiome simulation: sources per-panel scripts (which define fig1-fig3,
# p_roc, fig_bottom, fig_loc) and composes them with cowplot, writing EPS
# output.  NOTE: the sourced scripts read the variables `reso` and `s`
# from this script's environment, so statement order matters here.
suppressPackageStartupMessages({
  library(TreeHeatmap)
  library(ggnewscale)
  library(ggplot2)
  library(ggnewscale)
  # NOTE(review): ggnewscale is attached twice above (harmless duplicate).
  library(dplyr)
  library(ggtree)
  library(cowplot)
  library(scales)
})

# ---------------------- load data ----------------------
### Edit this!!!
# resolution: low or high (low for the supplementary figure)
reso <- "low"

# schematic scenarios: BS, US, SS (defines fig1, fig2, fig3)
source("summary/scenario.R")
fig_scene <- plot_grid(fig1 +
                         theme(plot.margin = unit(c(2, -5, 0, 5),"mm")),
                       fig2 +
                         theme(plot.margin = unit(c(2, -5, 0, 5),"mm")),
                       fig3 +
                         theme(plot.margin = unit(c(2, -5, 0, 5),"mm")),
                       labels = c("BS", "US", "SS"), nrow = 3,
                       hjust = -8, vjust = 3,
                       label_size = 10)

# TPR vs FDR (defines p_roc)
source("summary/fig_tpr_fdr_reso.R")
p_roc

# heatmap: bottom (panel c); uses the 5th simulation repetition
s <- 5
source("summary/DA_heatmap.R")

# Compose the top row (schematics + ROC) and stack the heatmap below it.
fig_up <- plot_grid(fig_scene,
                    p_roc +
                      theme(plot.margin = unit(c(2, -10, 0, -20),"mm")),
                    nrow = 1,
                    labels = c("a.", "b."),
                    rel_widths = c(0.4, 1), rel_heights = c(1, 1))
fig_cb <- plot_grid(fig_up, fig_bottom, nrow = 2,
                    labels = c(" ", "c."),
                    vjust = 0.5,
                    rel_heights = c(1.6, 1))

if (reso == "high") {
  cbPath <- file.path(sprintf("summary/figure/microbe_%s_simu.eps", reso))
}

# Supplementary Figure 2
if (reso == "low") {
  cbPath <- file.path(sprintf("summary/figure/Supplementary_microbe_%s_simu.eps", reso))
}
ggsave(cbPath, fig_cb, units = "in", width = 8, height = 8,
       dpi = 300)

# Clear the workspace before building the second supplementary figure.
rm(list = ls())

# Supplementary Figure 1 (defines fig_loc)
s <- 5
source("summary/microbe_loc.R")
cbPath <- file.path("summary/figure/Supplementary_microbe_loc.eps")
ggsave(cbPath, fig_loc, units = "in", width = 8, height = 8,
       dpi = 300)
8ed16f0cf91e2c68d73dba1908adfb96d6dc0602 | 9da4a27d8921bbcbd2f0719757710b0770ad3646 | /R/cluster-create.R | bd998ab22b17e8fc860732af351ed01c10e4fa7a | [] | no_license | Ax3man/multidplyr | 1731834b3680a4d8d797d8d635fe628f3c86065d | 8c12d639458e62473be9e19bf16119ce800b6bee | refs/heads/master | 2021-01-16T21:47:17.172882 | 2016-06-08T13:41:33 | 2016-06-08T13:41:33 | 51,756,840 | 0 | 1 | null | 2016-02-15T13:21:01 | 2016-02-15T13:21:01 | null | UTF-8 | R | false | false | 1,101 | r | cluster-create.R | #' Create a new cluster with sensible defaults.
#'
#' Clusters created with this function will automatically clean up after
#' themselves.
#'
#' @importFrom parallel detectCores makePSOCKcluster
#' @param cores Number of cores to use. If \code{NA} will uses at least two
#' cores, but if you have extra it leaves one free for other stuff.
#' @param quiet If \code{TRUE}, don't display initialisation message.
#' @export
create_cluster <- function(cores = NA, quiet = FALSE) {
if (is.na(cores)) {
cores <- guess_cores()
}
if (!quiet) message("Initialising ", cores, " core cluster.")
cluster <- parallel::makePSOCKcluster(cores)
attr(cluster, "finaliser") <- cluster_stop(cluster)
cluster
}
# Pick a default worker count: exactly 2 under R CMD check, otherwise all
# detected cores minus one spare (but never fewer than 2, and 1 on a
# single-core machine).
guess_cores <- function() {
  if (in_check()) {
    return(2L)
  }

  available <- parallel::detectCores()
  if (available == 1L) {
    1L
  } else {
    pmax(2L, available - 1L)
  }
}
# Build a finaliser for cluster `x`: returns an environment that, when
# garbage collected, calls parallel::stopCluster() on the cluster captured
# in its closure.  Attaching this environment to the cluster object (see
# create_cluster) ties the workers' lifetime to the cluster's reachability.
cluster_stop <- function(x) {
  reg.finalizer(environment(), function(...) {
    parallel::stopCluster(x)
  })
  # Return the function's own environment; it is what carries the finaliser.
  environment()
}
# Heuristic for "are we running under R CMD check?": the check process
# works inside a "<pkg>.Rcheck" directory, so look for such a component
# anywhere in the current working-directory path.
in_check <- function() {
  components <- strsplit(getwd(), "/", fixed = TRUE)[[1]]
  any(grepl("Rcheck$", components))
}
1cb6ccefbc10fb386d90c57c5545ceafa3d6d854 | 95da0dc7ddc9dd18a7494d11d5ff0f4106a53bf0 | /Labs/Hubway_Pt1/20180628_LeilaErbay_Lab4.R | 1570347179e126288bd5b7309e9f935b1fce8511 | [] | no_license | LeilaErbay/DataAnalytics | c1e4097e3322ad3d202a586fa2648807e389bf5c | 5db7bcb24d0e29e3d75900d8aa054fb65c4a802c | refs/heads/master | 2020-03-23T15:11:06.295227 | 2018-11-15T21:38:33 | 2018-11-15T21:38:33 | 141,728,777 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,227 | r | 20180628_LeilaErbay_Lab4.R | #Lab 4 - Hubway Pt I
#Author: Leila Erbay
# Analysis script: expects hubway_stations.csv and hubway_trips.csv in the
# working directory. Inline #-comments record observed results for this data.
###### PART A #############
# NOTE(review): readxl is loaded but never used below (read.csv does the
# reading); the library() call appears to be leftover.
library(readxl)
stations <- read.csv("hubway_stations.csv")
colNamesStations <- colnames(stations)
# id, terminal, station, municipal, lat, lng, status
uniqueStns <- unique(stations$station)
uniqueStnsAmt <- length(uniqueStns)
#137 unique stations
existingStns <- subset(stations, stations$status == "Existing")
uniqueExistingStns <- unique(existingStns)
uniqueExistStnsAmt <-length(uniqueExistingStns$station)
#130 unique existing stations
############ PART B #######################
trips <- read.csv("hubway_trips.csv")
numCols <- ncol(trips) #13 Col
numRows <- nrow(trips) #350615 Row
uniqueZipCodes <- unique(trips$zip_code) #375 Unique Zip Codes
uniqueBikeIDs <- unique(trips$bike_nr) #882 unique bikes
# plyr::count tallies rides per bike; column 2 of the result is the count.
library(plyr)
numRides <- count(trips,"bike_nr")
maxValue <- numRides[which.max(numRides[ ,2 ]),] #B00401
minValue <- numRides[which.min(numRides[,2]),] #T01093
#duration of each bike: need bike, duration
# count() with wt_var = "duration" sums duration per bike into `freq`.
duration <- count(trips, "bike_nr", "duration")
##OTHER WAYS TO FIND DURATION
#duration <- aggregate(duration~bike_nr, data= trips, sum)
#duration <- summaryBy(duration~bike_nr,data = trips,FUN= sum )
#duration <- data.frame(tapply(trips$duration, trips$bike_nr, sum))
longestRidden <- duration[which.max(duration[, 'freq']),] #B00585
shortestRidden <- duration[which.min(duration[, 'freq']),] #T01380
#SPECIFYING BIKE B00585
longestRiddenDF <- trips[trips$bike_nr == "B00585",]
freqEnd <- count(longestRiddenDF, "end_statn")
mostFreqEnd <- freqEnd[freqEnd$freq == max(freqEnd$freq),]
#OTHER WAYS TO FIND MAX FREQUENT END STATION
# freqEnd2 <- table(longestRiddenDF$end_statn)
# index <- which.max(freqEnd2)
# freqEnd2[index]
#freqEnd2 <- summaryBy(bike_nr~end_statn, data = longestRiddenDF,FUN= length)
#freqEnd2 <- aggregate(bike_nr~end_statn, data = longestRiddenDF, FUN =length)
#TAPPLY:
# freqEnd2 <- data.frame(tapply(longestRiddenDF$hubway_id,longestRiddenDF$end_statn, length))
# library(data.table)
# freqEnd2 <- setDT(freqEnd2, keep.rownames = T)[]
# mostFreqEnd2 <- freqEnd2[which.max(as.numeric(unlist(freqEnd2[,2]))),]
freqStart <- count(longestRiddenDF, "strt_statn")
mostFreqStart <- freqStart[freqStart$freq == max(freqStart$freq),]
#OTHER WAYS TO FIND MAX FREQUENT START STATION
# freqStart2 <-table(longestRiddenDF$strt_statn)
# mostFreqStart2 <- freqStart2[which.max(freqStart2)]
#freqStart2 <- summaryBy(bike_nr~strt_statn, data = longestRiddenDF,FUN= length)
#freqStart2 <- aggregate(bike_nr~strt_statn, data = longestRiddenDF, FUN =length)
#TAPPLY:
# freqStart2 <- data.frame(tapply(longestRiddenDF$hubway_id, longestRiddenDF$strt_statn, length))
# freqStart2 <- setDT(freqStart2, keep.rownames=T)[]
# mostFreqStart2 <- freqStart2[which.max(as.numeric(unlist(freqStart2[,2]))),]
#stationStart <- freqStart2[which.max(freqStart2$tapply.longestRiddenDF.hubway_id..longestRiddenDF.strt_statn..),]
#stationStart <- freqStart2[which.max(freqStart2$bike_nr),]
#for bike B00585
# Map the most frequent start/end station ids back to station names.
mostFreqEndSttn <- stations[stations$id == mostFreqEnd$end_statn,]
mostFreqEndSttn$station #South Station
mostFreqStartSttn <- stations[stations$id == mostFreqStart$strt_statn,]
mostFreqStartSttn$station #South Station
|
c476c84b9188bafee975a337f9f59a2a9e698c15 | 03eda003a1c853ab0b765fb6e691b4b8b5063ac9 | /Course Project 2/plot2.R | 6286a07860a2587b2a859738f9fd44d0ea26218d | [] | no_license | vidyavnv/Exploratory_Data | 78bdcff5e69678d8cca41bcf9cf337a2b2fe139a | 213dd8fe24ba625338c74cada812d45e65216ee5 | refs/heads/master | 2016-09-05T20:58:49.272668 | 2014-09-21T17:25:26 | 2014-09-21T17:25:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 448 | r | plot2.R | ## read data
# Script: plot total PM2.5 emissions in Baltimore City (fips 24510) by year.
# Expects ./data/summarySCC_PM25.rds; writes plot2.png to the working dir.
NEI <- readRDS("./data/summarySCC_PM25.rds")
## Get rows for Baltimore city
data <- subset(NEI,fips=='24510')
## aggregate emissions by year
finalData <- aggregate(data[c("Emissions")],list(year=data$year),sum)
## plot aggregated data
png('plot2.png', width=480, height=480)
plot(finalData$year,finalData$Emissions,type = "l", main = "Total Emissions from PM2.5 in the Baltimore City",xlab = "Year", ylab = "Emissions")
dev.off() |
fa7b60a8bfc16f14a099fa39cd2bfb3b79bc8fb3 | 21ff5ea3b757191829a9bdc6010efd6b0bfa0bbd | /R/meanStdGMCMC.R | f96e1e797719ff6f4335f0d7b342aae3dc3fe09d | [] | no_license | GHBolstad/evolvability | 80b4dba8c1e6cfb579d8680739eca625eab76457 | 599d2380d2ac23feb5d71dac544a2527797af8a8 | refs/heads/master | 2023-02-08T15:40:18.127117 | 2023-02-06T12:11:40 | 2023-02-06T12:11:40 | 59,743,186 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,938 | r | meanStdGMCMC.R | #' Mean standardize the posterior distribution of a G-matrix
#'
#' \code{meanStdGMCMC} mean standardizes the posterior distribution of a
#' variance matrix (e.g. a G-matrix)
#'
#' @param G_mcmc A posterior distribution of a variance matrix in the form of a
#' table. Each row in the table must be one iteration of the posterior
#' distribution (or bootstrap distribution). Each iteration of the matrix must
#' be on the form as given by \code{c(x)}, where \code{x} is a matrix. A
#' posterior distribution of a matrix in the slot \code{VCV} of a object of
#' class \code{MCMCglmm} is by default on this form.
#' @param means_mcmc A posterior distribution of a vector of means in the form
#' of a table. Each row in the table must be one iteration of the posterior
#' distribution (or bootstrap distribution). A posterior distribution of a
#' mean vector in the slot \code{Sol} of an object of class \code{MCMCglmm} is
#' by default on this form.
#' @return The posterior distribution of a mean standardized variance matrix.
#' @author Geir H. Bolstad
#' @examples
#' # Simulating a posterior distribution
#' # (or bootstrap distribution) of a G-matrix:
#' G <- matrix(c(1, 1, 0, 1, 4, 1, 0, 1, 2), ncol = 3)
#' G_mcmc <- sapply(c(G), function(x) rnorm(10, x, 0.01))
#' G_mcmc <- t(apply(G_mcmc, 1, function(x) {
#' G <- matrix(x, ncol = sqrt(length(x)))
#' G[lower.tri(G)] <- t(G)[lower.tri(G)]
#' c(G)
#' }))
#'
#' # Simulating a posterior distribution
#' # (or bootstrap distribution) of trait means:
#' means <- c(1, 1.4, 2.1)
#' means_mcmc <- sapply(means, function(x) rnorm(10, x, 0.01))
#'
#' # Mean standardizing the G-matrix:
#' meanStdGMCMC(G_mcmc, means_mcmc)
#' @keywords array algebra multivariate
#' @export
meanStdGMCMC <- function(G_mcmc, means_mcmc) {
  # Bind means and G entries per iteration so one apply() pass sees both.
  X <- cbind(means_mcmc, G_mcmc)
  n1 <- ncol(means_mcmc)  # number of traits
  n2 <- ncol(X)           # traits + flattened G entries (n1 + n1^2)
  # Mean standardization divides each element G[i, j] by z[i] * z[j], i.e.
  # elementwise by the flattened outer product of the trait means.
  # Bug fix: `x[1:n1] %*% x[1:n1]` (no t()) is the scalar sum of squared
  # means, which divides the whole matrix by |z|^2 instead.
  t(apply(X, 1, function(x) x[(n1 + 1):n2] / c(x[1:n1] %*% t(x[1:n1]))))
}
|
65627a004bb5dc2a831bd63dadd0007097f3a713 | aabcea565c9f428789c4e6e9716cff0489aefd5f | /R/plots.R | 316ca7c6270ee3e3ad6078f09f12ba0295e6dc31 | [
"MIT"
] | permissive | hesselberthlab/scrunchy | 643581f97e814803776853b885de913434a74662 | 5b755b5e878c882d0732aee9dbaf4a8a8c2a01d3 | refs/heads/master | 2020-04-03T03:18:01.818990 | 2019-11-06T15:50:52 | 2019-11-06T15:50:52 | 154,981,735 | 3 | 2 | NOASSERTION | 2019-11-06T15:50:54 | 2018-10-27T15:51:53 | R | UTF-8 | R | false | false | 10,192 | r | plots.R | # Plots -------------------------------------------------------------
#' Scatter plot of cells in a two-dimensional embedding.
#'
#' This is the base plot for superimposing annotations like cell types,
#' cluster assignments, and measured activities.
#'
#' Embeddings can be calculated by [`calc_umap()`] and [`calc_tsne()`], and
#' retrieved with [`tidy_dims()`].
#'
#' @param df plot data
#' @param x variable for x-axis
#' @param y variable for y-axis
#' @param color variable for point colors (default is black)
#' @param size size for [`geom_point`]
#' @param alpha alpha for [`geom_point`]
#' @param palette palette for continuous colors. One of cloupe (the default),
#' brewer, viridis.
#' @param labels labels for groups
#' @param label_legend add labels to legend
#' @param label_groups add labels to points
#'
#' @examples
#' plot_dims(fsce_tidy, UMAP1, UMAP2, size = 1)
#'
#' plot_dims(fsce_tidy, UMAP1, UMAP2, IL7R, size = 1)
#'
#' plot_dims(fsce_tidy, UMAP1, UMAP2, Uracil_45, size = 1)
#'
#' plot_dims(fsce_tidy, UMAP1, UMAP2, k_cluster, size = 1)
#'
#' plot_dims(fsce_tidy, UMAP1, UMAP2, k_cluster, labels = LETTERS[1:6])
#'
#' plot_dims(fsce_tidy, UMAP1, UMAP2, k_cluster,
#' labels = LETTERS[1:6], label_groups = TRUE)
#'
#' @family plot functions
#'
#' @importFrom forcats fct_count
#'
#' @export
plot_dims <- function(df, x, y, color = "cell_id",
                      size = 0.1, alpha = 1,
                      palette = "cloupe",
                      labels = NULL,
                      label_legend = TRUE,
                      label_groups = FALSE) {
  # Capture bare column references so callers write plot_dims(df, UMAP1, UMAP2).
  # The default `color` is the string "cell_id", which is character and thus
  # takes the discrete branch below.
  x <- enquo(x)
  y <- enquo(y)
  color <- enquo(color)
  # Base scatter layer; all annotation layers are added onto `p` below.
  p <- ggplot(df, aes(x = !!x, y = !!y)) +
    geom_point(
      aes(color = !!color),
      size = size, alpha = alpha
    )
  ## theme default
  p <- p + cowplot::theme_minimal_grid(line_size = 0.2)
  if (is_discrete(pull(df, !!color))) {
    ## get labels
    # n_colors() also validates that `labels` matches the number of levels.
    n_col <- n_colors(df, color, labels)
    ## legend aesthetics
    p <- p + guides(
      colour = guide_legend(
        override.aes = list(size = 4)
      )
    ) + theme(legend.title = element_blank())
    ## color aesthetics
    # Caller-supplied labels are used only when label_legend is TRUE;
    # otherwise fall back to the observed factor levels.
    if (label_legend && !is.null(labels)) {
      lbls <- labels
    } else {
      lbls <- n_col$f
    }
    p <- p + scale_color_manual(
      values = discrete_palette_default,
      labels = lbls
    )
  } else {
    # Continuous variable: anchor the scale at shared limits so panels are
    # comparable, then pick one of the three supported palettes.
    # NOTE(review): an unrecognized `palette` value silently leaves ggplot's
    # default continuous scale — confirm whether that should be an error.
    llim <- legend_limits(df, color)
    if (palette == "cloupe") {
      p <- p + scale_color_gradientn(
        colors = loupe_palette,
        limits = llim
      )
    } else if (palette == "viridis") {
      p <- p + scale_color_viridis_c(
        option = "inferno",
        limits = llim,
        direction = -1
      )
    } else if (palette == "brewer") {
      p <- p + scale_color_distiller(
        palette = "Reds",
        limits = llim,
        direction = 1
      )
    }
  } # discrete?
  # Optionally draw `labels` at the median position of each color group.
  if (label_groups) {
    p <- add_group_labels(p, x, y, color, labels)
  }
  p
}
#' Plot multiple 2D plots in a grid
#'
#' @param df plot data
#' @param features list of features
#' @param ... params to pass to [`plot_dims()`]
#'
#' @examples
#' plot_dims_multi(
#' fsce_tidy,
#' features = c("k_cluster", "Uracil_45", "IL7R", "GNLY"),
#' x = UMAP1, y = UMAP2, size = 0.5
#' )
#'
#' @export
plot_dims_multi <- function(df, features, ...) {
  # Build one plot_dims() panel per feature name, splicing the name in as
  # a bare column reference via sym() + unquoting.
  panels <- lapply(features, function(feature) {
    plot_dims(df, color = !!sym(feature), ...)
  })
  cowplot::plot_grid(plotlist = panels)
}
#' Plot activities per cluster
#'
#' Generates a beeswarm plot of activity across specified groups
#'
#' @param data data to plot
#' @param activity activity variable
#' @param group grouping variable
#' @param labels legend labels
#' @param vertical should activity be on the y axis. Default is FALSE
#' @param stats dataframe to use to add p values on plot. Stats can be a dataframe output from [`stat_activity_grouped()`].
#' Output must include columsn with names group, group1, q.value and/or p.value. If sepcificied, `vertical` is TRUE
#' @param ... params for [`add_stats()`]
#'
#'
#' @examples
#' plot_activity(fsce_tidy, Uracil_45, k_cluster)
#'
#' plot_activity(fsce_tidy, riboG_44, k_cluster, labels = LETTERS[1:6])
#'
#' plot_activity(fsce_tidy, Uracil_45, k_cluster, vertical = TRUE)
#'
#' x <- fsce_tidy[c("k_cluster", "Uracil_45")]
#' stats <- stat_activity_grouped(x, group = k_cluster)
#' stats <- subset(stats, q.value < 0.01)
#' stats[,"y_loc"] = seq(max(x$Uracil_45), max(x$Uracil_45) + 3, length.out = length(stats$group))
#' plot_activity(fsce_tidy, Uracil_45, k_cluster, stats = stats)
#'
#' @family plot functions
#'
#' @export
plot_activity <- function(data, activity, group = NULL, labels = NULL, vertical = FALSE,
                          stats = NULL, ...) {
  # Default (horizontal) orientation: activity on x, group on y.
  x <- enquo(activity)
  y <- enquo(group)
  group <- enquo(group)
  groupOnX = FALSE
  x_lab = "Activity"
  y_lab = "Group"
  # Counts the group levels (and validates `labels` length if supplied).
  n_col <- n_colors(data, group, labels)
  # Significance bars only make sense with groups on the x axis, so force
  # vertical orientation whenever stats are supplied.
  if(!is.null(stats)){
    vertical <- TRUE
  }
  if(vertical){
    # NOTE(review): `group` was already captured as a quosure above, so
    # enquo() here wraps an already-forced quosure; rlang squashes nested
    # quosures so this works, but reusing the earlier capture would be
    # clearer — confirm before refactoring.
    x <- enquo(group)
    y <- enquo(activity)
    groupOnX = TRUE
    x_lab = "Group"
    y_lab = "Activity"
  }
  p <- ggplot(data, aes(x = !!x, y = !!y, color = !!group)) +
    ggbeeswarm::geom_quasirandom(size = 0.5, groupOnX = groupOnX) +
    scale_color_OkabeIto(use_black = TRUE, labels = labels %||% n_col$f) +
    cowplot::theme_cowplot() +
    labs(x = x_lab, y = y_lab)
  # Overlay significance comparison bars when a stats table is provided.
  if(!is.null(stats)){
    p <- add_stats(p, stats, ...)
  }
  p
}
#' Heatmap of signals
#'
#' Plots `logcounts` or `counts` from an experiment for specified rows.
#'
#' @import ComplexHeatmap
#'
#' @param mtx Matrix of `logcounts` or `counts`
#' @param rows names of rows to select for heatmap
#' @param columns names of columns to select for heatmap
#' @param ... params for [`ComplexHeatmap::Heatmap`]
#'
#' @examples
#' mtx <- SingleCellExperiment::logcounts(fsce_small[["haircut"]])
#' rows <- paste("Uracil", 1:61, sep = "_")
#'
#' plot_heatmap(mtx, rows, name = "Uracil")
#'
#' @family plot fuctions
#'
#' @export
plot_heatmap <- function(mtx, rows = NULL, columns = NULL, ...) {
  # Optionally subset the assay matrix by row / column names.
  if (!is.null(rows)) {
    mtx <- mtx[rows, ]
  }
  if (!is.null(columns)) {
    mtx <- mtx[ ,columns]
  }
  # Densify sparse input for ComplexHeatmap. inherits() is the idiomatic
  # class test (replaces `class(mtx) %in% c("dgCMatrix")`) and also matches
  # subclasses of dgCMatrix.
  if (inherits(mtx, "dgCMatrix")) {
    mtx <- as.matrix(mtx)
  }
  # Transpose so cells become rows, and strip cell-id rownames from display.
  mtx <- t(mtx)
  rownames(mtx) <- NULL
  ComplexHeatmap::Heatmap(mtx, cluster_columns = FALSE, ...)
}
#' Plot PCA variance
#'
#' Plots proportion of variance explained by computed prinicpal components
#'
#' @param fsce An object of class [`FunctionalSingleCellExperiment`].
#' @param n_dims specify the number of dimensions from "dr" to use for
#' clustering, defaults to all dimensions
#' @param expt Data to use for calculating variable features
#' (default is `rnaseq`). Must be present in `names(fsce)`.
#' @examples
#' plot_pcvariance(fsce_small)
#'
#' @family plot fuctions
#'
#' @export
plot_pcvariance <- function(fsce, n_dims = NULL, expt = "rnaseq") {
  # Validate the requested experiment and that PCA has been computed on it.
  if (!expt %in% names(fsce)) {
    stop(glue("expt `{expt}` not found in fsce "), call. = FALSE)
  }
  if (!"PCA" %in% names(reducedDims(fsce[[expt]]))) {
    stop("PCA values not found in expt", call. = FALSE)
  }
  variance_tbl <- pcvariance_tbl(fsce[[expt]])
  # Optionally restrict the plot to the first n_dims components.
  if (!is.null(n_dims)) {
    variance_tbl <- variance_tbl[1:n_dims, ]
  }
  ggplot(variance_tbl, aes(PCs, `Variance Explained`)) +
    geom_point() +
    cowplot::theme_cowplot()
}
# Palettes ----------------------------------------------------------
# Continuous palette mimicking 10x Loupe: reversed RdGy with the middle
# gray entries dropped (indices 1:5 and 7 of the 11-color ramp).
loupe_palette <- rev(scales::brewer_pal(palette = "RdGy")(11)[c(1:5, 7)])
#' @noRd
#' @include reexport-colorblindr.R
# Large discrete palette: colorblind-safe Okabe-Ito colors first, then
# several ColorBrewer sets appended so many-cluster plots never run out.
discrete_palette_default <- c(
  palette_OkabeIto_black,
  scales::brewer_pal(palette = "Paired")(12),
  scales::brewer_pal(palette = "Set1")(9),
  scales::brewer_pal(palette = "Set2")(8),
  scales::brewer_pal(palette = "Dark2")(8)
)
# Utilities ---------------------------------------------------------
# Colour-scale limits for a plotted variable: continuous variables are
# anchored at zero up to the observed maximum; discrete variables have no
# numeric limits, so c(NA, NA) lets ggplot2 decide.
legend_limits <- function(x, var) {
  if (is_discrete(pull(x, !!var))) {
    # Bug fix: this value was previously computed and silently discarded
    # (no return), so execution fell through to max() on discrete data.
    return(c(NA, NA))
  }
  c(0, max(pull(x, !!var)))
}
# A variable is treated as discrete when it is character, logical or factor.
is_discrete <- function(x) {
  is.character(x) || is.logical(x) || is.factor(x)
}
# Median (x, y) position per group (or over the whole data when group is NULL).
centroids <- function(df, x, y, group = NULL) {
  grouped <- if (is.null(group)) df else group_by(df, !!group)
  summarize(grouped, x = median(!!x), y = median(!!y))
}
#' @importFrom ggrepel geom_label_repel
# Overlay one repelled label per group, placed at the group's median position.
add_group_labels <- function(p, x, y, group, labels) {
  group_centers <- centroids(p$data, x, y, group)
  p + geom_label_repel(data = group_centers, aes(x = x, y = y, label = labels))
}
# Count the factor levels of the colour variable and, when caller-supplied
# labels are present, verify there is exactly one label per level.
n_colors <- function(x, color, labels) {
  counts <- fct_count(pull(x, !!color))
  label_mismatch <- !is.null(labels) && length(labels) != nrow(counts)
  if (label_mismatch) {
    stop(glue("`labels` ({nl}) must match factors in `{color}` ({nc})",
              color = rlang::quo_text(color),
              nl = length(labels),
              nc = nrow(counts)), call. = FALSE)
  }
  counts
}
#' Add stats to plot activities per cluster
#'
#' Adds stat comparison bars to a beeswarm plot of activity across specified groups
#'
#' @importFrom ggsignif geom_signif
#' @param p plot to add stats to
#' @param df dataframe output from [`stat_activity_grouped()`].
#' Output must include columnw with names group, group1, q.value and/or p.value.
#' @param val values to add to plot. Default is q.value from [`stat_activity_grouped()`] output
#' @param y_loc location of comparisson bars on graph. Default it `y_loc` column of `df`. Can also be numeric values.
#' @param xmin start location of comparison bar. Default is `group` column of `df`.
#' @param xmax stop location of comparison bar. Default is `group1` column of `df`.
#'
#' @family plot fuctions
#'
#' @export
add_stats <- function(p, df,
                      val = q.value,
                      y_loc = y_loc,
                      xmin = group,
                      xmax = group1) {
  # Capture column references so callers can pass bare column names that
  # refer to columns of `df`.
  val <- enquo(val)
  y_loc <- enquo(y_loc)
  xmin <- enquo(xmin)
  xmax <- enquo(xmax)  # consistency fix: was assigned with `=`
  # One significance bar per row of `df`, spanning xmin..xmax at y_loc and
  # annotated with the (rounded) statistic.
  p + geom_signif(
    data = df,
    aes(
      xmin = !!xmin,
      xmax = !!xmax,
      annotations = signif(!!val, digits = 3),
      y_position = !!y_loc
    ),
    color = 'black',
    manual = TRUE
  )
}
|
2c4392eddca06a33294c1ac7eedf707066752aed | 6cbb51fe996e65a51a8d9f2f35e3159721933f25 | /inst/shiny/ui_06_1_pathway.R | 7bf3b0271fb6ac9efb24f0e60052442bc44f3b9e | [
"MIT"
] | permissive | compbiomed/singleCellTK | 927fb97e257ba89cddee9a90f9cb7cb375a5c6fb | 990e89e7ccfbf663f23c793454f72fb8c6878a32 | refs/heads/master | 2023-08-11T09:17:41.232437 | 2023-07-26T20:43:47 | 2023-07-26T20:43:47 | 68,756,293 | 144 | 89 | NOASSERTION | 2023-09-06T18:22:08 | 2016-09-20T21:50:24 | R | UTF-8 | R | false | false | 1,503 | r | ui_06_1_pathway.R | shinyPanelPathway <- fluidPage(
  tags$div(
    class = "container",
    h1("Pathway Activity Analysis"),
    # Link to the pathway-analysis help article, opened in a new tab.
    h5(tags$a(href = paste0(docs.artPath, "pathwayAnalysis.html"),
              "(help)", target = "_blank")),
    sidebarLayout(
      # Left panel: analysis inputs. The select choices are populated
      # server-side (choices = NULL at build time).
      sidebarPanel(
        selectizeInput(
          inputId = "pathwayAssay",
          label = "Select input matrix:",
          choices = NULL,
          selected = NULL,
          multiple = FALSE,
          options = NULL),
        #uiOutput("pathwayAssay"),
        #selectInput("pathwayAssay", "Select Assay:", currassays),
        selectInput("pathwayMethod", "Select Method:", "GSVA"),
        uiOutput("selectPathwayGeneLists"),
        uiOutput("selectNumTopPaths"),
        selectInput("pathwayPlotVar",
                    "Select Condition(s) of interest for plot:", clusterChoice,
                    multiple = TRUE),
        radioButtons("pathwayOutPlot", "Plot Type:", c("Heatmap", "Violin")),
        # Run button with busy indicator while GSVA computes.
        withBusyIndicatorUI(actionButton("pathwayRun", "Run")),
        tags$hr(),
        h3("Save pathway results:"),
        actionButton("savePathway", "Save Pathways"),
        downloadButton("downloadPathway", "Download Pathway Results")
      ),
      # Right panel: resizable plot and the results table, in tabs.
      mainPanel(
        tabsetPanel(
          tabPanel(
            "Plot",
            shinyjqui::jqui_resizable(plotOutput("pathwayPlot"))
          ),
          tabPanel(
            "Results Table",
            DT::dataTableOutput("pathwaytable")
          )
        )
      )
    )
  )
)
|
956386015d02dd632ecb37484b98e9970ad1c8db | 24fcc7a9446871f5affbc82d3ae1ed20d6a7c8aa | /tests/performance/profile.R | 8a0ec8ae8fbbb16112c7d51949fa1a312a6875f4 | [
"MIT"
] | permissive | mrc-ide/malariasimulation | 3188657f6ff9da4ea35646189d0bd75d6e35aa52 | 397a7b7efe90958dd01f97110a1d16c71d041f33 | refs/heads/master | 2023-08-23T11:29:10.050424 | 2023-07-03T15:58:32 | 2023-07-03T15:58:32 | 233,609,741 | 10 | 10 | NOASSERTION | 2023-08-17T15:48:41 | 2020-01-13T14:06:17 | R | UTF-8 | R | false | false | 442 | r | profile.R | year <- 365
# Profiling script: run one simulated year for a large population at high
# EIR under profvis to find hot spots. `year` (365) is defined above.
sim_length <- 1 * year
human_population <- 1e5
eir <- 1e3
simparams <- malariasimulation::get_parameters(
  list(
    human_population = human_population,
    individual_mosquitoes = FALSE
  )
)
# Initialise model state at equilibrium for the chosen EIR before profiling.
simparams <- malariasimulation::set_equilibrium(simparams, eir)
profvis::profvis({output <- malariasimulation::run_simulation(sim_length, simparams)})
# output <- malariasimulation::run_simulation(sim_length, simparams)
print('done') |
f682cf9b07850aab0092edd686b1fa305f1fa482 | 983ad35bf43461555970dc6be1f33744bf97e075 | /bench/bench-tbl_summary.R | 510a2553e54130e77ec7d278bd2244c76410cdfa | [
"MIT"
] | permissive | DrShaneBurke/gtsummary | c3c3560258f5a5c9512c731eafae8ea68383102c | 914dc3f4592a2b2e4940acf42dcc3ef1157b3ff3 | refs/heads/master | 2023-03-16T21:38:45.808825 | 2021-03-10T00:01:58 | 2021-03-10T00:01:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | bench-tbl_summary.R | library(gtsummary, warn.conflicts = FALSE)
library(bench)
# setup code
# 5000 stacked copies of the example `trial` data, for the big-data case.
big_trial <- purrr::map_dfr(seq_len(5000), ~trial)
# Benchmark three scenarios; check = FALSE because the expressions return
# different objects, and >= 30 iterations for stable timings.
bench::mark(
  # simple summary
  simple = tbl_summary(trial),
  # simple calculation with comparisons+others
  complex = tbl_summary(trial, by = trt) %>% add_overall() %>% add_p() %>% add_q(quiet = TRUE) %>% add_n(),
  # big summary
  big_data = big_trial %>% select(age, grade, trt) %>% tbl_summary(by = trt, missing = "no") %>% add_p(),
  check = FALSE,
  min_iterations = 30
)
|
aa8276e2c0df7f64ebfd0eea00e1e7257bc1649b | 67d39d356dc06f0f487e05e1436bae8070a5888b | /R/s3cache.R | 21bb9f018eb02538c6d05bc7c285944e4b4ef856 | [] | no_license | elainexmas/s3mpi | 1ccdb01aa6ff26cb4c3738c1b9ce4eeb1c166a40 | 59d60181cb808c11e24d50dcee86f375625c50fd | refs/heads/master | 2020-04-06T03:43:20.750561 | 2015-06-02T16:25:54 | 2015-06-02T16:25:54 | 43,513,276 | 0 | 0 | null | 2015-10-01T18:25:58 | 2015-10-01T18:25:57 | null | UTF-8 | R | false | false | 3,740 | r | s3cache.R | #' A caching layer around s3mpi calls.
#'
#' Fetching large files from the S3 MPI can be expensive when performed
#' multiple times. This methods allows one to add a caching layer
#' around S3 fetching. The user should specify the configuration option
#' \code{options(s3mpi.cache = 'some/dir')}. The recommended cache
#' directory (where files will be stored) is \code{"~/.s3cache"}.
#'
#' @param s3key character. The full S3 key to attempt to read or write
#' to the cache.
#' @param value ANY. The R object to save in the cache. If missing,
#' a cache read will be performed instead.
s3cache <- function(s3key, value) {
  # The cache is opt-in: refuse to run until a cache directory is configured.
  if (!cache_enabled()) {
    stop("Cannot use s3mpi::s3cache until you set options(s3mpi.cache) ",
         "to a directory in which to place cache contents.")
  }
  # Ensure the cache layout exists: <root>/data for payloads, <root>/info
  # for their metadata.
  root <- cache_directory()
  dir.create(root, showWarnings = FALSE, recursive = TRUE)
  dir.create(file.path(root, 'info'), showWarnings = FALSE, recursive = TRUE)
  dir.create(file.path(root, 'data'), showWarnings = FALSE, recursive = TRUE)
  # A missing `value` means "read"; otherwise this is a write.
  if (missing(value)) {
    fetch_from_cache(s3key, root)
  } else {
    save_to_cache(s3key, value, root)
  }
}
#' Helper function for fetching a file from a cache directory.
#'
#' This function will also test to determine whether the file has been
#' modified on S3 since the last cache save. If the file has never been
#' cached or the cache is invalidated, it will return \code{s3mpi::not_cached}.
#'
#' @param key character. The key under which the cache entry is stored.
#' @param cache_dir character. The cache directory. The default is
#' \code{cache_directory()}.
#' @return the cached object if the cache has not invalidated. Otherwise,
#' return \code{s3mpi::not_cached}.
fetch_from_cache <- function(key, cache_dir) {
  # Cache entries are addressed by a digest of the S3 key; `cache_file`
  # maps a subdirectory ('data' or 'info') to that entry's file path.
  cache_key <- digest::digest(key)
  cache_file <- function(dir) file.path(cache_dir, dir, cache_key)
  if (!file.exists(cache_file('data'))) return(not_cached)
  if (!file.exists(cache_file('info'))) {
    # Somehow the cache became corrupt: data exists without accompanying
    # meta-data. In this case, simply wipe the cache.
    file.remove(cache_file('data'))
    return(not_cached)
  }
  # `info` holds the S3 mtime recorded at save time (see save_to_cache).
  info <- readRDS(cache_file('info'))
  # Check if cache is invalid.
  connected <- has_internet()
  if (!connected) {
    warning("Your network connection seems to be unavailable. s3mpi will ",
            "use the latest cache entries instead of pulling from S3.",
            call. = FALSE, immediate. = FALSE)
  }
  # Invalidate only when we can actually reach S3 and the remote mtime has
  # changed; offline, serve whatever is cached.
  if (connected && !identical(info$mtime, last_modified(key))) {
    not_cached
  } else {
    readRDS(cache_file('data'))
  }
}
#' Helper function for saving a file to a cache directory.
#'
#' @param key character. The key under which the cache entry is stored.
#' @param value ANY. The R object to save in the cache.
#' @param cache_dir character. The cache directory. The default is
#' \code{cache_directory()}.
save_to_cache <- function(key, value, cache_dir = cache_directory()) {
  # Fail fast if 'digest' is unavailable. The previous require(digest) call
  # would merely return FALSE and attach the package needlessly — the code
  # below already uses the digest:: namespace explicitly.
  if (!requireNamespace("digest", quietly = TRUE)) {
    stop("Package 'digest' is required to use the s3mpi cache.", call. = FALSE)
  }
  cache_key <- digest::digest(key)
  cache_file <- function(dir) file.path(cache_dir, dir, cache_key)
  # Write the payload, then record the S3 mtime alongside it so reads can
  # detect when the remote object has changed (see fetch_from_cache).
  saveRDS(value, cache_file('data'))
  info <- list(mtime = last_modified(key), key = key)
  saveRDS(info, cache_file('info'))
  invisible(NULL)
}
#' Determine the last modified time of an S3 object.
#'
#' @param key character. The s3 key of the object.
#' @return the last modified time or \code{NULL} if it does not exist on S3.
last_modified <- function(key) {
  # Offline sentinel: a fixed old timestamp so cached entries compare as
  # unchanged when the network is unavailable.
  if (!has_internet()) { return(as.POSIXct(as.Date("2000-01-01"))) }
  # Shell out to s3cmd; the first listing line starts with the timestamp
  # in its first 16 characters ("YYYY-MM-DD HH:MM").
  s3result <- system(paste0('s3cmd ls ', key), intern = TRUE)[1]
  if (is.character(s3result) && !is.na(s3result) && nzchar(s3result)) {
    # NOTE(review): strptime returns POSIXlt while the offline branch
    # returns POSIXct; callers compare via identical(), so mixed classes
    # never compare equal — confirm this asymmetry is intended.
    strptime(substring(s3result, 1, 16), '%Y-%m-%d %H:%M')
  }
  # Implicitly returns NULL when the object is not listed on S3.
}
# Sentinel returned when a cache lookup misses: an empty list carrying the
# 'not_cached' class, compared against with identical().
not_cached <- structure(list(), class = "not_cached")
is.not_cached <- function(x) {
  identical(x, not_cached)
}
|
b5cbe7c8613473d7b4d7460cfb93c8bebfc1f6e4 | e48bac3c7595d59ed95257723a8a61690ec24f10 | /R/diffTest.R | bde0ab657525da1351752f6fc5d9d1659a338da6 | [] | no_license | john-james-ai/xms | 904a0213854aa808d0b6f9919386547c7ff5d9f2 | 995cfc5eb175594d593c8b125a710dc8f2565805 | refs/heads/master | 2021-12-23T07:21:07.013135 | 2017-11-06T20:42:03 | 2017-11-06T20:42:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,971 | r | diffTest.R | #==============================================================================#
# diffTest #
#==============================================================================#
#' diffTest
#'
#' \code{diffTest} Conducts multiple z-tests for a multi-level categorical explanatory
#' variable on a response variable.
#'
#' @param data data frame containing data to be analyzed, with the response variable as the first variable.
#' @param alternative direction of the alternative hypothesis; "less","greater", or "two.sided"
#' @param success Character string indicating which level of the response to consider the "success"
#' @param conf confidence level, value between 0 and 1
#' @param alpha numeric between 0 and 1, the probability of a type I error
#'
#' @return Data frame containing the results of the z-test
#'
#' @author John James, \email{jjames@@datasciencesalon.org}
#' @family xmar functions
#' @export
#'
diffTest <- function(data, alternative = "two.sided", success,
                     conf = 0.95, alpha = 0.05) {
  # Cross-tabulate response x group, with a "Sum" margin row per group.
  freqDf <- as.data.frame(addmargins(table(data), 1))
  groups <- as.character(unique(freqDf %>% .[[2]]))
  nGroups <- length(groups)
  # Bonferroni Correction for Multiple Groups
  numTests <- nGroups * (nGroups - 1) / 2
  alpha <- alpha / numTests
  # Compute critical value
  area <- ifelse(alternative == "two.sided", alpha/2, alpha)
  zAlpha <- qnorm(area, lower.tail = FALSE)
  # Initialize variables
  # Empty accumulators for the per-pair result columns (grown by index k).
  Populations = Contrast = Value = `Z-Score` = c()
  `p-value` = `95% CI` = Significant = `Relative Risk` = c()
  statements <- list()
  plots <- list()
  k <- 1
  # Iterate over all unordered pairs of groups.
  for (i in 1:(nGroups-1)) {
    for (j in (i+1):nGroups) {
      #-----------------------------------------------------------------------#
      # Perform Tests                                                          #
      #-----------------------------------------------------------------------#
      # Compute prop.test
      # Successes = count of `success` responses; totals come from "Sum" rows.
      successes <- c(as.numeric(freqDf %>% filter(.[[1]] == success & .[[2]] == groups[i]) %>%
                                  select(Freq)),
                     as.numeric(freqDf %>% filter(.[[1]] == success & .[[2]] == groups[j]) %>%
                                  select(Freq)))
      totals <- c(as.numeric(freqDf %>% filter(.[[1]] == "Sum" & .[[2]] == groups[i]) %>%
                               select(Freq)),
                  as.numeric(freqDf %>% filter(.[[1]] == "Sum" & .[[2]] == groups[j]) %>%
                               select(Freq)))
      # NOTE(review): `alpha` was already divided by numTests above, so
      # `alpha / numTests` here applies the Bonferroni correction twice;
      # also `conf` is ignored for the confidence level — confirm intent.
      t <- prop.test(successes, totals, correct = FALSE,
                     alternative = alternative, conf.level = (1 - (alpha / numTests))) # Bonferroni Correction
      # Compute z-score
      # Pooled-proportion z statistic for the difference p1 - p2.
      pPooled <- sum(successes) / sum(totals)
      sePooled <- sqrt((pPooled * (1-pPooled) / totals[1]) + (pPooled * (1-pPooled) / totals[2]))
      zScore <- ((successes[1] / totals[1]) - (successes[2] / totals[2])) / sePooled
      pValue <- 2 * pnorm(-abs(zScore))
      # Render decision
      if ((alternative == "two.sided" & pValue < (alpha / 2))
          | (alternative != "two.sided" & pValue < alpha)) {
        decision <- "Reject"
      } else {
        decision <- "Fail to Reject"
      }
      # Compute Relative Risk Ratio
      r1 <- successes[1] / totals[1]
      r2 <- successes[2] / totals[2]
      rr <- r1 / r2
      #-----------------------------------------------------------------------#
      # Format Results                                                         #
      #-----------------------------------------------------------------------#
      Populations[k] <- paste0(groups[i], " - ", groups[j])
      Contrast[k] <- paste0("p",i, " - p",j)
      Value[k] <- as.numeric(round(t$estimate[1] - t$estimate[2], 3))
      `Z-Score`[k] <- round(zScore, 3)
      `p-value`[k] <- round(t$p.value, 3)
      `95% CI`[k] <- paste0("[ ", round(t$conf.int[1], 3), ", ", round(t$conf.int[2], 3), " ]")
      Significant[k] <- ifelse(decision == "Reject", "Yes", "No")
      `Relative Risk`[k] <- round(rr, 2)
      #-----------------------------------------------------------------------#
      # Plot Results                                                           #
      #-----------------------------------------------------------------------#
      # NOTE(review): groups[1] / groups[2] look like they should be
      # groups[i] / groups[j] — every plot is labeled with the first pair;
      # confirm against plotDiffTest().
      plots[[k]] <- plotDiffTest(x = groups[1], y = groups[2], zAlpha, zScore)
      #-----------------------------------------------------------------------#
      # Render Statement                                                       #
      #-----------------------------------------------------------------------#
      # Build a prose summary of the test for reporting.
      statements[[k]] <- list()
      alt <- ifelse(alternative == "two.sided", "not equal to",
                    ifelse(alternative == "less", "less than","greater than"))
      type1 <- ifelse(decision == "Reject", "less than", "greater than")
      ciNote <- ifelse(decision == "Reject",
                       paste0("Further, the confidence interval for the difference in ",
                              "proportions does not include zero, the null ",
                              "hypothesis value, suggesting that a zero difference ",
                              "in ", tolower(success), " opinion between the groups ",
                              "is outside the ", alpha * 100, "% margin of error. "),
                       paste0("Further, the confidence interval for the difference in ",
                              "proportions includes zero, suggesting that a zero ",
                              "difference in the proportion of ", tolower(success),
                              " opinion is within the ", alpha * 100, "% margin of ",
                              "error. "))
      statements[[k]]$type <- paste0("This was a ", (conf * 100), "% confidence, two-proportion z-test ",
                                     "of the null hypothesis that the true population proportion of ",
                                     tolower(success), " opinion for ", groups[i], " and ",
                                     groups[j], " populations are equal. ")
      if (decision == "Reject") {
        statements[[k]]$conclude <- paste0("The results of the p-value and confidence interval ",
                                           "approaches agree. The null hypothesis was ",
                                           "rejected with a ", conf * 100, "% confidence, in favor ",
                                           "of the alternative hypothesis that the true ",
                                           "population proportion of ", tolower(success),
                                           " opinion within the ", groups[i], " population is ",
                                           alt, " the true proportion of ", tolower(success),
                                           " opinion in the ", groups[j], " population. ")
      } else {
        statements[[k]]$conclude <- paste0("The results of the p-value and confidence ",
                                           "interval approaches agree. The null ",
                                           "hypothesis that the true ",
                                           "population proportions of ",
                                           tolower(success),
                                           " opinion within the ",
                                           groups[i], " and ",
                                           groups[j], " populations are equal,
                                           was not rejected. ")
      }
      statements[[k]]$detail <- paste0("the observed difference in the proportion of ",
                                       tolower(success), " opinion between the ",
                                       groups[i], " and ", groups[j], " respondents was ",
                                       as.numeric(round(t$estimate[1] - t$estimate[2], 3)),
                                       ", raising a z-score of ", round(zScore, 3),
                                       ", as indicated by the red dot on the plot. ",
                                       "The probability of encountering a difference in ",
                                       "proportions this extreme (p-value) was approximately ",
                                       round(t$p.value, 3), ", which is ", type1, " the ",
                                       "probability of incorrectly rejecting the null ",
                                       "hypothesis. ", ciNote)
      k <- k + 1
    }
  }
  #---------------------------------------------------------------------------#
  # Compile Results and Return                                                 #
  #---------------------------------------------------------------------------#
  df <- as.data.frame(cbind(Populations, Contrast, Value, `Z-Score`,
                            `p-value`, `95% CI`, Significant,
                            `Relative Risk`), stringsAsFactors = FALSE)
  res <- list(
    sig = list(
      conf = conf,
      alpha = alpha,
      zAlpha = zAlpha
    ),
    result = df,
    statements = statements,
    plots = plots
  )
  return(res)
}
14329e1dc9aed48daba30a794b102d5c47657ff7 | 81a2fa3228451179b12779bb0149398cbfc8e9b1 | /R/countCloseToLimits.R | 21a8c759e7bd1faff7e8cd13704fa065cd689859 | [] | no_license | cran/wrMisc | c91af4f8d93ad081acef04877fb7558d7de3ffa2 | 22edd90bd9c2e320e7c2302460266a81d1961e31 | refs/heads/master | 2023-08-16T21:47:39.481176 | 2023-08-10T18:00:02 | 2023-08-10T19:30:33 | 236,959,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,099 | r | countCloseToLimits.R | #' Count from two vectors number of values close within given limits
#'
#' This functions summarizes the serach of similar (or identical) numeric values from 2 initial vectors, it
#' evaluates the result from initial search run by findCloseMatch(), whose output is a less convenient list.
#' \code{countCloseToLimits} checks furthermore how many results within additional (more stringent)
#' distance-limits may be found and returns the number of distance values within the limits tested.
#' Designed for checking if threshold used with findCloseMatch() may be set more stringent, eg when searching reasonable FDR limits ...
#'
#' @param closeMatch (list) output from findCloseMatch(), ie list indicating which instances of 2 series of data have close matches
#' @param limitIdent (numeric) max limit or panel of threshold values to test (if single value, in addtion a panel with values below will be tested)
#' @param prefix (character) prefix for names of output
#' @return integer vector with counts of list-elements having at least one absolute value below each threshold; names are built from \code{prefix} and the limit values
#' @seealso \code{\link[wrMisc]{findCloseMatch}}
#' @examples
#' set.seed(2019); aa <- sample(12:15,20,repl=TRUE) +round(runif(20),2)-0.5
#' bb <- 11:18
#' match1 <- findCloseMatch(aa,bb,com="diff",lim=0.65)
#' head(match1)
#' (tmp3 <- countCloseToLimits(match1,lim=c(0.5,0.35,0.2)))
#' (tmp4 <- countCloseToLimits(match1,lim=0.7))
#' @export
countCloseToLimits <- function(closeMatch, limitIdent=5, prefix="lim_") {
  ## Count, for each threshold in limitIdent, how many elements of closeMatch
  ## contain at least one value whose absolute distance is <= that threshold.
  ##
  ## closeMatch : list of numeric vectors (output of findCloseMatch())
  ## limitIdent : vector of thresholds; a single value is expanded into a
  ##              default panel of smaller test limits below it
  ## prefix     : prefix used to name the output elements
  ## returns    : numeric vector of counts (NA vector if input is empty),
  ##              named paste0(prefix, limitIdent)
  limitIdent <- unique(limitIdent)
  if (length(limitIdent) == 1) {
    # Expand a single threshold into a default series of limits: two powers
    # of ten around its magnitude, their fifths, 20 evenly spaced fractions
    # of the threshold, and the threshold itself.
    x <- floor(log10(signif(limitIdent, 1)))
    x <- c(10^c((x - 1):x), 10^c((x - 1):x)/5,
           4*round(seq(limitIdent/40, limitIdent/4, length.out=20), 2),
           limitIdent)
    limitIdent <- unique(signif(sort(x), digits=5))
  }
  # Use || (scalar short-circuit) rather than the vectorized | in this
  # scalar condition (fix: original used `|`).
  if (length(closeMatch) < 1 || length(limitIdent) < 1) {
    out <- rep(NA, length(limitIdent))
  } else {
    # vapply (type-stable, unlike sapply): one logical column per
    # closeMatch element; rowSums counts elements passing each threshold.
    out <- rowSums(vapply(closeMatch,
                          function(z) min(abs(z)) <= limitIdent,
                          logical(length(limitIdent))))
  }
  if (is.null(names(out))) names(out) <- paste0(prefix, limitIdent)
  out
}
|
2f413127f363412e28a68722dbc9ef9f65847b6c | 7f86f568dab6279e6f2d987c77a023bed055a11c | /man/simFn.Rd | d45816b9d6fb4ac69b4bf9db710b46483bf1cc1c | [] | no_license | cran/AHMbook | b6acd2ed71319be2f0e3374d9d8960a8b04e21bf | d8f8ad8bef93120f187bef494b9ac1ad8200c530 | refs/heads/master | 2023-08-31T21:13:00.618018 | 2023-08-23T21:10:03 | 2023-08-23T22:30:32 | 88,879,777 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,731 | rd | simFn.Rd | \name{sim.fn}
\alias{sim.fn}
\encoding{UTF-8}
\title{
Simulate a Poisson point process
}
\description{
Simulates animal or plant locations in space according to a homogeneous Poisson process. This process is characterized by the intensity, which is the average number of points per (very small) unit area. The resulting point pattern is then discretized to obtain abundance data and presence/absence (or occurrence) data. The discretization of space is achieved by choosing the cell size. It is used in AHM1 Section 1.1 to help understand the relationship between point patterns, abundance data and occurrence data (also called presence/absence or distribution data). For a similar and somewhat more sexy version of this function, see \code{\link{simPPe}}.
}
\usage{
sim.fn(quad.size = 10, cell.size = 1, intensity = 1, show.plot = TRUE)
}
\arguments{
\item{quad.size}{
The length of each side of the quadrat (in arbitrary units)
}
\item{cell.size}{
The length of each side of the cells into which the quadrat is divided. The ratio of quad.size to cell.size must be an integer.
}
\item{intensity}{
The average number of points (animals or plants) per unit area.
}
\item{show.plot}{
If TRUE, the results are plotted. Set to FALSE when running simulations.
}
}
\value{
A list with the values of the arguments and the following additional elements:
\item{exp.N }{Expected population size in quadrat}
\item{breaks }{boundaries of grid cells}
\item{n.cell }{Number of cells in the quadrat}
\item{mid.pt }{Cell mid-points}
\item{M }{ Realized population size in quadrat}
\item{u1 }{ x coordinate of each individual}
\item{u2 }{ y coordinate of each individual}
\item{N }{The number of individuals in each cell (a vector of length n.cell)}
\item{z }{Presence/absence (1/0) in each cell (a vector of length n.cell)}
\item{psi }{Proportion of cells occupied, ie, the species is present.}
}
\references{
Kéry, M. & Royle, J.A. (2016) \emph{Applied Hierarchical Modeling in Ecology} AHM1 - 1.1.
}
\author{
Marc Kéry and Andy Royle
}
\examples{
# Generate a simulated data set with default arguments and look at the structure:
tmp <- sim.fn()
str(tmp)
# Effect of grain size of study on abundance and occupancy (intensity constant)
tmp <- sim.fn(quad.size = 10, cell.size = 1, intensity = 0.5)
tmp <- sim.fn(quad.size = 10, cell.size = 2, intensity = 0.5)
tmp <- sim.fn(quad.size = 10, cell.size = 5, intensity = 0.5)
tmp <- sim.fn(quad.size = 10, cell.size = 10, intensity = 0.5)
# Effect of intensity of point pattern (intensity) on abundance and occupancy
tmp <- sim.fn(intensity = 0.1) # choose default quad.size = 10, cell.size = 1
tmp <- sim.fn(intensity = 1)
tmp <- sim.fn(intensity = 5)
tmp <- sim.fn(intensity = 10)
}
|
e374d4cfaef82fddd7606b98d19bcd401065978a | e1b45c5941f53eea0f2c99615bd45ab3e89275c7 | /data_processing/generate_clinical_tables.R | d18f90ceb0711309b716ad34dc4b09e3e31ba2cb | [
"MIT"
] | permissive | instigatorofawe/clustering_manuscript | 32f727cb703c5dd6fed43776fc86a67fdfebedf5 | a75c437b45a07d7e6e0781012a92ce18179e47b0 | refs/heads/master | 2022-05-24T19:23:18.770560 | 2020-04-27T14:19:00 | 2020-04-27T14:19:00 | 259,345,244 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 21,973 | r | generate_clinical_tables.R | rm(list=ls())
# Dependencies: PostgreSQL driver, pracma (for fprintf) and tictoc (timing).
library(RPostgreSQL)
library(pracma)
library(tictoc)
# Connection settings for a local eICU PostgreSQL instance.
user = "postgres"
password = "postgres"
db = "eicu"
connection = dbConnect(PostgreSQL(), user=user, password=password, dbname=db)
# Query all patient data in general
query = "select * from patient"
tic("Patient data query")
patient.result = dbGetQuery(connection, query)
toc()
# Earlier cohort definition based on sepsis/septic-shock diagnosis strings;
# superseded by the precomputed ICD9 infection flag loaded below.
# # First, determine which patients have
# query = "select * from diagnosis where diagnosisstring like '%sepsis%'"
# tic("Sepsis diagnosis query")
# sepsis.dx.result = dbGetQuery(connection, query)
# toc()
# query = "select * from diagnosis where diagnosisstring like '%septic shock%'"
# tic("Septic shock diagnosis query")
# septic.shock.dx.result = dbGetQuery(connection, query)
# toc()
# query.subjects = unique(c(sepsis.dx.result$patientunitstayid,septic.shock.dx.result$patientunitstayid))
# Cohort: ICU stays flagged with an infection ICD9 code (precomputed offline).
# The .rdata file loaded here supplies `subjects`, indexed by the flag vector.
load("processed/diagnosis_query.rdata")
has.infection.icd9 = readRDS("processed/has_infection_icd9.rds")
query.subjects = subjects[has.infection.icd9]
# Query features
## ---------------------------------------------------------------------------
## Feature queries
## ---------------------------------------------------------------------------
# The comma-separated subject-id list appears in every query below; build it
# once instead of re-collapsing the id vector for each of the ~25 queries.
subject.list <- paste(query.subjects, collapse = ",")

# Run a SQL query against the eICU connection, reporting elapsed time via
# tic/toc under `label`, and return the result data frame.
timed.query <- function(label, query) {
  tic(label)
  result <- dbGetQuery(connection, query)
  toc()
  result
}

# --- Vital signs charted by nursing staff (nursecharting table) ---
# HR
hr.result <- timed.query("HR query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and nursingchartcelltypevallabel = 'Heart Rate'"))
# SBP (invasive or non-invasive)
sbp.result <- timed.query("SBP query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and (nursingchartcelltypevalname = 'Invasive BP Systolic' or nursingchartcelltypevalname = 'Non-Invasive BP Systolic')"))
# DBP (invasive or non-invasive)
dbp.result <- timed.query("DBP query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and (nursingchartcelltypevalname = 'Invasive BP Diastolic' or nursingchartcelltypevalname = 'Non-Invasive BP Diastolic')"))
# MBP (invasive or non-invasive)
mbp.result <- timed.query("MBP query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and (nursingchartcelltypevalname = 'Invasive BP Mean' or nursingchartcelltypevalname = 'Non-Invasive BP Mean')"))
# Respiratory rate
resp.result <- timed.query("RESP query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and nursingchartcelltypevallabel = 'Respiratory Rate'"))
# Temperature (Celsius)
temp.result <- timed.query("Temperature query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and nursingchartcelltypevalname = 'Temperature (C)'"))
# CVP (two label variants exist in the table)
cvp.result <- timed.query("CVP query", paste0(
  "select * from nursecharting where patientunitstayid in (", subject.list,
  ") and (nursingchartcelltypevallabel = 'CVP' or nursingchartcelltypevallabel = 'CVP (mmHg)')"))

# --- Lab analytes (lab table) ---
# Fetch one lab analyte for all cohort subjects.
lab.query <- function(label, labname) {
  timed.query(label, paste0(
    "select * from lab where labname = '", labname,
    "' and patientunitstayid in (", subject.list, ")"))
}
pao2.result <- lab.query("PaO2 query", "paO2")   # fix: was mislabeled tic("HR query")
bili.result <- lab.query("Bili query", "direct bilirubin")
platelets.result <- lab.query("Platelets query", "platelets x 1000")
creat.result <- lab.query("Creatinine query", "creatinine")
lact.result <- lab.query("Lactate query", "lactate")
bun.result <- lab.query("BUN query", "BUN")
ph.result <- lab.query("pH query", "pH")
wbc.result <- lab.query("WBC query", "WBC x 1000")
paco2.result <- lab.query("PaCO2 query", "paCO2")
hgb.result <- lab.query("Hgb query", "Hgb")
hct.result <- lab.query("Hct query", "Hct")
potassium.result <- lab.query("Potassium query", "potassium")

# --- Respiratory charting, physical exam, intake/output ---
# FiO2 (two label variants exist in the table)
fio2.result <- timed.query("FiO2 query", paste0(
  "select * from respiratorycharting where patientunitstayid in (", subject.list,
  ") and (respchartvaluelabel = 'FiO2' or respchartvaluelabel = 'FIO2 (%)')"))
# GCS components (physical exam paths containing 'GCS')
gcs.result <- timed.query("GCS query", paste0(
  "select * from physicalexam where physicalexampath like '%GCS%' and patientunitstayid in (",
  subject.list, ")"))
# Urine output (intake/output rows whose label mentions urine)
urine.result <- timed.query("Urine query", paste0(
  "select * from intakeoutput where celllabel like '%Urin%' and patientunitstayid in (",
  subject.list, ")"))

# --- Vasopressor infusions (infusiondrug table) ---
# NOTE: the database is not consistent with infusion-rate units.
# Coerce rates to numeric; free-text/non-numeric rates become NA, which is
# replaced by 1 ("infusion running at a nominal rate"), matching the
# original pipeline's behavior.
clean.drugrate <- function(result) {
  result$drugrate <- as.numeric(result$drugrate)
  result$drugrate[is.na(result$drugrate)] <- 1
  result
}
vaso.query <- function(drug) {
  dbGetQuery(connection, paste0(
    "select * from infusiondrug where drugname like '%", drug,
    "%' and patientunitstayid in (", subject.list, ")"))
}
tic("Vasopressors query")
dop.result <- clean.drugrate(vaso.query("Dopamine"))
dob.result <- clean.drugrate(vaso.query("Dobutamine"))
ep.result <- clean.drugrate(vaso.query("Epinephrine"))
norep.result <- clean.drugrate(vaso.query("Norepinephrine"))
toc()

# --- Mechanical ventilation episodes ---
vent.result <- timed.query("Ventilation query", paste0(
  "select * from respiratorycare where patientunitstayid in (", subject.list, ")"))

# All required data is now in memory; release the database connection.
dbDisconnect(connection)
## ---------------------------------------------------------------------------
## Assemble per-subject clinical time series
## ---------------------------------------------------------------------------

# Subset a query result to the rows belonging to one ICU stay.
subject.rows <- function(result, sid) {
  result[result$patientunitstayid == sid, ]
}

# Collapse a per-subject table into list(timestamps, values): one entry per
# unique charting offset, aggregating duplicate recordings at the same
# offset with `agg`. Values are coerced to numeric first (chart values are
# stored as text). Returns NULL when the subject has no rows for the
# feature, which downstream code uses to mean "never recorded".
series.by.offset <- function(entries, offset.col, value.col, agg) {
  if (nrow(entries) < 1) return(NULL)
  offsets <- entries[[offset.col]]
  raw.values <- as.numeric(entries[[value.col]])
  timestamps <- sort(unique(offsets))
  values <- vapply(timestamps,
                   function(ts) agg(raw.values[offsets == ts]),
                   numeric(1))
  list(timestamps = timestamps, values = values)
}

# Duplicate recordings at one offset are averaged; NAs (from coercing
# non-numeric chart text) are dropped. Fix: the original omitted na.rm=TRUE
# for the lab features only, so a single unparseable lab value turned that
# offset's mean into NA; NA handling is now uniform across features.
mean.narm <- function(v) mean(v, na.rm = TRUE)

# Feature extractors, one per source-table column layout.
nursechart.series <- function(result, sid) {
  series.by.offset(subject.rows(result, sid),
                   "nursingchartoffset", "nursingchartvalue", mean.narm)
}
lab.series <- function(result, sid) {
  series.by.offset(subject.rows(result, sid),
                   "labresultoffset", "labresult", mean.narm)
}
respchart.series <- function(result, sid) {
  series.by.offset(subject.rows(result, sid),
                   "respchartoffset", "respchartvalue", mean.narm)
}
infusion.series <- function(result, sid) {
  # drugrate was already coerced to numeric (NA -> 1) at query time
  series.by.offset(subject.rows(result, sid),
                   "infusionoffset", "drugrate", mean.narm)
}
urine.series <- function(result, sid) {
  # Multiple urine entries at one offset are volumes: sum, don't average.
  series.by.offset(subject.rows(result, sid),
                   "intakeoutputoffset", "cellvaluenumeric", sum)
}
gcs.series <- function(result, sid) {
  # Total GCS = sum of the Verbal, Motor, and Eyes component scores recorded
  # at the same exam offset.
  entries <- subject.rows(result, sid)
  if (nrow(entries) < 1) return(NULL)
  is.component <- grepl("Verbal", entries$physicalexampath) |
    grepl("Motor", entries$physicalexampath) |
    grepl("Eyes", entries$physicalexampath)
  components <- entries[is.component, ]
  timestamps <- sort(unique(components$physicalexamoffset))
  values <- vapply(timestamps,
                   function(ts) sum(as.numeric(
                     components$physicalexamvalue[components$physicalexamoffset == ts])),
                   numeric(1))
  list(timestamps = timestamps, values = values)
}
vent.intervals <- function(result, sid) {
  # Offsets of 0 are placeholders for "not recorded", not true event times.
  # NOTE(review): vent starts with no recorded stop are kept as-is, matching
  # the original behavior (see original "what to do about starts with no
  # stop time?" comment).
  entries <- subject.rows(result, sid)
  if (nrow(entries) < 1) return(NULL)
  list(starts = sort(unique(entries$ventstartoffset[entries$ventstartoffset != 0])),
       stops  = sort(unique(entries$priorventendoffset[entries$priorventendoffset != 0])))
}

# Build one list per ICU stay containing every extracted time series.
clinical.data <- vector(mode = "list", length = length(query.subjects))
tic("Processing clinical data")
progress.every <- 100
for (i in seq_along(clinical.data)) {
  if (i %% progress.every == 0) {
    fprintf("Processing %d of %d...\n", i, length(clinical.data))
  }
  sid <- query.subjects[i]
  clinical.data[[i]] <- list(
    subject.id = sid,
    hr = nursechart.series(hr.result, sid),
    sbp = nursechart.series(sbp.result, sid),
    dbp = nursechart.series(dbp.result, sid),
    mbp = nursechart.series(mbp.result, sid),
    resp = nursechart.series(resp.result, sid),
    temp = nursechart.series(temp.result, sid),
    cvp = nursechart.series(cvp.result, sid),
    pao2 = lab.series(pao2.result, sid),
    fio2 = respchart.series(fio2.result, sid),
    gcs = gcs.series(gcs.result, sid),
    bili = lab.series(bili.result, sid),
    platelets = lab.series(platelets.result, sid),
    creat = lab.series(creat.result, sid),
    lactate = lab.series(lact.result, sid),
    bun = lab.series(bun.result, sid),
    ph = lab.series(ph.result, sid),
    wbc = lab.series(wbc.result, sid),
    paco2 = lab.series(paco2.result, sid),
    hgb = lab.series(hgb.result, sid),
    hct = lab.series(hct.result, sid),
    potassium = lab.series(potassium.result, sid),
    urine = urine.series(urine.result, sid),
    vent = vent.intervals(vent.result, sid),
    dop = infusion.series(dop.result, sid),
    dob = infusion.series(dob.result, sid),
    ep = infusion.series(ep.result, sid),
    norep = infusion.series(norep.result, sid))
}
toc()
saveRDS(clinical.data, "processed/clinical_data_icd9_sofa_vent_2.rds")
# saveRDS(patient.result,"processed/patient_data.rds")
# saveRDS(sepsis.dx.result,"processed/sepsis_dx.rds")
# saveRDS(septic.shock.dx.result,"processed/septic_shock_dx.rds")
# Procedurally generate tables for each subject after randomly sampling timestamps from each entry for training
# Generate full tables for determining performance
|
abd16af9ea0d3d1158ed6c098454d2e4090ba5a3 | 3365e6e2af41edec8fd49d24a0f3bab9aaef4447 | /ui.R | cfc491a6c8c803890f855b416ceed03324e5b17c | [
"MIT"
] | permissive | scottshepard/agefromname | aea3bb1583f57f184878b6926e48548081384fe2 | 9c3f1804b1a8b68693e9baa58a2ce60169b42b37 | refs/heads/master | 2021-01-10T04:11:14.860178 | 2015-10-25T18:49:24 | 2015-10-25T18:49:24 | 44,881,439 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 516 | r | ui.R | library(shiny)
library(shinythemes)
# Shiny UI: a sidebar with the name/sex inputs and a GO action button, and a
# main panel showing the birth-year distribution plot plus summary text
# (counts born/alive, median age, peak year). Output ids must match the
# corresponding render* calls in server.R.
# (Fix: removed a stray trailing "|" artifact that broke parsing, and
# normalized quote style.)
shinyUI(fluidPage(
  theme = shinytheme("united"),
  titlePanel("How Old is that Name?"),
  sidebarLayout(
    sidebarPanel(
      textInput("name", "First Name", value = ""),
      radioButtons("sex", "Sex", c("M", "F"), selected = "M"),
      actionButton("go", "GO!")
    ),
    mainPanel(
      plotOutput("zigger_zagger"),
      HTML("</br>"),  # NOTE(review): "</br>" is invalid HTML (browsers treat it as <br>); kept byte-identical to preserve output
      textOutput("born"),
      textOutput("alive"),
      textOutput("median_age"),
      textOutput("peak")
    )
  )
))
ac9d6c468e45dc6e2d190870ba655dc68ddaff85 | 4d4e270ea32cee042f22199e4c051283b5c72ed2 | /Rising and Falling Terms/Union C/RF_Init.R | 29fe0d8acec29edd08f7bdeacbdf5ed81f85758d | [
"MIT"
] | permissive | emddarn/Text-Mining-Weak-Signals | b19701c2ca651e5337dd7c7ef69217c92f510f23 | f54f198f59b9ae239645713140024b46dffa93e0 | refs/heads/master | 2021-05-29T13:50:35.047726 | 2015-09-09T18:23:32 | 2015-09-09T18:23:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,393 | r | RF_Init.R | ## ***Made available using the The MIT License (MIT)***
# Copyright (c) 2012, Adam Cooper
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## ************ end licence ***************
##
## This contains the parameters for running a specific dataset against the RF_Terms.R Method
## It should be executed first
##
## Run Properties - dependent on the source
## Parameter block consumed by RF_Terms.R (sourced at the end of this file);
## variable names here form that script's expected interface.
base.dir<-"/home/arc1/R Projects/Text Mining Weak Signals"
source.dir<-paste(base.dir,"Source Data",sep="/")
#the output directory. NB convention to include the year
output.dir<-"/home/arc1/R Projects/Text Mining Weak Signals Output/Rising and Falling Terms/Union C/2011"
dir.create(output.dir, showWarnings=FALSE)
setwd(output.dir)
title<-"Rising and Falling Terms - Conference Proceedings from ICALT, CAL, ECTEL, ICHL and ICWL"
# this determines the source type: conference abstracts or blog content
source.type="c"#c is for conference abstracts, b is for blogs
# these three (set.xxxxx) apply whichever source type is used
# Parallel vectors: element i of sets.csv/set.name/set.title (and of the
# per-conference vectors below) all describe the same conference series.
sets.csv <- c("ICALT Abstracts 2005-2011 with metrics.csv",
"ECTEL Abstracts 2006-2011 with metrics.csv",
"ICWL Abstracts 2005-2011 with metrics.csv",
"ICHL Abstracts 2008-2011 with metrics.csv",
"CAL Abstracts 2007-2011 with metrics.csv")
set.name <- c("ICALT",
"ECTEL",
"ICWL",
"ICHL",
"CAL")
set.title <- c("IEEE International Conference on Advanced Learning Technologies",
"European Conference on Technology Enhanced Learning",
"International Conference on Web-based Learning",
"International Conference on Hybrid Learning",
"Computer Assisted Learning Conference")
recent.themes.txt <- NA # file containing invited conference themes. Use NA if analysing blogs.
#these apply only for conference abstracts (and are for use in auto-report-generation using Brew)
last.conference.url <- c("http://www.ask4research.info/icalt/2011/",
"http://www.gast.it.uc3m.es/ectel2011/",
"http://www.hkws.org/conference/icwl2011/",
"http://www.hkuspace.hku.hk/ichl2011/",
"http://www.cal-conference.elsevier.com/")
publisher.series <- c("IEEE",
"Springer Lecture Notes in Computer Science (LNCS)",
"Springer Lecture Notes in Computer Science (LNCS)",
"Springer Lecture Notes in Computer Science (LNCS)",
"Elsevier Computers and Education Journal")
publisher.url <- c("http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5991609",
"http://www.springerlink.com/content/978-3-642-23984-7/",
"http://www.springerlink.com/content/978-3-642-25812-1/",
"http://www.springerlink.com/content/978-3-642-22762-2/",
"http://www.journals.elsevier.com/computers-and-education/")
## ensure subdirectories exist
dir.create("Gephi", showWarnings=FALSE)
dir.create("Images", showWarnings=FALSE)
dir.create("Wordle", showWarnings=FALSE)
##
## Run properties - date ranges
##
# key date is the one after which documents are considered to be in the "recent set"
if(source.type=="b"){
# for blogs key date is an N month period before the start of the current month
recent.months<-3#set to 3 to look at the last quarter
prior.months<-12#use the previous 12 months to compare against (if in dataset)
# Dates are manipulated as POSIXlt so individual fields ($mday, $mon, $year)
# can be adjusted directly.
key.date<-as.POSIXlt(Sys.Date(), tz = "GMT")#today
key.date$mday<-1
last.date<-key.date
last.date$mday<-last.date$mday-1 #to match inequalities in corpus filtering
key.date$mon<-key.date$mon-recent.months
start.date<-key.date
start.date$mon<-start.date$mon - prior.months
start.date$mday<-start.date$mday-1 #to match inequalities in corpus filtering
display.dateLength<-10
}else if(source.type=="c"){
# for conference abstracts, key date is just the most-recent conference
conf.year.recent<-2011 #conference abs version => 2011 confs are "recent"
conf.years.in_past<-4 # abstracts from the previous 4 years are counted as "past"
key.date<-as.POSIXlt(paste(conf.year.recent-1,"12","31",sep="-"), tz = "GMT")
start.date<-key.date
start.date$year<-start.date$year-conf.years.in_past
last.date<-key.date
last.date$year<-last.date$year+1
display.dateLength<-4
}else{
stop("Unknown source type",source.type)
}
##
## Run properties - centrality data
##
#use NA if not available!
# This is a list of papers (id, dblp url, author-id-list)
# the row names are made to be the DBLP URLs
papers.table<-NA # read.csv(paste(source.dir,"Union B Author Ids 2010.csv",sep="/"), header=TRUE, sep=",", quote="\"", row.names=2, stringsAsFactors=FALSE)
# this is a list of author centrality measures (id, centrality) NB author IDs must match previous list of papers
authors.table<-NA #read.csv(paste(source.dir,"Author Betweenness D4_3.csv",sep="/"), header=TRUE, sep=",", quote="\"", row.names=1, stringsAsFactors=FALSE)
##
## Run properties - thresholds -
## normally the same between different sources of the same kind for comparability
##
# how many documents must the term appear in to be listed. This is in addition to the frequency thresholds. A value of 2 is expected, i.e. ignore terms that appear in only one doc
doc_count.thresh <- 2
# p-value to accept the "alternative hypothesis" that there is something interesting
thresh.pval<-0.005 #i.e. accept a .5% chance that null hypothesis falsely rejected
thresh.pval.falling<-0.01 #use a more lenient threshold for falling terms
#max frequency of term in the past set for eligibility as a weak signal.
#Above this, sigifnicant risers are "established terms"
max.past.freq<-0.0002 #i.e. 0.02%
# *for plotting* cap rising % at this level. If value greater then plot is effectively cut off
rising.plot.max<-800
#novelty calc
term.doc_occurrence.max<-0.5#remove terms appearing in more than 50% of documents
std.novelty.min<-0.25 #a min value of the "standard novelty"
##
## End setup
##
# in interactive execution it may be best to skip this command and to manually switch to it
#source("../RF_Terms.R")
|
6b05f0c340a2b25d0cc42a1effd6db4e5a71a4b7 | 80828838119e3ff7343549adb90d225e6b6f06d1 | /man/mpi.comm.free.Rd | b6eec9a86897238948361fbd31baf962ff1490c2 | [] | no_license | cran/npRmpi | 49131c9b187b8bddcf63523b30affa22658ae09e | 1ebad7da39e8ec4bf30775bcc2aea34ef5a87fec | refs/heads/master | 2016-09-05T17:44:47.216211 | 2014-06-27T00:00:00 | 2014-06-27T00:00:00 | 17,697,956 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 830 | rd | mpi.comm.free.Rd | \name{mpi.comm.free}
\title{MPI\_Comm\_free API}
\usage{
mpi.comm.free(comm=1)
}
\alias{mpi.comm.free}
\arguments{
\item{comm}{a communicator number}
}
\description{
\code{mpi.comm.free} deallocates a communicator so it
points to MPI\_COMM\_NULL.
}
\details{
When members associated with a communicator finish jobs or exit, they have to
call \code{mpi.comm.free} to release resources so that \code{\link{mpi.comm.size}}
will return 0. If the comm was created from an intercommunicator by
\code{\link{mpi.intercomm.merge}}, use \code{\link{mpi.comm.disconnect}} instead.
}
\value{
1 if success. Otherwise 0.
}
\author{
Hao Yu
}
\references{
\url{http://www.lam-mpi.org/},
\url{http://www-unix.mcs.anl.gov/mpi/www/www3/}
}
\seealso{
\code{\link{mpi.comm.disconnect}}
}
%\examples{
%}
\keyword{interface}
|
9d6a22fd4c4392422d98fc33ce3cb8e3981b6b26 | 8d2d8704e623860e5058eb3ec9c80461ce874ec0 | /swirl.R | 8f3d6f862b89a00f62715802ef3d0c6eaa30e55c | [] | no_license | kaitiantk/datasciencecoursera | 2b345b69f5cf101f89442ee12677f9f834485034 | 372ed1e3e83314c2f0ded2634fb701176cbb8246 | refs/heads/master | 2023-06-30T08:17:05.531821 | 2021-08-03T09:25:03 | 2021-08-03T09:25:03 | 383,823,371 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 53 | r | swirl.R | install.packages("SWIRL.R")
# Load swirl and launch an interactive lesson session (interactive use only).
# NOTE(review): the install.packages() call just above this block targets
# "SWIRL.R"; the CRAN package name is "swirl" -- confirm before installing.
library("swirl")
swirl()
|
309914158eced86457286e581491eeab6f7f67b0 | 0df2d0a9fb69910f8d3af5b622001d0f5aa595dc | /calculateRMSEvalues.R | fc72f98cb556635e36b5e5985786ecf49789a452 | [] | no_license | jvduijvenbode/thesis | 53fdb1e8b801f90a4c2edc7205f2888dcd14db22 | 1953159aa7125a10d9279abba3f107803b2fa800 | refs/heads/master | 2020-04-09T09:26:07.550062 | 2013-11-14T12:38:15 | 2013-11-14T12:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,305 | r | calculateRMSEvalues.R | #install.packages("data.table",dependencies=T)
#install.packages("boa",dependencies=T)
##load necessary packages
library(data.table)
library(plyr)
library(boa)
library(reshape2)
##set the working directory
setwd("D:/spectralsignatures")
## Consolidated measurements: flight reflectance bands plus incident-light
## sensor (ILS) bands, one row per file; the panel and flight IDs sit at
## fixed character positions inside the file name.
values=read.csv("allflightvals.csv")
values$panel=substr(values$filename,14,14)
values$flight=substr(values$filename,12,12)
ILSvalues=as.data.frame(c(values[c(1,c(108:length(values)))]))
ILSvalues$filename=NULL
flightvalues=as.data.frame(c(values[c(1,7:107,c(209:211))]))
flightvalues$filename=NULL
## Normalize the ILS bands within each panel by the per-band column means.
ILSvalnorm<-data.frame()
for(paneel in c(1:5)){
ILSnormalized<-subset(ILSvalues,ILSvalues$panel==paneel)
ILSnormalized[2:102]<-ILSnormalized[2:102]/colMeans(ILSnormalized[2:102])
ILSvalnorm<-rbind(ILSvalnorm,ILSnormalized)
}
colnames(ILSvalnorm)<-colnames(ILSvalues)
rm(ILSnormalized)
## Align both tables on X so the element-wise correction below matches rows.
flightvalues<-arrange(flightvalues,X)
ILSvalnorm<-arrange(ILSvalnorm,X)
flightvalscorrected<-data.frame(flightvalues[,1],flightvalues[,2:102]/ILSvalnorm[2:102],flightvalues[,103:104])
## Per-panel band variances before and after the ILS correction.
uncorrectedvariance<-data.frame()
correctedvariance<-data.frame()
for(paneel in c(1:5)){
uncorvar<-subset(flightvalues,flightvalues$panel==paneel)
corvar<-subset(flightvalscorrected,flightvalscorrected$panel==paneel)
uncvars<-c()
cvars<-c()
for (i in 2:102){uncvars<-rbind(uncvars,as.numeric(var(uncorvar[i])))}
for (i in 2:102){cvars<-rbind(cvars,as.numeric(var(corvar[i])))}
# NOTE(review): the two assignments below overwrite the result on every loop
# pass, so only panel 5 survives; the empty data.frames initialized above
# suggest rbind()-style accumulation was intended -- verify before reuse.
uncorrectedvariance<-cbind(colnames(uncorvar)[2:102],uncvars)
correctedvariance<-cbind(colnames(corvar)[2:102],cvars)
}
colnames(uncorrectedvariance)<-c("band","variance uncorrected")
colnames(correctedvariance)<-c("band","variance corrected")
variancematrix<-merge(uncorrectedvariance,correctedvariance,by="band")
# NOTE(review): 'dataframes2xls' is installed here but the package loaded and
# used below is XLConnect -- one of the two lines is probably stale.
install.packages("dataframes2xls",dependencies=T)
library(XLConnect)
files2write<-list(variancematrix,flightvalues,flightvalscorrected,ILSvalnorm,ILSvalues)
ssnames<-c("variance matrix","original reflectance values","corrected reflectance values","normalized ILS values","ILS values in W m-2 L-1")
#Create .xls file
wb <- loadWorkbook("flightcorrectionoutput.xlsx", create = TRUE)
for (output in 1:length(ssnames)){
	createSheet(wb,name=ssnames[output])
	#write data
	writeWorksheet(wb,as.data.frame(files2write[output]),sheet=ssnames[output])
}
#save .xls file
saveWorkbook(wb)
|
ee95da55d56ed397749c191a554bbe7b8c40a63f | 1022c0957e291e6c545e2db89715a74d7c1a509d | /tbsim/R/tb_read_init.R | d4cc288511cda6e5ff975ab0e6c524d3d8dd3309 | [] | no_license | saviclab/TBsim | ace67165891836c8c11af2c80868131914bdfc95 | d30f62d809e4d807a88f585a306e251b63213892 | refs/heads/master | 2021-07-11T19:06:27.401669 | 2020-08-18T22:41:05 | 2020-08-18T22:41:05 | 192,394,874 | 2 | 1 | null | 2020-01-22T16:59:57 | 2019-06-17T18:02:43 | R | UTF-8 | R | false | false | 1,066 | r | tb_read_init.R | #' @export
#' Read a TBsim-style configuration file into a named list.
#'
#' Each line of the file is expected to look like \code{<key>value}. Values
#' that parse as numbers are converted to numeric; values for a repeated key
#' (e.g. "drug" in therapy files) are concatenated into a vector.
#'
#' @param file name of the configuration file ("txt" is appended when the
#'   name does not already contain it).
#' @param folder folder holding the file; defaults to the "config" folder of
#'   the installed TBsim package.
#' @return a named list of parsed values plus a \code{folder} element, or
#'   \code{NULL} (with a warning) when the file does not exist.
tb_read_init <- function (file, folder=NULL) {
  # Config files are stored with a .txt extension; add it when absent.
  # (Kept as a substring match to preserve the original behaviour.)
  if (!grepl("txt", file)) {
    file <- paste0(file, ".txt")
  }
  # Fall back to the configuration folder shipped with the package.
  if (is.null(folder)) {
    folder <- paste0(system.file(package = "TBsim"), "/config")
  }
  filename <- paste0(folder, "/", file)
  if (!file.exists(filename)) {
    warning(paste0("Couldn't find file ", filename))
    return(NULL)
  }
  # The existence check above guarantees the file is present here, so the
  # old duplicated file.exists()/stop() branch was dead code and is removed.
  ini <- readLines(filename)
  obj <- list()
  for (i in seq_along(ini)) {
    # Strip the opening "<" so each line becomes "key>value", then split on
    # ">" to obtain the key/value pair.
    ini[i] <- gsub("<", "", ini[i])
    tmp <- strsplit(ini[i], ">")[[1]]
    if (length(tmp) == 2) {
      suppressWarnings({
        # Prefer a numeric interpretation; fall back to the raw string.
        if (!is.na(as.numeric(tmp[2]))) {
          val <- as.numeric(tmp[2])
        } else {
          val <- tmp[2]
        }
        # Repeated keys accumulate into a vector, e.g. "drug" in therapy files.
        if (tmp[1] %in% names(obj)) {
          obj[[tmp[1]]] <- c(obj[[tmp[1]]], val)
        } else {
          obj[[tmp[1]]] <- val
        }
      })
    } else {
      warning(paste0("Parsing failed for: ", ini[i]))
    }
  }
  obj$folder <- folder
  return (obj)
}
|
a6df25966222e3045d85d599ea4bcea4a56d717e | eb94b52ee3efb15ae52bf75eaa554a07957947ed | /server.R | 86dfdb7906999e866ffb67aebc4300b8029e8938 | [] | no_license | abitcoinperson/film-R | 38a7c4d61b40a45d3985508bb18a5f4d52807581 | 88800213e082b65800475d4a32f03a233a432d3c | refs/heads/master | 2021-06-01T01:49:17.085044 | 2016-07-15T18:26:20 | 2016-07-15T18:26:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,049 | r | server.R | shinyServer(function(input, output) {
  # Server half of a film-project questionnaire app: each reactive endpoint
  # echoes a single UI input back to the page as a labelled sentence.
  output$text10 <- renderText({
    paste("Your movie type is", input$radio4)
  })
  output$text1 <- renderText({
    paste("You have selected", input$select)
  })
  output$text2 <- renderText({
    paste("You have chosen a budget of", input$radio)
  })
  output$text3 <- renderText({
    paste("Your cast is", input$select2)
  })
  output$text4 <- renderText({
    paste("Your film's rating is ", input$select3)
  })
  output$text5 <- renderText({
    paste("Your ideal distribution platforms are", input$radio2)
  })
  output$text6 <- renderText({
    paste("Your preferred marketing strategy is", input$select4)
  })
  output$text7 <- renderText({
    paste("Your Director/Producer career stage is", input$select5)
  })
  output$text8 <- renderText({
    paste("Your story concept is", input$radio3)
  })
  # Free-text input is echoed via renderPrint (so the value appears quoted).
  output$text9 <- renderPrint({ input$text })
})
|
fa41e96e05021addfbfd3d087958e298e0d943c1 | e96da61d9cdd9faf5a4a32c0308f9e201dbe6975 | /Get and Cleaning Data/New Queries.R | 8a4250467a76311d34975af20861184e816db823 | [] | no_license | gyz0807/JHU-Data-Science | 1888da152068ca118ee69d66c8c4e212966993d2 | 39dcb4b3092c7897efaf50382c1d264829b80eac | refs/heads/master | 2021-01-21T04:47:51.193233 | 2016-05-31T00:31:40 | 2016-05-31T00:31:40 | 54,347,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,910 | r | New Queries.R | DataUrl <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
# Getting & Cleaning Data practice notes: downloading data, creating and
# transforming variables, merging and reshaping. Several lines are tutorial
# snippets intended for interactive, line-by-line execution.
download.file(DataUrl, "Restaurant.csv", method = "curl")
restData <- read.csv("Restaurant.csv")
# Creating sequences
seq(1, 10, by = 2)
seq(1, 10, length = 3)
x <- c(1, 3, 8, 25, 100); seq(along = x)
# Creating binary variables
restData$zipWrong <- ifelse(restData$zipCode < 0, TRUE, FALSE)
# Creating categorical variables
restData$zipGroups <- cut(restData$zipCode, breaks = quantile(restData$zipCode))
table(restData$zipGroups)
# Easier cutting
library(Hmisc)
restData$zipGroups <- cut2(restData$zipCode, g=4)
table(restData$zipGroups)
# Creating factor variables
restData$zcf <- factor(restData$zipCode)
# Levels of factor variables
yesno <- sample(c("yes", "no"), size = 10, replace = TRUE)
yesnofac <- factor(yesno, levels = c("yes", "no"))
relevel(yesnofac, ref = "yes")
as.numeric(yesnofac) # [1] 2 1 1 1 1 1 1 2 2 2
# Common transforms
# NOTE(review): the calls below are templates; `n` is never defined in this
# file, so they only run after binding x and n interactively.
abs(x)
sqrt(x)
ceiling(x)
floor(x)
round(x, digits = n)
signif(x, digits = n)
cos(x)
log(x)
log2(x); log10(x)
exp(x)
# Merging Data
# NOTE(review): data1/data2 are placeholders for the two data frames to merge.
merge(data1, data2, by.x = "", by.y = "", all = TRUE)
##################################### Exercise
# Reshaping Data
library(dplyr); library(tidyr)
head(mtcars)
mtcars$carname <- rownames(mtcars)
mtcars <- tbl_df(mtcars)
# Melt to long form: one row per (car, variable) for the mpg and hp columns.
Melt <- mtcars %>%
select(carname, gear, cyl, mpg, hp) %>%
gather(variable, value, mpg, hp)
View(Melt)
# Casting data frames
cydData <- mtcars %>%
filter(cyl %in% c(4,6,8)) %>%
group_by(cyl) %>%
summarise(mpg = mean(mpg), hp = mean(hp))
# Averaging values
head(InsectSprays)
with(InsectSprays, tapply(count, spray, sum))
# Split, apply, combine
spIns <- split(InsectSprays$count, InsectSprays$spray)
sprCount <- lapply(spIns, sum)
unlist(sprCount)
sapply(spIns, sum)
# dplyr
InsectSprays %>%
group_by(spray) %>%
summarize(sum = sum(count))
|
12add2c84afe57557095f4e7a348590e55cf73bc | 7cb80b0b9c23085f4b9607c607603a9109958d12 | /Boxplot_ggboxplot_group_addNumber_addMean.R | af6f812250adea88b239986bf35ffc68e2fc664f | [] | no_license | xjtuLTJ/CodeTips | 735068101bed9aff8297b46270d942e6ceae6a29 | e5f78484d41669f7121ba8d2c60632ac18f2c7bb | refs/heads/main | 2023-02-02T09:42:54.602051 | 2020-12-16T13:12:17 | 2020-12-16T13:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,228 | r | Boxplot_ggboxplot_group_addNumber_addMean.R | ##################################################
## Project: most useful plotting functions
## Script purpose: boxplot grouped by a factor; annotate each group with its
## observation count and mean value
## Date: 2020-12-07
## Author: Yuting Liu
##################################################
## Section: set envs
##################################################
library(data.table)
library(ggpubr)
## Section: test data
##################################################
setwd("/lustre/user/liclab/liuyt/ChenQ/Figures/CTCF-diffBD-MKOGMvsMKODM")
# Every pileup score file matching the comparison of interest.
file.ls <- list.files('data/HiC-Pileup/LocalInsulation/', pattern = "CTCF.WT-DMvsMKO-DM", full.names = T)
# load() each file (expected to define `scores.ls`), drop NA scores and
# flatten the remaining values into one numeric vector per file.
fc.ls <- lapply(file.ls, function(f){
  load(f)
  s.ls <- unlist(lapply(scores.ls, function(vec){
    vec = vec[which(!is.na(vec))]
    return(vec)
  }))
  return(s.ls)
})
# NOTE(review): assumes exactly two matching files, listed in the order
# WT-GM then MKO-GM -- confirm the directory contents.
mkogm.d <- fc.ls[[1]]
mkogm.s <- fc.ls[[2]]
## Section: generate the dataframe
##################################################
df <- data.frame(cbind('type' = c(rep('WTGM',length(mkogm.d)), rep('MKOGM', length(mkogm.s))),
                       'value' = c(mkogm.d, mkogm.s)))
df$type <- factor(df$type, levels = c('WTGM','MKOGM'))
# cbind() coerced everything to character above, so restore numeric values.
df$value <- as.numeric(as.character(df$value))
## Section: setting plot parameters
##################################################
my_comparisons <- list(c('WTGM','MKOGM'))
# Annotation helper for stat_summary(): returns each group's size and mean,
# positioned at a fixed height (the default upper_limit reads the global df
# when the function is called).
stat_box_data <- function(y, upper_limit = quantile(df$value, 0.50) ) {
  return( data.frame( y = upper_limit,
                      label = paste('count =', length(y), '\n','mean =', round(mean(y), 3), '\n')))
}
fontsize = 4
linesize = 1
#pdf('test.boxplot.pdf', width = 2.5, height = 2.5)
ggboxplot(df, x = "type", y = "value",
          color = 'type', size = .3, font.label = list(size = fontsize), outlier.shape = NA)+
  stat_summary(fun.data = stat_box_data, geom = "text", hjust = 0.5,vjust = 0.9)+
  theme(legend.position = "none", axis.ticks.x = element_blank())+
  stat_compare_means(comparisons = my_comparisons, label.y = c(10),method = 't.test', size = 2) +
  ylab('Insulation Strength (15kb)')+ xlab('Decreased Peaks')+ ggtitle(label = "")+ # labs
  coord_cartesian(ylim = c(-25,25))+
  scale_color_manual(values = c('WTGM' ='chartreuse3' , 'MKOGM' = 'coral3')) # colors
#dev.off()
|
2dc849213ee4f00105adcf9aefcb42fd04164c6d | 8d5c87872ba3666e65c645e69a03871fd5ecd5ce | /usecases_data/alpha_vantage_api/get_started.R | a64cfb78e87bd87e0e0468f8f4e110c32603361c | [] | no_license | leewtai/leewtai.github.io | f28ca1902b48816fc23e7dae3ed1342a8a624b65 | 3a9554dca011da0962e1d0f611ad088d30d5043f | refs/heads/master | 2023-08-09T10:20:27.573438 | 2023-07-19T14:43:08 | 2023-07-19T14:43:08 | 230,492,618 | 3 | 7 | null | 2023-08-29T13:58:32 | 2019-12-27T18:09:52 | Jupyter Notebook | UTF-8 | R | false | false | 1,087 | r | get_started.R | # Data is using
# https://www.alphavantage.co/documentation/
# Fetch daily adjusted price series for a set of Vanguard ETFs from the
# Alpha Vantage API and combine them into one long CSV.
library(dplyr)
library(httr)
library(jsonlite)
# This is to not expose my api_key...
creds <- read_json('../credentials.json')
api_key <- creds[['alpha_vantage_api_key']]
url <- "https://www.alphavantage.co/query"
symbols = c('VOO', 'BLV', 'BSV', 'VTIP', 'BIV', 'VTC',
            'VIG', 'VUG', 'VYM', 'VV', 'VO')
params <- list("function"="TIME_SERIES_DAILY_ADJUSTED",
               outputsize='full',
               apikey=api_key)
# Pause up front -- presumably to respect the API's request rate limits.
Sys.sleep(60)
dfs <- list()
for(symbol in symbols){
    print(symbol)
    params['symbol'] <- symbol
    response <- GET(url=url, query=params)
    dat <- content(response)
    ts <- dat[['Time Series (Daily)']]
    # One row per trading day; strip the "1. ", "2. " ... prefixes that the
    # API puts in front of each field name, then coerce fields to numeric.
    df <- bind_rows(ts)
    names(df) <- sub("[0-9]+\\. ", "", names(ts[[1]]))
    df <- as.data.frame(sapply(df, as.numeric))
    df$date <- names(ts)
    df$symbol <- symbol
    dfs[[symbol]] <- df
    # Throttle between requests (same rate-limit assumption as above).
    Sys.sleep(12)
}
bdf <- bind_rows(dfs)
# jsonlite::write_json(bdf, "alpha_vantage_etfs_ts_daily.json")
write.csv(bdf, 'alpha_vantage_etfs_ts_daily.csv', row.names=FALSE)
|
54c85521e0854ffc00c4dcfce5e4638a1f500a19 | d47e1c59319f896791ea855145b3e952ecbf027c | /R/init.R | 623a73bb4c8a0ce372f72c14c1952d5ab61647d1 | [] | no_license | cran/IMWatson | 421105ca77989dd1984485c9b9ede15d2c9858aa | 458fec21c1a7b3714ce461b2e44a395106c1ac5d | refs/heads/master | 2020-12-22T00:26:45.961895 | 2019-03-29T15:40:03 | 2019-03-29T15:40:03 | 236,614,695 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,846 | r | init.R | #' @title Initialize connection with Watson's Conversation API
#' @description Connect to Watson's Conversation API
#' @param api_key API Key
#' @param workspace Workspace api
#' @param url optional argument that defaults to "https://gateway.watsonplatform.net/assistant/api/v1/workspaces"
#' @param version optional argument that defaults to "2018-09-20"
#' @return The function returns a list with the text and the context that is returned by your assistant.
#' @details This function allows you to start a connection to your Watson Assistant
#' @examples
#' if(interactive()){
#' # See https://cloud.ibm.com/apidocs/assistant
#' conection <- init(api_key, workspace)
#' conection[["text"]]
#' chat1 <- chat("Ignacio", conection, username, password, workspace)
#' chat1[["text"]]
#' chat2 <- chat("I would like flower suggestions", chat1, username, password, workspace)
#' chat2[["text"]]
#' }
#' @export
init <- function(api_key, workspace, url, version) {
  # Fill in the public Watson Assistant endpoint / API version when the
  # caller did not supply them.
  if(missing(url)){
    url <- "https://gateway.watsonplatform.net/assistant/api/v1/workspaces"
  }
  if(missing(version)){
    version <- "2018-09-20"
  }
  # Open the conversation by POSTing an empty utterance (hand-written JSON
  # body); the response is read as text and parsed into an R list.
  conv.init <- httr::POST(url=glue::glue("{url}/{workspace}/message?version={version}"),
                          httr::authenticate("apikey",api_key),
                          httr::add_headers(c("Content-Type"="application/json")),
                          body = '{ "input": { "text":""},
                          "system":{ "dialog_stack":["root"]},
                          "dialog_turn_counter":1,
                          "dialog_request_counter":1}',
                          encode = "json") %>%
    httr::content("text", encoding = "UTF-8") %>%
    jsonlite::fromJSON()
  # Return the greeting text plus the serialized dialog context
  # (presumably echoed back to the service by chat() on the next turn).
  out <- list(text=conv.init$output$text,
              context = jsonlite::toJSON(conv.init$context, auto_unbox = TRUE))
  return(out)
}
|
fda9d6afc99f5151ccf915c98b11204c63f15c26 | 3877ee02e7deec476c64901c474a24ad56dcd431 | /man/read_genome.Rd | 4f24646428d1e88965aaea5dfacda61593b21f5f | [] | no_license | ropensci/biomartr | 282d15b64b1d984e3ff8d7d0e4c32b981349f8ca | e82db6541f4132d28de11add75c61624644f6aa1 | refs/heads/master | 2023-09-04T09:40:15.481115 | 2023-08-28T15:56:25 | 2023-08-28T15:56:25 | 22,648,899 | 171 | 34 | null | 2023-09-14T12:28:02 | 2014-08-05T15:34:55 | R | UTF-8 | R | false | true | 1,328 | rd | read_genome.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_genome.R
\name{read_genome}
\alias{read_genome}
\title{Import Genome Assembly as Biostrings or data.table object}
\usage{
read_genome(file, format = "fasta", obj.type = "Biostrings", ...)
}
\arguments{
\item{file}{a character string specifying the path to the file
storing the genome.}
\item{format}{a character string specifying the file format used to store
the genome, e.g. \code{format = "fasta"} (default) or \code{format = "gbk"}.}
\item{obj.type}{a character string specifying the object type in which
the genomic sequence shall be represented. Either as
\code{obj.type = "Biostrings"} (default) or
as \code{obj.type = "data.table"}.}
\item{...}{additional arguments that are used by the
\code{\link[seqinr]{read.fasta}} function.}
}
\value{
Either a \code{Biostrings} or \code{data.table} object.
}
\description{
This function reads an organism specific genome stored in a
defined file format.
}
\details{
This function takes a string specifying the path to the genome file
of interest as first argument (e.g. the path returned by
\code{\link{getGenome}}).
}
\seealso{
\code{\link{getGenome}}, \code{\link{read_proteome}},
\code{\link{read_cds}}, \code{\link{read_gff}}, \code{\link{read_rna}}
}
\author{
Hajk-Georg Drost
}
|
d59d92abc8156c833d0ff4ed407ec7218652fa10 | c98d6f40abe3e3ad60569ae52e499de4ed6ab432 | /R/shinySNPGene.R | a5d6d9d60397dfde53f2bea9846dcee55fef5299 | [] | no_license | byandell/qtl2shiny | 6ad7309b7f4b6bf89147560e23102b4ac5f93620 | e342ce39f2a30ea4df4010aac61822e448d43e20 | refs/heads/master | 2023-05-11T06:04:00.877301 | 2023-04-30T20:22:25 | 2023-04-30T20:22:25 | 78,020,416 | 2 | 3 | null | 2018-01-17T14:31:31 | 2017-01-04T14:00:43 | R | UTF-8 | R | false | false | 3,294 | r | shinySNPGene.R | #' Shiny SNP Association
#'
#' Shiny module for SNP association mapping, with interfaces \code{shinySNPGeneInput}, \code{shinySNPGeneUI} and \code{shinySNPGeneOutput}.
#'
#' @param input,output,session standard shiny arguments
#' @param snp_par,chr_pos,pheno_names,snp_scan_obj,snpinfo,top_snps_tbl,gene_exon_tbl,project_info,snp_action reactive arguments
#'
#' @author Brian S Yandell, \email{brian.yandell@@wisc.edu}
#' @keywords utilities
#'
#' @return the module's shiny \code{input} object (the final expression of
#'   the server function); outputs are registered as side effects.
#'
#' @export
#' @importFrom shiny callModule NS reactive req
#' radioButtons
#' uiOutput
#' renderUI
#' fluidRow column tagList
shinySNPGene <- function(input, output, session,
                     snp_par, chr_pos, pheno_names,
                     snp_scan_obj, snpinfo, top_snps_tbl,
                     gene_exon_tbl,
                     project_info,
                     snp_action = shiny::reactive({"basic"})) {
  ns <- session$ns
  ## Shiny Modules
  ## SNP Association Scan
  shiny::callModule(shinySNPPlot, "snp_scan",
                    snp_par, chr_pos, pheno_names,
                    snp_scan_obj, snpinfo, snp_action)
  ## SNP Summary
  shiny::callModule(shinySNPSum, "best_snp",
                    chr_pos, top_snps_tbl, project_info, snp_action)
  ## Gene Region
  shiny::callModule(shinyGeneRegion, "gene_region",
                    snp_par,
                    top_snps_tbl,
                    project_info,
                    snp_action)
  ## Genes and Exons
  shiny::callModule(shinyGeneExon, "gene_exon",
                    snp_par, chr_pos,
                    top_snps_tbl, gene_exon_tbl,
                    snp_action)
  ## The panels below are swapped in and out according to the
  ## "Scan" / "Genes" / "Exons" radio selection (input$button).
  output$snp_check <- shiny::renderUI({
    switch(shiny::req(input$button),
           Genes = shinyGeneRegionInput(ns("gene_region")))
  })
  output$snp_input <- shiny::renderUI({
    switch(shiny::req(input$button),
           Exons = shinyGeneExonInput(ns("gene_exon")),
           Scan = shinySNPSumInput(ns("best_snp")))
  })
  output$snp_output <- shiny::renderUI({
    switch(shiny::req(input$button),
           Scan = shiny::tagList(
             shinySNPPlotOutput(ns("snp_scan")),
             shinySNPSumOutput(ns("best_snp"))),
           Genes = shinyGeneRegionOutput(ns("gene_region")),
           Exons = shinyGeneExonOutput(ns("gene_exon")))
  })
  ## Downloads
  output$download_csv_plot <- shiny::renderUI({
    switch(shiny::req(input$button),
           Scan = shiny::tagList(shiny::fluidRow(
             shiny::column(6, shinySNPSumUI(ns("best_snp"))),
             shiny::column(6, shinySNPPlotUI(ns("snp_scan"))))),
           Genes = shinyGeneRegionUI(ns("gene_region")),
           Exons = shinyGeneExonUI(ns("gene_exon")))
  })
  ## Re-render the radio buttons, preserving the current selection.
  output$radio <- shiny::renderUI({
    shiny::radioButtons(ns("button"), "",
                        c("Scan", "Genes", "Exons"),
                        input$button)
  })
  ## Return the input object so callers can observe the module's state.
  input
}
#' Input panel for the shinySNPGene module: the radio/check selector row
#' followed by the action-specific input slot.
#'
#' @param id shiny module identifier
shinySNPGeneInput <- function(id) {
  namespaced <- shiny::NS(id)
  selector_row <- shiny::fluidRow(
    shiny::column(6, shiny::uiOutput(namespaced("radio"))),
    shiny::column(6, shiny::uiOutput(namespaced("snp_check"))))
  shiny::tagList(
    selector_row,
    shiny::uiOutput(namespaced("snp_input")))
}
#' Download UI placeholder for the shinySNPGene module (filled by the
#' server's download_csv_plot output).
#'
#' @param id shiny module identifier
shinySNPGeneUI <- function(id) {
  shiny::uiOutput(shiny::NS(id, "download_csv_plot"))
}
#' Main output placeholder for the shinySNPGene module (filled by the
#' server's snp_output output).
#'
#' @param id shiny module identifier
shinySNPGeneOutput <- function(id) {
  shiny::uiOutput(shiny::NS(id, "snp_output"))
}
|
edc9588984799d2d81ef36ac358b1de7c58e1681 | 040113c16d38a2ce1854c95c8d0b3c82481fc33c | /R/filter-files.R | 21e07c55046c9be833221a70e4ec59b94ce73374 | [] | no_license | sakrejda/pdhs | 9dc199648c89fc6ac036cd61d174e41a5335dedb | 0e32ea50e6db2abb7ab20e676c3457bd6e66c93c | refs/heads/master | 2020-03-17T22:25:43.022686 | 2018-05-18T20:27:55 | 2018-05-18T20:27:55 | 134,002,989 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,430 | r | filter-files.R |
#' Check if a string has a specific extension, fails without
#' extension, returns TRUE on NULL extension. This function
#' is dumb and when it encounters filenames with multiple dots
#' it only considers the final extension.
#'
#' @param s character vector of file names to check for the extension.
#' @param e string specifying the extension, or NULL to match everything.
#' @return logical vector of the same length as \code{s}.
#' @export
has_extension <- function(s, e) {
  # A NULL extension means "no filtering", so every element matches. The
  # previous ifelse(is.null(e), ...) form collapsed this case to a single
  # TRUE regardless of length(s), because ifelse() shapes its result like
  # its (here scalar) test argument; an explicit branch stays vectorized.
  if (is.null(e)) {
    return(rep(TRUE, length(s)))
  }
  grepl(pattern = paste0('\\.', e, '$'), x = s, ignore.case = TRUE)
}
#' Check if a string has the .dta extension.
#' @param s character vector of file names to check
#' @export
is_dta <- function(s) {
  has_extension(s, e = "dta")
}
#' Allow NULL to evaluate as TRUE for filtering to play
#' nice with missing(...)
#'
#' @param x either NULL or a value to compare against \code{y}.
#' @param y the value(s) \code{x} is compared with when non-NULL.
#' @return TRUE when \code{x} is NULL, otherwise the logical result of
#'   \code{x == y} (vectorized, so possibly a logical vector).
null_or_compare <- function(x, y) if (is.null(x)) TRUE else x == y
#' Takes a path or vector of paths and filters either using
#' a regex pattern or based on DHS codes. For full details
#' of code-based filtering see ?filter_file_name
#'
#' Filter a path for DHS data file name elements. Returns
#' true for path items that match the requested elements and
#' false for those that don't. The match considers the
#' position of the element in the filename so it's not a straight
#' regex.
#'
#' To avoid situations with unreliable filtering (e.g.-try typing
#' "Lao People's Democratic Republic" reliably/repeatedly) this
#' function relies on the DHS codes for countries, data formats,
#' etc... to get these use the functions 'get_country_code',
#' 'get_dataset_type_code', 'get_file_format_extensions',
#' 'get_file_format_code'. Those functions do loose string matching
#' via regex.
#'
#' @param path path or vector of paths to files to filter
#' out.
#' @param pattern either NULL (for code based filtering) or a regex
#' to be used to filter out elements of the path.
#' @param latest when TRUE, keep only the most recent release within each
#' country/dataset_type/round/format group.
#' @param ... optional code-based filters (country, dataset_type, round,
#' release, format); omitted entries match everything.
#' @return paths matching the conditions or a length-zero
#' character vector.
#' @export
filter_file_names <- function(path, pattern=NULL, latest=FALSE, ...) {
  have_dots <- !missing(...)
  if (have_dots) {
    dots <- list(...)
  }
  # Regex filtering is applied first, independently of code-based filters.
  if (!is.null(pattern))
    path = path[grepl(pattern=pattern, x=path)]
  # Fast path: no code filters and no latest-release pruning requested.
  if (!have_dots && !latest) {
    return(path)
  }
  # process_filenames() (defined elsewhere in this package) decodes each DHS
  # file name into columns such as country, dataset_type, round, release and
  # format, alongside the original path.
  file_data <- process_filenames(path)
  # No code filters: keep the newest release within each group.
  if (!have_dots && isTRUE(latest)) {
    file_data <- file_data %>%
      dplyr::group_by(country, dataset_type, round, format) %>%
      dplyr::filter(release == max(release))
    return(file_data[['path']])
  }
  # Code filters plus latest: NULL entries in `dots` match everything via
  # null_or_compare(), and one path per group is kept.
  if (isTRUE(latest)) {
    file_data <- file_data %>%
      dplyr::group_by(country, dataset_type, round, format) %>%
      dplyr::filter(
        null_or_compare(dots[['country']], country),
        null_or_compare(dots[['dataset_type']], dataset_type),
        null_or_compare(dots[['round']], round),
        release == max(release),
        null_or_compare(dots[['format']], format)) %>%
      dplyr::summarise(path=last(path))
    return(file_data[['path']])
  }
  # Code filters without latest: plain row-wise filtering, release included.
  if (!isTRUE(latest)) {
    file_data <- file_data %>%
      dplyr::filter(
        null_or_compare(dots[['country']], country),
        null_or_compare(dots[['dataset_type']], dataset_type),
        null_or_compare(dots[['round']], round),
        null_or_compare(dots[['release']], release),
        null_or_compare(dots[['format']], format))
    return(file_data[['path']])
  }
  # Unreachable: the branches above are exhaustive.
  stop("Something is rotten in Denmark.")
}
|
3950cd719aaaaa07116ef64491e1b5f4371ecfa5 | 98900f0cd8cbc35ae0904a6c7df65f18ea04d6e9 | /R/parseNum.R | b990ad326d3c9e1e9e4c219c185431a4ac853d9d | [
"MIT"
] | permissive | seacode/rsimGmacs | 0a55a984cb1b457db72767209cf0c8964b172e14 | c49d708e51d2d4dddcab2eaded0500ef9ce1774a | refs/heads/master | 2021-01-13T13:58:34.136732 | 2015-07-01T15:18:41 | 2015-07-01T15:18:41 | 29,838,306 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 472 | r | parseNum.R | #'
#'@title Parse numeric expressions from a character vector.
#'
#'@description Function to parse numeric expressions from a character vector.
#'
#'@param str - character vector to parse; each element must be a valid R
#'numeric expression (e.g. "3*2+1")
#'
#'@return parsed and evaluated numerical expressions as a vector (NULL when
#'\code{str} is empty)
#'
#'@export
#'
parseNum<-function(str){
    # seq_along() is safe for zero-length input, where the previous
    # 1:length(str) form iterated over c(1, 0) and failed.
    res<-vector("list",length(str));
    for (i in seq_along(str)){
        # Parse and evaluate each expression directly instead of routing the
        # value through a temporary "tmp<-" assignment.
        res[[i]]<-eval(parse(text=str[i]));
    }
    return(unlist(res));
}
|
00aa18bc78c1560eb4cb8916c5bf0f294fd2a38c | a30d89a181b8f4e9d49989058fc5ae2724883974 | /src/01.basic/5.environment.R | 7325c66845acf4a9b803963f48cf6749f0aa980c | [] | no_license | imdangun/r_grammar.20.12 | d9b120e9f6a54b0172d09599285423288f1fb6c7 | 4f83ed51ad227716c48cf5adfa1212dd02c72767 | refs/heads/master | 2023-01-30T11:00:23.288349 | 2020-12-15T05:33:32 | 2020-12-15T05:33:32 | 320,449,625 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 962 | r | 5.environment.R | ##
## Environment basics: working directory, library paths, workspace objects,
## options, history, output redirection (sink) and help lookup.
setwd('C:/DEV/git/r_grammar.20.12/r_grammar.20.12/src/01.basic')
getwd()
# Create the lib directory beforehand. (translated from Korean)
.libPaths('C:\\DEV\\git\\r_grammar.20.12\\r_grammar.20.12\\lib')
install.packages('animation')
library(animation)
##
ls() # [1] "iris" "t1" "t2" "x" "y"
rm(iris)
ls() # [1] "t1" "t2" "x" "y"
rm(list = ls())
ls() # character(0)
## option
options()
options(digits = 3)
print(1.2345) # [1] 1.23
options(prompt = '>>')
##
history(max.show = Inf)
savehistory(file = 'myhistory.txt')
loadhistory(file = 'myhistory.txt')
sink(file = 'out.txt', append = T)
print('hello') # written to out.txt while the sink is active
sink() # restore printing to the console
print('world')
## Help
help(cor)
?cor
apropos('cor')
# [1] ".rs.recordAnyTraceback" ".rs.recordFunctionInformation"
# [3] ".rs.recordHtmlWidget" ".rs.recordTraceback"
# ...
RSiteSearch('cor') # opens the search results in a browser
|
9773e3d2bc74ea43e8e7a3f35a2f1caaf9b58208 | b111b2cab6d52c3a5480820af36a8e363305f3cb | /demo/Votacao.R | 93ea59cc9926bee490c3c77fb43a46d1b2705814 | [] | no_license | rommelnc/hudar | 240cb22226802de0241d2d95095d09bd3b60bba0 | 257b6360f2e8d5d7d642c8a9a5cb8b73f48ba4f2 | refs/heads/master | 2020-05-09T14:58:06.410576 | 2015-08-19T02:58:21 | 2015-08-19T02:58:21 | 40,375,394 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,472 | r | Votacao.R | library(xlsx)
# Exploratory demo: 2014 Brazilian presidential runoff by municipality.
# Merges DATASUS socio-economic data with TSE vote counts, labels each
# municipality's winner, then fits classification trees and GLMs.
# NOTE(review): the script keeps several superseded attempts (repeated
# read.* calls and reassignments); only the last assignment of each name
# takes effect when sourced top-to-bottom.
dados = read.xlsx(file='../../data/Datasus/consolidado.xlsx', sheetIndex=1, stringsAsFactors = FALSE)
dados = read.table(file = '../../data/Datasus/consolidado.csv', fileEncoding = 'ISO-8859-1', header = TRUE, sep = ';', stringsAsFactors = FALSE)
dados = read.csv(file = '../../data/Datasus/consolidado-utf8.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
dados = read.csv(file = '../../data/Datasus/consolidado.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
# The first six characters of the municipality field are the IBGE code.
codigo = substr(dados$Município, 1, 6)
dados$Código = codigo
str(dados)
head(codigo)
ufs = read.csv(file = '../../data/Datasus/Tabela IBGEUF.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
ufs = read.csv(file = '../../data/Datasus/Tabela IBGEUF-utf8.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
names(ufs) = c('Código','Estado' ,'Município')
# Lower-case names so they can be matched against the votes table.
ufs$Município = tolower(ufs$Município)
ufs$Código = substr(as.character(ufs$Código), 1, 6)
str(ufs)
votos = read.csv(file = '../../data/Datasus/Votacao_candidato_municipio_e_zona.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
votos = read.csv(file = '../../data/Datasus/Votacao_candidato_municipio_e_zona-utf8.csv', header = TRUE, sep = ';', stringsAsFactors = FALSE)
votos$Município = tolower(votos$Município)
str(votos)
ufs
# NOTE(review): the trailing comma makes this call fail ("argument 3 is
# empty"); merge(ufs, votos) on the shared Município column is presumably
# what was intended -- confirm.
votos_codigo = merge(ufs, votos, )
str(votos_codigo)
tudo = merge(votos_codigo, dados, by = c('Código'))
str(tudo)
save(tudo, file='../../data/Datasus/tudo_consolidado.Rda')
# Drop identifier / duplicate columns before modelling.
votacao = tudo[,-c(4,6,8,10,11,20,21,22,25,26,27,28,30)]
str(votacao)
# A candidate "wins" the municipality when ahead by more than 5%.
vencedor = ifelse(votacao$Qt.Votos.Nominais.do.Aécio > 1.05*votacao$Qt.Votos.Nominais.da.Dilma, 'Aécio',
                  ifelse(votacao$Qt.Votos.Nominais.da.Dilma > 1.05*votacao$Qt.Votos.Nominais.do.Aécio, 'Dilma', 'Empate'))
table(vencedor)
votacao$Vencedor = as.factor(vencedor)
str(votacao)
save(votacao, file='../../data/Datasus/votacao_vencedor.Rda')
classificacao = votacao[,-c(1,3,4,5,6)]
classificacao$Estado = as.factor(classificacao$Estado)
classificacao = votacao[,-c(1,2,3,4,5,6)]
str(classificacao)
classificacao$Estado = as.factor(classificacao$Estado)
install.packages('tree')
library(tree)
# Classification trees for the winner label: full model, then only two
# socio-economic predictors.
arvore = tree(formula = Vencedor ~ ., data = classificacao)
arvore = tree(formula = Vencedor ~ PIB_per_capita + Taxa_de_analfabetismo, data = classificacao)
?tree
summary(arvore)
plot(arvore)
text(arvore)
install.packages('rpart')
library(rpart)
fit <- rpart(Vencedor ~ .,
             method="class", data=classificacao)
fit <- rpart(Vencedor ~ PIB_per_capita + Taxa_de_analfabetismo,
             method="class", data=classificacao)
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
# plot tree
plot(fit, uniform=TRUE,
     main="Votação por Município")
text(fit, use.n=TRUE, all=TRUE, cex=.8)
votacao$Estado = as.factor(votacao$Estado)
# Linear models for Dilma's absolute vote count.
dilma = votacao[,-c(1,2,3,4)]
str(dilma)
lm = glm(Qt.Votos.Nominais.da.Dilma ~ ., data = dilma)
summary(lm)
plot(lm)
dilma.relevante = dilma[,c(1,2,9,11:15)]
str(dilma.relevante)
lm = glm(Qt.Votos.Nominais.da.Dilma ~ ., data = dilma.relevante)
summary(lm)
plot(dilma$Qt, dilma$Qt.Votos.Nominais.da.Dilma)
plot(dilma$PIB_per_capita, dilma$Qt.Votos.Nominais.da.Dilma)
plot(dilma$X._população_com_renda_._1.2_SM, dilma$Qt.Votos.Nominais.da.Dilma)
hist(dilma$X._população_com_renda_._1.2_SM)
lm
http://www.r-bloggers.com/general-regression-neural-network-with-r/
|
c2809349316b3f91442c600e529551c8f66dda6a | bd6416f52b21ee94e8b7516aa78efd817b75f97c | /man/fDLfun.Rd | 800d6d8470b5216dcb41cf31a37ab7b93b085f4c | [] | no_license | cran/GPPFourier | e509c2677b7af3e346419ae98b86f3d14540741f | 1cef0b3c594b154be2cacbe1e607db444028fb2b | refs/heads/master | 2021-07-03T08:11:23.429105 | 2017-09-22T15:17:01 | 2017-09-22T15:17:01 | 104,495,054 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 834 | rd | fDLfun.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SunsetSunrise.R
\name{fDLfun}
\alias{fDLfun}
\title{Relative amount of daylight hours at a specified date and location.}
\usage{
fDLfun(date = "2016-07-01", phi = 51.176, lambda = 4.326, H = 0)
}
\arguments{
\item{date}{POSIXct object or date specified in unambiguous format. See \code{\link{as.POSIXct}}}
\item{phi}{Latitude}
\item{lambda}{Longitude}
\item{H}{Height of location where fDL is to be calculated}
}
\value{
The fraction of daylight hours at the specified date and location. Sunrise and Sunset are calculated with \code{\link{SunRiseSet}}.
}
\description{
Relative amount of daylight hours at a specified date and location.
}
\examples{
fDLfun("2016-06-21")
}
\seealso{
\code{\link{SunRiseSet}}
}
\author{
Tom Cox <tom.cox@uantwerp.be>
}
|
f11fad0eb9d6ee8d005e9348f98ff9513a89081c | ae33c584a4efdd6c337abd69fc9fa98b1c01e64b | /data/fpp2/save-fpp2-data.R | 30399bac53ccd6007bebf77e2cf8a29d5f957129 | [] | no_license | elray1/elray1.github.io | de1ffa6d854a7b58991f73c2ce1c18dc51d6723d | 818db063d1785c09a0da019929d6f5651593f1c1 | refs/heads/master | 2023-03-09T23:38:53.181329 | 2023-02-19T15:13:30 | 2023-02-19T15:13:30 | 91,704,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 239 | r | save-fpp2-data.R | library(fpp2)
library(dplyr)
library(here)

# Print the project root (interactive sanity check for the relative path below).
here()

# Export the fpp2 daily electricity demand series as a flat CSV,
# recoding the logical WorkDay flag into readable labels.
elecdaily_df <- as.data.frame(elecdaily)
elecdaily_df$WorkDay <- ifelse(elecdaily_df$WorkDay, "WorkDay", "Other")

write.csv(elecdaily_df, "data/fpp2/elecdaily.csv", row.names = FALSE)
|
c89e4358851923718aa53fae3ecaf432cd978639 | 0cb726c711343de6b5c92a68c59e2998a274fda9 | /corr.R | 923a59a6ddaab44f715cedbf9f150394d0e5482d | [] | no_license | sangramga/RProgramming_Coursera_Ass1 | a458e22735ff3dc064c011aae43476b095a3d883 | 67dcbab8b67b93fd2ae83eda990f874bf90c5eea | refs/heads/master | 2021-01-10T03:57:24.492164 | 2015-06-02T11:15:03 | 2015-06-02T11:15:03 | 36,726,278 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | r | corr.R | corr <- function(directory,threshold = 0)
{
  # Correlation between sulfate and nitrate for every monitor whose number of
  # completely observed rows meets 'threshold'.
  #
  # directory: folder (under the home directory) holding monitor files
  #            001.csv .. 332.csv (332 monitors is the data set's contract)
  # threshold: minimum number of complete cases a monitor must have before
  #            its correlation is included
  # Returns a numeric vector of correlations (length 0 if none qualify).
  correlations <- numeric()
  for (monitor_id in 1:332) {
    # zero-padded file name, e.g. "007.csv", built in one step instead of the
    # original three-branch paste chain
    path <- paste0("~/", directory, "/", sprintf("%03d", monitor_id), ".csv")
    df <- read.csv(skip = 1,
                   col.names = c("Date", "sulfate", "nitrate", "ID"),
                   sep = ",", file = path)
    if (sum(complete.cases(df)) >= threshold) {
      correlations <- c(correlations,
                        cor(df$sulfate, df$nitrate, use = "pairwise.complete.obs"))
    }
  }
  correlations
}
595b006726bd287bae9b9fcaa6a359ab5526046a | 477fa404690395dd92cab1695e3c5eecafd16ba8 | /entire_usa_analysis.R | 72834eb86b700acca47cdd01ec018a8cc2fde282 | [] | no_license | ChrisJones687/slf_casestudy | 60c5bdb2f4a647c3866231edb8df84d93a00982f | 0bae96da3252ed102cb2414735ee910775066a2b | refs/heads/master | 2020-12-28T08:46:10.276378 | 2020-08-01T13:41:20 | 2020-08-01T13:41:20 | 238,251,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,964 | r | entire_usa_analysis.R | ## Analysis for the entire USA
library(raster)
library(sp)
library(lubridate)
library(PoPS)

## PoPS (Pest or Pathogen Spread) simulation of spotted lanternfly (SLF)
## spread across the entire USA: configure all model inputs/parameters, then
## run a 30-iteration multirun.

## ---- input rasters --------------------------------------------------------
infected_file <- "H:/Shared drives/APHIS Projects/PoPS/Case Studies/spotted_latternfly/whole_usa/slf_infested.tif"
host_file <- "H:/Shared drives/APHIS Projects/PoPS/Case Studies/spotted_latternfly/whole_usa/host.tif"
total_plants_file <- "H:/Shared drives/APHIS Projects/PoPS/Case Studies/spotted_latternfly/whole_usa/total_plants.tif"
temperature_file <- ""
# NOTE(review): the temperature *coefficient* file points at the infection
# raster -- confirm this is intentional (temp is FALSE below, so the model
# should not actually use it).
temperature_coefficient_file <- "H:/Shared drives/APHIS Projects/PoPS/Case Studies/spotted_latternfly/whole_usa/slf_infested.tif"
precipitation_coefficient_file <- ""

## ---- weather switches -----------------------------------------------------
use_lethal_temperature <- FALSE
temp <- FALSE
precip <- FALSE

## ---- time and season ------------------------------------------------------
season_month_start <- 5
season_month_end <- 11
time_step <- "month"
start_date <- '2019-01-01'
end_date <- '2020-12-31'
lethal_temperature <- -35
lethal_temperature_month <- 1

## ---- biology --------------------------------------------------------------
reproductive_rate <- 1.7

## ---- management / treatments (disabled) -----------------------------------
treatments_file <- ""
treatment_dates <- c('2019-12-24')
treatment_method <- "ratio"
management <- FALSE
mortality_on <- FALSE
mortality_rate <- 0
mortality_time_lag <- 0

## ---- dispersal kernels ----------------------------------------------------
percent_natural_dispersal <- .995
natural_kernel_type <- "cauchy"
anthropogenic_kernel_type <- "cauchy"
natural_distance_scale <- 25
anthropogenic_distance_scale <- 8300
natural_dir <- "NONE"
natural_kappa <- 0
anthropogenic_dir <- "NONE"
anthropogenic_kappa <- 0
pesticide_duration <- c(0)
pesticide_efficacy <- 1.0

## ---- run configuration ----------------------------------------------------
# The original script set random_seed to 42 and later reassigned it (with `=`)
# to NULL; the multirun call below passes NULL explicitly, so a single NULL
# assignment is kept here for consistency.
random_seed <- NULL
output_frequency <- "year"
movements_file <- ""
use_movements <- FALSE
num_iterations <- 30
number_of_cores <- 30

## Single-run alternative, kept from the original for reference:
# data <- PoPS::pops(infected_file, host_file, total_plants_file,
#                    temp, temperature_coefficient_file,
#                    precip, precipitation_coefficient_file,
#                    time_step, reproductive_rate,
#                    season_month_start, season_month_end,
#                    start_date, end_date,
#                    use_lethal_temperature, temperature_file,
#                    lethal_temperature, lethal_temperature_month,
#                    mortality_on, mortality_rate, mortality_time_lag,
#                    management, treatment_dates, treatments_file,
#                    treatment_method,
#                    percent_natural_dispersal,
#                    natural_kernel_type, anthropogenic_kernel_type,
#                    natural_distance_scale, anthropogenic_distance_scale,
#                    natural_dir, natural_kappa,
#                    anthropogenic_dir, anthropogenic_kappa,
#                    pesticide_duration, pesticide_efficacy,
#                    random_seed, output_frequency,
#                    movements_file, use_movements)

# Multirun: 30 stochastic realizations on 30 cores (argument order follows the
# PoPS::pops_multirun signature; do not reorder).
data <- PoPS::pops_multirun(infected_file, host_file, total_plants_file,
                            temp, temperature_coefficient_file,
                            precip, precipitation_coefficient_file,
                            time_step, reproductive_rate,
                            season_month_start, season_month_end,
                            start_date, end_date,
                            use_lethal_temperature, temperature_file,
                            lethal_temperature, lethal_temperature_month,
                            mortality_on, mortality_rate, mortality_time_lag,
                            management, treatment_dates, treatments_file,
                            treatment_method,
                            percent_natural_dispersal,
                            natural_kernel_type, anthropogenic_kernel_type,
                            natural_distance_scale, anthropogenic_distance_scale,
                            natural_dir, natural_kappa,
                            anthropogenic_dir, anthropogenic_kappa,
                            num_iterations, number_of_cores,
                            pesticide_duration, pesticide_efficacy,
                            random_seed = NULL, output_frequency,
                            movements_file, use_movements)
|
2fb4422ec00d9a10d3b3a8dd9d311316f355df44 | 4a2e64bcb319ae4525359a69d2e62bafe50ea48f | /R/FUNCTION_plotMonthly.R | bf6e79cfc8f2510181da2181d991ded1714f0e2d | [] | no_license | Pacific-salmon-assess/eramonth | 34ad963fc4f6bc86d9e2c54f2e5cc44db01a11a9 | eb48212dda4eeafd45b6d82988d80b3c3565451c | refs/heads/master | 2022-11-04T07:21:41.993327 | 2020-06-15T17:26:01 | 2020-06-15T17:26:01 | 272,509,055 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,644 | r | FUNCTION_plotMonthly.R | #' plotMonthly
#'
#' Plot output from calcMonthly function. See details there.
#' @param monthly.df data frame output from calcMonthly
#' @param stk stock to plot
#' @param fgroup fishery grouping to plot (depends on calcMonthly inputs!)
#' @param type plot type. For now, one of "basic" or "box"
#' @export
#' @examples
#'
plotMonthly <- function(monthly.df,stk="ATN",fgroup = "ISBM_CBC_Term_N",type="basic"){
df.sub <- monthly.df %>% dplyr::filter(Stock == stk, Fishery_Group == fgroup)
if(type == "basic"){
plot(1:5,1:5,type="n", bty="n",xlab="Month",ylab="Prop. of CWT Recoveries (Not ER!)",xlim=c(0,13),
ylim=c(0,1))
yrs.vec <- sort(unique(df.sub$Recovery_Year))
for(i in 1:length(yrs.vec)){
#print(yr)
df.yr <- df.sub %>% dplyr::filter(Recovery_Year == yrs.vec[i])
#print(df.yr)
lines(df.yr$Recovery_Month + i * 0.05,df.yr$monprop,type="h",col="lightblue")
}} # end basic plot
if(type == "box"){
par(bty="n")
boxplot(monprop ~ Recovery_Month,data= df.sub, ylim=c(0,1), border="darkblue",col="lightblue",
xlab="Month", ylab = "Prop. of CWT Recoveries (Not ER!)")
} # end box plot
title(main= paste(stk,fgroup,sep= " / "),cex.main=0.85)
sub.txt <- paste(
paste(length(unique(df.sub$Recovery_Year)),"Years"),
paste0(min(df.sub$Recovery_Year), "-", max(df.sub$Recovery_Year) ),
paste0("n=",round(min(df.sub$yeartotal)), "-", round(max(df.sub$yeartotal)) ),
sep=" / ")
title(main=sub.txt,cex.main=0.7, line = 1)
# add in ggplot and plotly versions
}
|
f7b3f08301957ac330e5460e4d3f1507a6ef9f41 | 207c2714aeb42ff9c6eeb1e1569c314dae5576fc | /.Rprofile | 09c81c8266b60d39a22765d7d220656e5d1c376c | [] | no_license | murattasdemir/work_config | cad8d461c1a5c2bd980e18d4f49969c67167945c | 170975ec341fa6435d50e47ee4eb63106bafe299 | refs/heads/master | 2021-09-08T14:36:11.667454 | 2021-09-04T08:00:03 | 2021-09-04T08:00:03 | 139,559,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,062 | rprofile | .Rprofile | ##################################
# R profile file
# Things you might want to change
options(papersize="a4")
# options(editor="notepad")
# options(pager="internal")
# R interactive prompt
# options(prompt="> ")
# options(continue="+ ")
# to prefer Compiled HTML
# help options(chmhelp=TRUE)
# to prefer HTML help
# options(htmlhelp=TRUE)
# General options
# options(tab.width = 2)
# options(width = 130) #Default 100
# options(graphics.record=TRUE)
options("stringsAsFactors"=FALSE)
# RStata Options #########
#options("RStata.StataPath" = "/Applications/Stata/StataSE.app/Contents/MacOS/stata-se")
#options("RStata.StataVersion"=14)
#.First <- function(){
#library(RStata)
# stata("set more off, permanently")
# cat("\n**** Welcome at", date(), "****")
# cat("\n Successfully loaded .Rprofile ", "\n")
#}
#.Last <- function(){
# cat("\nGoodbye at ", date(), "\n")
#}
#Plotly account credentials
Sys.setenv("plotly_username"="mtasdemir")
Sys.setenv("plotly_api_key"="PHKEqsaotocEU94EiOIL")
Sys.setenv(RETICULATE_PYTHON = "/usr/local/bin/python3")
|
cdcbd26dfbde1e38e9c16af546ae7e955ac970f4 | 5dc1375ad5705d88b3511b79c06f4a66c62d9e15 | /man/scale2.Rd | ce984be16fea5359200cf0f9ad8899681e0ddb0a | [
"MIT"
] | permissive | llrs/inteRmodel | 441044e8ea74e9b0f7e89f535ec5115e20bab422 | 02dfb240efaeb7322cf555b17209d615635538c0 | refs/heads/master | 2022-05-24T19:06:06.922866 | 2022-03-14T14:59:08 | 2022-03-14T14:59:33 | 198,424,052 | 1 | 4 | NOASSERTION | 2021-11-04T23:08:17 | 2019-07-23T12:15:33 | R | UTF-8 | R | false | true | 338 | rd | scale2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{scale2}
\alias{scale2}
\title{Scale2}
\usage{
scale2(...)
}
\arguments{
\item{...}{Named arguments for scale2, A (Matrix), center, scale, bias (logical).}
}
\description{
If possible reexport scale2 of RGCCA, if not make it available to users.
}
|
2fdf98a82fdc150bab3dee6b2f283a71f887f9aa | c7969c98a4fc0a14f8a10d88a0a1bd26b0e33005 | /man/RunMAXEIG.Rd | 0a90e39a1de491ee74f5d8030bcd9cec576d1628 | [] | no_license | cran/RTaxometrics | efdb7e4eb3c056d892b6802c08adaf7879e81903 | f5bb4a3e0ccb44cc10e6ee0a13cd63697534d294 | refs/heads/master | 2023-06-08T05:14:27.529014 | 2023-05-30T11:00:02 | 2023-05-30T11:00:02 | 92,848,658 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 753 | rd | RunMAXEIG.Rd | \name{RunMAXEIG}
\alias{RunMAXEIG}
\title{
Performs MAXEIG
}
\description{
This function performs the MAXEIG analysis
}
\usage{
RunMAXEIG(x, parameters)
}
\arguments{
\item{x}{
The data matrix
}
\item{parameters}{
The data and program parameters
}
}
\details{
Called by higher-order functions; users do not need to call this
function directly.
}
\value{
Panel of MAXEIG curves:
\item{curve.x }{X values of curve}
\item{curve.y }{Y values of curve}
}
\references{
Waller, N.G., & Meehl, P.E. (1998). Multivariate taxometric procedures:
Distinguishing types from continua. Thousand Oaks, CA, US:
Sage Publications, Inc.
}
\author{
John Ruscio <ruscio@tcnj.edu> and Shirley Wang <shirleywang@g.harvard.edu>
Maintainer: John Ruscio <ruscio@tcnj.edu>
} |
cb743bd42ed2877c2a7f45b891c61b2caf78098d | d12342ffe9a2659831f1749ce2f373d36bcf2659 | /Rscripts/mapped_read_plots.R | 9bf70a92440013679b7ff05cf27e765593e3e4da | [
"MIT"
] | permissive | felixgrunberger/Native_RNAseq_Microbes | 4f2788e9ce0f812661c48c7d98837aff9adf1342 | 821b0fc24d343a82e091db0cad510bdfc9c44026 | refs/heads/master | 2020-08-31T00:32:56.057561 | 2020-06-22T10:10:58 | 2020-06-22T10:10:58 | 218,535,233 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,284 | r | mapped_read_plots.R | ###########################################################################
###########################################################################
###
### READ IDENTITY [%] TO GENOMIC FEATURES AND ENOLASE SPIKE-IN
###
###########################################################################
###########################################################################
# (identity = (1 - NM/aligned_reads)*100)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# LOAD LIBRARIES AND PLOTTING FUNCTION
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
library(here)
source(here("Rscripts/load_libraries.R"))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# LOAD FUNCTIONS
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................calculate bases on CDS
# Total number of bases covered by CDS features in a GFF annotation file.
# NOTE(review): feature length is taken as abs(end - start), i.e. without the
# usual +1 for inclusive GFF coordinates -- confirm this is intentional.
cds_depth_calculator <- function(input_gff){
  annotations <- read.gff(input_gff)
  cds_rows <- annotations[annotations$type == "CDS", ]
  sum(abs(cds_rows$start - cds_rows$end))
}
#...................................calculate enolase quality from mapped file
# Build a per-read quality table for reads mapped to the enolase spike-in
# control. Joins BAM alignment records with the guppy sequencing summary and
# computes per-read identity as (1 - NM/aligned_length) * 100, where NM is the
# edit distance tag and aligned_length the sum of CIGAR M and I operations.
#
# input_bam_file:     BAM of reads mapped to the enolase reference
# input_summary_file: guppy sequencing summary for the same run
# seq_set:            label for the sequencing set (e.g. "pfu_tex")
# Returns the joined table tagged with mapped_to = "control",
# mapped_type = "CDS" and the given sequencing_set.
enolase_quality_finder <- function(input_bam_file, input_summary_file, seq_set){
  summary_file <- fread(input_summary_file)
  # pull the NM (edit distance) and MD tags plus mapping quality from the BAM
  p4 <- ScanBamParam(tag=c("NM", "MD"), what="mapq")
  allReads <- readGAlignments(input_bam_file, use.names = T, param = p4)
  allReads_table <- GenomicAlignments::as.data.frame(allReads) %>%
    as_tibble() %>%
    mutate(minion_read_name = names(allReads))
  # attach basecaller metrics (read length, qscore) by read id
  bam_summary <- left_join(allReads_table, summary_file, by = c("minion_read_name" = "read_id"))
  bam_summary$aligned_reads <- NA
  # aligned length = total M (match/mismatch) + I (insertion) bases per read
  bam_summary$aligned_reads <- unlist(lapply(explodeCigarOpLengths(bam_summary$cigar, ops = c("M", "I")), function(x) sum(x)))
  bam_summary$identity <- NA
  bam_summary <- bam_summary %>%
    mutate(identity = (1 - NM/aligned_reads)*100,
           mapped_to = "control",
           sequencing_set = seq_set,
           mapped_type = "CDS")
  return(bam_summary)
}
#...................................modify id table output
# Prepare a per-read identity table for merging with the enolase (spike-in)
# table: drop any grouping, keep one row per read, and tag the rows with the
# mapping target ("genome") and the sequencing-set label.
modify_id_table <- function(id_table_input, name){
  deduped <- id_table_input %>%
    ungroup() %>%
    distinct(minion_read_name, .keep_all = TRUE)
  deduped %>%
    mutate(mapped_to = "genome",
           sequencing_set = name)
}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# LOAD & TIDY DATA ENOLASE
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................paths to sequencing summary files from guppy
summary_files <- paste(here("data/summary_data/"), list.files(here("data/summary_data/"),pattern = ".txt"), sep = "")
#...................................paths to mapped enolase (spike-in) BAM files
enolase_files <- paste(here("data/enolase_data/"), list.files(here("data/enolase_data/"),pattern = ".bam"), sep = "")
#...................................derive sample names from the summary file names
# (the text between "summary_data/" and "_seq", e.g. "pfu_tex")
sample_names <- unlist(lapply(summary_files, FUN=function(x){str_split_fixed(str_split_fixed(x, "_seq", 2)[1],"summary_data/",2)[2]}))
#...................................build one combined enolase table over all samples
# NOTE(review): assumes summary_files and enolase_files list in the same
# sample order -- confirm the two directories hold matching file sets.
enolase_table <- data.frame()
for (i in seq_along(sample_names)){
  enolase_table <- rbindlist(list(enolase_table, enolase_quality_finder(enolase_files[i], summary_files[i],sample_names[i])))
}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# LOAD & TIDY DATA GENOME
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................input: pre-calculated single-read tidy files (.Rda)
id_files <- paste(here("data/tidy_data/"), list.files(here("data/tidy_data/"),pattern = "_id_table"), sep = "")
#...................................build one combined genome table over all samples
genome_table <- data.frame()
for (i in seq_along(sample_names)){
  # each .Rda file is expected to define the object `full_id_table`
  load(id_files[i])
  genome_table <- rbindlist(list(genome_table, modify_id_table(full_id_table, sample_names[i])))
}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# MERGE ALL DATA
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................combine enolase (control) and genome tables
full_table <- rbindlist(list(enolase_table, genome_table), fill = TRUE)
#...................................reorder factor levels so plots list the
# organisms in a consistent order (E. coli, P. furiosus, H. volcanii)
full_table$sequencing_set <- factor(full_table$sequencing_set,
                                    levels = rev(c("ecoli_tex","ecoli_notex",
                                                   "pfu_tex", "pfu_notex",
                                                   "hvo_tex", "hvo_notex")))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# CALCULATE STATISTICS (results are printed, not stored)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................mapped read statistics
#.................................number of mapped reads / median identity per set
full_table %>%
  dplyr::filter(mapped_to == "genome") %>%
  group_by(sequencing_set) %>%
  summarise(number_of_mapped_reads = n(),
            median_identity = median(identity))
#.................................CDS mapping features
#...............................total bases annotated as CDS per organism
cds_pfu <- cds_depth_calculator(here("data/genome_data/pfu.gff"))
cds_hvo <- cds_depth_calculator(here("data/genome_data/hvo.gff"))
cds_ecoli <- cds_depth_calculator(here("data/genome_data/ecoli.gff"))
# per-set read/base counts on CDS plus the resulting mean sequencing depth
# (cds_depth is constant within a set, hence max() just picks that value)
full_table %>%
  mutate(cds_depth = ifelse(sequencing_set == "ecoli_notex" | sequencing_set == "ecoli_tex", cds_ecoli,
                            ifelse(sequencing_set == "pfu_notex" | sequencing_set == "pfu_tex", cds_pfu, cds_hvo))) %>%
  group_by(sequencing_set) %>%
  dplyr::filter(mapped_type == "CDS", mapped_to == "genome") %>%
  summarise(reads_mapped_to_CDS = n(),
            bases_mapped_to_CDS = sum(aligned_reads),
            sequencing_depth_CDS = sum(aligned_reads)/max(cds_depth))
#.................................rRNA mapping features
full_table %>%
  group_by(sequencing_set) %>%
  dplyr::filter(mapped_type == "rRNA", mapped_to == "genome") %>%
  summarise(reads_mapped_to_rRNA = n())
#.................................enolase (spike-in) mapping features
full_table %>%
  group_by(sequencing_set) %>%
  dplyr::filter(mapped_type == "CDS", mapped_to == "control") %>%
  summarise(reads_mapped_to_enolase = n(),
            median_length_enolase = median(as.numeric(sequence_length_template)),
            median_quality_enolase = median(as.numeric(mean_qscore_template)))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# PLOT
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................two-colour NPG palette (control vs genome)
heat_color_npg <- rev(c(pal_npg()(10)[4],
                        pal_npg()(10)[1]))
#...................................collapse mapped_to/mapped_type into one
# grouping column for the ridge plots below
full_table_rearranged <- full_table %>%
  ungroup() %>%
  mutate(new_group = ifelse(mapped_to == "control", "control",
                            ifelse(mapped_to == "genome" & mapped_type == "CDS", "CDS",
                                   ifelse(mapped_to == "genome" & mapped_type == "rest", "rest",
                                          ifelse(mapped_to == "genome" & mapped_type == "rRNA", "rRNA", NA)))))
#...................................mapped read identity, genome-derived reads
# vs enolase spike-in reads (Supplementary Fig. 3b)
gg_identity <- ggplot(data = full_table_rearranged, aes(x = identity, color = mapped_to, fill = mapped_to, linetype = mapped_type, y = sequencing_set, alpha = mapped_to)) +
  geom_density_ridges2(size = 1, scale = 0.9) +
  scale_alpha_discrete(range = c(0.5, 0.2)) +
  theme_Publication_white() +
  scale_x_continuous(limits = c(65,100), expand = c(0,0)) +
  scale_y_discrete(expand = c(0,0, 0.2, 0)) +
  ylab("") +
  xlab("Mapped read identity (%)") +
  scale_linetype_manual(values = c("CDS" = "solid", "tRNA" = "dashed", "rRNA" = "dotted")) +
  scale_color_manual(values = heat_color_npg) +
  scale_fill_manual(values = heat_color_npg) +
  theme(axis.text.y = element_text(face = "italic")) +
  guides(linetype = guide_legend(title="")) +
  guides(fill = guide_legend(title="")) +
  guides(color = guide_legend(title=""))
pdf(here("figures/identity_control_vs_genome.pdf"),
    width = 7, height = 7, paper = "special",onefile=FALSE)
gg_identity
dev.off()
#...................................mapped read lengths, genome-derived reads
# vs enolase spike-in reads; 1314 nt tick marks the enolase transcript length
# (Supplementary Fig. 3c)
gg_length <- ggplot(data = full_table, aes(x = aligned_reads, color = mapped_to, fill = mapped_to, linetype = mapped_type, y = sequencing_set, alpha = mapped_to)) +
  geom_density_ridges2(aes(height =..ndensity..), scale = 0.9, size = 1) +
  theme_Publication_white() +
  scale_alpha_discrete(range = c(0.5, 0.2)) +
  scale_x_continuous(trans = "log10", limits = c(50,5000), breaks = c(100,1000,1314,3000,5000),expand = c(0, 0)) +
  scale_y_discrete(expand = c(0,0, 0.2, 0)) +
  ylab("") +
  xlab("Log10 Mapped read length (nt)") +
  scale_linetype_manual(values = c("CDS" = "solid", "tRNA" = "dashed", "rRNA" = "dotted")) +
  scale_color_manual(values = heat_color_npg) +
  scale_fill_manual(values = heat_color_npg) +
  theme(axis.text.y = element_text(face = "italic")) +
  guides(linetype = guide_legend(title="")) +
  guides(fill = guide_legend(title="")) +
  guides(color = guide_legend(title=""))
pdf(here("figures/aligned_length_control_vs_genome.pdf"),
    width = 7, height = 7, paper = "special",onefile=FALSE)
gg_length
dev.off()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# MINIMUM MAPPED READ LENGTHS
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................six-colour heatmap-like NPG palette
heat_color_npg <- c(pal_npg()(10)[4],
                    pal_npg()(10)[6],
                    pal_npg()(10)[7],
                    pal_npg()(10)[5],
                    pal_npg()(10)[1],
                    pal_npg()(10)[8])
#...................................filter genome table for short reads (<300 nt)
# NOTE(review): genome_table_short appears unused downstream --
# calculate_frequency() re-applies the same filter itself.
genome_table_short <- genome_table %>%
  dplyr::filter(sequence_length_template < 300)
#...................................calculate frequencies for short reads
# Relative frequency of mapped read lengths below `max_length` for one
# sequencing set.
#
# set_org:    sequencing-set label to select (e.g. "pfu_tex")
# id_table:   per-read table to tabulate; defaults to the global genome_table
#             built above (previously a hidden global dependency, now an
#             explicit, overridable argument)
# max_length: upper bound (exclusive) on read length, previously hard-coded 300
# Returns a data frame with Var1 (length as factor), Freq (relative
# frequency), coordinate (length as numeric) and group (the set label).
calculate_frequency <- function(set_org, id_table = genome_table, max_length = 300){
  # filter for organism / sequencing set and keep only short reads
  chosen_set <- id_table %>%
    dplyr::filter(sequencing_set == set_org) %>%
    dplyr::filter(sequence_length_template < max_length)
  read_lengths <- as.numeric(chosen_set$sequence_length_template)
  # tabulate lengths and normalise counts to relative frequencies;
  # table() is called on an expression so the column keeps the name Var1,
  # matching the original output consumed by the plotting code
  as.data.frame(table(c(read_lengths))) %>%
    mutate(coordinate = as.numeric(as.character(Var1))) %>%
    mutate(group = set_org) %>%
    mutate(Freq = Freq/sum(Freq))
}
#...................................frequency tables of short-read lengths,
# one per sequencing set, stacked into a single table for the heatmap
short_read_table <- data.frame()
for (i in seq_along(sample_names)){
  short_read_table <- rbindlist(list(short_read_table, calculate_frequency(sample_names[i])))
}
#...................................reorder factor levels for consistent row order
short_read_table$group <- factor(short_read_table$group,
                                 levels = rev(c("ecoli_tex","ecoli_notex",
                                                "pfu_tex", "pfu_notex",
                                                "hvo_tex", "hvo_notex")))
#...................................plot frequencies of short reads (Supplementary Fig. 3d)
gg_heatmap_shortreads <- ggplot() +
  geom_tile(data = short_read_table, aes(y = group, x = coordinate, color = Freq, fill= Freq), size = 0.5) +
  scale_x_continuous(limits = c(0,300),expand = c(0,0)) +
  scale_fill_gradientn(colours = heat_color_npg) +
  scale_color_gradientn(colours = heat_color_npg) +
  scale_y_discrete(expand = c(0,0)) +
  theme_Publication_white() +
  ylab("") +
  xlab("Minimum read length (nt)") +
  geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5, color = "white") +
  guides(fill = guide_colorbar(title = "counts",barwidth = 15, barheight = 0.5, ticks = T, label = T)) +
  guides(color = F)
pdf(here("figures/short_reads_heatmap.pdf"),
    width = 7, height = 7, paper = "special",onefile=FALSE)
gg_heatmap_shortreads
dev.off()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# EXAMPLE PLOTS FOR PYROCOCCUS FURIOSUS SET
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#...................................extract Pyrococcus furiosus TEX data (CDS-mapped only)
pfu_genome_table <- genome_table %>%
  dplyr::filter(sequencing_set == "pfu_tex", mapped_to == "genome", mapped_type == "CDS")
#...................................mapped identity vs. mapped read length as a
# hexbin density, with Spearman correlation (Supplementary Fig. 3e)
identity_length <- ggplot(data = pfu_genome_table, aes(x = identity, y = as.numeric(aligned_reads))) +
  xlab("Mapped identity (%)") +
  ylab("Mapped read length (nt)") +
  ggtitle("") +
  stat_binhex(bins = 30, aes(fill = ..count.., alpha = ..count..), color = "white") +
  scale_fill_gradientn(colours = heat_color_npg) +
  scale_alpha(range = c(0.7,1)) +
  theme_Publication_white() +
  guides(alpha = F) +
  guides(fill = guide_colorbar(title = "counts",barwidth = 15, barheight = 0.5, ticks = T, label = T)) +
  stat_cor(method = "spearman", color = "black")
pdf(here("figures/identity_vs_length.pdf"),
    width = 7, height = 7, paper = "special",onefile=FALSE)
identity_length
dev.off()
#...................................mapped identity vs. basecalled read quality
# as a hexbin density, with Spearman correlation (Supplementary Fig. 3f)
identity_quality <- ggplot(data = pfu_genome_table, aes(x = identity, y = as.numeric(mean_qscore_template))) +
  xlab("Mapped identity (%)") +
  ylab("Read quality (Phred-like score)") +
  ggtitle("") +
  stat_binhex(bins = 30, aes(fill = ..count.., alpha = ..count..), color = "white") +
  scale_fill_gradientn(colours = heat_color_npg) +
  scale_alpha(range = c(0.7,1)) +
  theme_Publication_white() +
  guides(alpha = F) +
  guides(fill = guide_colorbar(title = "counts",barwidth = 15, barheight = 0.5, ticks = T, label = T)) +
  stat_cor(method = "spearman", color = "black")
pdf(here("figures/identity_vs_quality.pdf"),
    width = 7, height = 7, paper = "special",onefile=FALSE)
identity_quality
dev.off()
|
73ef8e856c1b8acc0907d5bf54edd93a3b024bf8 | 1b872282a8fcfa99273958a7f95fab6a813d7d09 | /tests/testthat/test-hyperGeometricTest.R | da700badb91b4737afc1d90f5babd72541a8e943 | [
"MIT"
] | permissive | mirax87/multiGSEA | cecc8e1b6ebedbe92a87ecb7a91034635b3b69c3 | b8747abc1815ab4fa08ef024e77eee2a864ac6ed | refs/heads/master | 2020-04-02T13:49:21.057653 | 2018-06-01T22:07:22 | 2018-06-01T22:07:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,243 | r | test-hyperGeometricTest.R | context("Hyper Geometric Test")
## Disabled legacy test comparing the internal do.hyperGeometricTest() against
## a manually computed hypergeometric test; kept for reference.
##test_that("do.hyperGeometricTest performs like standard hyperG test", {
##  vm <- exampleExpressionSet(do.voom=TRUE)
##  gsl <- exampleGeneSets()
##  gsd <- conform(GeneSetDb(gsl), vm)
##
##  min.logFC <- 1
##  max.padj <- 0.1
##
##  my.hg <- multiGSEA:::do.hyperGeometricTest(gsd,vm,vm$design,ncol(vm$design))
##
##  ## Calculate expected resulted from hyperG test
##  tt <- calculateIndividualLogFC(vm, vm$design, ncol(vm$design),
##                                 min.logFC=min.logFC, max.padj=max.padj)
##  tt[, hyperG.selected := abs(logFC) >= min.logFC & padj <= max.padj]
##})

# Checks that the multiGSEA() pipeline's hyperGeometricTest method yields the
# same p-values as calling hyperGeometricTest() directly with the features
# flagged significant at |logFC| >= 1 and padj <= 0.1.
test_that("hyperGeometricTest performs like do.hyperGeometricTest performs", {
  vm <- exampleExpressionSet(do.voom=TRUE)
  gsl <- exampleGeneSets()
  gsd <- conform(GeneSetDb(gsl), vm)

  min.logFC <- 1
  max.padj <- 0.1

  mg <- multiGSEA(gsd, vm, vm$design, methods='hyperGeometricTest',
                  feature.min.logFC=min.logFC, feature.max.padj=max.padj,
                  really.use.hyperGeometricTest=TRUE)
  res <- results(mg)

  # features flagged significant by the pipeline, deduplicated
  selected <- subset(logFC(mg), significant)
  selected <- unique(selected$featureId)
  # direct call with the same selection and the full feature universe
  hg <- hyperGeometricTest(gsd, selected, rownames(vm))
  expect_equal(hg$pval, res$pval)
})
|
78c0b88ab24adafc8113ee04e32e0fb1148605ab | 5cb6c45b8fd2ff1f60a2db445fac19a135da0fe7 | /cachematrix.R | 132dbe3e63f3fbccc143032d2ce00936de4556f4 | [] | no_license | suhasmallya/ProgrammingAssignment2 | 5172fb8655be61e07d6b8ca05e461d0ed6790ed6 | 8004ff0aebf9060151b9c2072fc326be49e00076 | refs/heads/master | 2021-01-18T07:55:53.286288 | 2014-10-26T13:59:29 | 2014-10-26T13:59:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,776 | r | cachematrix.R | ## cachematrix.R - contains two functions: makeCacheMatrix() and cacheSolve()
##
## makeCacheMatrix() creates a special matrix capable of caching its inverse
## cacheSolve() returns the inverse of a matrix - either by retrieving from cache or computing it
## makeCacheMatrix
## Create a special "matrix" object able to cache its own inverse.
## pmatrix: the matrix to wrap (defaults to an empty 1x1 NA matrix)
## Returns a list of four accessor functions:
##   setmatrix(y)  -- replace the stored matrix and drop any cached inverse
##   getmatrix()   -- return the stored matrix
##   setinverse(i) -- store a computed inverse (intended for cacheSolve)
##   getinverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(pmatrix = matrix()) {
  cached_inverse <- NULL

  set_fn <- function(y) {
    # replacing the matrix invalidates the stale cached inverse
    pmatrix <<- y
    cached_inverse <<- NULL
  }
  get_fn <- function() pmatrix
  set_inv_fn <- function(inv) cached_inverse <<- inv
  get_inv_fn <- function() cached_inverse

  list(setmatrix = set_fn,
       getmatrix = get_fn,
       setinverse = set_inv_fn,
       getinverse = get_inv_fn)
}
## cacheSolve
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## pmatrix: object returned by makeCacheMatrix(); assumed to wrap an
##          invertible square matrix.
## ...    : further arguments forwarded to solve().
##
## The inverse is computed at most once: on the first call it is computed
## with solve() and stored via pmatrix$setinverse(); subsequent calls return
## the cached value (announced via message()).
cacheSolve <- function(pmatrix, ...) {
        ## retrieve the inverse, if already computed
        cached <- pmatrix$getinverse()
        if(!is.null(cached)){
                message("Retrieving cached inverse")
                return(cached)
        }
        ## compute, cache, and return the inverse
        inv <- solve(pmatrix$getmatrix(), ...)
        pmatrix$setinverse(inv)
        inv
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.