blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3cc9c125da0830f2a10808fa719fe40fc654e17e
|
0bd4d5ee50ebfb5a5325ae0284087ee886be4f37
|
/man/enumerate.Rd
|
17e1e6601e4143c7ddbbb186a85c51525da54740
|
[] |
no_license
|
stla/SLutils
|
91f53e3ef48b22154642b7425a1be94c0c48053e
|
5c5ef7dbb5d172c0a7788b3975a1363a47c4bf67
|
refs/heads/master
| 2020-04-10T06:21:30.476088
| 2019-09-10T10:00:57
| 2019-09-10T10:00:57
| 160,851,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 437
|
rd
|
enumerate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{enumerate}
\alias{enumerate}
\title{Sentence enumerating the elements of a vector}
\usage{
enumerate(x)
}
\arguments{
\item{x}{character or numeric vector}
}
\value{
A character string.
}
\description{
Make a sentence enumerating the elements of a vector.
}
\examples{
enumerate(letters[1])
enumerate(letters[1:2])
enumerate(letters[1:3])
}
|
1f50a03f363c965fe6e714dbdb2c939718e95627
|
e4f637cc645427b6e1c7a7eb33df47afd42264bc
|
/size_dist/R_code.R
|
103ce8350bcaf20b9aaa528a3fa1b72aa611f970
|
[] |
no_license
|
yinrui0/thesis-project
|
a1887045819d3be456b5a31455c66eaff79d457d
|
a7402c05092229c129bfae9b5a23da39dd09f6cf
|
refs/heads/master
| 2021-05-02T17:30:59.268773
| 2017-07-19T21:51:42
| 2017-07-19T21:51:42
| 61,657,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 886
|
r
|
R_code.R
|
# Compute mean particle diameters from a 2014-09 size-distribution data set
# and write one mean diameter per observation to mu_2014_09.csv.
# NOTE(review): assumes all input CSVs carry 87 diameter bins in columns
# 4:90 and a 16-line header before the data — confirm against file layout.
rm(list=ls())
cat("\014")
#read dlnp (log-diameter interval widths, one row per observation)
imput1 <- read.csv("diameters_ln_intervals_2014.csv", skip=16, header = FALSE)
#read dN/dlnp (number size distribution, one row per observation)
imput2 <- read.csv("size_distributions__2014_09.csv", skip = 16, header = FALSE)
# Number of observations (rows) expected in the size-distribution file.
num <- 3544
#read diameter (bin midpoints: single header row at line 15)
imput3 <- read.csv("diameters_ln_intervals_2014.csv", skip=14, nrows=1, header = FALSE)
p <- matrix(unlist(imput3[4:90]),ncol = 87, byrow = TRUE)
#normalize pdf: reshape dN/dlnp to (observations x 87 bins)
dNdlnp <- matrix(unlist(imput2[4:90]),ncol = num, byrow = TRUE)
dNdlnp <- t(dNdlnp)
# Spot-check the first observation's distribution.
dNdlnp[1,]
dlnp <- matrix(unlist(imput1[4:90]),ncol = 87, byrow = TRUE)
dlnp <- t(dlnp)
dlnp[,1]
# Total number concentration per observation: sum of dN/dlnp * dlnp over bins.
N <- dNdlnp %*% dlnp
# Replicate the totals across the 87 bins so the pdf can be normalized.
NN <- matrix(rep(N,times=87), ncol=87)
dndlnp <- dNdlnp/NN
dndlnp[1,]
#get mean diameter: expectation of the bin midpoints under the normalized pdf
PP <- t(p)*dlnp
mu <- dndlnp %*% PP
mu
# Assemble the output table: date, julian day, mean diameter per observation.
myData <- data.frame(imput2$V1,imput2$V3, mu)
colnames(myData) <- c("date","julday","mean diameter")
write.csv(myData, file = "mu_2014_09.csv")
|
8b2bed835386094c34da48c30821696e2d2d620f
|
52f95b07a1d460d90350d5dced856363d96b5aa0
|
/TestData3.R
|
5359b47a26578ffe79bd71e34b82966655d2383b
|
[] |
no_license
|
twgg201/datan3_2019
|
16122d6f5a7271ba6932c677db97855f2a87e877
|
1a1c833eacce1cde029ad8aa504ae9d2983d1a39
|
refs/heads/master
| 2020-04-17T04:57:58.115699
| 2019-01-17T16:37:47
| 2019-01-17T16:37:47
| 166,255,456
| 0
| 0
| null | 2019-01-17T16:12:30
| 2019-01-17T16:12:30
| null |
UTF-8
|
R
| false
| false
| 19
|
r
|
TestData3.R
|
# Minimal sanity-check script: evaluates a trivial arithmetic expression.
2 + 2
|
59f1f93339b7da135d57d297e233a479ea21ca8a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/chicane/examples/fill.in.zeros.Rd.R
|
ecf30fa584e866d246ccab89041b9c3e12b9777a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
fill.in.zeros.Rd.R
|
# Extracted example for chicane::fill.in.zeros.
library(chicane)

### Name: fill.in.zeros
### Title: fill.in.zeros
### Aliases: fill.in.zeros fill.in.zeroes

### ** Examples

# Example interaction data shipped with the package.
data(bre80)

# Bait and restriction-fragment definitions bundled as extdata.
bait.file <- system.file("extdata", "2q35.bed", package = "chicane")
fragment.file <- system.file("extdata", "GRCh38_HindIII_chr2.bed.gz", package = "chicane")

# Add explicit zero-count rows for unobserved bait/fragment pairs.
results <- fill.in.zeros(
  bre80,
  baits = read.bed(bait.file),
  fragments = read.bed(fragment.file)
)
|
670e33231aff25011e9607b70f12a0e43cbb8114
|
701997224f99e13e24046b7b5313d59730d0d1c2
|
/general/performance/stats.R
|
f72640c57401d0fa0b8967d65cc71eeff653ecc1
|
[
"MIT"
] |
permissive
|
autocare/tlaplus
|
021699fa24eabef6ffb35e6a51c06dabeb70c0de
|
1e652ddeab8109ba725c73094cb0efb48e02bf70
|
refs/heads/master
| 2023-01-28T16:57:05.083857
| 2023-01-05T16:52:02
| 2023-01-05T16:52:02
| 263,623,177
| 0
| 0
|
MIT
| 2020-05-13T12:27:21
| 2020-05-13T12:27:20
| null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
stats.R
|
## Performance-statistics pipeline: reads out_run-stats.csv, merges
## commits that cannot affect performance, aggregates repeated runs,
## plots throughput per revision, and tabulates the throughput trend
## per spec and worker count.
## Read input csv file.
library(here)
data <- read.csv(header=TRUE, sep = "#", file = here("out_run-stats.csv"))
## Merge two or more commits when they cannot impact performance because
## a commit only changes auxiliary files.
## Replace git commit short-hash f91... with df1...
data[data == "f91c7b0"] <- "df144c5"
data[data == "46de326"] <- "df144c5"
data[data == "0f6a80b"] <- "1eb600d"
data[data == "0b93602"] <- "1eb600d"
## Convert epoch to date.
library(anytime)
data$Date <- anytime(data$Timestamp)
## Aggregate multiple runs.
data <- aggregate(cbind(Generated,Duration) ~ Spec + RevTag + Workers, data = data, FUN = mean, na.rm = TRUE)
## Calculate Throughput on aggregated data.
data$Throughput <- data$Generated / data$Duration
##################
## Fit a linear throughput trend over revisions for an overlay line.
## NOTE(review): coef(lma)["RevTag"] only exists if RevTag is numeric;
## if RevTag is a factor/character the coefficients are named per level
## and this lookup yields NA — confirm the column type upstream.
lma <- lm(Throughput ~ RevTag, data)
line <- data.frame(slope = coef(lma)["RevTag"], intercept = coef(lma)["(Intercept)"], stroke = "red", stroke_width = 3, stroke_dasharray = "7,5")
library(scatterD3)
#library(htmlwidgets)
## Interactive scatter plot of throughput per revision (log-scale y).
s <- scatterD3(data = data,
x = RevTag, y = Throughput, y_log=T,
xlab = "RevTag", ylab = "log(Time&Throughput)",
col_var=Spec
)
plot(s)
library(htmlwidgets)
## Persist the interactive plot as a standalone HTML page.
saveWidget(s, file="index.html")
##################
## Relative throughput change from first to last revision, per spec and
## worker count (positive inc = throughput decreased over time).
library(dplyr)
trend <- data %>%
group_by(Spec, RevTag, Workers) %>%
summarise(Throughput = mean(Throughput)) %>%
arrange(Spec, Workers) %>%
group_by(Spec, Workers) %>%
summarise(
inc = (first(Throughput) - last(Throughput)) / first(Throughput),
increase = scales::percent(inc)
)
library(knitr)
## Sort by largest relative change and render a markdown table.
trend <- trend[order(-trend$inc),]
kable(format = "markdown", trend[, c(1,2,4)], digits=2)
|
1c78097dcf32e93819033ae63b7362fd17f6c630
|
25449f88edddc74beb261a934964d7d1ce358deb
|
/R/read_vpts.R
|
a96e8383ead9029ef8e68574cc62871403f215ff
|
[
"MIT"
] |
permissive
|
adokter/bioRad
|
53de114ca6e2151743045db8556ffd7a45f90570
|
d4935eddaa7cc1c3c50e47278e72967c8bbd980c
|
refs/heads/master
| 2023-09-01T10:49:36.747974
| 2023-07-28T14:12:57
| 2023-07-28T14:12:57
| 59,586,835
| 29
| 21
|
NOASSERTION
| 2023-09-02T17:36:08
| 2016-05-24T15:49:06
|
R
|
UTF-8
|
R
| false
| false
| 4,762
|
r
|
read_vpts.R
|
#' Read time series of vertical profiles (`vpts`) from file(s)
#'
#' Reads `vpts` data from one or more files.
#' The following file formats are supported (but cannot be mixed):
#' - [VPTS CSV](https://aloftdata.eu/vpts-csv/).
#' - [ODIM bird profile](https://github.com/adokter/vol2bird/wiki/ODIM-bird-profile-format-specification).
#' - vol2bird standard output (see example below).
#' @param files Path(s) to one or more files containing vpts data.
#' @param data_frame When `FALSE` (default) output a `vpts` object, when `TRUE` output a data.frame
#' @param ... Additional arguments for backward compatibility, passed to `read_stdout`.
#' @return `vpts` object.
#' @family read functions
#' @export
#' @examples
#' ## read a vertical profile time series in VPTS CSV format:
#' vptsfile <- system.file("extdata", "example_vpts.csv", package = "bioRad")
#' read_vpts(vptsfile)
#' # read a single vertical profile file in ODIM h5 format:
#' vpfile <- system.file("extdata", "profile.h5", package = "bioRad")
#' read_vpts(vpfile)
#' # read a vertical profile time series in `vol2bird` stdout format:
#' stdout_file <- system.file("extdata", "example_vpts.txt", package = "bioRad")
#' read_vpts(stdout_file, radar = "KBGM", wavelength = "S")
read_vpts <- function(files, data_frame = FALSE, ...) {
  # File extensions this reader knows how to dispatch on.
  valid_extensions <- c("csv", "gz", "h5", "txt")

  # All files must share one extension: formats cannot be mixed.
  extension <- unique(tools::file_ext(files))
  assertthat::assert_that(
    length(extension) == 1,
    msg = "`files` must all have the same extension."
  )

  if (extension != "") {
    # Extension present: validate it, then cross-check against the file
    # content (guess_file_type() inspects the first file).
    assertthat::assert_that(
      extension %in% valid_extensions,
      msg = glue::glue(
        "`files` must have one of the following extensions: {valid_extensions_collapse}",
        valid_extensions_collapse = glue::glue_collapse(valid_extensions, sep = ", ")
      )
    )
    guessed_file_type <- guess_file_type(files[1])
    assertthat::assert_that(
      extension == guessed_file_type,
      msg = glue::glue(
        "The extension of the input file(s) {extension} does not match the guessed file type: {guessed_file_type}"
      )
    )
  } else {
    # No extension: rely entirely on content-based detection.
    extension <- guess_file_type(files)
  }

  # Legacy vol2bird stdout files are rerouted to read_stdout().
  if (extension == "txt") {
    # Fixed typo in the user-facing warning ("extenstion" -> "extension").
    warning(
      ".txt extension detected - falling back to read_stdout().\n",
      "Please consider updating your workflow by using VPTS csv or h5 input files"
    )
    tryCatch(
      return(do.call(read_stdout, c(list(file = files), list(...)))),
      error = function(e) {
        # Re-raise with a pointer to the read_stdout() documentation.
        # (The original printed via message() and then called a bare
        # stop(), which raised an error with an empty message.)
        stop(paste(e$message, " See ?read_stdout() for more details."),
          call. = FALSE
        )
      }
    )
  }

  # Dispatch on the (possibly inferred) extension.
  data <- switch(extension,
    csv = read_vpts_csv(files, data_frame = data_frame),
    gz = read_vpts_csv(files, data_frame = data_frame),
    h5 = read_vpts_hdf5(files, data_frame = data_frame)
  )
  data
}
#' Read time series of vertical profiles (`vpts`) from VPTS CSV file(s)
#'
#' @inheritParams read_vpts
#' @param data_frame If `TRUE` returns data as dataframe rather than `vpts` object.
#' @return `vpts` object.
#' @keywords internal
#' @noRd
read_vpts_csv <- function(files, data_frame = FALSE) {
  # Load the bundled VPTS CSV table schema.
  # NOTE: the original wrapped this in `if (!exists("cached_schema"))`,
  # but a variable assigned inside a function never persists between
  # calls, so the "cache" was never hit — and exists() would silently
  # pick up any unrelated `cached_schema` in the global environment.
  # Reading the schema unconditionally is equivalent for a clean session
  # and removes the hidden dependency on global state.
  schema <- jsonlite::fromJSON(
    system.file("extdata", "vpts-csv-table-schema.json", package = "bioRad"),
    simplifyDataFrame = FALSE,
    simplifyVector = TRUE
  )
  schema$missingValues <- c("", "NA")

  # Create a Frictionless Data Package and register all files as one
  # resource; read_resource() validates against the schema and row-binds
  # the rows of every file.
  package <- frictionless::create_package()
  package <- frictionless::add_resource(
    package,
    "vpts",
    data = files,
    schema = schema
  )
  data <- frictionless::read_resource(package, "vpts")

  # Convert column types. The NULL assignments appease R CMD check's
  # "no visible binding" notes for the NSE column names below.
  source_file <- datetime <- radar <- NULL
  data <- dplyr::mutate(
    data,
    radar = as.factor(radar),
    source_file = as.factor(source_file),
    datetime = as.POSIXct(datetime, format = "%Y-%m-%dT%H:%M:%SZ", tz = "UTC")
  )

  # Convert to a vpts object unless the caller asked for a data frame.
  if (!data_frame) {
    data <- as.vpts(data)
  }
  return(data)
}
#' Read time series of vertical profiles (`vpts`) from hdf5 file(s)
#'
#' Reads each ODIM h5 profile and binds them into a single time series.
#'
#' @inheritParams read_vpts
#' @return `vpts` object.
#' @noRd
read_vpts_hdf5 <- function(files, data_frame = FALSE) {
  # Read the individual profiles, then combine them into one vpts.
  profiles <- read_vpfiles(files)
  result <- bind_into_vpts(profiles)
  # Optionally flatten to a plain data frame.
  if (data_frame) {
    result <- as.data.frame(result)
  }
  result
}
|
c799688b5abd56f0a2d93d10a13ceacb7d1a70a0
|
f255ef3c7452a307bbaaf95a092e4279aa5f366e
|
/man/coords.Rd
|
9aa68a2c952931c3c2b729d22a2d8e63a00588e5
|
[] |
no_license
|
bbuchsbaum/eyesim
|
a1a61068f53a16925566deb81e03fa5943686f0e
|
4d4f48ef0b1812d5200b86d7216f8d03792c2435
|
refs/heads/master
| 2023-05-11T00:43:30.086058
| 2023-05-08T15:02:35
| 2023-05-08T15:02:35
| 86,451,769
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 240
|
rd
|
coords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generic.R
\name{coords}
\alias{coords}
\title{extract coordinates}
\usage{
coords(x)
}
\arguments{
\item{x}{the object}
}
\description{
extract coordinates
}
|
f9e4e1e07577a644da16814a38e33eda1147e98e
|
b1d42fdf2a683687642d857ae03be94371a24395
|
/man/gromov.hyperbolicity.Rd
|
fa0fa51c71883a7be88645e9c0ecf1fed916a97c
|
[] |
no_license
|
cran/distory
|
61a22457ac6051cae7624b697dbbe57875e7cd7d
|
b0f847940dc178c2d0610a3862b5da6c4f7e042f
|
refs/heads/master
| 2021-01-01T16:13:46.344609
| 2020-04-19T08:00:02
| 2020-04-19T08:00:02
| 17,695,542
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,838
|
rd
|
gromov.hyperbolicity.Rd
|
\name{gromov.hyperbolicity}
\Rdversion{1.1}
\alias{gromov.hyperbolicity}
\title{Gromov Hyperbolicity Constant}
\description{
Computes the Gromov Hyperbolicity Constant of a distance matrix.
}
\usage{
gromov.hyperbolicity(d, deltas = FALSE, scale = NA)
}
\arguments{
\item{d}{
A distance matrix of type \code{dist} or \code{matrix}, or anything that
can be coerced into \code{dist} by \code{as.dist}. Must have at least 4
points.}
\item{deltas}{
A logical value specifying whether to return the vector of delta values.
Default is \code{FALSE}.}
\item{scale}{
Specifies a scaling method for each delta. Default is no scaling (NA or
"none"). Available methods are "max" which scales deltas by the max of the
sums computed, and "perimeter" for the largest perimeter of the four
points.}
}
\details{
This computes a constant that represents the relaxation of a 4-point
condition for delta-hyperbolicity. See (Gromov 1987) for details.
}
\value{
The Gromov hyperbolicity constant of the given distance matrix.
}
\author{John Chakerian}
\seealso{\code{\link{dist.multiPhylo}}}
\references{
M. Gromov. \emph{Hyperbolic groups}. In Essays in Group Theory, pages
73--263. Springer, New York, 1987.
Chakerian, J. and Holmes, S. P. Computational Tools for Evaluating
Phylogenetic and Hierarchical Clustering Trees. arXiv:1006.1015v1.
}
\examples{
# scale final delta by max distance
points <- cbind(runif(100), runif(100))
d <- dist(points)
gromov.hyperbolicity(d)/max(d)
# scale each delta by max distance for the 4 points
points <- cbind(runif(100), runif(100))
d <- dist(points)
gromov.hyperbolicity(d, scale="max")
# scale each delta by the max perimeter for the 4 points
points <- cbind(runif(100), runif(100))
d <- dist(points)
gromov.hyperbolicity(d, scale="perimeter")
}
\keyword{manip}
|
ad12c7d5e91938adb3d80ad7b1b3969b64a10d82
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ecd/examples/ecd.mpfr.Rd.R
|
b2ea640e5f0ce7cd8746a40cf3ee20d1c988a153
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 314
|
r
|
ecd.mpfr.Rd.R
|
# Extracted example for ecd.mpfr: converting numerics to mpfr precision.
library(ecd)

### Name: ecd.mpfr
### Title: Wrapper to convert numeric to mpfr
### Aliases: ecd.mpfr ecd.mp1 ecd.mppi ecd.gamma ecd.erf ecd.erfc ecd.erfcx
###   ecd.dawson ecd.erfi ecd.devel
### Keywords: datasets utility

### ** Examples

# Scalar and vector conversions to mpfr.
x <- ecd.mpfr(1)
y <- ecd.mpfr(c(1, 2, 3))
# Package-provided mpfr constants: unity and pi.
z <- ecd.mp1
p <- ecd.mppi()
|
02aaf3cc9faf3e377c0c2a661138a2ce3c8b20ca
|
62f023eefc837f9d7a0e640c7bea52d90e30d68f
|
/User - process cushion stability .r
|
16bb894d29e71352d0f3ba9d717096dca9257af1
|
[] |
no_license
|
E1kT6MNF/TrackImage-R-Scripts
|
748242e3f719bb12e81e04d1e44d841ff180afb2
|
5444be32d521c3e34b7fd85808c61c28e711cf7e
|
refs/heads/master
| 2021-01-18T21:27:52.331198
| 2016-04-05T16:33:16
| 2016-04-05T16:33:16
| 39,971,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,362
|
r
|
User - process cushion stability .r
|
# This script will read TrackImage .csv files and output cushion stability metrics
# Author: John Newkirk
# Rev. History:
#  08/11/15 added arguments for offset, Slope Lower Time Limit, Slope Upper Time Limit
#  09/01/15 1. Moved argument entry to line items instead of within function call
#           2. changed output form of results table from clipboard to .csv file
#           3. added a header section to results table listing file source and parameters used.
#  12/03/15 added argument for L/R camera side view.
#  12/11/15 added arguments for in-position line (was -190) and edge line (was 700)
# Requirements:
#  1. Be sure there are front and side .csv files for all tests with no extras. Remove any
#     other .csv files from the directory
#  2. Side view files must be named "S_test number.csv". (e.g., S_55A550-01.csv)
#     Front view files must be named "F_test number.csv". (e.g., F_55A550-01.csv)
# Instructions:
#  1. Run the following lines of code. Easiest way is Control-A to select everything, then
#     select "Run" button in upper-right corner of RStudio script editing window.
#  2. You will be prompted to select the directory where TrackImage .csv files are stored.
#     You might have to look for it on the Windows task bar if the file selection window
#     doesn't open to the front screen. You can navigate within the file selection window
#     but it is easier to use the "Copy address as text" function in Windows Explorer to
#     copy/paste the directory path into the R file window.
#  3. R will read the files, combine the front and side view data, and write a .csv file
#     stacked data set into a subdirectory named "Output".
#  4. The stability results will be written to a .csv file in the "Output" folder
# load function scripts (network share paths; both must be reachable)
source("S:\\OTCCommon\\Programming Tools\\Newkirk GitHub\\TrackImage-R-Scripts\\combine front and side TI files.r")
source("S:\\OTCCommon\\Programming Tools\\Newkirk GitHub\\TrackImage-R-Scripts\\Cushion stability metrics.r")
#
# Read TrackImage .csv files, put in "dat2"
res <- CombineFS()
#
# Optional line to read existing stacked .csv file: dat2 <- read.csv(choose.files())
# Pass stacked TrackImage data through metrics function
# arguments:
#  dat2 = dataframe containing stacked TI data (should always = dat2)
# User provided arguments
offset<- 105 # offset = distance from origin to center of cushion (used for FCA spec., typ. 100-110)
LTL <- 20 # LTL = lower time limit for stability evaluation (For Toyota = 20)
UTL <- 75 # UTL = upper time limit for stability evaluation (For Toyota = 75)
ST1 <- 11 # ST1 is min time for slope calculation (need to graph data and make judgement)
ST2 <- 19.5 # ST2 is max time for slope calculation (need to graph data and make judgement)
Edge_side <- "L" # Side cushion leading edge (depends on camera position).
# Options are "L" and "R". MUST BE IN QUOTES
#
# Run metrics function
dat3 <- StabMetrics(dat2=res$data,offset=offset, LTL=LTL, UTL=UTL,ST1=ST1,ST2=ST2,
Src=res$Source,Qty=res$Quantity, Edge_side=Edge_side)
#
# optional to write data table to clipboard:
# Select rest of line and hit "Run" button -> write.table(dat3, file='clipboard', sep='\t',row.names=FALSE)
# End script
|
5c92b19d1fd266319fde9e238e1c74ea5fc79487
|
8e41c5e15a9c707baf47d8540cbb4bfa20a7ee1b
|
/man/en_boggle_dices.Rd
|
dd0f4d55faf24c63d56dd3ceb0b80f77e72116aa
|
[] |
no_license
|
jcrodriguez1989/boggler
|
ca288c40df9e6a2a324ff42ff3afe9da7a32d630
|
600086dfdee93b6d758869d9ff62817c005c3d61
|
refs/heads/master
| 2020-08-15T05:55:56.023003
| 2019-10-15T12:26:03
| 2019-10-15T12:26:03
| 215,290,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 649
|
rd
|
en_boggle_dices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{en_boggle_dices}
\alias{en_boggle_dices}
\title{English Boggle dices.}
\format{A character vector with sixteen 6-letters strings, each representing
one dice.}
\usage{
en_boggle_dices
}
\description{
Sixteen dice of the English version of the Boggle game.
Original Boggle dice distribution according to
[https://boardgames.stackexchange.com/questions/48151/boggle-letter-distribution-and-face-orientation-of-each-die](https://boardgames.stackexchange.com/questions/48151/boggle-letter-distribution-and-face-orientation-of-each-die)
}
\keyword{datasets}
|
0ef258c9435e35a28c62ea43de5ba7e3cf5eaf14
|
a22eb8c44c5e341716520615ff8797c52a4d6f92
|
/R/6/d.r
|
e8b8371af03bb39b8466d9133a1b00a1fc155e9f
|
[] |
no_license
|
alexzhirkevich/All.sklad
|
1c5b8dc44a2ee3046cc56520d498060f9246a700
|
427ef1d5a169583ea61904b407ba6e74e2fd3430
|
refs/heads/main
| 2023-04-19T08:46:02.712248
| 2021-05-07T19:40:31
| 2021-05-07T19:40:31
| 304,058,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
d.r
|
# Descriptive statistics for a sample from the Beta(0.5, 0.5) distribution.
# Requires the `moments` package for kurtosis and skewness.
library(moments)

# Draw 200 observations from Beta(shape1 = 0.5, shape2 = 0.5).
x <- rbeta(200, 0.5, 0.5)

# Peek at the head and tail of the sample.
print(x[1:10])
print(x[180:200])

# Sample range.
print(min(x))
print(max(x))

# 0.5 is the theoretical mean of Beta(0.5, 0.5), printed for comparison.
# NOTE(review): possibly print(mean(x)) was intended here — confirm.
print(0.5)

# Dispersion and shape statistics of the sample.
print(var(x))
print(median(x))
print(kurtosis(x))
print(skewness(x))
print(quantile(x, c(0.25, 0.75, 0.95)))

# Graphical summaries: box plot and empirical CDF.
boxplot(x)
plot.ecdf(x)

# Drop exact zeros before density estimation.
y <- x[x != 0]
hist(y, col = "green", border = "blue", prob = TRUE, main = "Гистрограмма")
lines(density(y), lwd = 10, col = "red")

# Theoretical Beta(0.5, 0.5) density at the sampled points.
# BUG FIX: the original called dbeta(x, 0, 5, 0, 5) — commas were used
# where decimal points were intended, which made shape1 = 0 (invalid)
# and passed a stray truthy value as the `log` argument.
dbeta(x, 0.5, 0.5)
|
06cad77e198b1fc92da2081ebd1c2a282127f09b
|
2e8130df687fe6ace366709029b7fa3a71c99424
|
/R/run_app.R
|
239ff43cc6ceb022b17154144eaa7d0ba9a5dc6a
|
[
"MIT"
] |
permissive
|
feinmann/psymap
|
8b9828959d24d52766769a6e5f930bbf3bd2f84e
|
9989febd86a48bce48487bfe27c8354e57cff4c8
|
refs/heads/master
| 2021-03-05T21:29:39.770657
| 2020-03-15T16:54:03
| 2020-03-15T16:54:03
| 246,154,095
| 0
| 0
|
NOASSERTION
| 2020-03-14T16:18:51
| 2020-03-09T22:22:48
|
R
|
UTF-8
|
R
| false
| false
| 444
|
r
|
run_app.R
|
#' Run the Shiny Application
#'
#' Builds the Shiny app from `app_ui`/`app_server` and launches it with
#' golem options.
#'
#' @export
#' @importFrom shiny shinyApp
#' @importFrom golem with_golem_options
run_app <- function(...) {
  # Lifecycle hooks log setup/cleanup so app start and stop are visible
  # in the console.
  application <- shinyApp(
    ui = app_ui,
    server = app_server,
    onStart = function() {
      cat("Doing application setup\n")
      onStop(function() {
        cat("Doing application cleanup\n")
      })
    }
  )
  # Forward any user-supplied options to golem.
  with_golem_options(app = application, golem_opts = list(...))
}
|
643232d47c8eda38accf82c4d5a599e47e5a15fc
|
a59d4f0bc24042fcc3fa9e6e3c1c2c815c5ad2b8
|
/R/marginal.lkl.nl.r
|
51e57270b536ad44b2a2fe5350ead247a8c886bc
|
[] |
no_license
|
cran/BMAmevt
|
91c78dfffff611265d89288be70f85b786963e42
|
e4c449a55c81e39831acbe1e891b3b186b57f179
|
refs/heads/master
| 2023-04-27T08:12:34.608762
| 2023-04-21T01:22:38
| 2023-04-21T01:22:38
| 17,677,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
r
|
marginal.lkl.nl.r
|
##' @export
##' @rdname marginal.lkl.pb
marginal.lkl.nl <- function(dat,
                            Nsim = 10e+3,
                            displ = TRUE,
                            Hpar = get("nl.Hpar"),
                            Nsim.min = Nsim,
                            precision = 0,
                            show.progress = floor(seq(1, Nsim, length.out = 20))) {
  ## Thin wrapper: delegates to marginal.lkl() with the nested-logistic
  ## likelihood (dnestlog) and its matching prior (prior.nl), forwarding
  ## every tuning parameter unchanged.
  marginal.lkl(
    dat = dat,
    likelihood = dnestlog,
    prior = prior.nl,
    Nsim = Nsim,
    displ = displ,
    Hpar = Hpar,
    Nsim.min = Nsim.min,
    precision = precision,
    show.progress = show.progress
  )
}
|
a0b2933c6dc1171cdbca2df69ae3ad9ad9d4aea0
|
25ea69494b2cb174f04adf111c263dcfe8ca82b8
|
/server.R
|
20b7d0cfb65ed5632838aefb0319e12fb0e48851
|
[] |
no_license
|
zek12/BCLiquor
|
8911e952976a61ad0eb4cce074225ebcc8b440a3
|
c99cff2b28352e34722f22aae1028352eef0c062
|
refs/heads/master
| 2020-03-21T20:42:00.678919
| 2018-07-02T16:36:12
| 2018-07-02T16:36:12
| 139,022,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,212
|
r
|
server.R
|
# Shiny server for the BC Liquor Store product explorer.
# Filters the bcl-data.csv catalogue by price, type, subtype and country,
# then renders a histogram of alcohol content and a searchable table.
# library(shiny)
# if (!require("DT")) install.packages('DT')
library(ggplot2)
library(dplyr)
library(DT)
# Product catalogue: read once at startup, shared by all sessions.
bcl <- read.csv("bcl-data.csv", stringsAsFactors = FALSE)
# print(head(bcl))
server <- function(input, output) {
# observe({ print(input$priceInput) })
#
# priceDiff <- reactive({
#   diff(input$priceInput)
# })
# observe({ print(priceDiff()) })
# Radio buttons listing every product type found in the data.
output$typeOutput <- renderUI({
radioButtons("typeInput", "Product type",
choices = bcl$Type %>% unique() %>% sort(),
selected = "WINE")
})
# Subtype dropdown, dependent on the chosen type; "ALL" disables the filter.
output$subtypeOutput <- renderUI({
if (is.null(input$typeInput)) {
return(NULL)
}
selectInput("subtypeInput", "Product subtype",
choices = bcl %>% filter(Type == input$typeInput) %>%
.$Subtype %>% unique() %>% sort() %>% c("ALL", .),
selected = "ALL"
)
})
# Country dropdown; "ALL" disables the filter.
output$countryOutput <- renderUI({
selectInput("countryInput", "Country",
choices = bcl$Country %>% unique() %>% sort() %>% c("ALL", .),
selected = "ALL"
)
})
# Reactive subset of bcl honouring price range, type, subtype and country.
# Returns NULL until the dynamically rendered inputs above exist.
filtered <- reactive({
if (is.null(input$countryInput) | is.null(input$typeInput) | is.null(input$subtypeInput)) {
return(NULL)
}
# Expand "ALL" to the full set of subtypes for the selected type.
if (input$subtypeInput == "ALL") {
subtype_selected <- bcl %>% filter(Type == input$typeInput) %>% .$Subtype %>% unique()
} else {
subtype_selected <- input$subtypeInput
}
# Expand "ALL" to every country present in the data.
if (input$countryInput == "ALL") {
country_selected <- bcl$Country %>% unique()
} else {
country_selected <- input$countryInput
}
# bcl %>%
#   filter(Price >= input$priceInput[1],
#          Price <= input$priceInput[2],
#          Type == input$typeInput,
#          Subtype == input$subtypeInput,
#          Country == input$countryInput
#   )
bcl %>%
filter(Price >= input$priceInput[1],
Price <= input$priceInput[2],
Type == input$typeInput,
Subtype %in% c(subtype_selected),
Country %in% c(country_selected)
)
})
# Histogram of alcohol content over the filtered products.
output$coolplot <- renderPlot({
# print(input$priceInput)
if (is.null(filtered())) {
return()
}
ggplot(filtered(), aes(Alcohol_Content)) +
geom_histogram(binwidth = 0.1)
})
# Result count shown alongside the table.
output$results_number <- renderText({
if (!is.null(filtered())) {
paste0("Number of results: ", nrow(filtered()))
}
})
# output$results <- renderTable({
#   filtered()
# })
# Interactive results table; per-column search is disabled for the
# Subtype and Country columns, which are already constrained above.
output$results <- DT::renderDataTable({
datatable(filtered(),
options = list(
columnDefs = list(list(searchable = FALSE, targets = c(2,3)))
# 2,3 means columns 2,3 from table, which are Subtype and Country
),
filter = 'top')
})
}
|
fab8528809796cc42a2ce4b93da2cdcc1d9a8a2b
|
1461465b418919bb79a7b0ba6846b62936e6b55e
|
/man/sBayesRF_parallel.Rd
|
444840894d224d48840ed7f1bc2a58692a047005
|
[
"MIT"
] |
permissive
|
guhjy/sBayesRF
|
c20886611cc23c7ee639a7b1e8f1dec0e7e8bb2e
|
ae81a1accf0d83f95b741d339eca8ff269704169
|
refs/heads/master
| 2020-08-08T14:47:59.075532
| 2019-08-14T16:31:51
| 2019-08-14T16:31:51
| 213,851,633
| 0
| 1
| null | 2019-10-09T07:34:24
| 2019-10-09T07:34:24
| null |
UTF-8
|
R
| false
| true
| 2,522
|
rd
|
sBayesRF_parallel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sBayesRF_parallel.R
\name{sBayesRF_parallel}
\alias{sBayesRF_parallel}
\title{Parallel Safe-Bayesian Random Forest}
\usage{
sBayesRF_parallel(lambda = 0.45, num_trees = 1000, seed, num_cats, y,
original_datamat, alpha_parameters = rep(1, num_cats), beta_par = 1,
test_datamat, ncores = 1)
}
\arguments{
\item{lambda}{A real number between 0 and 1 that determines the splitting probability in the prior (which is used as the importance sampler of tree models). Quadrianto and Ghahramani (2015) recommend a value less than 0.5 .}
\item{num_trees}{The number of trees to be sampled.}
\item{seed}{The seed for random number generation.}
\item{num_cats}{The number of possible values for the outcome variable.}
\item{y}{The training data vector of outcomes. This must be a vector of integers between 1 and num_cats.}
\item{original_datamat}{The original training data. Currently all variables must be continuous. The training data does not need to be transformed before being entered to this function.}
\item{alpha_parameters}{Vector of prior parameters.}
\item{beta_par}{The power to which the likelihood is to be raised. For BMA, set beta_par=1.}
\item{ncores}{The number of cores to be used in parallelization.}
\item{test_datamat}{The test data. This matrix must have the same number of columns (variables) as the training data. Currently all variables must be continuous. The test data does not need to be transformed before being entered to this function.}
}
\value{
A matrix of probabilities with the number of rows equal to the number of test observations and the number of columns equal to the number of possible outcome categories.
}
\description{
A parallelized implementation of the Safe-Bayesian Random Forest described by Quadrianto and Ghahramani (2015)
}
\examples{
Num_vars <- 50
Num_obs <- 100
Num_cats <- 5
alpha_parameters <- rep(1,Num_cats)
beta_par <- 0.5
data_original1 <- matrix( rnorm(Num_obs*Num_vars,mean=0,sd=1), Num_obs, Num_vars)
y <- sample(Num_cats,Num_obs, replace = TRUE)
Num_test_vars <- 50
Num_test_obs <- 700
data_test1 <- matrix( rnorm(Num_test_obs*Num_test_vars,mean=0,sd=1), Num_test_obs, Num_test_vars)
Num_split_vars <- 10
lambda <- 0.45
Num_trees <- 100
seed1 <- 42
ncores <- 1
sBayesRF_parallel(lambda, Num_trees,
seed1, Num_cats,
y, data_original1,
alpha_parameters, beta_par,
data_test1,ncores)
}
|
a2f854e2edb41b94e50e22240f3eb1e493c2e835
|
c438e401cbc856aeb77707846260f0525734b997
|
/data-raw/05-calc_windmodel_data.R
|
d967efb5fc2869219a416e1543a2191e3f7a891e
|
[] |
no_license
|
geanders/hurricaneexposuredata
|
6794f93831b7ee3dec19ea83975e1b2b738a0014
|
b42fe54788ba8ade5e6aab614c75eea41d51a80c
|
refs/heads/master
| 2022-06-02T05:47:11.387854
| 2022-05-16T01:39:35
| 2022-05-16T01:39:35
| 61,568,076
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
05-calc_windmodel_data.R
|
## Build county-level modeled wind data (storm_winds) for the package.
## Be sure to re-build package after running 01 and 02 and before
## running this
library(dplyr)
# Storm tracks produced by the earlier scripts.
data(hurr_tracks, package = "hurricaneexposuredata")
storms <- unique(hurr_tracks$usa_atcf_id)
# Lookup table mapping ATCF ids to human-readable storm ids.
storm_id_table <- hurr_tracks %>%
select(storm_id, usa_atcf_id) %>%
distinct()
library(stormwindmodel)
# County centroids at which winds are modeled.
data(county_points, package = "stormwindmodel")
library(devtools)
library(dplyr)
# Preallocate one list slot per storm (commented length = 2 was a
# small-sample debug setting).
storm_winds <- vector("list",
#length = 2)
length = length(storms))
# Model winds for each storm at every county point, tagging each result
# with the storm's ids.
for(i in 1:length(storm_winds)){
print(storms[i])
storm_track <- subset(hurr_tracks, usa_atcf_id == storms[i])
winds <- get_grid_winds(hurr_track = storm_track,
grid_df = county_points) %>%
dplyr::rename(fips = gridid) %>%
dplyr::mutate(usa_atcf_id = storms[i],
storm_id = storm_id_table$storm_id[storm_id_table$usa_atcf_id == storms[i]])
storm_winds[[i]] <- winds
}
# Stack the per-storm results and save as package data.
storm_winds <- do.call("rbind", storm_winds)
usethis::use_data(storm_winds, overwrite = TRUE)
|
c0218071cd1e2a287c1a62c79ed7aeca54b18497
|
daa39ef0a3e4d643bfdead74e0067ff6c856f304
|
/Chapter-03-DataTypes-Lists.R
|
bb8bade46d32f64e9f97b25fb9ed9ee0a62456a2
|
[] |
no_license
|
balajidileepkumar/R_ML
|
ca32105e78c41f17c1397078c34c478a84e38334
|
3da18dad0d173ae28c6552a5f9a22dd308180e1b
|
refs/heads/master
| 2021-06-30T03:32:27.506476
| 2020-12-23T15:00:16
| 2020-12-23T15:00:16
| 188,610,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,935
|
r
|
Chapter-03-DataTypes-Lists.R
|
# Chapter 3: Data types — lists.
# A list is an ordered collection whose elements may differ in type,
# unlike an atomic vector, which is homogeneous. Lists can be nested,
# which makes them effectively multidimensional containers.

# Creating a plain (unnamed) list from scalars of different types.
basic_list <- list(1, 2, 3 + 4i, TRUE, "Hello World")
typeof(basic_list)

# Lists can also hold whole vectors as elements.
m <- c(1, 2, 3)
n <- c(1.0, 2.0, 3.0)
o <- c(1.0 + 2.0i, 2.0 + 3.0i, 3.0 + 4.0i)
p <- c(TRUE, FALSE)
Q_list <- list(m, n, o, p)
print(Q_list)

# Assign names to the elements so they can be accessed with `$`.
names(Q_list) <- c("numbers", "float", "complex", "boolean")
print(Q_list)
Q_list$float
Q_list$boolean
Q_list$numbers[3]
Q_list$boolean[2]
Q_list$float
Q_list$boolean

# Creating named lists.
# BUG FIX: the original supplied only two names ("Name", "Age") for a
# three-element list, leaving the salary component named NA.
NoNamedList <- list(c("Raj", "Dilip", "Jai"), c(23, 27, 25), c(14000.00, 12000.00, 15000.00))
names(NoNamedList) <- c("Name", "Age", "Salary")
my_named_list <- list(name = c("Raj", "Dilip", "Jai"), age = c(23, 27, 25), salary = c(14000.00, 12000.00, 15000.00))
print(my_named_list)

# List slicing: `[` returns a sub-list, `[[` extracts the element itself.
print(my_named_list["name"])
my_named_list$name[1]
my_named_list[["name"]]
print(my_named_list[["name"]][1])
my_named_list$name[1]
print(my_named_list["age"])
print(my_named_list["salary"])

# Member access: `[[name]][index]` reaches individual values.
print(my_named_list[["name"]][1])
print(my_named_list[["age"]][2])
print(my_named_list[["salary"]][3])
print(my_named_list[["name"]][1:2])
print(my_named_list[["name"]][c(1, 3)])
print(my_named_list[c("name", "salary")])

# Indexed assignment: replace individual values in place.
my_named_list[["name"]][1] <- "Peter"
my_named_list[["name"]][2] <- "John"
my_named_list[["name"]][3] <- "Mike"

# Re-print the values to confirm the replacement.
print(my_named_list[["name"]][1])
print(my_named_list[["age"]][2])
print(my_named_list[["salary"]][3])
print(my_named_list[["name"]][1:2])
print(my_named_list[["name"]][c(1, 3)])
#lapply(my_named_list,"[",1,2)

# Select the first three elements as a sub-list.
# BUG FIX: the original used `[[c(1:3)]]`, which is *recursive* indexing
# (my_named_list[[1]][[2]][[3]]) and errors with "subscript out of bounds".
print(my_named_list[c(1:3)])
print(my_named_list[["name"]])

# attach()/detach(): attach puts the list's components on the search
# path so they can be referenced by bare name; detach removes them.
# BUG FIX: the original detached before ever attaching (an error) and
# referenced `name`/`age`/`salary` while the list was not attached.
attach(my_named_list)
name
age
salary
detach(my_named_list)
class(my_named_list)

# Scalars in R are length-1 doubles by default.
a <- 5.0
class(a)
typeof(a)
|
1a55e0275e65cd4d654577b37e2d434b94aa1e4c
|
484b030dde8f1f7fa407f3352e4bd585f2ce07ab
|
/Air Quality Analysis/plot5.R
|
ed9f449c14d4489c4393d1051bbf8f0a5883c7d7
|
[] |
no_license
|
conniewang3/All-Coursera-Projects
|
ba9109e7d442379e9c89048f59a530e6ebb0e7f3
|
d0fcae49af8e1c037a17aa82d967563ebb8ba00a
|
refs/heads/master
| 2020-03-12T12:34:56.116355
| 2018-06-02T13:22:06
| 2018-06-02T13:22:06
| 130,621,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
r
|
plot5.R
|
# Load packages
library(ggplot2)
library(scales)

# Read in the NEI emissions summary and the source classification table.
data <- readRDS("summarySCC_PM25.rds")
sources <- readRDS("Source_Classification_Code.rds")

# Answer the question: "How have emissions from motor vehicle sources
# changed from 1999-2008 in Baltimore City?"
#
# "Motor vehicle sources" here includes: motorcycles, aircraft,
# highway vehicles (cars, trucks, buses).
SCC.motor <- as.character(sources[grep("Motor", sources$Short.Name), 'SCC'])
SCC.air <- as.character(sources[grep("Aircraft", sources$SCC.Level.Two), 'SCC'])
SCC.cars <- as.character(sources[grep("Highway", sources$SCC.Level.Two), 'SCC'])
SCC.veh <- c(SCC.motor, SCC.air, SCC.cars)
data.veh <- subset(data, SCC %in% SCC.veh)

# Open PNG graphics device
png(filename = 'plot5.png')

# BUG FIX: ggplot objects are only rendered when printed.  When this
# script is run via source(), the unprinted plot left plot5.png blank,
# so the plot is wrapped in print().
# NOTE: fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`); kept here
# for compatibility with older ggplot2 versions.
print(
  ggplot(data.veh, aes(x = year, y = Emissions)) +
    stat_summary(fun.y = sum, geom = "line", linetype = 3, size = 1) +
    stat_summary(fun.y = sum, geom = "point", size = 2) +
    ggtitle("PM2.5 Emissions from 1999-2008 from Motor Vehicles") +
    xlab("Year") + ylab("Emissions")
)

# Close PNG graphics device
dev.off()
|
fa924c66d7fc07917585c9382ff41418f214364a
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RRI/man/two_sided_test.Rd
|
5675e90acb846f31e0737f6a86599cdfa23825f3
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 797
|
rd
|
two_sided_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{two_sided_test}
\alias{two_sided_test}
\title{Two-sided testing}
\usage{
two_sided_test(tobs, tvals, alpha)
}
\arguments{
\item{tobs}{The observed value of the test statistic (scalar).}
\item{tvals}{Vector of randomization values of the test statistic (to compare with \code{tobs}).}
\item{alpha}{Desired level of the test (between 0 to 1).}
}
\value{
Test decision (binary).
}
\description{
Decides to reject or not based on observed test statistic value \code{tobs}
and randomization values \code{tvals}. The test may randomize to achieve the specified level \code{alpha}
when there are very few randomization values.
}
\seealso{
Testing Statistical Hypotheses (Ch. 15, Lehmann and Romano, 2006)
}
|
daaa91e92acb13c3980cb067dd336db5b42df7b3
|
2011a34fd7a941541f2c293808fadca757cebab9
|
/SegOpt_TerraLib.R
|
423f0423e61e5aa6f0b19eaee1ff8bb3167f867a
|
[] |
no_license
|
RicMarPre/Segmentation_SegOptim
|
6a0725f487794d9dada024ce68d9086e229837ef
|
bfc5556058b7c15b9488223709b79b5340a71878
|
refs/heads/main
| 2023-01-23T21:41:39.655829
| 2020-11-25T13:39:13
| 2020-11-25T13:39:13
| 315,928,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,633
|
r
|
SegOpt_TerraLib.R
|
## SegOpt_TerraLib.R
## Installs the SegOptim package from GitHub and runs a TerraLib 5
## Baatz segmentation on a stacked multispectral raster.
installed.packages()
## ----- INSTALL THE PACKAGE SEGOPTIM ----- ##
# Installing from GitHub fails while 'raster' is attached
# ("package 'raster' is in use and will not be installed"),
# so unload it first.
detach("package:raster", unload = TRUE)
# PROTOCOL: first install 'remotes' (to connect to GitHub) if missing.
if(!requireNamespace("remotes")){
  install.packages("remotes")
}
# Run the installation from the GitHub repository.
remotes::install_github("joaofgoncalves/SegOptim")
## Installed successfully, but WITHOUT updating any of the packages
## listed by the installer.  If required, update everything:
update.packages(ask = FALSE)
##install.packages(c('tools', 'raster',, 'rgdal', 'rgeos','RColorBrewer', 'devtools', 'tools','sp',))
##install.packages(c('pillar', 'cli', 'vctrs', 'rprojroot','rstudioapi'))
# NOTE(review): the five lines below only CHECK whether each package is
# installed (they return TRUE/FALSE); they do not install anything.
"pillar" %in% rownames(installed.packages())      ## TRUE -> already installed
"cli" %in% rownames(installed.packages())         ## TRUE -> already installed
"vctrs" %in% rownames(installed.packages())       ## TRUE -> already installed
"rprojroot" %in% rownames(installed.packages())   ## TRUE -> already installed
"rstudioapi" %in% rownames(installed.packages())  ## TRUE -> already installed
# Update any packages for which a newer version is available.
# NOTE(review): update.packages() does not take old.packages() output as
# its first argument -- verify this call does what was intended.
update.packages(old.packages()) ## Update packages which are not updated yet.
## ----- END OF INSTALLATION OF PACKAGE SEGOPTIM ----- ##
## (Some packages had to be installed individually, e.g. rstudioapi.)
library(SegOptim)
library(tools)
library(raster)
library(rgdal)
library(rgeos)
library(sp)
library(RColorBrewer)
library(pillar)
library(cli)
library(vctrs)
library(rprojroot)
library(rstudioapi)
# Remove existing objects from the workspace.
# NOTE(review): rm(list = ls()) in a script is discouraged -- it wipes
# the caller's workspace as a side effect.
rm(list = ls())
# CLI access to the TerraLib 5 Baatz segmentation; its parameters can be
# optimized with genetic algorithms.
### Reference: https://rdrr.io/github/joaofgoncalves/SegOptim/man/segmentation_Terralib_Baatz.html
# HELP: ??segmentation_Terralib_Baatz
## Build a multi-layered raster from the individual input band files.
allBands <- list.files("D:/_PhD_/_y_2020_n_2021/Studies/02_Methodology/_Data_/Original_Data/MATSALU/multispectral/hosby",pattern = "tif$",full.names = TRUE)
# 1. Stack the bands and write the stack to disk.
stackBands <- stack(allBands)
writeRaster(stackBands, filename = "D:/_PhD_/_y_2020_n_2021/Studies/02_Methodology/_Data_/Process_Data/Stacks/Stack_Hosby.tif")
# Inspect the bands via a RasterBrick.
brikBands_br <- brick(stackBands)
# Find the band indexes (order inferred from the input file names).
print(brikBands_br[[4]])  ## RED EDGE
print("/n")
print(brikBands_br[[3]])  ## RED
print("/n")
print(brikBands_br[[2]])  ## NIR
print("/n")
print(brikBands_br[[1]])  ## GREEN
# Run the Baatz segmentation.
# NOTE(review): c(5,3,20,500) appears to pass the segmentation
# parameters positionally (the named tuning arguments below are NULL) --
# confirm against the SegOptim documentation.  The MinSize comment
# mentions 8-pixel training samples but MinSize is left NULL.
ImgSegment <- segmentation_Terralib_Baatz(
  c(5,3,20,500),
  "D:/_PhD_/_y_2020_n_2021/Studies/02_Methodology/_Data_/Process_Data/Stacks/Stack_Hosby.tif",
  outputSegmRst = "D:/_PhD_/_y_2020_n_2021/Studies/02_Methodology/_Data_/Process_Data/Segmentations/Hosby/aaa.tif",
  CompactnessWeight = NULL,
  SpectralWeight = NULL,
  Threshold = NULL,
  MinSize = NULL, # Minimum size of 8 because our training samples have 8 pixels (Pol -> Ras)
  verbose = TRUE, # show progress messages
  TerraLib.path = "C:/terralib-5.2.1-TISA-win_x64/terralib-5.2.1-TISA/lib" #input for argument path
)
# Check out the result.
rstSegm <- raster(ImgSegment$segm)
print(rstSegm)
plot(rstSegm)
|
32a861d015a1c9dfa0c78d636a2c376b90a236fa
|
5e016a253b0af1556e01ba76eac396530d07746c
|
/humann/analysis/humann_otuModel_China_unlog.R
|
cb07859a6cc298c2dd73d16be8d8db52b5b78eab
|
[] |
no_license
|
mafergomez70/UrbanRuralChina
|
14346052bedc426ebeb1ef4afcac74f047a8e13a
|
51dd745ff22c335698343e2167644bea3a37b3b5
|
refs/heads/master
| 2020-03-18T04:22:29.672605
| 2017-08-15T01:01:14
| 2017-08-15T01:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,066
|
r
|
humann_otuModel_China_unlog.R
|
## Models for HUMAnN results: test urban vs. rural differences in KEGG
## module/pathway relative abundance (UNLOGGED values).  For each KEGG
## term present in more than 1/4 of samples, fit a linear model and a
## Wilcoxon test on rural/urban status, BH-adjust the p-values, write a
## results table and draw per-term boxplots ordered by p-value.
rm(list = ls())  # NOTE: clearing the workspace in a script is discouraged
library("pscl")
library("lmtest")
library("nlme")

## BUG FIX: the original called setwd("") which always errors
## ("cannot change working directory").  Set the data directory here
## before running:
# setwd("path/to/humann/data")

levels <- c("module", "pathway")

for (lev in levels) {
  print(lev)

  ## Read the abundance table twice: first to learn the column count,
  ## then again with explicit column classes (2 character ID columns
  ## followed by numeric columns).
  file <- paste("humann_keggAsCol_withRurUrb_", lev, ".txt", sep = "")
  table <- read.table(file, header = T, sep = "\t")
  ncol <- ncol(table)
  table <- read.table(file, header = T, sep = "\t",
                      colClasses = c(rep("character", 2), rep("numeric", ncol - 2)))

  ## KEGG term descriptions (sampleID -> NAME lookup).
  desc <- read.table(paste("humann_keggAsRow_", lev, ".txt", sep = ""), sep = "\t",
                     quote = "", header = T, stringsAsFactors = F)

  ## Accumulators, filled per retained KEGG column.
  names <- vector()
  description <- vector()
  pValuesUrbanRural <- vector()
  mean <- vector()
  sd <- vector()
  meanUrban <- vector()
  sdUrban <- vector()
  meanRural <- vector()
  sdRural <- vector()
  pValuesUrbanRuralWilcox <- vector()
  r.squared <- vector()
  index <- 1

  ## BUG FIX: define the grouping factor once, before both loops.  The
  ## original only created it inside the filter loop, and the plotting
  ## loop below relied on the value leaking out of the last iteration
  ## (undefined if no column passed the prevalence filter).
  urbanRural <- factor(table$ruralUrban)

  ## p-values: columns 7:ncol hold the KEGG abundances
  ## (assumes columns 1-6 are sample metadata -- TODO confirm).
  for (i in 7:ncol) {
    ## Keep terms that are non-zero in more than a quarter of samples.
    if (sum(table[, i] != 0) > nrow(table) / 4) {
      kegg <- table[, i]
      mean[index] <- mean(kegg)
      sd[index] <- sd(kegg)
      meanUrban[index] <- mean(kegg[table$ruralUrban == "urban"])
      meanRural[index] <- mean(kegg[table$ruralUrban == "rural"])
      sdUrban[index] <- sd(kegg[table$ruralUrban == "urban"])
      sdRural[index] <- sd(kegg[table$ruralUrban == "rural"])

      ## abbreviation and description
      names[index] <- names(table)[i]
      description[index] <- desc$NAME[desc$sampleID == names[index]]

      ## linear model: abundance ~ rural/urban
      model <- lm(kegg ~ urbanRural)
      pValuesUrbanRural[index] <- anova(model)$`Pr(>F)`[1]
      r.squared[index] <- summary(model)$r.squared

      ## non-parametric counterpart
      pValuesUrbanRuralWilcox[index] <- wilcox.test(kegg ~ urbanRural)$p.value

      index <- index + 1
    }
  }

  ## Assemble the results table, BH-adjust and sort by raw p-value.
  dFrame <- data.frame(kegg = names, name = description,
                       mean, sd, meanUrban, sdUrban, meanRural, sdRural,
                       pValuesUrbanRural, pValuesUrbanRuralWilcox)
  dFrame$UrbanToRural <- meanUrban / meanRural
  dFrame$adjustedPurbanRural <- p.adjust(dFrame$pValuesUrbanRural, method = "BH")
  dFrame$adjustedPurbanRuralWilcox <- p.adjust(dFrame$pValuesUrbanRuralWilcox, method = "BH")
  dFrame$r.squared <- r.squared
  dFrame <- dFrame[order(dFrame$pValuesUrbanRural), ]
  write.table(dFrame, file = paste("humann_otuModel_pValues_unlog_", lev, ".txt", sep = ""),
              sep = "\t", row.names = FALSE)

  ## Plot: one boxplot per KEGG term, in p-value order.
  pdf(paste("humann_model_boxplots_", lev, ".pdf", sep = ""), height = 5, width = 5)
  for (i in seq_len(nrow(dFrame))) {
    name <- dFrame$kegg[i]
    abun <- table[, names(table) == name]
    graphMain <- paste("WGS ", lev, ": ", name,
                       "\npAdjRuralUrban= ", format(dFrame$adjustedPurbanRural[i], digits = 3), sep = "")
    ## BUG FIX: this is the unlogged analysis, so the axis label must not
    ## say "log"; the original label was copied from the log version.
    boxplot(abun ~ urbanRural, main = graphMain, ylab = "relative abundance",
            cex.main = 1, outline = F, ylim = range(abun))
    points(abun ~ jitter(as.numeric(urbanRural)), pch = 16,
           col = ifelse(urbanRural == "rural", "blue", "red"))
  }
  dev.off()
}
|
0a12fea0abb6245a37819a90eaa856509924d9c4
|
ef3c9b82c35810b59421875ccaf885b3bd221d9d
|
/steps/1a_getdata_cmip6.R
|
5dda700f95960f0e2d57ef897c510b99a401baba
|
[] |
no_license
|
haachicanoy/wfp_training
|
5f18f9bd2fcc9db5cb44a3798f23edc27400786e
|
a397da362585bc30efb54d3acd82f3489be009d7
|
refs/heads/main
| 2023-08-06T15:42:17.395063
| 2021-09-29T21:03:48
| 2021-09-29T21:03:48
| 409,820,371
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,697
|
r
|
1a_getdata_cmip6.R
|
## Download CMIP6 daily climate data (historical + SSP5-8.5) for a set of
## variables and GCMs, then demo post-processing with terra/geodata.
## Helper functions (getMetaCMIP6, getDataCMIP6) come from the sourced file.
source("risk_profiles/code/_CMIP6_funs.R")

## Variables (precipitation, temperature) and the five GCMs of interest.
vars <- c("pr","tas","tasmax","tasmin")
models <- c("ACCESS-ESM1-5","EC-Earth3-Veg","INM-CM5-0","MPI-ESM1-2-HR","MRI-ESM2-0")
varmod <- expand.grid(vars, models)
names(varmod) <- c("vars", "models")

## Pick one variable/model combination for this interactive walk-through.
i <- 1
var <- as.character(varmod$vars[i])
model <- as.character(varmod$models[i])

# historical simulations (metadata query; try() so a failed query does
# not abort the script)
dhist <- try(getMetaCMIP6(offset = 0,
                          limit = 10000,
                          activity_id="CMIP",
                          experiment_id = "historical",
                          frequency = "day",
                          member_id = "r1i1p1f1",
                          variable_id = var,
                          source_id = model,
                          mip_era = "CMIP6"))
head(dhist)
dim(dhist)

# future simulations (ScenarioMIP, SSP5-8.5)
dfut <- try(getMetaCMIP6(offset = 0,
                         limit = 10000,
                         activity_id="ScenarioMIP",
                         experiment_id = "ssp585",
                         member_id = "r1i1p1f1",
                         frequency = "day",
                         variable_id = var,
                         source_id = model,
                         mip_era = "CMIP6"))
head(dfut)
dim(dfut)

# combine both metadata tables
dd <- rbind(dhist, dfut)

# now download files
options(timeout=3600)  # large NetCDF downloads easily exceed the 60 s default
downdir <- "..."       # NOTE(review): placeholder -- set a real download directory
# one file
getDataCMIP6(1, dd, downdir, silent=FALSE)
# all files (NOTE: 1:nrow(dd) breaks if dd is empty; seq_len(nrow(dd)) is safer)
lapply(1:nrow(dd), getDataCMIP6, dd, downdir, silent=FALSE)

# order by file size (interactive inspection only; View() needs RStudio)
dfut$file_size <- as.numeric(dfut$file_size)
View(dfut[order(dfut$file_size),])

################################################################################
# try processing a smaller file
library(terra)
library(geodata)
library(data.table)
dfut <- try(getMetaCMIP6(offset = 0,
                         limit = 10000,
                         activity_id="ScenarioMIP",
                         experiment_id = "ssp585",
                         member_id = "r1i1p1f1",
                         frequency = "day",
                         variable_id = "tas",
                         source_id = "MPI-ESM1-2-HR",
                         mip_era = "CMIP6"))
getDataCMIP6(1, dfut, downdir, silent=FALSE)
f <- list.files(downdir, ".nc$", full.names = TRUE)
r <- rast(f)
# time from layer names
# NOTE(review): r@ptr$time reaches into terra internals and may break
# across terra versions; terra::time(r) is the public accessor.
tm <- as.POSIXct(r@ptr$time, origin = "1970-01-01")
# let's say we want the month of May 2100
k <- which(tm >= "2100-05-01" & tm <= "2100-05-31")
r <- subset(r, k)
rt <- rotate(r)  # shift longitudes from 0..360 to -180..180
# what is the effect of rotate?
dev.new(width=6, height=4, noRStudioGD = TRUE)
par(mfrow=c(1,2))
plot(r[[1]], range=c(273,330), legend=FALSE)
plot(rt[[1]], range=c(273,330))
# mask by country boundary (Tanzania, GADM level 0)
v <- geodata::gadm(country="TZA", level=0, path=downdir)
rs <- crop(rt, v)
rs <- mask(rs, v)
plot(rs[[1]])
plot(v, add = T)
# resample/disaggregate to smaller cell size
tres <- 0.05 # CHIRPS resolution
# NOTE(review): terra's current name for this function is disagg();
# disaggregate() may only work with older terra/raster versions.
rs1 <- disaggregate(rs, fact = round(res(rs)/tres))
# NOTE(review): region, res, interval and path are not defined anywhere
# in this script, and rain_chirps() is not defined here either --
# presumably they come from the sourced helper file; confirm before use.
chr <- rain_chirps(region, res, interval, "2020-01-01", "2020-01-01",
                   path, asRaster = TRUE)
ciso <- crop(chr, v)
ciso <- mask(ciso, v)
rs2 <- resample(rs, ciso)
dev.new(width=6, height=4, noRStudioGD = TRUE)
par(mfrow=c(1,2))
plot(rs1[[1]], range=c(273,330), legend=FALSE)
plot(v, add = T)
plot(rs2[[1]], range=c(273,330))
plot(v, add = T)
# save raster as dataframe
# NOTE(review): this reuses (overwrites) `dd`, which held the combined
# metadata table above.
dd <- terra::as.data.frame(rs1, xy = TRUE)
names(dd) <- c("x", "y", as.character(as.Date(tm[k])))
# convert from wide to long with data.table
ddl <- melt(setDT(dd), id.vars = c("x","y"), value.name = "tas", variable = "date")
# add cell numbers for joining back to the raster later
xy <- as.matrix(ddl[,c("x","y")])
ddl <- data.frame(id = cellFromXY(rs1, xy), ddl, stringsAsFactors = FALSE)
|
60dfaaf0f191b040bedc8c7698d0de6132650216
|
4d439b238f8e61f2f5225bb81f25b075b931f868
|
/main.R
|
838df221283674c9fbcc5c9ef380c741312446a6
|
[] |
no_license
|
wqx94330/CrisAlex_FinalProject
|
08938d5862a2fd9e5b622aa831bb63b4cd9c68a6
|
405ca388baefc38ad740d81a21e1b83877be4452
|
refs/heads/master
| 2020-05-23T09:09:20.675962
| 2017-02-02T18:54:55
| 2017-02-02T18:54:55
| 80,440,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,201
|
r
|
main.R
|
###Final Project
###Changes of vegetation within approximately 10 years in Brazil
###Team CrisAlex, Cristina González and Wan Quanxing
###02/02/2017
## Script to produce the NDVI difference between 2001 and 2010 and an
## interactive leaflet map combining it with population-change data.

# Libraries and helper functions needed (check that everything loads
# before running the rest of the script).
library(sp)
library(rgdal)
library(raster)
library(tiff)
library(lattice)
library(graphics)
library(leaflet)
source("R/functions.R")
source("R/pre_process.R")
source("R/plot_results.R")
# BUG FIX: the original line `rm(list.files())` is an error (rm() cannot
# take the result of list.files()); it was removed -- clearing the
# workspace here would also delete the functions sourced above.

##### NDVI part #####
# Directories used throughout the script
inDir <- "data"
midDir <- "midsteps"
landsatDir01 <- file.path("data", "landsat2001")
landsatDir10 <- file.path("data", "landsat2010")
outDir <- "output"

# Create the folders needed for the process (helper from pre_process.R)
pre_processing <- create_folders()

# Download the 2001 Landsat image and decompress it (takes a few minutes)
if (!file.exists(file.path(inDir, 'landsat2001.tar.gz'))) {
  download.file('https://www.dropbox.com/s/csiazbfo9569q4z/LT05_L1TP_231062_20010803_20161210_01_T1.tar.gz?dl=1',
                destfile = file.path(inDir, 'landsat2001.tar.gz'), method = 'auto', mode = "wb")
  # Unpack the data
  untar(file.path(inDir, 'landsat2001.tar.gz'), exdir = landsatDir01)
}

# Download the 2010 Landsat image and decompress it
if (!file.exists(file.path(inDir, 'landsat2010.tar.gz'))) {
  download.file('https://www.dropbox.com/s/z3vc5rybctgjk3o/LT05_L1TP_231062_20100727_20161014_01_T1.tar.gz?dl=1',
                destfile = file.path(inDir, 'landsat2010.tar.gz'), method = 'auto', mode = "wb")
  # Unpack the data
  untar(file.path(inDir, 'landsat2010.tar.gz'), exdir = landsatDir10)
}

# Read the 2001 and 2010 Landsat images (helpers from pre_process.R)
ln01 <- input_landsat01()
ln10 <- input_landsat10()

# Treat non-positive pixel values as missing data
ln10[ln10 <= 0] <- NA
ln01[ln01 <= 0] <- NA

# Trim the all-NA margins of the rasters
ln01_trim <- trim(ln01, values = NA)
ln10_trim <- trim(ln10, values = NA)

# Crop both rasters to their common extent so they align
ln01_ext <- crop(ln01_trim, ln10_trim, filename = "midsteps/landsat01_ext.grd", datatype = "INT2U", overwrite = TRUE)
ln10_ext <- crop(ln10_trim, ln01_trim, filename = "midsteps/landsat10_ext.grd", datatype = "INT2U", overwrite = TRUE)

# Separate the cloud layer (band 3) from the spectral bands
cloud_ln01 <- ln01_ext[[3]]
ln01_drop <- dropLayer(ln01_ext, 3)
cloud_ln10 <- ln10_ext[[3]]
ln10_drop <- dropLayer(ln10_ext, 3)

# Mask out clouds (cloud_01/cloud_10 helpers come from R/functions.R)
ln01_CloudFree <- overlay(x = ln01_drop, y = cloud_ln01, fun = cloud_01, filename = "midsteps/ln01_CloudFree.grd", overwrite = TRUE)
ln10_CloudFree <- overlay(x = ln10_drop, y = cloud_ln10, fun = cloud_10, filename = "midsteps/ln10_CloudFree.grd", overwrite = TRUE)

# Calculate NDVI for both years (ndvical from R/functions.R)
ndvi_land01 <- ndvical(ln01_CloudFree[[1]], ln01_CloudFree[[2]])
ndvi_land10 <- ndvical(ln10_CloudFree[[1]], ln10_CloudFree[[2]])

# Save the NDVI rasters in the output folder
writeRaster(x = ndvi_land01, filename = 'output/ndvi_land01.grd', datatype = "FLT8S")
# BUG FIX: the original wrote ndvi_land01 a second time here, so
# output/ndvi_land10.grd actually contained the 2001 NDVI.
writeRaster(x = ndvi_land10, filename = 'output/ndvi_land10.grd', datatype = "FLT8S")

# Produce the NDVI plots (helpers from plot_results.R)
plot_ndvi01 <- ndvi_2001()
plot_ndvi10 <- ndvi_2010()

## Visualisation of the results
plot_ndvi01
plot_ndvi10

## Final result of the NDVI part: 2010 minus 2001
NDVI_diff <- ndvi_land10 - ndvi_land01
writeRaster(x = NDVI_diff, filename = 'midsteps/difference_ndvi.grd', datatype = "FLT8S")

# Check the changes: negative values mean vegetation loss,
# positive values mean vegetation gain
hist(NDVI_diff, main = "Histogram difference NDVI")

##### Plot the difference between years in NDVI #####
plot_difference <- ndvi_dif()
plot_difference

# Save the NDVI difference in PNG format.
# BUG FIX: the original opened the PNG device but never drew anything and
# never closed it, leaving an empty file and a dangling device.
trellis.device(device = "png", filename = "output/plot_difference2.png")
print(plot_difference)
dev.off()

##### Population part #####
## Commands to produce the interactive map

# Read the NDVI-difference raster and reproject/resample it onto a
# 1500x1500 grid so leaflet can render it
r <- raster("midsteps/difference_ndvi.grd")
s <- raster(nrow = 1500, ncol = 1500)
s@extent = r@extent
proj4string(s) = proj4string(r)
s <- projectRaster(from = r, to = s, method = 'bilinear')

# Read the population data and plot everything with leaflet
# (the original also built an unused data frame `dh` from the same
# columns; it has been removed)
population <- read.csv("pop_manaus.csv")
# NOTE(review): the raster palette domain is taken from `r` while the
# image drawn is the resampled `s` -- values should be similar, but
# confirm the legend range is acceptable.
pal <- colorNumeric(c("#030303", "#FFFFFF", "#00CD00"), values(r),
                    na.color = "transparent")
cPal <- colorNumeric(palette = c("#FFFF00", "#FF6103", "#EE0000"), domain = population$DiffPop_2000_2010)
leaflet(population) %>% addProviderTiles("OpenStreetMap") %>%
  addCircleMarkers(fillColor = ~cPal(population$DiffPop_2000_2010), stroke = FALSE, fillOpacity = 0.9, popup = ~DiffPop_2000_2010) %>%
  addRasterImage(s, colors = pal, opacity = 0.6) %>%
  addLegend("bottomright", pal = cPal, values = ~DiffPop_2000_2010, title = "Population difference(2000/2010)", labFormat = labelFormat(suffix = "p."), opacity = 1) %>%
  addLegend("bottomleft", pal = pal, values = values(s),
            title = "NDVI difference(2000/2010)")
|
ca671e27a80183d84bbbd81ce4b20c625a76f70a
|
e4783bc2ea62637c0d0097401f87f76434d91785
|
/tests/testthat/tests_effectlite_latent_variables.R
|
24863bceeacca7ff2a31a43e7541bbfd5f4a106e
|
[] |
no_license
|
amayer2010/EffectLiteR
|
83c03abb11c54b16143744711afd22187f454d71
|
7eaac5cba8ce9bbd317c0abcda2fc94435a6652d
|
refs/heads/master
| 2023-08-06T16:26:42.513688
| 2023-06-27T20:29:24
| 2023-06-27T20:29:24
| 17,675,984
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,501
|
r
|
tests_effectlite_latent_variables.R
|
## Regression tests for effectLite() with latent covariates/outcomes.
## NOTE(review): the expected values are regression baselines taken from
## earlier runs of the package (tolerance 1e-5), not hand-derived truths.

# Latent covariate (eta1) and latent outcome (eta2), each measured by
# two indicators with equal loadings and intercept constraints.
test_that("effectLite works with latent z and y",{
  ## latent z and latent y
  mmtest <- '
  eta2 =~ 1*CPM12 + 1*CPM22
  eta1 =~ 1*CPM11 + 1*CPM21
  CPM11 + CPM12 ~ 0*1
  CPM21 ~ c(m,m)*1
  CPM22 ~ c(p,p)*1
  '
  m1 <- effectLite(y="eta2", x="x", z=c("eta1"), control="0",
                   measurement=mmtest, data=example02lv, fixed.cell=FALSE,
                   missing="fiml", syntax.only=FALSE)
  # Stack average, conditional and cell-specific effect tables and spot
  # check entries (estimate, test statistic, effect columns).
  effectsl <- rbind(m1@results@Egx,
                    m1@results@Egxgx,
                    m1@results@Egxgk,
                    m1@results@Egxgxk)
  expect_equal(effectsl[1,1], 1.901696, tolerance=1e-5)
  expect_equal(effectsl[2,3], 11.07721, tolerance=1e-5)
  expect_equal(effectsl[3,5], 2.154411, tolerance=1e-5)
})

# Same latent model, but with bootstrap standard errors (5 replications
# for speed; a warning is expected with so few bootstrap samples).
test_that("effectLite works with latent variables and bootstrap",{
  ## latent and bootstrap
  set.seed(142424)  # bootstrap resampling must be reproducible
  mmtest <- '
  eta2 =~ 1*CPM12 + 1*CPM22
  eta1 =~ 1*CPM11 + 1*CPM21
  CPM11 + CPM12 ~ 0*1
  CPM21 ~ c(m,m)*1
  CPM22 ~ c(p,p)*1
  '
  expect_warning({
    m1 <- effectLite(y="eta2", x="x", z=c("eta1"), control="0",
                     measurement=mmtest, data=example02lv, fixed.cell=TRUE,
                     missing="fiml", syntax.only=FALSE,
                     se="boot", bootstrap=5L)
  })
  res_latboot <- rbind(m1@results@Egx,
                       m1@results@Egxgx,
                       m1@results@Egxgk,
                       m1@results@Egxgxk)
  expect_equal(res_latboot[1,1], 1.901696, tolerance=1e-5)
  expect_equal(res_latboot[2,3], 12.95835, tolerance=1e-5)
  expect_equal(res_latboot[3,5], 2.154411, tolerance=1e-5)
})

# Measurement models that include a method factor (mf), used either as
# an additional latent covariate or only inside the measurement model.
test_that("effectLite works with method factors",{
  ############ Example 01a with method factor ##################
  mmtest <- '
  eta2 =~ 1*CPM12 + 1*CPM22
  eta1 =~ 1*CPM11 + 1*CPM21
  mf =~ 1*CPM11 + 1*CPM12
  CPM11 + CPM21 + CPM12 + CPM22 ~ 0*1
  '
  expect_warning({
    m1 <- effectLite(y="eta2", x="x", z=c("eta1","mf"), control="0",
                     measurement=mmtest, data=example02lv, fixed.cell=FALSE,
                     missing="fiml", syntax.only=FALSE)
  })
  res_latmf <- rbind(m1@results@Egx,
                     m1@results@Egxgx,
                     m1@results@Egxgk,
                     m1@results@Egxgxk)
  expect_equal(res_latmf[2,2], 0.8210409, tolerance=1e-5)
  expect_equal(res_latmf[2,4], 0.0086504805, tolerance=1e-5)
  expect_equal(res_latmf[3,5], 2.163089, tolerance=1e-5)
  ## Alternative method-factor parameterisation (loadings +1/-1)
  mm <- '
  eta2 =~ 1*CPM12 + 1*CPM22
  eta1 =~ 1*CPM11 + 1*CPM21
  mf =~ 1*CPM11 + -1*CPM21 + 1*CPM12 + -1*CPM22
  CPM11 + CPM21 + CPM12 + CPM22 ~ 0*1
  '
  ## As in Ivailo Partchev's EffectLite with MF as covariate
  expect_warning({
    m1 <- effectLite(y="eta2", x="x", z=c("eta1","mf"), control="0",
                     measurement=mm, data=example02lv)
  })
  res_latmfsteffi1 <- rbind(m1@results@Egx,
                            m1@results@Egxgx,
                            m1@results@Egxgk,
                            m1@results@Egxgxk)
  expect_equal(res_latmfsteffi1[2,2], 0.8167239, tolerance=1e-5)
  expect_equal(res_latmfsteffi1[2,4], 0.008304169, tolerance=1e-5)
  expect_equal(res_latmfsteffi1[3,5], 2.198004, tolerance=1e-5)
  ## Or, if the method factor should not act as a covariate (only in the
  ## measurement model):
  m1 <- effectLite(y="eta2", x="x", z="eta1", control="0",
                   measurement=mm, data=example02lv)
  res_latmfsteffi2 <- rbind(m1@results@Egx,
                            m1@results@Egxgx,
                            m1@results@Egxgk,
                            m1@results@Egxgxk)
  expect_equal(res_latmfsteffi2[2,2], 0.11576991, tolerance=1e-5)
  expect_equal(res_latmfsteffi2[2,3], 11.69961, tolerance=1e-5)
  expect_equal(res_latmfsteffi2[3,5], 2.078501, tolerance=1e-5)
})

# Latent covariate combined with a categorical covariate K (six cells,
# hence six-element equality constraints on the indicator intercepts).
test_that("effectLite works with latent variable and K",{
  ############ Example 02 with latent variable and K ##################
  mmtest <- '
  eta2 =~ 1*CPM12 + 1*CPM22
  eta1 =~ 1*CPM11 + 1*CPM21
  CPM11 + CPM12 ~ 0*1
  CPM21 ~ c(m,m,m,m,m,m)*1
  CPM22 ~ c(p,p,p,p,p,p)*1
  '
  m1 <- effectLite(y="eta2", x="x", k="k", z=c("eta1"), control="0",
                   measurement=mmtest, data=example02lv, fixed.cell=FALSE,
                   missing="fiml", syntax.only=FALSE)
  res_latzandk <- rbind(m1@results@Egx,
                        m1@results@Egxgx,
                        m1@results@Egxgk,
                        m1@results@Egxgxk)
  expect_equal(res_latzandk[3,3], 23.01217, tolerance=1e-5)
  expect_equal(res_latzandk[5,3], 13.67339, tolerance=1e-5)
  expect_equal(res_latzandk[7,5], 0.9992641, tolerance=1e-5)
})

# Latent covariate with a manifest outcome, and mixing a latent with a
# manifest covariate.
test_that("effectLite works with latent z and manifest y or z2",{
  ######### Example with latent z and manifest y #############
  mmtest <- '
  eta1 =~ 1*CPM11 + 1*CPM21
  CPM11~ 0*1
  CPM21 ~ c(m,m)*1
  '
  m1 <- effectLite(y="CPM22", x="x", z=c("eta1"), control="0",
                   measurement=mmtest, data=example02lv, fixed.cell=FALSE,
                   missing="fiml", syntax.only=FALSE)
  res_lzmy <- rbind(m1@results@Egx,
                    m1@results@Egxgx,
                    m1@results@Egxgk,
                    m1@results@Egxgxk)
  expect_equal(res_lzmy[1,1], 1.805862, tolerance=1e-5)
  expect_equal(res_lzmy[2,2], 0.1289066, tolerance=1e-5)
  expect_equal(res_lzmy[3,3], 20.93596, tolerance=1e-5)
  ######### Example with latent z and manifest z #############
  # NOTE(review): rnorm() is called without a set.seed() here, and this
  # second model has no expectations at all -- it is only a smoke test
  # that the call completes without error.
  d <- example02lv
  d$maniz <- rnorm(nrow(d))
  mmtest <- '
  eta1 =~ 1*CPM11 + 1*CPM21
  CPM11~ 0*1
  CPM21 ~ c(m,m)*1
  '
  m1 <- effectLite(y="CPM22", x="x", z=c("eta1","maniz"), control="0",
                   measurement=mmtest, data=d, fixed.cell=TRUE,
                   missing="fiml", syntax.only=FALSE)
})
|
b264c10f18dea1b1975a7f27b382876c032a9b5a
|
7f4687fc685e45172a297fd2beadf142894c093b
|
/bottleneck_results_plot.R
|
afb107b93a079a1fd01f79ff6be2583dd4b54b94
|
[] |
no_license
|
mastoffel/seal_bottleneck
|
09a9289c3b96369ea7b3a9028236b8bf1b87cccb
|
e1123b1eef96875c6c95374a3b58bb96dc78018d
|
refs/heads/master
| 2021-01-10T07:15:49.530268
| 2016-04-20T07:20:12
| 2016-04-20T07:20:12
| 44,677,195
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,690
|
r
|
bottleneck_results_plot.R
|
# Plot BOTTLENECK results: read the per-population heterozygosity
# excess/deficiency statistics from Excel, reshape them, and draw a
# heat map of the sign-test p-values per population and mutation model.
library(data.table) # fast fread()
library(dplyr) # data.frame verbs
library(purrr) # list/vector mapping helpers
library(tidyr) # data.frame reshaping
library(lubridate) # date manipulation
library(ggplot2) # plotting
library(scales) # axis/label formatting for ggplot2
library(gridExtra) # arranging individual ggplot objects
library(ggthemes) # clean themes for ggplot2 (theme_tufte below)
library(viridis) # perceptually uniform colour palette
library(knitr) # kable: prettier data.frame output
library(stringr)
# load data with original population names ---------------------------------------------------------
library(readxl)
# Read both result sheets (standard and HW) and combine them.
bottleneck_out <- read_excel("data/out_bottleneck_stats.xls")
bottleneck_out_hw <- read_excel("data/out_bottleneck_stats_HW.xls")
bottleneck_out <- rbind(bottleneck_out, bottleneck_out_hw)
names(bottleneck_out)[1] <- "id"
# Strip the "_genepop..." suffix to get pure population names.
bottleneck_out$id <- sapply(strsplit(bottleneck_out$id, "_genepop"), `[[`, 1)
# Convert everything except the id and "Def.Exc" columns to numeric.
charcols <- str_detect(names(bottleneck_out), "Def.Exc") | str_detect(names(bottleneck_out), "id")
bottleneck_out[!charcols] <- lapply(bottleneck_out[!charcols], as.numeric)
# Split the "Def.Exc" columns ("<n_def>vs<n_exc>") into two numeric
# columns each: loci in heterozygosity deficiency vs. excess.
str(bottleneck_out)
exc_cols <- str_detect(names(bottleneck_out), "Def.Exc")
# Split one "AvsB" character column into a two-column numeric data frame.
# NOTE(review): the function relies on the implicit (invisible) return of
# the final assignment; an explicit `out` on the last line would be clearer.
split_up <- function(x){
  df <- apply(data.frame(str_split_fixed(x, "vs", 2)), 2, as.numeric)
  out <- as.data.frame(df)
}
sep_cols <- do.call(cbind, lapply(bottleneck_out[exc_cols], split_up))
names(sep_cols) <- str_replace(names(sep_cols), "X1", "het_def")
names(sep_cols) <- str_replace(names(sep_cols), "X2", "het_exc")
names(sep_cols) <- str_replace(names(sep_cols), "Def.Exc.", "")
# Recombine: original numeric columns plus the new def/exc counts.
bottleneck <- cbind(bottleneck_out[!exc_cols], sep_cols)
# Tag each row by dataset type based on the id suffix: per-population
# ("_pop"), per-cluster ("_cl") or the full data set.
bottleneck$dataset[str_detect(bottleneck$id, "_pop")] <- "pop"
bottleneck$dataset[str_detect(bottleneck$id, "_cl")] <- "cl"
bottleneck$dataset[is.na(bottleneck$dataset)] <- "full"
# NOTE(review): this ratio pipeline is DEAD CODE -- `bottle_tests` is
# overwritten a few lines below before ever being used.  Either remove
# it or store the ratios under a different name.
bottle_tests <- bottleneck %>%
  mutate(IAM_het_exc_ratio = IAM_Heq / IAM_het_def) %>%
  mutate(TPM70_het_exc_ratio = TPM70_Heq / TPM70_het_def) %>%
  mutate(TPM95_het_exc_ratio = TPM95_Heq / TPM95_het_def) %>%
  mutate(SMM_het_exc_ratio = SMM_Heq / SMM_het_def)
# Extract the sign-test p-value columns for all mutation models.
sign_tests <- str_detect(names(bottleneck_out), "Sign")
bottle_tests <- bottleneck_out[, sign_tests]
bottle_tests$id <- bottleneck_out$id
# NOTE(review): ggplot2 and dplyr are already attached above; reshape2
# is only needed here for melt().
library(reshape2)
library(ggplot2)
library(dplyr)
# Wide -> long: one row per (population, model) p-value.
bot <- melt(bottle_tests, id.vars = "id")
bot$value <- as.numeric(bot$value)
# bot$dataset <- NA
# str_detect(bot$id, "_pop")
# bot$dataset[str_detect(bot$id, "_pop")] <- "pop"
# bot$dataset[str_detect(bot$id, "_cl")] <- "cl"
# bot$dataset[is.na(bot$dataset)] <- "full"
# bot$dataset <- as.factor(bot$dataset)
# Heat map: populations (rows) x sign tests (columns), filled by p-value.
ggplot(bot, aes(x= variable, y = id, fill = value)) +
  #facet_grid(.~dataset) +
  geom_tile(color = "white", size = 0.1) +
  scale_fill_viridis(name = "p-value", label = comma) +
  theme_tufte(base_family="Helvetica") +
  #coord_equal() +
  theme(plot.title=element_text(hjust=0),
        axis.ticks=element_blank(),
        axis.text=element_text(size=10),
        legend.title=element_text(size=10),
        legend.text=element_text(size=9))
# scale_fill_gradientn(colors = breaks=c(0.01, 0.05, 0.1, 1))
#coord_equal()
|
f4dbe640d188826661c853a74481f32d109033bc
|
7478ade376ddbc1374c675caabf514c547387bc2
|
/V1/global.R
|
3e9c253fba679c782ab2aa0bda2d93ffc4430625
|
[] |
no_license
|
XPF100/Shiny-Mock-ups
|
259fda76b203191cd5fad54db04ffe8a89400f47
|
b6cb7a71cb7cf9872324a6000d23edd134a594b9
|
refs/heads/master
| 2021-01-16T18:20:08.802486
| 2017-01-14T20:30:10
| 2017-01-14T20:30:10
| 78,965,107
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,103
|
r
|
global.R
|
# global.R: loaded once per Shiny session; prepares the cleaned data set
# (df), its typed sub-tables (numbers/categories/dates), pre-rendered
# sjPlot summary tables, and the data dictionary (dd).

# Load required libraries
library(shiny)
library(plyr)
library(dplyr)      # BUG FIX: select()/one_of() used below are dplyr
                    # functions; dplyr was never loaded (load after plyr)
library(ggplot2)
library(googleVis)
library(reshape2)
library(sjPlot)     # library() rather than require(): fail loudly if missing
library(tidyr)
library(lubridate)  # BUG FIX: is.Date() used below comes from lubridate

# Get and clean data (defines getData() and cleanData())
source("FullDataSet.R")

# Build the cleaned data set once per session.
# BUG FIX: the original tested !is.data.frame(df), which errors with
# "object 'df' not found" on the first run; guard with exists() first.
# (It also called class(categories$race) before `categories` existed.)
if (!exists("df") || !is.data.frame(df)) {
  df <- getData()
  df <- cleanData(df)

  # Drop columns that are entirely NA.
  df <- df[, colSums(is.na(df)) < nrow(df)]

  # Split columns by type: numeric columns vs. categorical list-columns.
  nums <- sapply(df, is.numeric)
  numbers <- df[, nums]
  cats <- sapply(df, is.list)
  categories <- df[, cats]
  cat_names <- names(categories)
  # List-columns -> atomic vectors -> factors.
  categories[cat_names] <- lapply(categories[cat_names], unlist)
  categories[cat_names] <- lapply(categories[cat_names], as.factor)

  # Date columns (lubridate::is.Date).
  dat <- sapply(df, is.Date)
  dates <- df[, dat]

  # Pre-rendered summary tables for the UI.
  cate <- sjt.frq(categories)
  numeric <- sjt.df(numbers)
}

# Load the data dictionary once per session (same exists() guard fix).
if (!exists("dd") || !is.data.frame(dd)) {
  dd <- read.csv("dictionary.csv")
  # Drop columns that are entirely NA.
  dd <- dd[, colSums(is.na(dd)) < nrow(dd)]
  # Keep only the dictionary columns the app uses.
  dd <-
    select(dd, one_of(
      c(
        "Variable...Field.Name",
        "Form.Name",
        "Field.Type",
        "Field.Label",
        "Choices..Calculations..OR.Slider.Labels"
      )
    ))
}
|
456513054d24492c62803a58078f6606990cdbb4
|
89146c512bf32ed5afab564357e3f0f20c21171d
|
/tests/testthat/test_conversions.R
|
ffb00a32f5ce4df3e53c4b1104b5cc3a0196504b
|
[] |
no_license
|
romainfrancois/egor
|
ff2c1d71b5cd389a42d5a8fa2026827df12f05f5
|
265a4e3635abd650affb600922d349f01ac7e72d
|
refs/heads/master
| 2021-11-30T07:02:30.860678
| 2019-10-07T06:49:18
| 2019-10-07T06:49:18
| 215,741,275
| 0
| 0
| null | 2019-10-17T08:27:34
| 2019-10-17T08:27:28
| null |
UTF-8
|
R
| false
| false
| 2,792
|
r
|
test_conversions.R
|
context("test_conversions.R")
test_that("as_tibble and other conversions work",
{
expect_error({
e <- make_egor(3, 22)
as_network(e)
as_network(x = e, include.ego = TRUE)
as_network(x = e,
ego.attrs = "sex",
include.ego = TRUE)
as_tibble(e)
activate(e, "aatie") %>%
as_tibble(include.ego.vars = TRUE)
activate(e, "alter") %>%
as_tibble(include.ego.vars = TRUE)
activate(e, "aatie") %>%
as_tibble(include.alter.vars = TRUE)
x <- activate(e, "aatie")
as_alters_df(e, include.ego.vars = TRUE)
as_aaties_df(e)
}, NA)
})
test_that("as_igraph.nested_egor works",
{
expect_error({
e <- make_egor(3, 22)
en <- as_nested_egor(e)
as_igraph(en,
include.ego = T,
ego.attrs = c("sex", "age"))
}, NA)
})
test_that("as_network works.",
{
e <- make_egor(3, 22)
expect_error(network::as.network(e), NA, label = "default arguments")
e$alter <- e$alter %>%
mutate(weight = sample((1:3) / 3, nrow(.), replace = TRUE))
expect_error(
as_network(
x = e,
include.ego = TRUE,
ego.attrs = c("sex", "age"),
ego.alter.weights = "weight"
),
NA,
label = "include.ego/ego.attrs/ego.alter.weights"
)
})
test_that("as_igraph works.",
{
e <- make_egor(3, 22)
expect_error(as_igraph(e), NA, label = "default arguments")
e$alter <- e$alter %>%
mutate(weight = sample((1:3) / 3, nrow(.), replace = TRUE))
expect_error(as_igraph(
e,
include.ego = T,
ego.attrs = c("sex", "age"),
ego.alter.weights = "weight"
),
NA,
label = "include.ego/ego.attrs/ego.alter.weights")
})
test_that("as_alters_df works.",
{
e <- make_egor(3, 22)
expect_error(as_alters_df(e), NA)
expect_error(as_alters_df(e, include.ego.vars = T), NA)
})
test_that("as_aaties_df works.",
{
e <- make_egor(3, 22)
expect_error(as_aaties_df(e), NA)
expect_error(as_aaties_df(object = e, include.alt.vars = T), NA)
})
|
e64cd7ff8fc732e0dd0cbb3c4205458374664811
|
362c0be541b1483782e4dc82f933781c90781b0f
|
/tools/config/cleanup.R
|
f4c330307c122ca57aded0aebbcdea0fec89f435
|
[] |
no_license
|
cran/cuml
|
dfe54bb0bc98b0b01817910d59e6c5280a96101f
|
d96fa0cf44757093fa6236aa2e637d9a7899ad0e
|
refs/heads/master
| 2023-08-14T09:16:27.751080
| 2021-09-20T17:50:14
| 2021-09-20T17:50:14
| 407,392,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
cleanup.R
|
# Remove build artifacts from src/ after configuration/compilation.
# expand = TRUE lets unlink() expand the "*.o"/"*.so" wildcards.
artifacts <- c("Makevars", "Makefile", "CMakeCache.txt", "CMakeFiles",
               "cmake_install.cmake", "CMakeLists.txt", "*.o", "*.so")
for (artifact in artifacts) {
  unlink(file.path("src", artifact), recursive = TRUE, expand = TRUE)
}
|
bf49d59dbb22dff91c193d7b0436afd735d75f14
|
425b09b9615e0824edd60b9655e3095046295319
|
/Code/sim_bic.R
|
cd5ecfb7a0b165968fa9882fecbfaf43c6bad6df
|
[] |
no_license
|
yingljin/cost_sparsity
|
092a37ef8a18cb53c9d084b12788909a4aeda778
|
2c2abde4e443602a6b3d5842793776ac23384511
|
refs/heads/master
| 2023-01-28T08:15:42.318610
| 2020-12-07T21:15:44
| 2020-12-07T21:15:44
| 319,367,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,424
|
r
|
sim_bic.R
|
# This script generates one simulated data set and compares four penalized
# regressions (lasso, group lasso, two cost-weighted group lassos), selecting
# each model's lambda by BIC and reporting prediction error and total cost.
##### generate data #####
# True coefficients: groups 1 and 3 active, all other groups zero.
true_beta <- c(1, 2, 3, 4, 5,
               0, 0, 0, 0, 0,
               1, 2, 3, 4, 5,
               rep(0, 35))
# group index: 10 groups of 5 covariates each
grp <- rep(c(1:10), each = 5)
table(grp)
# all data
# genDataGrp() is presumably a project helper (not defined here) -- TODO
# confirm where it is sourced from.
x_grp_all <- genDataGrp(n = 400, J = 10, K = 5, beta = true_beta,
                        family = "gaussian", SNR = 1,
                        rho = 0.5, rho.g = 0.5)
table(x_grp_all$group)
x_grp_all$beta
cor(x_grp_all$X)
# training set (first 200 rows)
x_grp <- list(X = x_grp_all$X[1:200,], y = x_grp_all$y[1:200])
# testing set (last 200 rows)
x_grp_test <- list(X = x_grp_all$X[201:400, ], y = x_grp_all$y[201:400])
# generate group-wise cost (mean 15, sd drawn once from U(0.25, 5));
# w_grp normalizes the costs to sum to 1
c_grp <- rnorm(10, 15, runif(1, 0.25, 5))
w_grp <- c_grp/sum(c_grp)
sum(w_grp)
##### Lasso #####
# penalize each covariate equally
lasso.fit <- ncvreg(x_grp$X, x_grp$y, penalty = "lasso")
# select best model by BIC
l1 <- lasso.fit$lambda[which.min(BIC(lasso.fit))]
# prediction error (test-set MSE)
pred1 <- predict(lasso.fit, X = x_grp_test$X, lambda = l1)
pe1 <- mean((pred1 - x_grp_test$y)^2)
# cost: sum of costs of the groups containing any selected covariate
cpp1 <- c_grp[unique(grp[coef(lasso.fit, lambda = l1)[-1] != 0])]
cpp1 <- sum(cpp1)
##### Group lasso #####
# penalize each group equally
grp.lasso.fit <- grpreg(x_grp$X, x_grp$y, group = grp, penalty = "grLasso",
                        family = "gaussian")
# select best model by BIC
l2 <- grp.lasso.fit$lambda[which.min(BIC(grp.lasso.fit))]
# prediction error
pred2 <- predict(grp.lasso.fit, X = x_grp_test$X, lambda = l2)
pe2 <- mean((pred2 - x_grp_test$y)^2)
# cost of the selected groups
grp2 <- as.numeric(predict(grp.lasso.fit, type = "groups", lambda = l2))
cpp2 <- sum(c_grp[grp2])
##### Group lasso with cost #####
# group penalty proportional to each group's share of total cost
cgrp.lasso.fit <- grpreg(x_grp$X, x_grp$y, group = grp, penalty = "grLasso",
                         family = "gaussian", group.multiplier = w_grp)
# select best model by BIC
l3 <- cgrp.lasso.fit$lambda[which.min(BIC(cgrp.lasso.fit))]
# prediction error
pred3 <- predict(cgrp.lasso.fit, X = x_grp_test$X, lambda = l3)
pe3 <- mean((pred3 - x_grp_test$y)^2)
# cost of the selected groups
grp3 <- as.numeric(predict(cgrp.lasso.fit, type = "groups", lambda = l3))
cpp3 <- sum(c_grp[grp3])
##### Group lasso with cost tuned by gamma, no scaling #####
# gamma exponents applied to the raw (unnormalized) costs
rs <- c(0, .25, .5, .75, 1)
# fit one model per gamma; keep each fit's best lambda and its BIC
cgrp_fits <- list()
cgrp_ls <- numeric(length(rs))
cgrp_bics <- numeric(length(rs))
for(i in 1:length(rs)) {
  cgrp_fits[[i]] <- grpreg(x_grp$X, x_grp$y, group = grp, penalty = "grLasso",
                           family = "gaussian", group.multiplier = c_grp^rs[i])
  cgrp_ls[[i]] <- cgrp_fits[[i]]$lambda[which.min(BIC(cgrp_fits[[i]]))]
  cgrp_bics[i] <- min(BIC(cgrp_fits[[i]]))
}
# Pick best model across gammas by BIC
best_cgrp <- cgrp_fits[[which.min(cgrp_bics)]]
best_l <- cgrp_ls[which.min(cgrp_bics)]
## prediction error
pred4 <- predict(best_cgrp, X = x_grp_test$X, lambda = best_l)
pe4 <- mean((pred4 - x_grp_test$y)^2)
## cost of the selected groups
grp4 <- as.numeric(predict(best_cgrp, type = "groups", lambda = best_l))
cpp4 <- sum(c_grp[grp4])
##### Results #####
# Test-set MSE and total group cost per method.
pred_error <- c("lasso" = pe1, "grp_lasso" = pe2, "cost_grep_lasso" = pe3, "wcost_grp_lasso" = pe4)
cpp <- c("lasso" = cpp1, "grp_lasso" = cpp2, "cost_grep_lasso" = cpp3, "wcost_grp_lasso" = cpp4)
|
b66a243a1077b6b975af8108eecabb21322320a0
|
9132996d08213cdf27c8f6d444e3f5b2cfdcfc85
|
/tests/testthat/test_all_binary.R
|
47f604f2ce8cca4458c64ca690c69dc1ea7c417f
|
[] |
no_license
|
prioritizr/prioritizr
|
152013e81c1ae4af60d6e326e2e849fb066d80ba
|
e9212a5fdfc90895a3638a12960e9ef8fba58cab
|
refs/heads/main
| 2023-08-08T19:17:55.037205
| 2023-08-08T01:42:42
| 2023-08-08T01:42:42
| 80,953,648
| 119
| 30
| null | 2023-08-22T01:51:19
| 2017-02-04T22:45:17
|
R
|
UTF-8
|
R
| false
| false
| 3,458
|
r
|
test_all_binary.R
|
# all_binary(): every value must be 0/1 (NA allowed); unsupported input
# types raise a "not recognized" error.
test_that("x = default", {
  expect_tidy_error(all_binary("a"), "recognized")
})
# Plain numeric/integer vectors: NA tolerated, any non-0/1 value fails,
# and assert() turns the FALSE into an error mentioning "binary".
test_that("x = numeric", {
  expect_true(all_binary(c(0, 1)))
  expect_true(all_binary(c(0L, 1L)))
  expect_true(all_binary(c(0L, NA, 1L)))
  expect_true(all_binary(c(0, NA, 1)))
  expect_false(all_binary(c(-1, 0, 1)))
  expect_error(assert(all_binary(c(-1, 0, 1))), "binary")
})
# Matrix-package matrices behave the same as numeric vectors.
test_that("x = Matrix", {
  expect_true(all_binary(Matrix::Matrix(c(0, 1))))
  expect_true(all_binary(Matrix::Matrix(c(0L, 1L))))
  expect_true(all_binary(Matrix::Matrix(c(0L, NA, 1L))))
  expect_true(all_binary(Matrix::Matrix(c(0, NA, 1))))
  expect_false(all_binary(Matrix::Matrix(c(-1, 0, 1))))
  expect_error(
    assert(all_binary(Matrix::Matrix(c(-1, 0, 1)))),
    "binary"
  )
})
# Base matrices.
test_that("x = matrix", {
  expect_true(all_binary(matrix(c(0, 1))))
  expect_true(all_binary(matrix(c(0L, 1L))))
  expect_true(all_binary(matrix(c(0L, NA, 1L))))
  expect_true(all_binary(matrix(c(0, NA, 1))))
  expect_false(all_binary(matrix(c(-1, 0, 1))))
  expect_error(
    assert(all_binary(matrix(c(-1, 0, 1)))),
    "binary"
  )
})
# Data frames: a single offending value in any column fails the whole frame.
test_that("x = data.frame", {
  expect_true(all_binary(data.frame(x = c(0, 1), y = c(0L, 1L))))
  expect_true(all_binary(data.frame(x = c(0, 1, NA), y = c(0L, NA, 1L))))
  expect_false(all_binary(data.frame(x = c(0, 1, -1), y = c(0L, NA, 1L))))
  expect_error(
    assert(all_binary(data.frame(x = c(0, 1, -1)))),
    "binary"
  )
})
# sf objects: x is all-binary, y adds an NA (still binary),
# z copies y and injects a -1 so it must fail.
test_that("x = sf", {
  # create data
  g <- sf::st_sfc(list(sf::st_point(c(1, 0)), sf::st_point(c(0, 1))))
  x <- sf::st_as_sf(tibble::tibble(x = c(0, 1), y = c(0L, 1L), geom = g))
  g <- sf::st_sfc(
    list(sf::st_point(c(1, 0)), sf::st_point(c(0, 1)), sf::st_point(c(0, 1)))
  )
  y <- sf::st_as_sf(
    tibble::tibble(x = c(0, 1, NA), y = c(0L, 1L, NA), geom = g)
  )
  z <- y
  z$x[2] <- -1
  # tests
  expect_true(all_binary(x))
  expect_true(all_binary(y))
  expect_false(all_binary(z))
  expect_error(assert(all_binary(z)), "binary")
})
# Same fixtures as the sf test, converted to sp "Spatial" objects with
# sf::as_Spatial(); results must match the sf behavior.
test_that("x = Spatial", {
  # create data
  g <- sf::st_sfc(list(sf::st_point(c(1, 0)), sf::st_point(c(0, 1))))
  x <- sf::st_as_sf(tibble::tibble(x = c(0, 1), y = c(0L, 1L), geom = g))
  g <- sf::st_sfc(
    list(sf::st_point(c(1, 0)), sf::st_point(c(0, 1)), sf::st_point(c(0, 1)))
  )
  y <- sf::st_as_sf(
    tibble::tibble(x = c(0, 1, NA), y = c(0L, 1L, NA), geom = g)
  )
  z <- y
  z$x[2] <- -1
  # tests
  expect_true(all_binary(sf::as_Spatial(x)))
  expect_true(all_binary(sf::as_Spatial(y)))
  expect_false(all_binary(sf::as_Spatial(z)))
  expect_error(
    assert(all_binary(sf::as_Spatial(z))),
    "binary"
  )
})
# terra SpatRaster layers: cell values must be 0/1 (NA cells allowed).
test_that("x = SpatRaster", {
  expect_true(all_binary(terra::rast(matrix(c(0, 1)))))
  expect_true(all_binary(terra::rast(matrix(c(0L, 1L)))))
  expect_true(all_binary(terra::rast(matrix(c(0L, NA, 1L)))))
  expect_true(all_binary(terra::rast(matrix(c(0, NA, 1)))))
  expect_false(all_binary(terra::rast(matrix(c(-1, 0, 1)))))
  expect_error(
    assert(all_binary(terra::rast(matrix(c(-1, 0, 1))))),
    "binary"
  )
})
# Legacy raster package layers: same expectations as SpatRaster.
test_that("x = Raster", {
  expect_true(all_binary(raster::raster(matrix(c(0, 1)))))
  expect_true(all_binary(raster::raster(matrix(c(0L, 1L)))))
  expect_true(all_binary(raster::raster(matrix(c(0L, NA, 1L)))))
  expect_true(all_binary(raster::raster(matrix(c(0, NA, 1)))))
  expect_false(all_binary(raster::raster(matrix(c(-1, 0, 1)))))
  expect_error(
    assert(all_binary(raster::raster(matrix(c(-1, 0, 1))))),
    "binary"
  )
})
|
f8c703a81544f6cbc28a5b30938d139b690b7e19
|
fdcace641b533557575af43e2e8d536c64d0cda7
|
/munging/Geocoding.R
|
f1628e9170b094af4b69e028d6ec09ddac7302f4
|
[] |
no_license
|
kent37/CLFirst
|
f32f9163a799ada9cd8e755c6f25e8cf5f6a2330
|
0f5170130e1bb735958807610d7e9f2b8a2bf350
|
refs/heads/master
| 2022-05-21T08:17:46.267788
| 2020-05-02T23:51:24
| 2020-05-02T23:51:24
| 259,715,973
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,387
|
r
|
Geocoding.R
|
# Geocode Businesses_Open_Closed
# Pipeline: export addresses for geocoding, read results back, hand-correct
# low-accuracy hits, then spatially join business district / neighborhood /
# census tract onto each business.
library(tidyverse)
library(mapview)
library(readxl)
library(sf)
source_path = here::here('data/Businesses Open_Closed in Cambridge.xlsx')
df = read_xlsx(source_path)
# Create a data set for Texas A&M geocoder
# (NOTE(review): results below are read from a geocodio file -- confirm which
# geocoder is actually in use.)
# One row per unique usable address; id ties results back to the source rows.
to_geocode = df %>%
  mutate(id=1:nrow(df)) %>%
  select(id, `Business Address`) %>%
  filter(!duplicated(`Business Address`),
         !is.na(`Business Address`),
         !str_detect(`Business Address`, 'location')) %>%
  # Normalize "Mass Ave" variants, then fix known typos before geocoding.
  mutate(Address = str_replace(`Business Address`,
                               'Mass ?\\.? Ave\\.?', 'Massachusetts Ave')) %>%
  mutate(Address = str_replace_all(Address, # fixup for typos
                                   c('Anve'='Ave', 'Stt'='St',
                                     'Masschusetts'='Massachusetts', 'Massaschusetts'='Massachusetts',
                                     'My\\. Auburn'='Mount Auburn', 'Avve'='Ave',
                                     'Blachard'='Blanchard', 'Brooke'='Brook'))) %>%
  mutate(City='Cambridge', State='MA', Zip='')
write_csv(to_geocode, path=here::here('munging/to_geocode.csv'))
# Read geocoder output back in (geocoding itself happens outside this script).
from_geocode <- read_csv(here::here('munging/from_geocode_geocodio.csv')) %>%
  select("id", "Business Address", "Address",
         "Latitude", "Longitude", "Accuracy Score", "Accuracy Type",
         Zip="Zip_1", "Source")
from_sf = from_geocode %>% st_as_sf(coords=c('Longitude', 'Latitude'), crs=4326)
# These are bogus. Write a file to edit with corrections
from_geocode %>%
  filter(`Accuracy Score`<=0.6) %>%
  select(1:5) %>%
  write_csv(here::here('munging/geocode_corrections.csv'))
# Hand-edited corrections are read back and eyeballed on a map.
corr = read_csv(here::here('munging/geocode_corrections.csv'))
corr %>%
  st_as_sf(coords=c('Longitude', 'Latitude'), crs=4326) %>%
  mapview()
# Now merge this mess
# Overwrite low-accuracy coordinates with the corrected ones; relies on the
# rows of `corr` being in the same order as the filtered rows above.
from_geocode[from_geocode$`Accuracy Score`<=0.6, 'Latitude'] = corr$Latitude
from_geocode[from_geocode$`Accuracy Score`<=0.6, 'Longitude'] = corr$Longitude
from_geocode %>%
  st_as_sf(coords=c('Longitude', 'Latitude'), crs=4326) %>%
  mapview()
# Attach coordinates to the full business table.
geocoded = df %>%
  left_join(from_geocode %>% select(-id))
mapview(geocoded %>%
          filter(!is.na(Latitude)) %>%
          st_as_sf(coords=c('Longitude', 'Latitude'), crs=4326),
        zcol='Business Name', legend=FALSE)
# NOTE(review): this re-reads `geocoded` from disk, discarding the join built
# just above -- presumably the script is run in stages; confirm before running
# top to bottom.
geocoded = read_csv(here::here('data/business_open_closed.csv'))
# Find business district, etc for the geocoded addresses
bus_dist = st_read(here::here('COD/ASSESSING_CommercialDistrictsFY2020.gdb.zip'),
                   stringsAsFactors=FALSE)
# st_as_sf fails with NA values so just work with the valid geocodes
# Project to EPSG:2249 before the spatial joins.
to_locate = geocoded %>%
  filter(!is.na(Latitude)) %>%
  st_as_sf(coords=c('Longitude', 'Latitude'), crs=4326) %>%
  st_transform(crs=2249)
by_dist = to_locate %>%
  st_join(bus_dist %>% select(DIST_NAME, DISTRICT))
by_dist %>%
  mapview(zcol='DIST_NAME')
# Neighborhood
# NOTE(review): absolute local path -- breaks on any other machine.
nbhd = st_read('/Users/kent/Dev/FPRA/Cambridge Open Data/Shapefiles/BOUNDARY_CDDNeighborhoods.shp/BOUNDARY_CDDNeighborhoods.shp', stringsAsFactors=FALSE) %>%
  select(N_HOOD, NAME)
by_dist = by_dist %>% st_join(nbhd)
by_dist %>%
  mapview(zcol='NAME')
# Census tract for each business.
census = st_read(here::here('COD/DEMOGRAPHICS_Tracts2010.gdb.zip'),
                 stringsAsFactors=FALSE) %>%
  select(GEOID10)
by_dist = by_dist %>% st_join(census)
mapview(by_dist, zcol='GEOID10') + mapview(census)
# Join with the full geocoded data
geocoded = geocoded %>% left_join(by_dist %>% st_drop_geometry())
write_csv(geocoded, here::here('data/business_open_closed.csv'))
|
4cf1f8716ea16c79308544f791c4effd2ef1e75c
|
7271ca2c97b0ac1a2a4332e0b5d3d33998188982
|
/DEseq2_replicates.R
|
69b1f43c23821e49a45c9dad728e59eab4d52eb3
|
[] |
no_license
|
tmlx/Analyses_Pipelines
|
af5b2cec4683e875c7a17a17d777b9ac204eea94
|
3333eb6e49393e4cd3ace503b173cd527b2f8162
|
refs/heads/main
| 2023-06-17T20:03:48.042657
| 2021-07-07T17:39:24
| 2021-07-07T17:39:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,229
|
r
|
DEseq2_replicates.R
|
# DESeq2 differential-expression pipeline. CLI arguments:
#   args[1] = raw count table (first column = gene id)
#   args[2] = sample sheet (must contain a `Replicate` column)
#   args[3] = contrast table (used further down)
# Writes tables under data/ and plots under figures/.
set.seed("1234")
library(DESeq2)
library(dplyr)
#library(vidger)
library("BiocParallel")
args = commandArgs(trailingOnly=TRUE)
register(MulticoreParam(4))
# Count matrix: duplicated gene ids are collapsed by taking the max count.
countdata = read.table(args[1], header = T, row.names = NULL, check.names = F)
colnames(countdata)[1] = "Geneid";
countdata = aggregate(.~Geneid, countdata, max)
row.names(countdata) = countdata[,1]; countdata = countdata[,-1]
sampleInfo <- read.table(args[2], header = T,sep = "\t", row.names = 1)
# Reorder count columns to match the sample sheet; drop genes with < 10 total.
countdata = countdata[,as.character(as.matrix(row.names(sampleInfo)))]
countdata = countdata[rowSums(countdata) >= 10,]
countdata = data.matrix(countdata)
ddsMat <- DESeqDataSetFromMatrix(countData = countdata, colData = sampleInfo, design = ~Replicate)
ddsMat<- estimateSizeFactors(ddsMat)
# Regularized-log transform, used for the QC exports/plots below.
rld <- rlog(ddsMat, blind = FALSE)
system("mkdir -p data")
write.table(assay(rld), file = "data/readCountRlogNorm.xls", sep = "\t",col.names = NA, quote=FALSE)
diffExp <- DESeq(ddsMat)
system("mkdir -p figures")
pdf(file = "figures/Dispersion_plots.pdf", height = 5, width = 5.5)
plotDispEsts(diffExp)
dev.off()
#####Extra information#####
## counts per sample
sink("data/size_factors.txt")
# NOTE(review): the assignment below is invisible under sink(), so only the
# sizeFactors() output lands in the file -- confirm total_counts is wanted.
total_counts = apply(assay(diffExp), 2, sum)
sizeFactors(diffExp)
sink()
###############Samples-Relationships##########
library(RColorBrewer)
library(gplots)
library(circlize)
library(ComplexHeatmap)
library(Hmisc)
#distance matrix
# Euclidean distances between samples in rlog space.
col4 <- colorRampPalette(c("darkblue","darkgreen", "yellow", "darkviolet", "darkmagenta"))
sampleDists <- as.matrix(dist( t(assay(rld)) ))
# NOTE(review): despite its name, this file stores the distance matrix, not
# rlog values (those went to data/readCountRlogNorm.xls) -- misnamed?
write.table(sampleDists,file = "data/rlog_Normalized_Values.txt",sep = "\t", quote=FALSE)
ht = Heatmap(sampleDists, col = col4(100), heatmap_legend_param = list(title = NULL))
pdf(file = "figures/SampleDistanceRlog.pdf", height = 5, width = 5.5)
draw(ht,heatmap_legend_side = "left")
dev.off()
#cor matrix
ht = Heatmap(cor(assay(rld)), col = col4(100), heatmap_legend_param = list(title = NULL))
pdf(file = "figures/SampleCorrelationRlog.pdf", height = 5, width = 5.5)
draw(ht,heatmap_legend_side = "left")
dev.off()
#PCA
pdf("figures/PCA_var1_var2.pdf", width=7, height=6)
data <- plotPCA(rld, intgroup="Replicate", returnData=TRUE)
percentVar = round(100 * attr(data, "percentVar"))
# NOTE(review): a bare ggplot call auto-prints at top level under Rscript but
# not inside a function/sourced chunk -- confirm how this script is invoked.
ggplot(data, aes(PC1, PC2, color=Replicate, shape=name)) +
  geom_hline(aes(yintercept=0), colour="grey") +
  geom_vline(aes(xintercept=0), colour="grey") +
  geom_point(size=5)+
  xlab(paste0("PC1: ", percentVar[1], "% variance")) +
  ylab(paste0("PC2: ", percentVar[2], "% variance")) +
  theme_bw(base_size = 14) +
  ggtitle("PCA\n") + labs(color="Groups", shape="Sample Names")+
  scale_shape_manual(values=c(0:18,33:17))
dev.off()
################ Contrasts to be performed ##########
# Export normalized counts, plus a per-replicate mean matrix.
normReadCount = counts(diffExp, normalized = TRUE)
write.table(normReadCount, file = "data/readCountNorm.xls", sep = "\t",col.names = NA, quote=FALSE)
mCountdata = data.frame(factor(sampleInfo$Replicate),t(normReadCount), check.names = FALSE)
colnames(mCountdata)[1] = "Sample"
mCountdata = aggregate(.~Sample, mCountdata, mean)
row.names(mCountdata) = mCountdata[,1];
mCountdata = mCountdata[,-1];mCountdata = t(mCountdata)
ind = match(colnames(mCountdata),unique(sampleInfo$Replicate))
mCountdata = mCountdata[,ind]
write.table(mCountdata, file = "data/readCountMatrixMergedRepli.xls", sep = "\t",col.names = NA, quote=FALSE)
mCountData=mCountdata
# Contrast table: first half of columns = Contrast level, second = Reference.
comp = read.table(args[3], header = T, row.names = NULL)
# Human-readable "<Contrast>_vs_<Reference>" labels used in output file names.
outs<-within(comp, contrasts <- paste(comp$Contrast,comp$Reference,sep='_vs_'))[3]
outs
comp = data.frame("Replicate",comp[1:(length(comp)/2)],comp[((length(comp)/2)+1):length(comp)])
# One DESeq2 results object per contrast row (alpha = 0.05).
listDiffMat_fdr = apply(comp, 1, function(x){ results(diffExp, contrast = x, alpha=0.05)})
####### Results, Volcano and MA ##############
#sapply(1:length(listDiffMat_fdr),function(x)write.table(listDiffMat_fdr[[x]], file=sprintf("data/%s%d.All.xls", outs$contrasts[x], x), quote=FALSE, sep="\t"))
for (i in 1:length(comp$Reference)) {
#output_comps<- paste("data/",comp$Contrast[i],"_vs_",comp$Reference[i],".all.xls", sep="")
output_comps<- paste("data/",outs$contrasts[i],".all.xls", sep="")
# NOTE(review): `[i]` keeps a one-element list (as.data.frame may prefix the
# column names); `[[i]]` extracts the results object -- confirm the columns
# referenced below (log2FoldChange, pvalue, padj) actually resolve.
mat<- as.data.frame(listDiffMat_fdr[i])
# Classify genes: |log2FC| >= 1 and raw p < 0.05 -> Up/Down; |log2FC| >= 1
# with p >= 0.05 -> Non_significant; everything else stays No_change.
mat$diffexpressed <- "No_change"
mat$diffexpressed[mat$log2FoldChange >= 1 & mat$pvalue < 0.05] <- "Up"
mat$diffexpressed[mat$log2FoldChange <= -1 & mat$pvalue < 0.05] <- "Down"
mat$diffexpressed[mat$log2FoldChange >= 1 & mat$pvalue >= 0.05] <- "Non_significant"
mat$diffexpressed[mat$log2FoldChange <= -1 & mat$pvalue >= 0.05] <- "Non_significant"
write.table(file=output_comps, mat, sep="\t", quote=FALSE)
#output_filt<- paste("data/",comp$Contrast[i],"_vs_",comp$Reference[i],".DEGs.xls", sep="")
output_filt<- paste("data/",outs$contrasts[i],".DEGs.xls", sep="")
# NOTE(review): the DEG filter uses adjusted p (padj) while the labels above
# use raw pvalue -- confirm this mix is intentional.
filter_mat<- mat %>% filter(padj < 0.05) %>% filter(log2FoldChange >= 1 | log2FoldChange <= -1 )
write.table(file=output_filt, filter_mat, sep="\t", quote=FALSE)
mycolors <- c("blue", "red", "green", "black")
names(mycolors) <- c("Up", "Down", "Non_significant", "No_change")
#output_volcano<- paste("figures/", comp$Contrast[i],"_vs_",comp$Reference[i],".Volcano.pdf", sep="")
output_volcano<- paste("figures/", outs$contrasts[i],".Volcano.pdf", sep="")
pdf(file=output_volcano, width=7, height=6)
p<- ggplot(data=mat, aes(x=log2FoldChange, y=-log10(pvalue), col=diffexpressed)) + geom_point(size=0.5) + theme_minimal() + geom_vline(xintercept=c(-1, 1), col="red") + geom_hline(yintercept=-log10(0.05), col="red") + scale_colour_manual(values = mycolors)
print(p)
dev.off()
#output_maplot<- paste("figures/", comp$Contrast[i],"_vs_",comp$Reference[i],".MA.pdf", sep="")
output_maplot<- paste("figures/", outs$contrasts[i],".MA.pdf", sep="")
pdf(file=output_maplot, width=7, height=6)
p<- ggplot(data=mat, aes(x=log10(baseMean), y=log2FoldChange, col=diffexpressed)) + geom_point(size=0.5) + theme_minimal() + geom_hline(yintercept=c(-1,1), col="red") + scale_colour_manual(values = mycolors)
print(p)
dev.off()
}
####### Expression density plot #########
# Per-sample density of normalized counts (log10 x-axis, cutoff line at 10).
toplot = data.frame(counts(diffExp, normalized=T))
toplot = stack(toplot, select=colnames(toplot))
pdf("figures/Density_plot.sample_read_counts.pdf", width=7, height=6)
p = ggplot( toplot, aes(values, colour=ind, alpha=0.5))
# NOTE(review): bare `p + ...` auto-prints only at top level under Rscript --
# wrap in print() if this ever runs inside a function.
p + geom_line(aes(color=ind), stat="density", alpha=0.5) +
  scale_x_log10(name="\nnormalized counts", breaks=c(0.1,1,10,100,1000,10000,100000), limits=c(0.1,100000) ) +
  scale_y_continuous(name="density\n") +
  scale_colour_discrete(name="Samples") +
  geom_vline(xintercept=10, colour="grey", linetype = "dashed") +
  theme_minimal() +
  ggtitle("Density plot\n") +
  theme()
dev.off()
rm(ddsMat); gc()
#####Comparative heatmap#####
# For the top 100/50/25 most variable genes (by rlog variance): one heatmap
# of log2 fold changes across contrasts, one of rlog expression per sample.
library( "genefilter" )
sideCols=brewer.pal(12, "Set3")[colData(rld)$Replicate]
topVarGenes <- head( order( rowVars( assay(rld) ), decreasing=TRUE ), 100)
gene<- rownames(assay(rld)[ topVarGenes,])
# log2FC of the selected genes in every contrast (columns = contrast labels).
fcs<-(sapply(1:length(listDiffMat_fdr),function(x)listDiffMat_fdr[[x]][gene,]$log2FoldChange))
rownames(fcs)<- gene
colnames(fcs)= outs$contrasts
pdf("figures/Heatmap_100_variable_genes_FC.pdf")
heatmap.2(fcs, scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(6, "BrBG")) ), cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
pdf("figures/Heatmap_100_variable_genes.pdf")
heatmap.2(assay(rld)[ topVarGenes,], scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(9, "RdBu")) ), ColSideColors=sideCols, cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
# Same pair of heatmaps for the top 50 genes.
topVarGenes <- head( order( rowVars( assay(rld) ), decreasing=TRUE ), 50)
gene<- rownames(assay(rld)[ topVarGenes,])
fcs<-(sapply(1:length(listDiffMat_fdr),function(x)listDiffMat_fdr[[x]][gene,]$log2FoldChange))
rownames(fcs)<- gene
colnames(fcs)= outs$contrasts
pdf("figures/Heatmap_50_variable_genes_FC.pdf")
heatmap.2(fcs, scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(6, "BrBG")) ), cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
pdf("figures/Heatmap_50_variable_genes.pdf")
heatmap.2(assay(rld)[ topVarGenes,], scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(9, "RdBu")) ), ColSideColors=sideCols, cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
# Same pair of heatmaps for the top 25 genes.
topVarGenes <- head( order( rowVars( assay(rld) ), decreasing=TRUE ), 25)
gene<- rownames(assay(rld)[ topVarGenes,])
fcs<-(sapply(1:length(listDiffMat_fdr),function(x)listDiffMat_fdr[[x]][gene,]$log2FoldChange))
rownames(fcs)<- gene
colnames(fcs)= outs$contrasts
pdf("figures/Heatmap_25_variable_genes_FC.pdf")
heatmap.2(fcs, scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(6, "BrBG")) ), cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
pdf("figures/Heatmap_25_variable_genes.pdf")
heatmap.2(assay(rld)[ topVarGenes,], scale="row", trace="none", dendrogram="row", col = colorRampPalette(rev(brewer.pal(9, "RdBu")) ), ColSideColors=sideCols, cexRow=0.4, cexCol=0.75, key=TRUE, margins=c(10,10),srtCol=45)
dev.off()
####Session information#######
sink("data/DESeq2.session_info.txt")
print("seed is 1234")
sessionInfo()
sink()
|
51ebfc8002d42c8cdc452d91954a177f33d95105
|
251ec93f7c54f2a2f0dd5051ed56f69313a530fd
|
/man/seeFastq.Rd
|
c1986f9713ef210b0efe87539f4f714b98ee9b34
|
[] |
no_license
|
tgirke/systemPipeR
|
5df29a7af4fb44794b7ad4700bf8917647ec943b
|
039594710ecd4c515d0421cb9a3393eb30eae38f
|
refs/heads/devel
| 2023-08-16T12:14:52.734954
| 2023-06-21T01:02:18
| 2023-06-21T01:02:18
| 45,077,906
| 54
| 48
| null | 2023-09-12T00:08:17
| 2015-10-28T00:09:31
|
R
|
UTF-8
|
R
| false
| false
| 2,900
|
rd
|
seeFastq.Rd
|
\name{seeFastq}
\alias{seeFastq}
\alias{seeFastqPlot}
\title{
Quality reports for FASTQ files
}
\description{
The following \code{seeFastq} and \code{seeFastqPlot} functions generate and plot a series of
useful quality statistics for a set of FASTQ files including per cycle quality
box plots, base proportions, base-level quality trends, relative k-mer
diversity, length and occurrence distribution of reads, number of reads above
quality cutoffs and mean quality distribution. The functions allow processing
of reads with variable length, but most plots are only meaningful if the read
positions in the FASTQ file are aligned with the sequencing cycles. For
instance, constant length clipping of the reads on either end or variable
length clipping on the 3' end maintains this relationship, while variable
length clipping on the 5' end without reversing the reads erases it.
The function \code{seeFastq} computes the summary stats and stores them in a relatively
small list object that can be saved to disk with \code{save()} and reloaded with
\code{load()} for later plotting. The argument 'klength' specifies the k-mer length and 'batchsize' the
number of reads to randomly sample from each fastq file. }
\usage{
seeFastq(fastq, batchsize, klength = 8)
seeFastqPlot(fqlist, arrange = c(1, 2, 3, 4, 5, 8, 6, 7), ...)
}
\arguments{
\item{fastq}{
Named character vector containing paths to FASTQ file in the data fields and sample labels in the name slots.
}
\item{batchsize}{
Number of reads to randomly sample from each FASTQ file that will be considered in the QC analysis. Smaller numbers reduce the memory footprint and compute time.
}
\item{klength}{
Specifies the k-mer length in the plot for the relative k-mer diversity.
}
\item{fqlist}{
\code{list} object returned by \code{seeFastq()}.
}
\item{arrange}{
Integer vector from 1 to 7 specifying the row order of the QC plot. Dropping numbers eliminates the corresponding plots.
}
\item{\dots}{
Additional plotting arguments to pass on to \code{seeFastqPlot()}.
}
}
\value{
The function \code{seeFastq} returns the summary stats in a \code{list} containing all information required for the quality plots.
The function \code{seeFastqPlot} plots the information generated by \code{seeFastq} using \code{ggplot2}.
}
\author{
Thomas Girke
}
\examples{
\dontrun{
targets <- system.file("extdata", "targets.txt", package="systemPipeR")
dir_path <- system.file("extdata/cwl", package="systemPipeR")
args <- loadWorkflow(targets=targets, wf_file="hisat2/hisat2-mapping-se.cwl",
input_file="hisat2/hisat2-mapping-se.yml", dir_path=dir_path)
args <- renderWF(args, inputvars=c(FileName="_FASTQ_PATH1_", SampleName="_SampleName_"))
fqlist <- seeFastq(fastq=infile1(args), batchsize=10000, klength=8)
pdf("fastqReport.pdf", height=18, width=4*length(fqlist))
seeFastqPlot(fqlist)
dev.off()
}
}
\keyword{ utilities }
|
90a004098651f7003d04b2ffb95b4410c68f9fae
|
97c5d5f3568c4ab59e446d42d476c9a94f33a379
|
/CompareVariables.R
|
6c5e8204cdea1bebf0c43702a0d090212a9ebd72
|
[] |
no_license
|
Tezzzcatlipoca/OrgCrime
|
27e96bacece39e27dca41db8e07967b60db068fe
|
ebc0c85f6a3db4df87fd77e341e317534f66cf45
|
refs/heads/master
| 2020-09-22T14:52:46.310025
| 2016-12-20T16:53:07
| 2016-12-20T16:53:07
| 66,792,765
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,582
|
r
|
CompareVariables.R
|
library(xlsx)
library(foreign)
# Crime-code catalogue; keep only codes flagged as useful.
cdel<-read.xlsx("CDEL2012.xlsx",1) # Crime codes (useful ones pre-selected)
# NOTE(review): "EstÃ.n." looks like a mojibake column name ("Están.") -- it
# matches what read.xlsx produced, but verify the file encoding.
co.del<-cdel[cdel$EstÃ.n.>0,]
# Identify important crimes
delitos.imp<-co.del$CVE_DEL
# Open the crimes database
sdel<-read.dbf("sdel2009.dbf")
sdel.imp<-sdel[sdel$B_DELITO %in% delitos.imp,]
sdel.imp$ENTMUN<-paste0(sdel.imp$B_ENTOC,"-",sdel.imp$B_MUNOC)
# Identify everyone involved
id_ps<-unique(sdel.imp$ID_PS)
# Open the records file
sreg<-read.dbf("sreg2009.dbf")
sreg.imp<-sreg[sreg$ID_PS %in% id_ps,] # records of the people involved
sreg.imp$ENTMUN_NAC<-paste0(sreg.imp$B_ENTNAC,"-",sreg.imp$B_MUNNAC) # place of birth
sreg.imp$ENTMUN_RES<-paste0(sreg.imp$B_ENTRH,"-",sreg.imp$B_MUNRH) # place of habitual residence
# Load 2010 population census results
pop<-read.table("C:/Users/Roberto/Documents/Proyectos/Blog Narco-CO/CO/2016/CensoPob2010.txt",sep="\t",quote = "",header=T)
pop$key<-gsub(" ","-",pop$ENTMUN)
# Municipalities with the most recorded crimes
munic.del<-sdel.imp$ENTMUN
tabla.munic.del<-table(munic.del) # frequencies
ratio.munic.del<-data.frame(entmun=names(tabla.munic.del),ocurr=as.integer(tabla.munic.del))
ratio.munic.del<-merge(ratio.munic.del,pop,by.x="entmun",by.y="key",all.x=T)
ratio.munic.del$ratio<-ratio.munic.del$ocurr/ratio.munic.del$pop
ratio.munic.del.sorted<-ratio.munic.del[order(ratio.munic.del$ratio,decreasing = T),] # sort municipalities by ratio
tabla.munic.del.ord<-tabla.munic.del[order(tabla.munic.del,decreasing = TRUE)] # sorted
munics.del<-rep(0,dim(tabla.munic.del.ord)[1])
for(i in 1:(dim(tabla.munic.del.ord)[1])){ # cumulative share of occurrences
  munics.del[i]<-sum(tabla.munic.del.ord[1:i])/sum(tabla.munic.del.ord)
}
plot(munics.del) # plot how many municipalities account for all observations (crimes)
# Municipalities with the most registered offenders - by place of birth
munic.regnac<-sreg.imp$ENTMUN_NAC
tabla.munic.regnac<-table(munic.regnac) # frequencies
ratio.munic.regnac<-data.frame(entmun=names(tabla.munic.regnac),ocurr=as.integer(tabla.munic.regnac))
ratio.munic.regnac<-merge(ratio.munic.regnac,pop,by.x="entmun",by.y="key",all.x=T)
ratio.munic.regnac$ratio<-ratio.munic.regnac$ocurr/ratio.munic.regnac$pop
ratio.munic.regnac.sorted<-ratio.munic.regnac[order(ratio.munic.regnac$ratio,decreasing = T),]# sort municipalities by ratio
tabla.munic.regnac.ord<-tabla.munic.regnac[order(tabla.munic.regnac,decreasing = TRUE)] # sorted
munics.regnac<-rep(0,dim(tabla.munic.regnac.ord)[1])
for(i in 1:(dim(tabla.munic.regnac.ord)[1])){ # cumulative share of occurrences
  munics.regnac[i]<-sum(tabla.munic.regnac.ord[1:i])/sum(tabla.munic.regnac.ord)
}
plot(munics.regnac) # plot how many municipalities account for all observations
# Municipalities with the most registered offenders - by place of habitual residence
munic.regres<-sreg.imp$ENTMUN_RES
tabla.munic.regres<-table(munic.regres) # frequencies
ratio.munic.regres<-data.frame(entmun=names(tabla.munic.regres),ocurr=as.integer(tabla.munic.regres))
ratio.munic.regres<-merge(ratio.munic.regres,pop,by.x="entmun",by.y="key",all.x=T)
ratio.munic.regres$ratio<-ratio.munic.regres$ocurr/ratio.munic.regres$pop
ratio.munic.regres.sorted<-ratio.munic.regres[order(ratio.munic.regres$ratio,decreasing = T),]# sort municipalities by ratio
tabla.munic.regres.ord<-tabla.munic.regres[order(tabla.munic.regres,decreasing = TRUE)] # sorted
munics.regres<-rep(0,dim(tabla.munic.regres.ord)[1])
for(i in 1:(dim(tabla.munic.regres.ord)[1])){ # share of crimes committed
  # in each municipality (out of the total)
  munics.regres[i]<-sum(tabla.munic.regres.ord[1:i])/sum(tabla.munic.regres.ord)
}
plot(munics.regres) # plot how many municipalities account for all observations
# Compare places of occurrence with birth and habitual residence
munic_del_100<-ratio.munic.del.sorted[ratio.munic.del.sorted$ocurr>7,][1:100,] # 7= 3rd Qu.
munic_regnac_100<-ratio.munic.regnac.sorted[ratio.munic.regnac.sorted$ocurr>6,][1:100,] # 6=3rd Qu.
munic_regres_100<-ratio.munic.regres.sorted[ratio.munic.regres.sorted$ocurr>7,][1:100,] # 7=3rd Qu.
sum(munic_regnac_100$entmun %in% munic_regres_100$entmun) #71
sum(munic_del_100$entmun %in% munic_regnac_100$entmun) # 55
sum(munic_del_100$entmun %in% munic_regres_100$entmun) # 64
# Get the name and state of the 100 municipalities
nazwy<-read.dbf("C:/Users/Roberto/Documents/Proyectos/Blog Narco-CO/CO/2016/MPIOS2012.dbf")
nazwy$key<-paste0(nazwy$CVE_ENT,"-",nazwy$CVE_MUN)
# Get the names of the municipalities with the most occurrences
# NOTE(review): munic_*_100 are data frames here, so `%in% munic_del_100`
# matches against the unlisted frame, not the entmun keys -- probably
# needs `munic_del_100$entmun`; confirm.
nombres_del_100<-nazwy$DESCRIP[nazwy$key %in% munic_del_100]
nombres_regnac_100<-nazwy$DESCRIP[nazwy$key %in% munic_regnac_100]
nombres_regres_100<-nazwy$DESCRIP[nazwy$key %in% munic_regres_100]
# Plot them on a map
# Differentiate by crime type (production, trafficking, violence, money laundering)
# Define the cutoff line for keeping the municipalities with the most activity,
# to be compared with the municipalities derived from the people
# To identify municipalities, build a STATE-MUN key
plot(xtabs(B_NACION~B_SEXO,data=sreg.imp))
plot(xtabs(B_NACION~B_EDAD,data=sreg.imp)) # badly built: these sum the nationality codes
# Occupation by age
# Occupation by marital status
# Sex
# Women by occupation and marital status
#
library(mosaic)
mosaic::tally(~B_OCUPA+B_SEXO,data=sreg.imp)
mosaic::tally(~B_EDOCIVIL+B_SEXO,data=sreg.imp)
mosaic::tally(~B_EDOCIVIL+B_REINCIDE,data=sreg.imp)
|
45905c522515e5799a3338ade61cf0d639b77896
|
a00ae6e32bec6f96ec37cc6aee8374df6565871b
|
/R/comp.pat.R
|
da71680d2972038fa5cccf0cddff8cfa9be1d56d
|
[] |
no_license
|
cran/lcd
|
6372f23d4d3f3e2dd6656a632f2cdd7c9d6e9483
|
a6c9acc4e576cf76bc809c4953dbb818cf3c8ad4
|
refs/heads/master
| 2020-04-22T19:55:52.285942
| 2012-11-09T00:00:00
| 2012-11-09T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
r
|
comp.pat.R
|
## Compare an estimated pattern (adjacency matrix) against the true pattern.
## Counts missing and extra arrowheads, and computes the structural Hamming
## distance (SHD) between the two graphs. `skeleton()` is provided by the
## enclosing package.
`comp.pat` <- function(truepat, pat)
{
  vars <- rownames(truepat)
  ## Arrowheads are positions where the matrix minus its transpose equals 1
  true.arrows <- which(truepat - t(truepat) == 1)
  pat <- pat[vars, vars]
  est.arrows <- which(pat - t(pat) == 1)
  ## Arrowheads present in the truth but not in the estimate, and vice versa
  n.missing <- length(true.arrows) - length(which(match(true.arrows, est.arrows) > 0))
  n.extra <- length(est.arrows) - length(which(match(est.arrows, true.arrows) > 0))
  ## Structural Hamming distance: skeleton mismatches first ...
  skel.diff <- which(skeleton(truepat) - skeleton(pat) != 0)
  pat[skel.diff] <- skeleton(truepat)[skel.diff]
  shd <- length(skel.diff) / 2
  ## ... then orientation mismatches on the (now shared) skeleton
  orient.diff <- abs(truepat - pat)
  shd <- shd + length(which(orient.diff + t(orient.diff) != 0)) / 2
  list(a.total = length(true.arrows),
       a.missing = n.missing,
       a.extra = n.extra,
       shd = shd)
}
|
1163e8fded4fd77fbca027b65d0d23edf2754d1f
|
18259bc6828fb5da8d003f1f98e2761c6697d931
|
/.Rproj.user/C625F279/sources/per/t/718886E1-contents
|
9d398a1f2332bf4ab45cd718c1a8b94c4c44a9fa
|
[] |
no_license
|
leosampsousa/Panorama_Covid
|
f1b00142a6760b807380becc98935e273b64bab8
|
fa2f9a91417ac25ddb0368483324276f81c34bb8
|
refs/heads/master
| 2022-09-20T13:02:10.961107
| 2020-06-01T14:11:44
| 2020-06-01T14:11:44
| 268,529,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,293
|
718886E1-contents
|
# Load the Brazilian COVID-19 data sets (UTF-8 encoded CSVs in the working dir).
brazil_cities_coordinates <- read.csv('brazil_cities_coordinates.csv', header = T, encoding="UTF-8")
brazil_covid19 <- read.csv('brazil_covid19.csv', header = T, encoding="UTF-8")
brazil_covid19_cities <- read.csv('brazil_covid19_cities.csv', header = T, encoding="UTF-8")
brazil_covid19_macro <- read.csv('brazil_covid19_macro.csv', header = T, encoding="UTF-8")
brazil_population_2019 <- read.csv('brazil_population_2019.csv', header = T, encoding="UTF-8")
# States lookup table (no header): V1 = state code, V2 = abbreviation, V3 = name
states <- read.csv('estados.csv', header = F, encoding="UTF-8")
library(dplyr)
# Latest snapshot per state: deaths and confirmed cases as of 2020-05-26
df.state.last <- brazil_covid19 %>%
filter(date == '2020-05-26') %>%
select(state,deaths,cases)
# Round to four decimals and render with exactly four decimal places.
# Returns a character string (format() output), vectorized over `numero`.
formate_number <- function(numero) {
  format(round(numero, 4), nsmall = 4)
}
# Case-fatality rate: deaths divided by confirmed cases (vectorized).
death_rate <- function(mortes, confirmados) {
  mortes / confirmados
}
# Per-state rates: lethality (%) and share of national confirmed cases (%)
df.state.last$death_rate = death_rate(df.state.last$deaths, df.state.last$cases)*100
df.state.last$cases_rate = sapply(df.state.last$cases, function(x){x <- (x*100)/sum(df.state.last$cases)})
barPlot_data <- df.state.last[,c('state', 'death_rate', 'cases_rate')]
# Round for display (formate_number returns character, hence as.numeric)
barPlot_data$death_rate = as.numeric(formate_number(barPlot_data$death_rate))
barPlot_data$cases_rate = as.numeric(formate_number(barPlot_data$cases_rate))
# Replace state abbreviation (V2) with full state name (V3) from the lookup
states$V1 <- NULL
barPlot_data <- inner_join(barPlot_data, states, by = c('state' = 'V2'))
barPlot_data$state <- NULL
barPlot_data <- rename(barPlot_data, state=V3)
library(plotly)
# Grouped bar chart per state: case share (yellow) vs lethality (red)
barPlot1 <- barPlot_data %>% plot_ly()
barPlot1 <- barPlot1 %>% add_trace(x = ~state, y = ~cases_rate, type = 'bar',
                                   name = '% de confirmados do estado em relação ao País',
                                   marker = list(color = 'rgba(247,202,24,1)',
                                                 line = list(color = 'rgb(8,48,107)', width = 1.5)))
barPlot1 <- barPlot1 %>% add_trace(x = ~state, y = ~death_rate, type = 'bar',
                                   name = 'Letalidade do Covid no estado',
                                   marker = list(color = 'rgba(242,38,19,1)',
                                                 line = list(color = 'rgb(8,48,107)', width = 1.5)))
barPlot1 <- barPlot1 %>% layout(title = "Corona Vírus nos estados do Brasil",
                                barmode = 'group',
                                xaxis = list(title = ""),
                                yaxis = list(title = "%"),
                                legend = list(orientation = 'h',
                                              xanchor = 'center',
                                              x = 0.5))
########## By region ##########
# Same snapshot aggregated to the five macro-regions
df.region.last <- brazil_covid19 %>%
filter(date == '2020-05-26') %>%
group_by(region) %>%
summarize(deaths = sum(deaths), cases = sum(cases))
df.region.last$death_rate = death_rate(df.region.last$deaths, df.region.last$cases)*100
df.region.last$cases_rate = sapply(df.region.last$cases, function(x){x <- (x*100)/sum(df.region.last$cases)})
barPlotRegion_data <- df.region.last[,c('region', 'death_rate', 'cases_rate')]
barPlotRegion_data$death_rate = as.numeric(formate_number(barPlotRegion_data$death_rate))
barPlotRegion_data$cases_rate = as.numeric(formate_number(barPlotRegion_data$cases_rate))
# Grouped bar chart per region, same color scheme as the state plot
barPlot2 <- barPlotRegion_data %>% plot_ly()
barPlot2 <- barPlot2 %>% add_trace(x = ~region, y = ~cases_rate, type = 'bar',
                                   name = '% de confirmados da região em relação ao País',
                                   marker = list(color = 'rgba(247,202,24,1)',
                                                 line = list(color = 'rgb(8,48,107)', width = 1.5)))
barPlot2 <- barPlot2 %>% add_trace(x = ~region, y = ~death_rate, type = 'bar',
                                   name = 'Letalidade do Covid na região',
                                   marker = list(color = 'rgba(242,38,19,1)',
                                                 line = list(color = 'rgb(8,48,107)', width = 1.5)))
barPlot2 <- barPlot2 %>% layout(title = "Corona Vírus nas regiões do Brasil",
                                barmode = 'group',
                                xaxis = list(title = ""),
                                yaxis = list(title = "%"),
                                legend = list(orientation = 'h',
                                              xanchor = 'center',
                                              x = 0.5))
# Daily increments from a cumulative series: keeps the first value and then
# takes successive differences. Fix over the original loop: for a length-1
# input, `2:length(vetor)` counted DOWN (2, 1) and errored with a
# zero-length replacement; the loop also grew the result element by element.
# diff() is equivalent for length >= 2 and handles short inputs cleanly.
novosCasosDia <- function(vetor){
  if (length(vetor) < 2) {
    # Nothing to difference; the first day's increment is the value itself
    return(vetor)
  }
  c(vetor[1], diff(vetor))
}
novosCasos_data$novosCasos <- novosCasosDia(novosCasos_data$cases)
x_axis <- list(title = 'Data', tickformat = '%d/%m')
y_axis <- list(title = 'Número de novos casos por dia')
# Bar chart: new confirmed cases per day
plotNovosCasos <- plot_ly(data = novosCasos_data, x = ~date, y = ~novosCasos,
                          marker = list(color = 'rgba(44,130,201,1)'), type='bar')
plotNovosCasos <- plotNovosCasos %>% layout(title = 'Novos casos confirmados de Covid no Brasil (por dia)', xaxis = x_axis, yaxis=y_axis)
# New deaths per day
# There are duplicate dates with different death counts, so the rows of the
# same date with fewer deaths are removed manually (hard-coded indices).
novosCasosObito_data <- brazil_covid19_macro[,c('date', 'deaths')]
novosCasosObito_data <- novosCasosObito_data[-c(73,89),]
novosCasosObito_data$novosCasos <- novosCasosDia(novosCasosObito_data$deaths)
x_axis <- list(title = 'Data', tickformat = '%d/%m')
y_axis <- list(title = 'Número de novos obitos por dia')
plotNovosCasosObito <- plot_ly(data = novosCasosObito_data, x = ~date, y = ~novosCasos,
                               marker = list(color = 'rgba(255,0,0,1)'), type='bar')
plotNovosCasosObito <- plotNovosCasosObito %>% layout(title = 'Novos obitos por Covid no Brasil (por dia)', xaxis = x_axis, yaxis=y_axis)
######### Choropleth map: Brazil vs confirmed covid cases ############
library(dplyr)
# Per-state confirmed cases on the snapshot date
df.state.last <- brazil_covid19 %>%
filter(date == '2020-05-26') %>%
select(state,cases)
states <- read.csv('estados.csv', header = F, encoding="UTF-8")
# Attach state code (V1) and abbreviation (V3) to the case counts
df.state.last <- inner_join(df.state.last, states, by = c('state' = 'V2'))
df.state.last <- rename(df.state.last, codigo_uf=V1)
df.state.last <- rename(df.state.last, uf=V3)
library(rgdal)
# IBGE state boundaries shapefile
shp <- readOGR("BR_UF_2019.shp", stringsAsFactors=FALSE, encoding="UTF-8")
class(shp)
## Now join the data frame with the shapefile
df <- merge(shp, df.state.last, by.x = 'CD_UF', by.y= 'codigo_uf')
proj4string(df) <- CRS('+proj=longlat +datum=WGS84 +no_defs')
Encoding(df$NM_UF) <- 'UTF-8'
library(RColorBrewer)
library(leaflet)
pal <- colorBin("YlOrRd",domain = df$cases) # map color scale
# HTML popup shown when a state polygon is clicked
state_popup <- paste0("<strong>Estado: </strong>",
df$NM_UF,
"<br><strong>Casos confirmados: </strong>",
df$cases)
# Leaflet choropleth of confirmed cases per state
mapa <- leaflet(data = df) %>%
addProviderTiles(providers$CartoDB.Positron) %>%
addPolygons(fillColor = ~pal(df$cases),
fillOpacity = 0.8,
weight = 1,
color = "#BDBDC3",
popup = state_popup)
mapa <- mapa %>% addLegend("bottomright", pal = pal, values = df$cases,
title = "Casos confirmados Corona Virus",
opacity = 1)
|
|
abc6ac79a55bb7b7e8d6303a42dc3c9dc093b306
|
42230d1b619cd2fb7114dde4fc0b3d9c4c454407
|
/suicide_R Script.R
|
61f2cf80bfa1e5bb6ef4df50115dfe2fcd316076
|
[] |
no_license
|
KaushikRajanRK/Suicide-Rate-Analysis_Data-Visualization
|
087f45a268ac935d075a264ea6f5cee7ac51e348
|
8ad70d1d2a04a86e8c0df28a4e39877b909eb04d
|
refs/heads/master
| 2020-06-06T15:48:19.483759
| 2019-06-19T18:19:18
| 2019-06-19T18:19:18
| 192,783,209
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,941
|
r
|
suicide_R Script.R
|
# Suicide-rate analysis for the data-visualisation module.
# Fixes over the original script: C-style `//` comment markers (a syntax
# error in R) replaced with `#`; `Scaling`/`scaling` typo corrected so the
# scaled column is actually stored; the stray argument-less `aggregate()`
# call (always an error) and `rm(by)` (no such object) removed, along with
# interactive `?help` lookups that do nothing in a script.
# Setting up working directory
setwd('D:/Education/IRELAND/NCI/Moodle Documents/SEM 2/DV/dataset')
# Importing the dataset
suicide<-read.csv('master.csv')
str(suicide)
colnames(suicide)<-c("country","year","sex","age", "suicide_count","population","suicides/100k pop","country-year","HDI for year","gdp_for_year ($)","gdp_per_capita ($)","generation")
# Coerce the columns used downstream to numeric for the correlation matrix
suicide_corr<-suicide
suicide_corr$year<-as.numeric(suicide$year)
suicide_corr$suicide_count<-as.numeric(suicide$suicide_count)
suicide_corr$population<-as.numeric(suicide$population)
suicide_corr$`suicides/100k pop`<-as.numeric(suicide$`suicides/100k pop`)
suicide_corr$`HDI for year`<-as.numeric(suicide$`HDI for year`)
suicide_corr$`gdp_per_capita ($)`<-as.numeric(suicide$`gdp_per_capita ($)`)
suicide_corr<- suicide_corr[c(2,5,6,7,11)]
suicide_corr<-as.data.frame(suicide_corr)
str(suicide_corr)
# Correlation plot
library(corrplot)
forcorrplot<- cor(suicide_corr)
corrplot(forcorrplot,order = "AOE", method = "color",bg="green",addCoef.col = "gray")
write.csv(suicide_corr,"forcorrplot.csv")
# Total suicides per country
countrydeathcount<-as.data.frame(read.csv("countrywisedeaths.csv"))
countrydeathcount<- as.data.frame(countrydeathcount)
str(countrydeathcount)
countrydeathcount$suicides_no<-as.numeric(countrydeathcount$suicides_no)
# countrydeathcount$country<-as.list(countrydeathcount$country)
agg = aggregate(countrydeathcount$suicides_no,
                by = list(countrydeathcount$country),FUN = sum)
write.csv(agg,"agg.csv")
plot(suicide$population,suicide$`gdp_for_year ($)`)
# Scale selected columns for comparison (typo fixed: was `Scaling$...`)
scaling<- read.csv("fr scaling.csv")
scaling$Suicide.Count<-scale(scaling$Suicide.Count)
scale(scaling$Life.Ladder)
scale(scaling$gdp_per_capita....)
# Net suicide count by year and age band
library(dplyr)
library(reshape2)
Summary <- suicide %>%
  group_by(year,age) %>%
  summarise(Net = sum(suicide_count))
write.csv(Summary,"summary.csv")
|
eb69087c6c8cb3b43e66afba56c3a23339b866a8
|
9269cbb8581ffaee3cfc20c827aa3ced1c7df8a3
|
/man/initializeTests.Rd
|
e26deaa637867bdc20773e9bf46e6ece6954c3a3
|
[] |
no_license
|
cran/RTest
|
0e75ee391fd3c796287c191d6aad675e0764198e
|
83ec1756f362f210786e04c8d5c6913f5f5907f3
|
refs/heads/master
| 2020-03-27T05:26:02.376025
| 2019-12-04T15:10:08
| 2019-12-04T15:10:08
| 146,018,503
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 742
|
rd
|
initializeTests.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RTestCase.R
\docType{methods}
\name{initializeTests}
\alias{initializeTests}
\alias{initializeTests,RTestCase-method}
\title{Initialize the Test Slot for a Test Case.}
\usage{
\S4method{initializeTests}{RTestCase}(object)
}
\arguments{
\item{object}{(\code{object}) The \code{\link{RTestCase-class}} object.}
}
\description{
This method initializes the slots '\code{tests}', '\code{test.for}' and '\code{test.result}' of
a object of class '\code{RTestCase}'. See description of \code{\link{RTestCase-class}} for
further information.
}
\seealso{
\code{\link{RTestCase-class}}
}
\author{
Matthias Pfeifer \email{matthias.pfeifer@roche.com}
}
|
c02f9daec89422771b3aba891f4054abb942940a
|
8150426f0bbd304a6fb82afb78e55054c032a330
|
/ui.R
|
229d2be539b65343f89c1c03fca16c126a87bb49
|
[] |
no_license
|
jjanzen/data_products_cp
|
c0f8abcf2f3f000a627691d61d95fa8d4a7bb798
|
da0a94240d517bf89ccc2a258a8dd54c8420e8ee
|
refs/heads/master
| 2020-03-29T11:17:50.140421
| 2015-06-13T22:25:53
| 2015-06-13T22:25:53
| 37,382,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,071
|
r
|
ui.R
|
# Shiny UI: collects control/test participant and conversion counts and shows
# the significance results, conversion rates and plots rendered by server.R.
# Fix: "Statisically" -> "Statistically" in the two user-facing headings.
shinyUI(pageWithSidebar(
  headerPanel("Experiment Results and Statistical Significance"),
  sidebarPanel(
    # Counts must be non-negative whole numbers
    numericInput('control_loads', 'control participants', 0, min=0, step=1),
    numericInput('control_clicks', 'control conversions', 0, min=0, step=1),
    #numericInput('control_thx', 'control_thx', 0, min=0, step=1),
    numericInput('test_loads', 'test participants', 0, min=0, step=1),
    numericInput('test_clicks', 'test conversions', 0, min=0, step=1),
    #numericInput('test_thx', 'test_thx', 0, min=0, step=1),
    actionButton("goButton", "Go!")
  ),
  mainPanel(
    h3('Results of Test'),
    h3(' '),
    h4('Statistically Significant T-test'),
    verbatimTextOutput("significant_t_test"),
    h4('Statistically Significant Chi-Square'),
    verbatimTextOutput("significant_chi_square"),
    h4('Control Rate'),
    verbatimTextOutput("control_click_rate"),
    h4('Test Rate'),
    verbatimTextOutput("test_click_rate"),
    plotOutput('newHist'),
    plotOutput('boxPlot')
  )
))
|
57913080f1116c703830d5f19d755d2fe106c66d
|
8a52f49deb648606fff204740950a913d7c1f37c
|
/fig4-kl_cell.R
|
b936723046090a5cc565bfe2bd353f694eee19ea
|
[] |
no_license
|
gersteinlab/topicnet
|
8ca7420c63d796df8f31a1a4663a50fbd261f2f8
|
77c08d7256c588e7efa3d0461e9d446dd6361869
|
refs/heads/master
| 2020-09-13T13:51:10.101877
| 2019-11-20T20:43:35
| 2019-11-20T20:43:35
| 222,805,587
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,586
|
r
|
fig4-kl_cell.R
|
# Calculate KL divergence for TFs in a given cell line.
# The loaded .RData files provide `tf.doc` (one row per cell/TF document) and
# `topics.50` (per-document topic weights); `pair.mat` is assumed to come from
# one of the loaded workspaces or an attached package -- TODO confirm.
load("alltf.rewiring.rdata")
load("TFmem_data.fig4-190115.RData")
library(reshape2)
library(entropy)
library(gridExtra)
library(grid)
library(RColorBrewer)
library(gplots)
# Restrict to cell types with more than 50 TF documents
cell.count = table(tf.doc$cell)
cell.filtered = names(cell.count)[cell.count>50]
cell.topic50.tf.list = list() # tf: topic50 values in relative cell-tf documents
cell.topic50.tf.kl.list = list() # tf: KL divergence based on topic50 values between cell types
cell.topic50.tf.klsymm.list = list() # tf: symmetric KL divergence matrix (mean of two directions)
for(cell in cell.filtered){
cell.doc.id = which(tf.doc$cell == cell)
n.doc = length(cell.doc.id)
# Topic-50 weights for this cell's documents, rows named by TF
cell.topic50 = topics.50[cell.doc.id,]
cell.tf = tf.doc[cell.doc.id,]$tf
rownames(cell.topic50) = cell.tf
cell.topic50.tf.list[[cell]] = cell.topic50
# Pairwise (asymmetric) KL divergence between TF topic distributions
cell.topic50.tf.kl = pair.mat(cell.topic50, KL.plugin)
cell.topic50.tf.kl.list[[cell]] = cell.topic50.tf.kl
# Symmetrize by averaging the two directions
cell.topic50.tf.klsymm = (cell.topic50.tf.kl + t(cell.topic50.tf.kl))/2
cell.topic50.tf.klsymm.list[[cell]] = cell.topic50.tf.klsymm
}
### Plot the KL divergence for each cell (one clustered heatmap PDF per cell)
for(cell in names(cell.topic50.tf.klsymm.list)) {
cell.topic50.tf.leu.klsymm = cell.topic50.tf.klsymm.list[[cell]]
pdf(paste("cell.klsymm/", cell, "-klsymm.pdf", sep=""), height = 10, width = 10)
heatmap.2(cell.topic50.tf.leu.klsymm, scale="none", symm=TRUE, trace="none", col = colorRampPalette(c("red","yellow"))(32), main=cell,
distfun = function(x) as.dist(x)) # cluster directly on the KL matrix as a distance
dev.off()
}
|
b63f096122ba6e6bfe198f9781619e1e9e5a6d5b
|
9dcff6306fb7c38df8d2130f9deb33703afa332d
|
/code/HNSCC_and_MCF10A_EMT_and_epithelal_signature_scores_&_expression_of_putative_EMT_checkpoint_genes.R
|
b63112d8f33f78f29222bbe4d28b1459cac56f4d
|
[] |
no_license
|
cole-trapnell-lab/pseudospace
|
a951e1604a74cc3c9e72e8320ce57140d9002bb3
|
bae0e5bb5ecee5691842105e02f902ff8f73b7ad
|
refs/heads/master
| 2020-06-17T09:26:22.165737
| 2019-09-04T21:01:13
| 2019-09-04T21:01:13
| 195,880,675
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,533
|
r
|
HNSCC_and_MCF10A_EMT_and_epithelal_signature_scores_&_expression_of_putative_EMT_checkpoint_genes.R
|
###### Load packages ######
# Load necessary packages for single cell RNA-Seq analysis including packages for downstream Gene Ontology Analysis
suppressPackageStartupMessages({
library(devtools)
library(stringr)
library(scales)
library(dtw)
library(reshape2)
library(GSA)
library(limma)
library(DBI)
library(MASS)
library(plyr)
library(dplyr)
library(matrixStats)
library(piano)
library(cluster)
library(pheatmap)
library(grid)
library(RColorBrewer)
library(viridis)
library(dendextend)
library(monocle)})
sessionInfo()
##### Load and define necessary functions #####
source("Pseudospace_support_functions.R")
# Per-cell mean expression score for each gene signature.
#
# cds:       a monocle CellDataSet; expression is size-factor normalized and
#            log2(x + 1) transformed before averaging.
# gene_list: data.frame with at least columns `id` (feature id matching
#            rownames of exprs(cds)) and `signature` (signature label).
# Returns a data.frame (cells x signatures): each entry is the mean
# transformed expression of that signature's genes in that cell.
calculate_signature_scores <- function (cds, gene_list)
{
# Size-factor-normalize the counts, then log2(x + 1)
vst = as.data.frame(as.matrix(log2((Matrix::t(Matrix::t(Biobase:::exprs(cds))/pData(cds)$Size_Factor)) + 1)))
# Keep only the signature genes, then attach the signature label per gene
vst = vst[gene_list$id, ]
vst = merge(vst, gene_list[, names(gene_list) %in% c("id",
"signature")], by.x = "row.names", by.y = "id")
# Long format: one row per gene x cell
vst = melt(vst, id.vars = c("Row.names", "signature"), variable.name = "cell",
value.name = "vst")
# Mean transformed expression per (cell, signature)
scores = vst %>% group_by(cell, signature) %>% summarize(score = mean(vst))
# Wide (signature x cell), then transpose to cells x signatures
final_scores = recast(scores, signature ~ cell)
final_scores = t(final_scores)
colnames(final_scores) = final_scores[1, ]
final_scores = as.data.frame(final_scores[-1, ])
# t() coerced the values to character; convert each score column back to numeric
for (column in colnames(final_scores)) {
final_scores[, column] <- as.numeric(as.character(final_scores[,
column]))
}
final_scores = final_scores
return(final_scores)
}
# Long-format table of size-factor-normalized log2(x + 1) expression,
# one row per gene x cell, restricted to the genes in `gene_list`
# (data.frame with `id` and `gene_short_name` columns).
get_expression_matrix <- function(cds, gene_list)
{
  norm_counts <- Matrix::t(Matrix::t(Biobase:::exprs(cds)) / pData(cds)$Size_Factor)
  expr_tbl <- as.data.frame(as.matrix(log2(norm_counts + 1)))
  expr_tbl <- expr_tbl[gene_list$id, ]
  annot <- gene_list[, names(gene_list) %in% c("id", "gene_short_name")]
  expr_tbl <- merge(expr_tbl, annot, by.x = "row.names", by.y = "id")
  melt(expr_tbl, id.vars = c("Row.names", "gene_short_name"),
       variable.name = "cell", value.name = "vst")
}
# Load Mock and TGFB cds objects created in Figure1 code
cds.list <- readRDS("pseudospace_processed_trajectories_cds.list.rds")
# Identify genes expressed in at least 50 cells
expressed_genes.list <- list()
expressed_genes.list[["Mock"]] <- row.names(fData(cds.list[["Mock"]])[Matrix::rowSums(Biobase::exprs(cds.list[["Mock"]]) > 0) > 50 ,])
length(expressed_genes.list[["Mock"]])
expressed_genes.list[["TGFB"]] <- row.names(fData(cds.list[["TGFB"]])[Matrix::rowSums(Biobase::exprs(cds.list[["TGFB"]]) > 0) > 50 ,])
length(expressed_genes.list[["TGFB"]])
# Identify MCF10A cells expressed at the beginning and end of spontaneous (mock) and TGFB-driven (tgfb) pseudospatial trajectories
# (quantile 1 = start of trajectory, quantile 10 = end)
Mock_inner_subset_cells <- row.names(pData(cds.list[["Mock"]])[pData(cds.list[["Mock"]])$pseudospace_quantile == 1,])
length(Mock_inner_subset_cells)
Mock_outer_subset_cells <- row.names(pData(cds.list[["Mock"]])[pData(cds.list[["Mock"]])$pseudospace_quantile == 10,])
length(Mock_outer_subset_cells)
TGFB_inner_subset_cells <- row.names(pData(cds.list[["TGFB"]])[pData(cds.list[["TGFB"]])$pseudospace_quantile == 1,])
length(TGFB_inner_subset_cells)
TGFB_outer_subset_cells <- row.names(pData(cds.list[["TGFB"]])[pData(cds.list[["TGFB"]])$pseudospace_quantile == 10,])
length(TGFB_outer_subset_cells)
#### Load clean datasets ####
# Unecessary for MCF10A, change this later
pseudospace_cds <- readRDS("pseudospace_cds.rds")
# Keep only the trajectory-endpoint MCF10A cells selected above
pseudospace_cds <- pseudospace_cds[,c(Mock_inner_subset_cells,Mock_outer_subset_cells,
TGFB_inner_subset_cells,TGFB_outer_subset_cells)]
# Load HNSCC cds object
HNSCC_cds <- readRDS("HSNCC_cds.rds")
# Filter HNSCC data for samples with more than 40, non-lymph node, cancer cells processesd with Maxima enzyme
HNSCC_patient_list <- as.data.frame(pData(HNSCC_cds) %>%
filter(Maxima_enzyme == "0" & clasified_as_cancer_cell == "1" & lymph_node == "0") %>%
group_by(patient_id) %>% summarize(n = n()) %>% arrange(desc(n)))
HNSCC_patients_forAnalysis <- HNSCC_patient_list[HNSCC_patient_list$n > 40,]$patient_id
HNSCC_cds <- HNSCC_cds[,pData(HNSCC_cds)$patient_id %in% HNSCC_patients_forAnalysis &
pData(HNSCC_cds)$Maxima_enzyme == "0" &
pData(HNSCC_cds)$clasified_as_cancer_cell == "1" &
pData(HNSCC_cds)$lymph_node == "0"]
# Create a joint MCF10A-HNSCC cds object
HNSCC_fData <- fData(HNSCC_cds)
MCF10A_fData <- fData(pseudospace_cds)[,c("id","gene_short_name")]
pData(HNSCC_cds)$cell <- as.character(row.names(pData(HNSCC_cds)))
# Harmonize metadata to two columns: cell id and sample label
HNSCC_pData <- pData(HNSCC_cds)[,c("cell","patient_id")]
MCF10A_pData <- pData(pseudospace_cds)[,c("cell","sample")]
colnames(HNSCC_pData) <- c("cell","sample")
# HNSCC expression values are rounded so both matrices hold integer counts
HNSCC_exprs <- as(round(Biobase::exprs(HNSCC_cds)),"dgTMatrix")
MCF10A_exprs <- Biobase::exprs(pseudospace_cds)
# Align MCF10A genes to the HNSCC gene order before column-binding
MCF10A_exprs <- MCF10A_exprs[row.names(HNSCC_exprs),]
MCF10A_fData <- MCF10A_fData[row.names(HNSCC_fData),]
# Create joint exprs matrices as well as metadata and feature data data.frames
new_pData <- rbind(MCF10A_pData, HNSCC_pData)
new_pData <- new("AnnotatedDataFrame", data = new_pData)
new_fData <- new("AnnotatedDataFrame", data = MCF10A_fData)
new_exprs <- cbind(MCF10A_exprs,HNSCC_exprs)
HNSCC_MCF10A_cds <- newCellDataSet(as(as.matrix(new_exprs), "sparseMatrix"),
phenoData = new_pData,
featureData = new_fData,
lowerDetectionLimit = 0.5,
expressionFamily = negbinomial.size())
pData(HNSCC_MCF10A_cds)$cell <- as.character(pData(HNSCC_MCF10A_cds)$cell)
# Add a spatial sample id for MCF10A cells
# (HNSCC cells keep their patient id as the sample label)
sample_id <- sapply(pData(HNSCC_MCF10A_cds)$cell, function(x){
if(x %in% Mock_inner_subset_cells)return("Mock_early")
if(x %in% Mock_outer_subset_cells)return("Mock_late")
if(x %in% TGFB_inner_subset_cells)return("TGFB_early")
if(x %in% TGFB_outer_subset_cells)return("TGFB_late")
return(pData(HNSCC_MCF10A_cds)[x,]$sample)
})
pData(HNSCC_MCF10A_cds)$Cell.Type <- sample_id
# Pre-process the new cds object
HNSCC_MCF10A_cds <- estimateSizeFactors(HNSCC_MCF10A_cds)
HNSCC_MCF10A_cds <- estimateDispersions(HNSCC_MCF10A_cds)
# Load results of differential gene expression test and clustering pertaining to Figure 2c.
pseudo.DTW.gene.clusters.aucRank <- readRDS("pseudo.DTW.gene.clusters.aucRank.rds")
# Isolate genes that define an early EMT induction and
# KRAS ASSOCIATED induction of a mature mesenchymal state
# (cluster 1 = early EMT, cluster 5 = late/mature mesenchymal)
Early_EMT_genes <- names(pseudo.DTW.gene.clusters.aucRank[pseudo.DTW.gene.clusters.aucRank == 1])
Late_EMT_genes <- names(pseudo.DTW.gene.clusters.aucRank[pseudo.DTW.gene.clusters.aucRank == 5])
# Load MSigDB hallmarks and GO biological process geneset collections
source("loadGSCSafe.R")
hallmarksGSC <- loadGSCSafe(file="h.all.v6.0.symbols.gmt")
GOGSC<-loadGSCSafe(file="Human_GO_bp_no_GO_iea_symbol.gmt")
# Isolate genes from genesets associated with induction of an EMT and epidermis development
EMT_marker_gene_names <- hallmarksGSC$gsc$HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION
EMT_marker_id <- unique(fData(cds.list[["Mock"]])[fData(cds.list[["Mock"]])$gene_short_name %in% EMT_marker_gene_names,]$id)
epithelial_marker_gene_names <- GOGSC$gsc$'EPIDERMIS DEVELOPMENT%GOBP%GO:0008544'
epithelial_marker_id <- unique(fData(cds.list[["Mock"]])[fData(cds.list[["Mock"]])$gene_short_name %in% epithelial_marker_gene_names,]$id)
Epithelial_marker_genes <- epithelial_marker_id
Canonical_EMT_marker_genes <- EMT_marker_id
# Create a data.frame consisting of all signatures (columns: gene id, signature label)
EMT_signature_df <- as.data.frame(matrix(c(Epithelial_marker_genes,
Early_EMT_genes,
Late_EMT_genes,
Canonical_EMT_marker_genes,
rep("Epithelial_score",
length(Epithelial_marker_genes)),
rep("Early_EMT_score",
length(Early_EMT_genes)),
rep("Late_EMT_score",
length(Late_EMT_genes)),
rep("Canonical_EMT_score",
length(Canonical_EMT_marker_genes))),
ncol = 2))
colnames(EMT_signature_df) <- c("id","signature")
EMT_signature_df$id <- as.character(EMT_signature_df$id)
EMT_signature_df$signature <- as.character(EMT_signature_df$signature)
# Calculate and scale scores for every cell across every signature
HNSCC_MCF10A_EMT_scores <- calculate_signature_scores(HNSCC_MCF10A_cds,EMT_signature_df)
HNSCC_MCF10A_EMT_scores$Sample <- pData(HNSCC_MCF10A_cds)$Cell.Type
HNSCC_MCF10A_EMT_scores_scaled <- HNSCC_MCF10A_EMT_scores %>% mutate_at(c(1,2,3,4), funs(c(scale(.))))
# Median score per sample, sorted by canonical EMT score
HNSCC_MCF10A_EMT_scores_scaled %>%
group_by(Sample) %>%
summarize(median_EMT_score = median(Canonical_EMT_score), median_Epithelial_score = median(Epithelial_score),
median_Early_EMT_score = median(Early_EMT_score), median_Late_EMT_score = median(Late_EMT_score)) %>%
arrange(median_EMT_score)
# Fix the sample order for plotting (MCF10A references first, then patients)
HNSCC_MCF10A_EMT_scores_scaled$Sample <- factor(HNSCC_MCF10A_EMT_scores_scaled$Sample,
levels = c("Mock_early","Mock_late",
"TGFB_early","TGFB_late",
"HNSCC6","HNSCC20","HNSCC5",
"HNSCC18","HNSCC22","HNSCC25","HNSCC17",
"HNSCC16"))
# Reference medians for the two MCF10A extremes (used as dashed lines below)
HNSCC_MCF10A_EMT_scores_scaled %>%
filter(Sample %in% c("Mock_early","TGFB_late")) %>%
group_by(Sample) %>%
summarize(median_EMT_score = median(Canonical_EMT_score), median_Epithelial_score = median(Epithelial_score),
median_Early_EMT_score = median(Early_EMT_score), median_Late_EMT_score = median(Late_EMT_score))
ggplot(HNSCC_MCF10A_EMT_scores_scaled, aes(x = Sample, y = Canonical_EMT_score)) +
geom_boxplot(fill = "gray70") +
geom_jitter(size = 0.01, color = "#702632", alpha = 0.1) +
geom_hline(yintercept = -1.482951, "#0075F2", linetype = "dashed") +
geom_hline(yintercept = 1.143410, color = "#70163C", linetype = "dashed") +
theme(text=element_text(size=24),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank()) +
ylab("Hallmarks EMT\nscore") +
monocle:::monocle_theme_opts() +
ggsave(file = "HNSCC_MCF10A_Canonical_EMT_score.png", height = 4.2, width = 6)
# Boxplot of the scaled GO-BP epithelial score per sample, with per-cell jitter.
ggplot(HNSCC_MCF10A_EMT_scores_scaled, aes(x = Sample, y = Epithelial_score)) +
geom_boxplot(fill = "gray70") +
geom_jitter(size = 0.01, color = "#702632", alpha = 0.1) +
# BUG FIX: hex color was positional and landed in `mapping`; must be `color =`.
geom_hline(yintercept = 1.0393513, color = "#0075F2", linetype = "dashed") +
geom_hline(yintercept = -0.9142161, color = "#70163C", linetype = "dashed") +
theme(text=element_text(size=24),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank()) +
#ylim(-3,5) +
ylab("GO-BP epithelial\nscore") +
monocle:::monocle_theme_opts() +
ggsave(file = "HNSCC_MCF10A_Epithelial_score.png", height = 4.2, width = 6)
# Boxplot of the scaled MCF10A early-EMT score per sample, with per-cell jitter.
ggplot(HNSCC_MCF10A_EMT_scores_scaled, aes(x = Sample, y = Early_EMT_score)) +
geom_boxplot(fill = "gray70") +
geom_jitter(size = 0.01, color = "#702632", alpha = 0.1) +
# BUG FIX: hex color was positional and landed in `mapping`; must be `color =`.
geom_hline(yintercept = -1.313916, color = "#0075F2", linetype = "dashed") +
geom_hline(yintercept = 0.738604, color = "#70163C", linetype = "dashed") +
theme(text=element_text(size=24),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank()) +
ylab("MCF10A early EMT\nscore") +
monocle:::monocle_theme_opts() +
ggsave(file = "HNSCC_MCF10A_Early_EMT_score.png", height = 4.2, width = 6)
# Boxplot of the scaled MCF10A late-EMT score per sample, with per-cell jitter.
ggplot(HNSCC_MCF10A_EMT_scores_scaled, aes(x = Sample, y = Late_EMT_score)) +
geom_boxplot(fill = "gray70") +
geom_jitter(size = 0.01, color = "#702632", alpha = 0.1) +
# BUG FIX: hex color was positional and landed in `mapping`; must be `color =`.
geom_hline(yintercept = -1.589488, color = "#0075F2", linetype = "dashed") +
geom_hline(yintercept = 1.523222, color = "#70163C", linetype = "dashed") +
theme(text=element_text(size=24),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title.x = element_blank()) +
ylab("MCF10A late EMT\nscore") +
monocle:::monocle_theme_opts() +
ggsave(file = "HNSCC_MCF10A_Late_EMT_score.png", height = 4.2, width = 6)
# Gene sets whose loss leads to accumulation of MCF10A cells across pseudospace
# (Mock- and TGFB-driven conditions). Used below to relate their expression in
# HNSCC cells to the partial-EMT rank from Puram et al.
all_enriched_target.list <- list(
  Mock = c("MYC", "TWIST1", "ITGB1", "FOXQ1", "FOXC2", "FOXO3", "TWIST2",
           "FOXA1", "KLF8", "FGFR1", "FZD7", "IL6R", "POU5F1", "MET", "FZD4",
           "FOXD3", "PRRX1", "TRIM28", "EGFR", "GATA6", "ITGAV", "ZEB2",
           "NOTCH1", "PTCH1", "SNAI2"),
  TGFB = c("FGFR1", "FOXC2", "GATA6", "POU5F1", "ZEB2", "NOTCH1", "FZD2",
           "MET", "PTCH1", "TGFBR1", "TGFBR2", "ITGAV", "ZNF703", "ZEB1",
           "PRRX1", "TWIST2")
)
# Union of both conditions (union() already de-duplicates; unique() kept for parity
# with the original computation).
all_enriched_targets <- unique(union(all_enriched_target.list[["Mock"]],
                                     all_enriched_target.list[["TGFB"]]))
# Manual split of the hits into receptors and transcription factors.
enriched_targets_receptors <- c("EGFR", "FGFR1", "FZD2", "FZD4", "FZD7",
                                "IL6R", "ITGAV", "ITGB1", "MET", "NOTCH1",
                                "PTCH1", "TGFBR1", "TGFBR2")
enriched_targets_transcription_factors <- c("FOXA1", "FOXC2", "FOXD3", "FOXO3",
                                            "FOXQ1", "GATA6", "KLF8", "MYC",
                                            "POU5F1", "PRRX1", "SNAI2",
                                            "TRIM28", "TWIST1", "TWIST2",
                                            "ZEB1", "ZEB2", "ZNF703")
# Normalize library sizes before any expression filtering.
HNSCC_cds <- estimateSizeFactors(HNSCC_cds)
# Filter for genes expressed (count > 0) in more than 50 cancer cells from the
# analyzed patients (primary tumor, no Maxima enzyme, not lymph node).
# NOTE(review): the comma after `%in% HNSCC_patients_forAnalysis` makes the
# remaining conditions a *third* subscript of `[` rather than part of the cell
# filter -- this looks like it was meant to be `&`; confirm against the CDS class.
HNSCC_expressed_genes <- row.names(fData(HNSCC_cds)[Matrix::rowSums(Biobase::exprs(HNSCC_cds[,pData(HNSCC_cds)$patient_id %in% HNSCC_patients_forAnalysis,
pData(HNSCC_cds)$Maxima_enzyme == "0" &
pData(HNSCC_cds)$clasified_as_cancer_cell == "1" &
pData(HNSCC_cds)$lymph_node == "0"]) > 0) > 50 ,])
length(HNSCC_expressed_genes)
# Map the receptor / TF gene symbols to feature ids and keep only expressed ones.
enriched_targets_receptors <- unique(intersect(as.character(fData(HNSCC_cds)[fData(HNSCC_cds)$gene_short_name %in% enriched_targets_receptors,]$id), HNSCC_expressed_genes))
length(enriched_targets_receptors)
enriched_targets_transcription_factors <- unique(intersect(as.character(fData(HNSCC_cds)[fData(HNSCC_cds)$gene_short_name %in% enriched_targets_transcription_factors,]$id), HNSCC_expressed_genes))
length(enriched_targets_transcription_factors)
# Convert the surviving ids back to gene symbols for downstream labeling.
enriched_targets_receptors <- fData(HNSCC_cds)[enriched_targets_receptors,]$gene_short_name
enriched_targets_transcription_factors <- fData(HNSCC_cds)[enriched_targets_transcription_factors,]$gene_short_name
# Feature metadata subsets for the two gene groups (note: "transcriptiona" typo
# is the established variable name throughout this block).
receptors_enriched_fData <- fData(HNSCC_cds)[fData(HNSCC_cds)$gene_short_name %in% enriched_targets_receptors,]
transcriptiona_factors_enriched_fData <- fData(HNSCC_cds)[fData(HNSCC_cds)$gene_short_name %in% enriched_targets_transcription_factors,]
# Factor -> character so merges/casts below behave predictably.
receptors_enriched_fData$id <- as.character(receptors_enriched_fData$id)
receptors_enriched_fData$gene_short_name <- as.character(receptors_enriched_fData$gene_short_name)
transcriptiona_factors_enriched_fData$id <- as.character(transcriptiona_factors_enriched_fData$id)
transcriptiona_factors_enriched_fData$gene_short_name <- as.character(transcriptiona_factors_enriched_fData$gene_short_name)
# Long-format per-cell expression (vst-transformed) via the project helper.
receptors_enriched_targets_HNSCC_counts <- get_expression_matrix(HNSCC_cds, receptors_enriched_fData)
transcription_factors_enriched_targets_HNSCC_counts <- get_expression_matrix(HNSCC_cds, transcriptiona_factors_enriched_fData)
# Long -> wide: one row per cell, one column per gene.
receptors_enriched_targets_HNSCC_counts <- dcast(receptors_enriched_targets_HNSCC_counts,
cell~gene_short_name, value.var = "vst")
transcription_factors_enriched_targets_HNSCC_counts <- dcast(transcription_factors_enriched_targets_HNSCC_counts,
cell~gene_short_name, value.var = "vst")
# Join the expression columns onto the cell metadata.
HNSCC_pData_receptors_enriched_counts <- merge(pData(HNSCC_cds), receptors_enriched_targets_HNSCC_counts, by = "cell")
HNSCC_pData_transcription_factors_enriched_counts <- merge(pData(HNSCC_cds), transcription_factors_enriched_targets_HNSCC_counts, by = "cell")
row.names(HNSCC_pData_receptors_enriched_counts) <- HNSCC_pData_receptors_enriched_counts$cell
row.names(HNSCC_pData_transcription_factors_enriched_counts) <- HNSCC_pData_transcription_factors_enriched_counts$cell
head(HNSCC_pData_receptors_enriched_counts[,c("patient_id",as.character(enriched_targets_receptors))])
# Keep only patient id + gene columns before averaging.
HNSCC_pData_receptors_enriched_counts <- HNSCC_pData_receptors_enriched_counts[,c("patient_id",as.character(enriched_targets_receptors))]
HNSCC_pData_transcription_factors_enriched_counts <- HNSCC_pData_transcription_factors_enriched_counts[,c("patient_id", as.character(enriched_targets_transcription_factors))]
# Mean expression per patient for each gene.
# NOTE(review): funs() is deprecated in dplyr >= 0.8.
HNSCC_average_receptor_enriched_counts <- as.data.frame(HNSCC_pData_receptors_enriched_counts %>%
group_by(patient_id) %>%
summarize_all(funs(mean)))
HNSCC_average_transcription_factors_enriched_counts <- as.data.frame(HNSCC_pData_transcription_factors_enriched_counts %>%
group_by(patient_id) %>%
summarize_all(funs(mean)))
# Patient id becomes the row name; drop the id column.
row.names(HNSCC_average_receptor_enriched_counts) <- as.character(HNSCC_average_receptor_enriched_counts$patient_id)
HNSCC_average_receptor_enriched_counts <- HNSCC_average_receptor_enriched_counts[,-1]
row.names(HNSCC_average_transcription_factors_enriched_counts) <- as.character(HNSCC_average_transcription_factors_enriched_counts$patient_id)
HNSCC_average_transcription_factors_enriched_counts <- HNSCC_average_transcription_factors_enriched_counts[,-1]
head(HNSCC_average_receptor_enriched_counts)
head(HNSCC_average_transcription_factors_enriched_counts)
# Double z-scaling (per gene, then per patient) for the heatmaps below.
HNSCC_average_receptor_enriched_counts_scaled <- t(scale(t(scale(HNSCC_average_receptor_enriched_counts))))
HNSCC_average_transcription_factors_enriched_counts_scaled <- t(scale(t(scale(HNSCC_average_transcription_factors_enriched_counts))))
# Patient order encodes the partial-EMT rank (1 = lowest) from Puram et al.
HNSCC_EMT_annotation <- data.frame(row.names = c("HNSCC6","HNSCC20","HNSCC5","HNSCC18","HNSCC22","HNSCC25","HNSCC17","HNSCC16"),
patient_id = c("HNSCC6","HNSCC20","HNSCC5","HNSCC18","HNSCC22","HNSCC25","HNSCC17","HNSCC16"),
EMT_rank = c("1","2","3","4","5","6","7","8"))
# Receptor heatmap with patients in fixed EMT-rank order (no column clustering).
pheatmap(t(HNSCC_average_receptor_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = FALSE,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_receptors.png",
width = 3.5, height = 3)
# Same heatmap with column clustering; the pheatmap object is kept so the column
# dendrogram can be re-ordered below.
receptors_ph <- pheatmap(t(HNSCC_average_receptor_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = TRUE,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_receptors_clustered.png",
width = 3.5, height = 4)
# Flip clades around in a manner that is proper and highlights the relative EMT rank.
# NOTE(review): overwriting hclust$order permutes leaf display order directly; this
# only corresponds to valid clade flips if the permutation respects the merge
# structure -- confirm by comparing the two plot() calls below.
receptors_ph$tree_col$order
new_receptor_tree_col_order <- receptors_ph$tree_col
new_receptor_tree_col_order$order <- c(8,4,3,5,6,7,2,1)
plot(receptors_ph$tree_col)
plot(new_receptor_tree_col_order)
# Re-draw the clustered heatmap using the manually re-ordered dendrogram
# (overwrites the previous *_clustered.png).
pheatmap(t(HNSCC_average_receptor_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = new_receptor_tree_col_order,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_receptors_clustered.png",
width = 4.2, height = 4)
# Transcription-factor heatmap with patients in fixed EMT-rank order.
pheatmap(t(HNSCC_average_transcription_factors_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = FALSE,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_transcription_factors.png",
width = 3.5, height = 3)
# Clustered version; keep the object so the column dendrogram can be re-ordered.
TFs_ph <- pheatmap(t(HNSCC_average_transcription_factors_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = TRUE,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_transcription_factors_clustered.png",
width = 3.5, height = 4)
# Manual leaf re-ordering to highlight relative EMT rank (see NOTE on the
# receptor block: must be validated against the dendrogram plots).
TFs_ph$tree_col$order
new_TF_tree_col_order <- TFs_ph$tree_col
new_TF_tree_col_order$order <- c(8,4,5,6,2,3,7,1)
plot(TFs_ph$tree_col)
plot(new_TF_tree_col_order)
# Re-draw with the re-ordered dendrogram (overwrites the previous *_clustered.png).
pheatmap(t(HNSCC_average_transcription_factors_enriched_counts_scaled),
show_colnames = TRUE,
show_rownames = TRUE,
cluster_cols = new_TF_tree_col_order,
annotation_col = data.frame(row.names = row.names(HNSCC_EMT_annotation),
EMT_rank = as.numeric(HNSCC_EMT_annotation$EMT_rank)),
color = inferno(25),
clustering_method = "ward.D2",
file = "Mean_expression_levels_of_enriched_transcription_factors_clustered.png",
width = 4.2, height = 4)
|
f17fbd58764af0d77a68980b7a5d3ec0b12d2dd2
|
9dc0be2dea189adfc517b72c5b0fe1b4b4bfd28d
|
/test_script.R
|
7ff5f9a4c85803c146b2e70aa1ecf2490ba10c78
|
[] |
no_license
|
brwaang55/Test_plotting
|
12aacff65c89017637bf87d489ad3f0a64e66379
|
e2792a85161d4d4e023de638cc2781cff2aa2090
|
refs/heads/master
| 2020-03-29T12:59:47.831330
| 2018-09-23T00:15:06
| 2018-09-23T00:15:06
| 149,933,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,864
|
r
|
test_script.R
|
library(rgl)
library(Rvcg)
library(FNN)
# Compute the barycenter/centroid of a face.
#
# vertices: 4 x V matrix of homogeneous vertex coordinates (mesh3d `vb` slot);
#           the 4th (homogeneous) row is dropped before averaging.
# face:     integer indices of the vertices belonging to one face.
# Returns the length-3 centroid (per-coordinate mean over the face's vertices).
compute_face_centroid <- function(vertices, face) {
  # drop = FALSE keeps a matrix even for a single-index face (the original
  # collapsed to a vector and errored); rowMeans is the vectorized equivalent
  # of apply(..., MARGIN = 1, FUN = mean).
  vertex <- vertices[, face, drop = FALSE][-4, , drop = FALSE]
  rowMeans(vertex)
}
# Return the barycenters of all faces of a mesh3d complex.
#
# complex: mesh3d-like list with $it (3 x n_faces vertex-index matrix) and
#          $vb (4 x V homogeneous vertex matrix).
# Returns an n_faces x 3 matrix, one centroid per row.
find_face_coordinates <- function(complex) {
  face_matrix <- complex$it
  # One centroid per face column; result is 3 x n_faces.
  centroid_cols <- apply(X = face_matrix, MARGIN = 2,
                         FUN = compute_face_centroid, vertices = complex$vb)
  # Transpose so each row is one face's (x, y, z) centroid.
  t(centroid_cols)
}
# Find the n faces closest to a query point.
#
# complex:          mesh3d object (currently unused by the implementation).
# point:            1 x 3 matrix with the query coordinates.
# face_coords:      n_faces x 3 matrix of face barycenters (find_face_coordinates()).
# n:                number of faces to return.
# nearest_neighbor: if TRUE, use a true k-nearest-neighbor search
#                   (FNN::knnx.index); otherwise take the single nearest face
#                   and extend outwards by face index.
find_closest_faces <- function(complex, point, face_coords, n, nearest_neighbor = FALSE) {
  if (isTRUE(nearest_neighbor)) {
    return(knnx.index(data = face_coords, query = point, k = n))
  }
  # Single nearest face, then step outwards by index on both sides.
  # NOTE(review): this assumes faces with adjacent indices are spatial
  # neighbors (depends on mesh ordering), and the indices are not clamped to
  # [1, nrow(face_coords)] -- both should be confirmed before relying on this.
  point_index <- knnx.index(data = face_coords, query = point, k = 1)
  base <- point_index[1]
  # BUG FIX: the original called ceil(), which does not exist in base R
  # (the base function is ceiling()), so this branch always errored.
  steps <- seq_len(ceiling(n / 2))
  # Interleave +1, -1, +2, -2, ... preserving the original append order.
  offsets <- as.vector(rbind(steps, -steps))
  c(point_index, base + offsets)
}
# Read the mesh from an OFF file (Rvcg).
file=vcgImport('m1810.off')
# One color slot per face vertex (3 per triangular face), initially white.
colors=rep('white',dim(file$it)[2]*3)
# Barycenter of every face.
face_coords=find_face_coordinates(file)
# Indices of the 10 faces nearest to vertex #2 (homogeneous row dropped).
closest_faces=find_closest_faces(complex=file,point=matrix(file$vb[-4,2],ncol = 3),face_coords = face_coords,n =10,nearest_neighbor = TRUE)
# Collect the color-slot indices for the selected faces.
# NOTE(review): for 1-based face i the 3 slots are (i-1)*3+1 .. i*3, but
# base = i*3+1 starts at the *next* face's first slot and covers 4 slots --
# looks like an off-by-one; confirm against the rendered output.
vert2=c()
for (i in closest_faces){
base=i*3+1
vert2=c(vert2,base,base+1,base+2,base+3)
}
# Paint the selected slots red.
colors[vert2]='red'
# Attach per-vertex colors as the mesh material.
file$material=list(color=colors)
plot3d(file)
# Mark the query point in blue on top of the mesh.
rgl.points(matrix(file$vb[-4,2],ncol = 3),size=10,color='blue')
|
f990cde54de754bff4fd78e02c38503e0555bb9c
|
9504dd1c45daecae343646aa9243dd3fa1c7add5
|
/solutions/MetaR.Workshop/classes_gen/instantRefresh/test.R
|
2de404532c7ad3ecc408136ea1923086f197b335
|
[
"Apache-2.0"
] |
permissive
|
darthmanwe/MetaR
|
fc949007ea041fd4823b65583766e6bf9011badd
|
f88fb2576221e494d0b599517e7c95a46bbd2017
|
refs/heads/master
| 2021-06-19T14:51:42.904008
| 2017-06-24T21:01:42
| 2017-06-24T21:01:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
r
|
test.R
|
# Generated with MetaR, http://metaR.campagnelab.org, from script "test" on Sat Jun 24 16:35:01 EDT 2017
# NOTE(review): machine-generated code -- do not hand-edit; regenerate from the
# MetaR source script instead.
# Install-or-attach helper emitted by the MetaR runtime.
installOrLoad<-function (lib,repo="http://cran.us.r-project.org"){if(!require(lib,character.only=TRUE)){install.packages(lib,repos=repo)
library(lib,character.only=TRUE)}}
installOrLoad("session")
a<-1
a<-2
c<-a+a
# Shadows base::c above; e() prints the value of `c` and returns NULL (cat's
# invisible return), so arithmetic on e(...) below yields numeric(0).
e<-function (f){cat(c)}
save.image("/Users/fac2003/R_RESULTS/instantRefresh/WVHTXNIAVG.Rda", safe = FALSE);
oo=1
ii=2
o<-oo+1
i=ii+1
# NOTE(review): e(1) returns NULL, so g and h are numeric(0) -- presumably an
# artifact of the generated test scenario rather than intended arithmetic.
g<-o+e(1)
h<-g+1
# Empty block and an unassigned anonymous function: generator no-ops.
{}
function (f){function (){}}
|
f67a28814832f213ac65f1c150630df6154b79bf
|
90b3c72db44de4d5f132c2a8b4944245bfa7785c
|
/tests/testthat/test_22_rflow.R
|
c9cbb73731c3e1994add44965cb526d3fe91a435
|
[
"MIT"
] |
permissive
|
vh-d/Rflow
|
80aff510e5192cccf2a10616bb5edec727ca1e28
|
6a50bb27dcb52659a39cbb451f8a1d1e165cc155
|
refs/heads/master
| 2022-05-22T18:40:26.218576
| 2022-05-07T13:23:31
| 2022-05-07T13:23:31
| 167,013,026
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,407
|
r
|
test_22_rflow.R
|
# testthat suite for building Rflow objects.
# NOTE(review): "Builidng" is a typo in the context label; it is a runtime
# string, so it is left unchanged here.
context("Builidng rflows")
# An empty rflow can be created and carries the expected classes.
test_that("Rflow can be crated", {
rf1 <- Rflow::new_rflow()
expect_true(exists("rf1"), "rflow object exists")
expect_s3_class(rf1, "rflow")
expect_s3_class(rf1, "environment")
})
# Nodes can be registered; dependency ids are stored but not yet resolved.
test_that("nodes can be added", {
rf2 <- Rflow::new_rflow()
expect_true(Rflow::add_node(list(id = "node1", r_expr = expression_r(1)), rflow = rf2, verbose = FALSE))
expect_is(rf2[["node1"]], "node")
expect_true(Rflow::add_node(list(id = "node2", r_expr = expression_r(1), depends = "node1"), rflow = rf2, verbose = FALSE))
expect_is(rf2[["node2"]], "node")
expect_length(rf2$node2$depends, 1)
# upstream stays NULL until connect_nodes() is called.
expect_null(rf2$node2$upstream)
})
# connect_nodes() materializes the dependency ids into mutual up/downstream links.
test_that("nodes can be connected", {
rf2 <- Rflow::new_rflow()
ENV <- new.env()
expect_true(Rflow::add_node(process_obj_defs(list(list(id = "ENV.node1", r_expr = expression_r(1))))[[1]], rflow = rf2, verbose = FALSE))
expect_true(Rflow::add_node(process_obj_defs(list(list(id = "ENV.node2", r_expr = expression_r(1), depends = "ENV.node1")))[[1]], rflow = rf2, verbose = FALSE))
# connecting nodes
connect_nodes(rf2, verbose = FALSE)
expect_length(rf2[["ENV.node2"]]$upstream, 1)
expect_length(rf2[["ENV.node1"]]$downstream, 1)
expect_identical(rf2[["ENV.node2"]]$upstream[[1]], rf2[["ENV.node1"]])
expect_identical(rf2[["ENV.node1"]]$downstream[[1]], rf2[["ENV.node2"]])
})
# "ENV.name" ids split into environment and object name; defs are normalized.
test_that("node definitions are processed correctly", {
expect_equal("FOO", Rflow:::env_name_from_id("FOO.bar")[, env[1]])
expect_equal("bar", Rflow:::env_name_from_id("FOO.bar")[, name[1]])
obj <- list(
env = "FOO",
name = "bar"
)
obj_process <- process_obj_defs(list(obj))
expect_equal(obj_process, list("FOO.bar" = obj))
obj2 <- list(
id = "FOO.bar"
)
obj_process2 <- process_obj_defs(list(obj2))
expect_equal(obj_process2, list("FOO.bar" = c(list(id = "FOO.bar"), obj)))
})
# Each log_record() call appends one entry to the handler's store; new_rflow()
# itself emits one record when given a logger.
test_that("logging setup works as expected", {
hl <- handler_list()
lg <- logger(handlers = list(hl))
log_record(lg, "Logger initiated")
logoutput <- as.list(lg[["handlers"]][[1]])
expect_length(logoutput, 1)
rf1 <- Rflow::new_rflow(logging = lg)
logoutput <- as.list(lg[["handlers"]][[1]])
expect_length(logoutput, 2)
log_record(rf1, "testing message")
logoutput <- as.list(lg[["handlers"]][[1]])
expect_length(logoutput, 3)
expect_true(any(grepl("testing message", logoutput, fixed = TRUE)))
})
|
5de5847280d0b0798762823e90c58afd23dfe209
|
0d86ba90a9a0c46e404414c4dd8f6f4ecd448558
|
/scripts/project_gene_sets.R
|
239c13500c4b8fa19a2c8d9e18385f05984466fa
|
[
"MIT"
] |
permissive
|
amytildazhang/p3-model-iteration
|
5fdc159b7c6e027629792e88de6d4a4eff06a1fd
|
8cc5f1c47a9af3ca88ffa54811d2cf7083dcd82e
|
refs/heads/master
| 2020-06-15T23:37:38.983451
| 2019-07-25T14:56:37
| 2019-07-25T14:56:37
| 195,422,788
| 0
| 0
| null | 2019-07-05T14:26:58
| 2019-07-05T14:26:58
| null |
UTF-8
|
R
| false
| false
| 2,141
|
r
|
project_gene_sets.R
|
#!/bin/env Rscript
#
# Aggregate feature data along specified gene sets.
#
# Driven by snakemake: input/output paths, wildcards, and config come from the
# `snakemake` S4 object injected by the pipeline.
suppressMessages(library(GSEABase))
suppressMessages(library(tidyverse))
options(stringsAsFactors = FALSE)
# determine input data type from wildcards (e.g. rule "create_rna_gene_sets" -> "rna")
data_type <- strsplit(snakemake@rule, '_')[[1]][[2]]
# load feature data
dat <- read_tsv(snakemake@input[[1]], col_types = cols())
if (snakemake@wildcards[['dtrans']] == 'raw') {
  # "raw" transform: pass the data through untouched
  write_tsv(dat, snakemake@output[[1]])
} else {
  # load gene sets from every bundled .gmt.gz file
  gene_sets <- c()
  message(sprintf("Loading gene sets for %s", data_type))
  for (infile in Sys.glob('gene_sets/*.gmt.gz')) {
    fp <- gzfile(infile)
    gene_sets <- c(gene_sets, geneIds(getGmt(fp)))
    close(fp)
  }
  # remove gene set :length suffixes, if present
  names(gene_sets) <- sub(':\\d+$', '', names(gene_sets))
  # remove gene set weights, if present
  # e.g. "ANXA1,1.0" -> "ANXA1"
  gene_sets <- lapply(gene_sets, function(x) { sub(',\\d+\\.\\d+$', '', x) })
  # exclude any gene sets with fewer than the required number of genes.
  # FIX: lengths() returns an integer vector; the previous lapply(..., length)
  # produced a list, so the >= comparison relied on fragile implicit coercion.
  set_sizes <- lengths(gene_sets)
  mask <- set_sizes >= snakemake@config$gene_set_projection$gene_set_min_size
  gene_sets <- gene_sets[mask]
  # determine which aggregation function to use for this data type
  agg_func <- snakemake@config$gene_set_projection$aggregation_funcs[[data_type]]
  ID_COL_INDEX <- 1
  # iterate over gene sets and apply the aggregation function to the data for
  # the genes in each set. Rows are collected in a list and bound once at the
  # end (FIX: avoids O(n^2) rbind growth inside the loop).
  row_list <- list()
  gset_names <- c()
  message(sprintf('Aggregating %s along gene sets...', data_type))
  for (gset in names(gene_sets)) {
    dat_gset <- dat %>%
      filter(symbol %in% gene_sets[[gset]])
    # if no genes from gene set were found, continue to next gene set
    if (nrow(dat_gset) == 0) {
      next
    }
    gset_names <- c(gset_names, gset)
    row_list[[length(row_list) + 1]] <-
      apply(dat_gset[, -ID_COL_INDEX], 2, agg_func, na.rm = TRUE)
  }
  res <- do.call(rbind, row_list)
  # drop any rows with zero variance (uninformative).
  # FIX: drop = FALSE keeps a matrix even if only a single row survives.
  mask <- apply(res, 1, var, na.rm = TRUE) > 0
  res <- res[mask, , drop = FALSE]
  gset_names <- gset_names[mask]
  message(sprintf('Saving gene sets aggregated %s data...', data_type))
  res <- bind_cols(gene_set = gset_names, as.data.frame(res))
  write_tsv(res, snakemake@output[[1]])
}
|
92a68e4146199c512cb774a0c293dfbbda040d05
|
8d8f10817941bbebcf9c16075de8bae4d0bfe4ce
|
/featureCounts.R
|
8538a2377022a600c250add0d2519dc34d4d4b2a
|
[] |
no_license
|
Albertomaria/farnam_script
|
e7a8b2af6fa3577f65af64a703b8f41367eedd68
|
397015f8259d86a1c48088297b19b5fba10e907a
|
refs/heads/master
| 2021-01-25T12:36:25.019131
| 2019-08-27T02:14:48
| 2019-08-27T02:14:48
| 123,482,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 547
|
r
|
featureCounts.R
|
library(Rsubread)

# GTF annotation restricted to miRNA entries (GENCODE V27).
ann <- "/ysm-gpfs/project/am2485/Genome/Human/microRNA.subset.of.GENCODE.V27.gtf"
# NOTE(review): setwd() in a script is fragile -- every relative path below
# depends on it.
setwd("/ysm-gpfs/project/am2485/Stiffness_mRNA_lexo_Dionna/miRNA/STAR")
# One coordinate-sorted BAM per sample.
files <- list.files(path = ".", pattern = "sortedByCoord")
for (f in files) {
  # Sample name = file name up to the first dot.
  # FIX: spell out TRUE/FALSE -- T and F are ordinary variables that can be
  # reassigned, so relying on them is unsafe.
  f_name <- strsplit(f, ".", fixed = TRUE)[[1]][1]
  # Count reads per feature; multi-mapping and multi-overlapping reads included.
  seq_data <- featureCounts(f, annot.ext = ann, isGTFAnnotationFile = TRUE,
                            countMultiMappingReads = TRUE, allowMultiOverlap = TRUE)
  # NOTE(review): featureCounts() documents `$counts`; `$count` only works via
  # `$` partial matching -- confirm and prefer the full name.
  # assign()/get() kept so each sample's matrix also remains available as a
  # global variable, as in the original script.
  assign(f_name, seq_data$count)
  write.table(get(f_name), paste0(f_name, ".txt"), sep = "\t", quote = FALSE,
              col.names = "ID\tNUMBER")
}
|
0ce400fe6c9675304772c131a2685cd73a5fba4a
|
7e7bb7bfdf62c24b7fecf78f5247d28839728710
|
/Summer Loss/src/Summer_Loss_Teacher_Tools.R
|
38c85cf52479c3a987142b3c02dbe2b5e16b3921
|
[] |
no_license
|
kippchicago/Data_Analysis
|
1ad042d24c7a1e11e364f39c694692f5829363a4
|
8854db83e5c60bc7941654d22cbe6b9c63613a7f
|
refs/heads/master
| 2022-04-09T10:10:43.355762
| 2020-02-20T18:03:40
| 2020-02-20T18:03:40
| 5,903,341
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,736
|
r
|
Summer_Loss_Teacher_Tools.R
|
# Load the ProjectTemplate project (data, helpers, config).
# NOTE(review): library() is preferred over require() for hard dependencies.
require(ProjectTemplate)
load.project()
# Wrap the MAP assessment data in a mapvizier object.
map_mv<-mapvizier(map_all)
# KCCP students tested in Spring 2013 and/or Fall 2013, with a display name.
map_kccp<-filter(map_mv$mapData,
SchoolInitials=="KCCP",
TermName %in% c("Spring 2012-2013", "Fall 2013-2014")) %>%
mutate(Name=paste(StudentFirstname, StudentLastname))
# Students present in both terms, matched by student and subject.
# NOTE(review): map_kccp_2 (with underscore) is never used below; map_kccp2 is a
# different object -- possible naming slip, confirm intent.
map_kccp_2<-inner_join(filter(map_kccp, Year2==2013),
filter(map_kccp, Year2==2014),
by=c("StudentID", "MeasurementScale"))
map_kccp2 <- select(map_kccp, -Name)
# Per-student percentile trajectories for Reading: gray lines = all students
# (background layer), orange line = the faceted student.
ggplot(filter(map_kccp, MeasurementScale=="Reading"),
aes(x=factor(TermName, levels=c("Spring 2012-2013", "Fall 2013-2014")),
y=TestPercentile)
) +
geom_line(data=filter(map_kccp2, MeasurementScale=="Reading"),
aes(group=StudentID),
alpha=.1) +
geom_line(aes(group=StudentID),
color="orange",
size=2) +
facet_wrap(Name~MeasurementScale,ncol = 9)
# Plot per-student percentile change between two test terms.
#
# .data:       MAP assessment data (one row per student/term/subject).
# term_first:  name of the earlier term (e.g. "Spring 2012-2013").
# term_second: name of the later term.
# n_col:       number of facet columns.
# ...:         additional filter() conditions applied to .data
#              (e.g. MeasurementScale == "Reading").
# Returns a ggplot: one facet per student (ordered by percentile change,
# largest gain first), gray segments = all students, orange = that student.
pctl_change_plot<-function(.data, term_first, term_second, n_col=9,...){
# Keep only the two terms (plus caller filters) and order them on the x axis.
.data<-filter(.data, TermName %in% c(term_first, term_second), ...) %>%
mutate(TermName=factor(as.character(TermName),
levels=c(term_first, term_second)
)
)
# One row per student/subject present in BOTH terms; .x = first, .y = second.
.data_joined<-inner_join(filter(.data, TermName==term_first),
filter(.data, TermName==term_second),
by=c("StudentID", "MeasurementScale")
) %>%
mutate(Diff_Pctl = TestPercentile.y-TestPercentile.x,
StudentName=paste(StudentFirstname.x,
StudentLastname.x),
# Facet order: largest percentile gain first.
Name=factor(StudentName, levels=unique(StudentName)[order(-Diff_Pctl)])
)
#str(.data_joined$Name)
# Copy without Name so the background layer is not split by the facet variable.
.data_joined_2<-select(.data_joined, -Name)
p<-ggplot(.data_joined,
aes(x=Year2.x,
y=TestPercentile.x)
) +
geom_segment(data=.data_joined_2,
aes(xend=Year2.y,
yend=TestPercentile.y,
group=StudentID
),
alpha=.1) +
geom_segment(aes(xend=Year2.y,
yend=TestPercentile.y,
group=StudentID),
color="orange",
size=2) +
facet_wrap(~Name,ncol = n_col) +
theme_bw() +
theme()
p
}
# Render the Reading change plot for 5th/6th graders at KCCP to an 11x17 PDF.
# NOTE(review): when this file is source()'d, the top-level plot expression is
# not auto-printed; wrap in print() if the PDF comes out empty.
pdf(file="graphs/TT_test_S_to_F_loss_11x17.pdf", height=10.75, width=16.75)
pctl_change_plot(map_mv$mapData,
"Spring 2012-2013",
"Fall 2013-2014",
n_col=9,
MeasurementScale=="Reading",
Grade %in% 5:6,
SchoolInitials=="KCCP"
) +
ggtitle("Spring 2013 to Fall 2013 Losses/Gains\nKCCP 6th Grade Reading")
dev.off()
|
627b823d4af0d46325b9419c446c374a4fc6eecd
|
b09e6cbd019c4f2002ba0c34caaa4506132c1d6b
|
/Developing/Hailian/4. Hailian_TD_Preprocessing1.0.R
|
64bf0cdb6157669dd654b3ad0e9d6f7723c0ee63
|
[
"MIT"
] |
permissive
|
o0oBluePhoenixo0o/AbbVie2017
|
935be251170722443a13602e5ae635b0128bf286
|
356583487455ba2616457f8a59ca741321c0b154
|
refs/heads/master
| 2021-09-24T17:32:02.676494
| 2018-10-12T12:49:04
| 2018-10-12T12:49:04
| 83,968,797
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,610
|
r
|
4. Hailian_TD_Preprocessing1.0.R
|
## Bring Google Chrome's language detection (cldr) to R.
# NOTE(review): cldr is only available from the CRAN archive, so it is installed
# from a source tarball; install.packages() calls in a script re-install on
# every run -- consider guarding with requireNamespace().
url <- "http://cran.us.r-project.org/src/contrib/Archive/cldr/cldr_1.1.0.tar.gz"
pkgFile<-"cldr_1.1.0.tar.gz"
download.file(url = url, destfile = pkgFile)
install.packages(pkgs=pkgFile, type = "source", repos = NULL)
unlink(pkgFile)
install.packages("tm")
install.packages("translateR")
install.packages("mscstexta4r")
library(cldr)
library(tm)
library(translateR)
library(mscstexta4r)
# Remove links and symbols before detecting language.
# Language-detection examples on Facebook page posts.
detectLanguage(a)
b<- detectLanguage(fb_page_psoriasisSpeaks[[5]])
b<- unique(detectLanguage(fb_page_psoriasisSpeaks[[5]])[[1]])
# Detect the language of each disease-related post and keep non-English ones.
Diseases_posts_language<- detectLanguage(Hailian_Diseases_NoD[[3]])
Diseases_posts_withlanguage<- cbind(Hailian_Diseases_NoD, Diseases_posts_language)
Diseases_posts_foreignlanguage<- subset(Diseases_posts_withlanguage, detectedLanguage!="ENGLISH")
## Remove stopwords.
# Convert the text column to a tm corpus, then strip English stop words.
d<- tm_map(Corpus(VectorSource(psoriasis_post[[3]])), removeWords, stopwords("english"))
# Transfer the corpus back into a data.frame, one row per document.
# NOTE(review): growing `f` with rbind in a loop is O(n^2); seq_len(nrow(...))
# is also safer than 1:nrow(...) for empty input.
f<- data.frame()
for (i in 1:nrow(psoriasis_post))
{
e<- data.frame(d[[i]]$content)
f<- try(rbind(f,e))
}
# Re-attach the post timestamps to the stop-word-free text.
psoriasis_post_new<- try(cbind(f, psoriasis_post[[4]]))
## Why turn the data.frame into a corpus before removing stop words?
# tm_map() dispatches on corpus objects; on a bare character vector it fails:
#d<- tm_map(psoriasis_post[[3]], removeWords, stopwords("english"))
#Error in UseMethod("tm_map", x) :
#no applicable method for 'tm_map' applied to an object of class "character"
## Topic Detection
# Using package mscstexta4r (Microsoft Cognitive Services text analytics).
|
532057408da8dfc87f6264bca06b3658b7d8ce21
|
f2ccb53af7c548c53f12062e59b5b35341c75e7f
|
/01_初心者.R
|
b8f8f54f0f30f92720ad788d718c7c5e5e986b57
|
[] |
no_license
|
totoko00/DataAnalysis
|
2b71937e1a142b44cc05a4c4c9c844af0fd19083
|
f268ddc0eae527151539b5a3f36d48283edad74a
|
refs/heads/master
| 2021-05-22T18:27:50.228590
| 2020-04-04T16:04:54
| 2020-04-04T16:04:54
| 253,039,660
| 0
| 0
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 573
|
r
|
01_初心者.R
|
# Creating vectors
x <- c(1,2,3,4,5)
x
y <- c(1:5,3:1)
y
z <- c(rep(3,4),rep(c(1,5,10),c(2,3,4)))
z
a <- c("A","B","C")
a
# Vectors are built with c(). # Matrices
mat1 <- matrix(c(1:10),nrow=2,ncol=5)
mat1 <- matrix(c(1:10),2,5)
mat2 <- matrix(c(1:10),2,byrow=T)
mat3 <- matrix(c(1,3,2,5,7,3,2,15,2),3,3,byrow=T)
# Matrices are built with matrix(); byrow=T changes the element fill order. # Matrix arithmetic
mat1+mat2
mat1-mat2
mat1*mat2
mat1/mat2
mat3%*%mat3
solve(mat3)# Accessing (referencing) individual matrix elements
p <- mat1[1,2]
q <- mat3[,2]
r <- mat1[1,c(2,5)]
|
4507375aaaace516d387acd791d989cc8a609bc1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/drc/examples/nasturtium.Rd.R
|
419ea4dcc8263fbdc97367b13de53f719fb5be71
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
nasturtium.Rd.R
|
library(drc)
### Name: nasturtium
### Title: Dose-response profile of degradation of agrochemical using
### nasturtium
### Aliases: nasturtium
### Keywords: datasets
### ** Examples
# Fit a three-parameter log-logistic dose-response model (weight vs. concentration).
nasturtium.m1 <- drm(weight~conc, data=nasturtium, fct = LL.3())
# Lack-of-fit test comparing the model to a one-way ANOVA.
modelFit(nasturtium.m1)
# Plot observations and fitted curve on a linear concentration axis (log = "").
plot(nasturtium.m1, type = "all", log = "", xlab = "Concentration (g/ha)", ylab = "Weight (mg)")
|
6fb868704ab2015e7667412526cb7bee401205dd
|
80ea1c9981469ae22d640de03852c716207544b8
|
/tests/testthat/test-ggseg_atlas.R
|
d65b22359943260e2086bc1dafc662a0bc140fec
|
[
"MIT"
] |
permissive
|
torch0703/ggseg3d
|
477ecd3f205197c43184431a3cf8f23df5a25488
|
df2de6e1249ba2b0d6a4c42fd6e6074cae553cdd
|
refs/heads/master
| 2023-05-09T17:54:05.032460
| 2021-06-01T07:30:20
| 2021-06-01T07:30:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
test-ggseg_atlas.R
|
# Minimal one-region fixture resembling a ggseg3d atlas data frame; the mesh
# column gets a stub list with the mesh3d slot names (it/vb).
tt <- data.frame(atlas = "k",
surf = "white",
hemi = "left",
region = "something",
colour = "#d2d2d2",
stringsAsFactors = FALSE)
tt$mesh[[1]] = list(it=array(0, dim=3),vb=array(0, dim=3))
# as_ggseg3d_atlas() must validate required columns/arguments and return a
# nested tibble with one row per atlas/surf/hemi combination.
test_that("check that ggseg3d_atlas is correct", {
expect_error(as_ggseg3d_atlas(tt[,-1]),
"missing necessary columns")
expect_error(as_ggseg3d_atlas(),
"is missing, with no default")
k <- expect_warning(as_ggseg3d_atlas(tt),
"Unknown columns")
expect_equal(names(k),
c("atlas", "surf", "hemi", "ggseg_3d"))
expect_equal(nrow(k), 1)
})
# A plain data frame must not be recognized as a ggseg3d atlas.
test_that("check that is_ggseg_atlas works", {
expect_false(is_ggseg3d_atlas(tt))
})
|
fd64c51494f8821501482253d5909a1252500670
|
62f84d7157e0e3bfc57cc6d6942ea9205adc4463
|
/man/agdb.checkagdb.Rd
|
2c274bb7459c224283ec4b9422d833d7a6bf7da5
|
[
"MIT"
] |
permissive
|
SamT123/acutilsLite
|
251da4cf955c05a4e52a6b10e59fa2876759ea4a
|
fb36cd0f0786b9a9822ebda76fe4a44538569c4b
|
refs/heads/master
| 2023-03-02T20:52:23.145170
| 2021-02-15T10:03:21
| 2021-02-15T10:03:21
| 315,282,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 513
|
rd
|
agdb.checkagdb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acdatabase_dbtest.R
\name{agdb.checkagdb}
\alias{agdb.checkagdb}
\title{Checks Antigen Database formatting}
\usage{
agdb.checkagdb(agdb)
}
\arguments{
\item{agdb}{list}
}
\value{
bool
}
\description{
Checks that an antigen database follows the formatting rules. For information on database structure, see ?agdb.
}
\details{
The following rules are currently checked:
\enumerate{
\item All entries are antigens (see ?agdb.checkAG)
}
}
|
7c9e77458065ffc3886e639f5a2e869adfc10ec0
|
ec213b23bf4dcba4243ef834235f2b8352c3f500
|
/man/betaregmodel_20220718.Rd
|
7779ca480dc744964b673807fceca9d9e247a524
|
[] |
no_license
|
mccoy-lab/rhapsodi
|
941eaa317f7c5e83a0c15bfbf03c729a389459d6
|
8a5d712b1eb500594ac75428aa8dd94494bf81f3
|
refs/heads/master
| 2023-04-12T15:18:32.125743
| 2022-07-25T21:30:28
| 2022-07-25T21:30:28
| 328,792,330
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 985
|
rd
|
betaregmodel_20220718.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/betaregmodel_20220718-data.R
\docType{data}
\name{betaregmodel_20220718}
\alias{betaregmodel_20220718}
\title{Trained beta regression model for automatic phasing window size calculation}
\format{
an object of class "betareg", i.e., a list with components \code{coefficients}, \code{residuals}, \code{n}, etc.
}
\usage{
betaregmodel_20220718
}
\description{
The trained beta regression model to be used for automatic phasing window size calculation.
The model was trained with an intercept and predictors of number of gametes, coverage,
genotyping error rate, and average recombination rate. Of those,
the number of gametes, coverage, and average recombination rate were significant predictors.
The response variable was the optimal window proportion (i.e., the optimal window size / total number of SNPs).
This trained model can be used to predict new values given all the predictors.
}
\keyword{datasets}
|
87b036f17751b6bed554c0f385eb5a7a127eed4a
|
a37dabee7f85661056732f85e3b718fe785d3716
|
/getKeyFieldsFromTable.R
|
e6b686ecbc7feb6d833d05c2909213af21faf237
|
[] |
no_license
|
Bibhushan/SCO
|
7709ef336714544241555e4873f42248cea4e8df
|
9f9745248b9abba4e4359f12be8becfd1cd605d1
|
refs/heads/master
| 2021-01-17T13:18:54.186126
| 2016-07-05T09:15:02
| 2016-07-05T09:15:02
| 59,553,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
getKeyFieldsFromTable.R
|
# Return the key fields (primary and/or foreign) declared for a table.
#
# tableName: table to look up in the global DataDefinition data frame
#            (NOTE(review): relies on DataDefinition existing in the calling
#            environment rather than being passed in).
# onlyPK:    if TRUE, restrict the result to primary-key fields.
# onlyFK:    if TRUE, restrict the result to foreign-key fields.
# Returns whatever getKeyFields() yields for the table's field names and types.
getKeyFieldsFromTable <- function(tableName, onlyPK = FALSE, onlyFK = FALSE){
  # FIX: defaults spelled TRUE/FALSE instead of T/F -- T and F are ordinary
  # variables that can be reassigned, so using them as defaults is unsafe.
  tableDef <- DataDefinition[DataDefinition$TableName == tableName,]
  getKeyFields(fieldNames = tableDef$FieldName,
               fieldTypes = tableDef$FieldType,
               onlyPK = onlyPK, onlyFK = onlyFK)
}
|
5b9eef804afc78fddf81400252a4ed7f73140cff
|
f61cbba27542ad327fd2a00b0e648c1189956a3d
|
/misc/german_credit_dt/german_credit_dt.R
|
1160a240b147a2cf088920c88bc170799a3e8a30
|
[] |
no_license
|
bpeek/machine_learning
|
d46d3890504e1ba1380922e3dc227ec249a6b08a
|
db52c45bd9669585419028d0e91eb39c481d1fe9
|
refs/heads/master
| 2020-03-10T23:54:00.280200
| 2018-09-24T01:55:03
| 2018-09-24T01:55:03
| 129,650,391
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,509
|
r
|
german_credit_dt.R
|
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> origin/master). Both sides were
# byte-identical, so the markers and the duplicate copy were removed;
# the script would not even parse before this fix.
library(rpart)
library(rpart.plot)
library(plyr)

# Load the German credit data set (raw file has no header row).
german_df <- read.csv("C:/Users/Brendan/Desktop/ML/german_credit_dt/german_credit_data.csv", header = FALSE)
column_names <- c("checking.account.status","duration.in.months","credit.history","purpose","credit.amount","savings.or.bonds","present.employment.since","installment.rate.as.percentage.of.disposable.income","personal.status.and.sex","cosigners","present.resident.since","collateral","age.in.years.","other.installment.plans","housing","number.of.existing.credits.at.this.bank","job","number.of.dependents","telephone","foreign.worker","good.loan")
colnames(german_df) <- column_names

# Convert the int in the good.loan column to "Good"/"Bad" to make it more
# descriptive (1 = good loan, 2 = bad loan in the original coding).
german_df[german_df$good.loan == "1",]$good.loan <- "Good"
german_df[german_df$good.loan == "2",]$good.loan <- "Bad"

# Convert purpose codes to something more descriptive.
# NOTE(review): plyr::revalue() expects a factor/character column — assumes
# read.csv() produced factors here (R < 4.0 default); verify under R >= 4.0.
german_df$purpose <- revalue(german_df$purpose, c("A40" = "new car",
                                                  "A41" = "used car",
                                                  "A42" = "furniture/equipment",
                                                  "A43" = "radio/television",
                                                  "A44" = "domestic appliances",
                                                  "A45" = "repairs",
                                                  "A46" = "education",
                                                  "A48" = "retraining",
                                                  "A49" = "business",
                                                  "A410" = "others"))
# Create more descriptive housing labels.
german_df$housing <- revalue(german_df$housing, c("A151" = "rent",
                                                  "A152" = "own",
                                                  "A153" = "for free"))
# Create more descriptive labels for other installment plans.
german_df$other.installment.plans <- revalue(german_df$other.installment.plans, c("A141" = "bank",
                                                                                  "A142" = "stores",
                                                                                  "A143" = "none"))
View(german_df)

# Fit a classification tree (max depth 4) predicting loan quality.
credit_dt <- rpart(good.loan ~ duration.in.months + purpose + housing + installment.rate.as.percentage.of.disposable.income + credit.amount + other.installment.plans, data = german_df, control = rpart.control(maxdepth = 4), method = "class")
rpart.plot(credit_dt)
|
5d68825a9f8ba7d35d811875603f931adb394dec
|
d26cddf482ef88c4f44f68cd22a34a4dfbe9c12f
|
/tests/testthat/test-packages.R
|
5bf4eaade93fe1153c0401af126f59a1926afcfd
|
[] |
no_license
|
wlandau/grapes
|
dcddd6e21879b5f0ae372dc3bf2312986a3f64eb
|
114f6ca49d77bf8dba6b312a51a1ccf080228112
|
refs/heads/main
| 2021-01-18T18:40:04.175873
| 2017-07-21T21:06:28
| 2017-07-21T21:06:28
| 86,870,564
| 13
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
test-packages.R
|
# library(testthat); devtools::load_all()
context("packages")
# grow() with an explicit function name should attach the package and expose
# the function as a %...% binary operator equivalent to a direct call.
test_that("grow() loads from a package", {
  expect_error(1 %knit_params% 2)
  expect_error(knit_params(1, 2))
  expect_error(ls("package:knitr"))
  grow(knit_params, from = "knitr")
  expect_equal(1 %knit_params% 2, knit_params(1, 2))
  out = ls("package:knitr")
  expect_true(length(out) > 0)
  detach("package:knitr")
})
# grow() with no function should import everything; bunch() lists created
# operators and functions() plain functions, with no overlap between the two.
test_that("grow() can load everything from a package", {
  expect_error(1 %knit_params% 2)
  expect_equal(bunch(), character(0))
  grow(from = "knitr")
  expect_equal(1 %knit_params% 2, knit_params(1, 2))
  expect_true(length(bunch()) > 2)
  envir = as.environment("package:knitr")
  expect_equal(intersect(bunch(), functions()), character(0))
  expect_equal(intersect(bunch(), functions("knitr")), character(0))
  expect_equal(intersect(bunch("knitr"), functions()), character(0))
  expect_equal(intersect(bunch("knitr"), functions("knitr")), character(0))
  # every generated operator is callable from the test environment ...
  for(op in bunch())
    expect_true(is.function(environment()[[op]]))
  # ... and every listed function is present in the attached namespace
  for(f in functions("knitr"))
    expect_true(is.function(envir[[f]]))
  detach("package:knitr")
})
|
05fc0cca0e295f4ec22a71f37a6a896a3fdfd357
|
9641bbaa11404cdffff808528b5957a2b0d184de
|
/day14/day14fdlk.R
|
37052719b7acfdb5a4593ecbbdde6162948d40fc
|
[] |
no_license
|
cortinah/aoc2020
|
02b23b32a76e9faa5e1a987218f6e2ced350a74d
|
72abe7ff1dfeee30863e1a7a39af3580524bb74b
|
refs/heads/main
| 2023-02-03T15:15:05.276293
| 2020-12-14T22:04:59
| 2020-12-14T22:04:59
| 317,453,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,963
|
r
|
day14fdlk.R
|
library(tidyverse)
# Advent of Code 2020, day 14: read the puzzle input one row per line and
# split each line into either a 36-char bitmask or a mem[address] = value pair
# (the non-matching column of each row is left NA).
input <- tibble(line = readLines("input-2020-14")) %>%
  extract(line, "mask", "mask = ([01X]{36})", remove = FALSE) %>%
  extract(line, c("address", "value"), "mem\\[(\\d+)\\] = (\\d+)", convert = TRUE)
input
# Part 1
# Big-endian 36-bit representation of a non-negative number as a logical
# vector: element 36 is the least-significant bit (inverse of b2d()).
i2b <- function(value) {
  if (value <= 0) {
    return(logical(36))
  }
  # Vectorised over the 36 bit positions instead of a %% / %/% loop.
  ((value %/% 2^(35:0)) %% 2) == 1
}
# Decimal value of a 36-bit big-endian logical vector (inverse of i2b()).
# An all-FALSE input yields 0 (sum over an empty set of powers of two).
b2d <- function(bits) {
  sum(2^(36 - which(bits)))
}
# Part-1 mask: '1'/'0' in the mask force the bit, 'X' keeps the value's bit.
apply_mask <- function(value, mask) {
  bits <- i2b(value)
  mask <- unlist(str_split(mask, ""))
  masked_bits <-
    map_lgl(1:36, function(index) {
      switch(mask[index],
        "1" = TRUE,
        "0" = FALSE,
        "X" = bits[index]
      )
    })
  b2d(masked_bits)
}
# Part 1: replay the program, remembering the most recent mask.
# NOTE(review): `mem` is indexed directly by address, so a large address
# grows the vector to that length (NA-filled) — hence na.rm below.
mem <- double()
for (i in 1:nrow(input)) {
  if (!is.na(input$mask[[i]])) {
    mask <- input$mask[[i]]
  } else {
    mem[input$address[[i]]] <- apply_mask(input$value[[i]], mask)
  }
}
# Print enough digits to show the full (possibly > 2^31) answer.
options(digits = 22)
sum(mem, na.rm = TRUE)
# Part 2
library(hash)
# Recursively expand every 'X' into both '0' and '1'; returns the decimal
# value(s) of all fully-resolved masks.
expand_masks <- function(mask) {
  if (str_detect(mask, "X")) {
    c(
      expand_masks(str_replace(mask, "X", "0")),
      expand_masks(str_replace(mask, "X", "1"))
    )
  } else {
    b2d(unlist(str_split(mask, "")) == "1")
  }
}
# Part-2 mask semantics: '0' keeps the address bit, '1' forces 1,
# 'X' stays floating and is expanded to all combinations.
apply_mask_v2 <- function(value, mask) {
  bits <- i2b(value)
  mask <- unlist(str_split(mask, ""))
  applied <- map_chr(1:36, function(index) {
    switch(mask[index],
      "1" = "1",
      "0" = if (bits[[index]]) "1" else "0",
      "X" = "X"
    )
  })
  expand_masks(str_c(applied, collapse = ""))
}
# Worked example from the puzzle statement (should yield 4 addresses).
apply_mask_v2(42, "000000000000000000000000000000X1001X")
# Part 2: a hash is used because decoded addresses can exceed vector limits.
mem <- hash()
for (i in 1:nrow(input)) {
  if (!is.na(input$mask[[i]])) {
    mask <- input$mask[[i]]
  } else {
    addresses <- apply_mask_v2(input$address[[i]], mask)
    for (j in 1:length(addresses)) {
      mem[addresses[[j]]] <- input$value[[i]]
    }
  }
}
sum(values(mem))
|
477458a377e59268a1ee27753639bb2e56bcdc30
|
f917af767b05506cd2b1d8fa2b52c1e7c326f789
|
/tests/testthat/test_zones.R
|
75a0223014bc3be79b3f7eed3df5e325615d2a57
|
[] |
no_license
|
cran/scanstatistics
|
c9107d67234afc79d34adca77cef50307bea83e3
|
3e0078f362e7db49396f667bb865744b79f6fb11
|
refs/heads/master
| 2023-02-08T10:53:25.879892
| 2023-01-26T11:40:02
| 2023-01-26T11:40:02
| 69,553,546
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,146
|
r
|
test_zones.R
|
context("zone-creating functions")
# dist_to_knn(): given a distance matrix, each row should list locations in
# increasing order of distance, starting with the location itself.
test_that("dist_to_knn: returns correct order", {
  coords <- matrix(c(c(0, 0),
                     c(1, 0),
                     c(4, 0),
                     c(1, 2),
                     c(-0.5, 2)),
                   ncol = 2, byrow = TRUE)
  m <- as.matrix(dist(coords, diag = T, upper = T))
  true_nns <- matrix(c(c(1, 2, 5, 4, 3),
                       c(2, 1, 4, 5, 3),
                       c(3, 2, 4, 1, 5),
                       c(4, 5, 2, 1, 3),
                       c(5, 4, 1, 2, 3)),
                     ncol = 5, byrow = TRUE)
  expect_equal(unname(dist_to_knn(m)), true_nns)
})
# coords_to_knn(): same expectation as above but starting from coordinates.
test_that("k_nearest_neighbors: returns correct order", {
  coords <- matrix(c(c(0, 0),
                     c(1, 0),
                     c(4, 0),
                     c(1, 2),
                     c(-0.5, 2)),
                   ncol = 2, byrow = TRUE)
  true_nns <- matrix(c(c(1, 2, 5, 4, 3),
                       c(2, 1, 4, 5, 3),
                       c(3, 2, 4, 1, 5),
                       c(4, 5, 2, 1, 3),
                       c(5, 4, 1, 2, 3)),
                     ncol = 5, byrow = TRUE)
  expect_equal(unname(coords_to_knn(coords)), true_nns)
})
# test_that("k_nearest_neighbors: returns correct order", {
#   x <- matrix(c(c(-0.70, 1.01, 1.13, -0.11),
#                 c(-0.18, 0.82, 0.81, -0.76),
#                 c(-0.14, -0.21, 0.33, -0.35),
#                 c(0.28, 0.65, 1.02, 0.35),
#                 c(0.40, 0.18, -0.59, 0.79)),
#               ncol = 4, byrow = TRUE)
#
#   nn <- matrix(c(c(1, 2, 4, 3, 5),
#                  c(2, 1, 3, 4, 5),
#                  c(3, 2, 4, 1, 5),
#                  c(4, 1, 2, 3, 5),
#                  c(5, 3, 4, 2, 1)),
#                ncol = 5, byrow = TRUE)
#
#   expect_equal(unname(k_nearest_neighbors(x)), nn)
# })
# closest_subsets(): the nested prefixes {1}, {1,2}, ..., {1..n} of a vector.
test_that("closest_subsets: returns correct sets", {
  expres <- lapply(sets::set(sets::as.set(1L),
                             sets::as.set(1:2),
                             sets::as.set(1:3),
                             sets::as.set(1:4)), as.integer)
  expect_equal(closest_subsets(1:4), expres)
})
# knn_zones(): zones built from the 2 nearest neighbours of each location;
# order within a zone must not matter, hence the conversion to sets.
test_that("knn_zones: returns correct sets", {
  nn <- matrix(c(c(1L, 2L, 4L, 3L, 5L),
                 c(2L, 1L, 3L, 4L, 5L),
                 c(3L, 2L, 4L, 1L, 5L),
                 c(4L, 1L, 2L, 3L, 5L),
                 c(5L, 3L, 4L, 2L, 1L)),
               ncol = 5, byrow = TRUE)
  zones <- sets::set(sets::as.set(1L),
                     sets::as.set(2L),
                     sets::as.set(3L),
                     sets::as.set(4L),
                     sets::as.set(5L),
                     sets::as.set(c(1L, 2L)),
                     sets::as.set(c(3L, 2L)),
                     sets::as.set(c(4L, 1L)),
                     sets::as.set(c(5L, 3L)))
  res <- knn_zones(nn[, 1:2])
  expect_length(res, length(zones))
  res <- sets::as.set(lapply(res, sets::as.set))
  expect_equal(res, zones)
})
# Flexible zone shape (Tango 2005) -------------------------------------------
# flexible_zones(): like knn_zones() but each zone must also be connected
# according to the adjacency matrix A.
test_that("flexible_zones: works", {
  A <- matrix(c(0,1,0,0,0,0,
                1,0,1,0,0,0,
                0,1,0,0,0,0,
                0,0,0,0,1,0,
                0,0,0,1,0,0,
                0,0,0,0,0,0),
              nrow = 6, byrow = TRUE) == 1
  kn <- matrix(as.integer(
              c(1,2,3,4,5,6,
                2,1,3,4,5,6,
                3,2,1,4,5,6,
                4,5,1,6,3,2,
                5,4,6,1,3,2,
                6,5,4,1,3,2)),
              nrow = 6, byrow = TRUE)
  zones <- sets::set(sets::set(1L),
                     sets::set(2L),
                     sets::set(3L),
                     sets::set(4L),
                     sets::set(5L),
                     sets::set(6L),
                     sets::set(1L, 2L),
                     sets::set(2L, 3L),
                     sets::set(4L, 5L),
                     sets::set(1L, 2L, 3L))
  res <- flexible_zones(kn, A)
  expect_length(res, length(zones))
  res <- sets::as.set(lapply(res, sets::as.set))
  expect_equal(res, zones)
})
# connected_neighbors(): all connected subsets that contain the first element
# of the supplied neighbour ordering.
test_that("connected_neighbors: works", {
  A <- matrix(c(0,1,0,0,0,0,
                1,0,1,0,0,0,
                0,1,0,0,0,0,
                0,0,0,0,1,0,
                0,0,0,1,0,0,
                0,0,0,0,0,0),
              nrow = 6, byrow = TRUE)
  A <- A == 1
  expect_equal(connected_neighbors(1:6, A),
               sets::set(sets::set(1L),
                         sets::set(1L, 2L),
                         sets::set(1L, 2L, 3L)))
  expect_equal(connected_neighbors(c(2:6, 1L), A),
               sets::set(sets::set(2L),
                         sets::set(1L, 2L),
                         sets::set(2L, 3L),
                         sets::set(1L, 2L, 3L)))
  expect_equal(connected_neighbors(c(3:6, 1:2), A),
               sets::set(sets::set(3L),
                         sets::set(2L, 3L),
                         sets::set(1L, 2L, 3L)))
  expect_equal(connected_neighbors(c(4:6, 1:3), A),
               sets::set(sets::set(4L),
                         sets::set(4L, 5L)))
  expect_equal(connected_neighbors(c(5:6, 1:4), A),
               sets::set(sets::set(5L),
                         sets::set(4L, 5L)))
  expect_equal(connected_neighbors(c(6L, 1:5), A),
               sets::set(sets::set(6L)))
})
# if_connected(): returns the union set when the candidate is connected to
# the zone, and the empty set otherwise.
test_that("if_connected: works", {
  A <- matrix(c(0,1,0,0,0,
                1,0,1,0,0,
                0,1,0,0,0,
                0,0,0,0,1,
                0,0,0,1,0),
              nrow = 5, byrow = TRUE)
  A <- A == 1
  expect_equal(if_connected(sets::set(2L), 1L, A), sets::set(1L, 2L))
  expect_equal(if_connected(sets::set(2L, 3L), 1L, A), sets::set(1L, 2L, 3L))
  expect_equal(if_connected(sets::set(4L), 1L, A), sets::set())
  expect_equal(if_connected(sets::set(2L, 4L), 1L, A), sets::set())
})
# is_connected(): boolean version of the check above.
test_that("is_connected: works", {
  A <- matrix(c(0,1,0,0,0,
                1,0,1,0,0,
                0,1,0,0,0,
                0,0,0,0,1,
                0,0,0,1,0),
              nrow = 5, byrow = TRUE)
  A <- A == 1
  expect_true(is_connected(sets::set(2L), 1L, A))
  expect_true(is_connected(sets::set(2L, 3L), 1L, A))
  expect_false(is_connected(sets::set(4L), 1L, A))
  expect_false(is_connected(sets::set(2L, 4L), 1L, A))
})
# connected_to(): the subset of z1 reachable from z0 via the adjacency A.
test_that("connected_to: works", {
  A <- matrix(c(0,1,0,0,0,
                1,0,1,0,0,
                0,1,0,0,0,
                0,0,0,0,1,
                0,0,0,1,0),
              nrow = 5, byrow = TRUE)
  A <- A == 1
  z0a <- sets::as.set(1L)
  z1a <- sets::as.set(2L)
  actual_a <- connected_to(z0a, z1a, A)
  z0b <- sets::as.set(1L)
  z1b <- sets::set(4L, 5L)
  actual_b <- connected_to(z0b, z1b, A)
  z0c <- sets::as.set(2L)
  z1c <- sets::set(1L, 3L)
  actual_c <- connected_to(z0c, z1c, A)
  expect_equal(actual_a, sets::set(2L))
  expect_equal(actual_b, sets::set())
  expect_equal(actual_c, sets::set(1L, 3L))
})
|
7468afbbd3ce6647916eba6774599f37c04d1d89
|
68c1218bddaf4faaacdd4fca56af2ecca9d66ed7
|
/global.R
|
2ad8074574a212ea7fd9c2cccb483451fad8ec93
|
[] |
no_license
|
STAT360/finalproject-minniemap
|
698c9918009786dfd863c3c24bbe419d6b7bd4d2
|
53143c5e0d433781f121c20365f58fa44e48c6a6
|
refs/heads/master
| 2020-05-04T13:00:54.104019
| 2019-05-21T20:02:51
| 2019-05-21T20:02:51
| 179,144,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
global.R
|
# Shiny global.R: objects created here are shared by ui.R and server.R.
library(geosphere)
library(googleway)
library(leaflet)
library(shiny)
library(shinyalert)
library(shinydashboard)
library(shinyjs)
library(tidyverse)
library(tidyr)
#Load in the functions we created for this app
source("functions/build_routes.R")
source("functions/search_locations.R")
#Load in the data for 8 (most used) existing Metro Transit routes
# NOTE(review): the comment says 8 routes but 9 files are loaded below
# (route6 included) — confirm which count is intended.
greenline <- read.csv("data/greenline.csv")
blueline <- read.csv("data/blueline.csv")
nsline <- read.csv("data/nsline.csv")
aline <- read.csv("data/aline.csv")
redline <- read.csv("data/redline.csv")
route5 <- read.csv("data/route5.csv")
route21 <- read.csv("data/route21.csv")
route18 <- read.csv("data/route18.csv")
route6 <- read.csv("data/route6.csv")
#Combine the individual routes into a list
mtroutes <- list(greenline, blueline, nsline, aline, redline, route5, route21, route18, route6)
#Initialize some other things. 'location_points' will be used in different sessions of the app,
#and it's important whether it's NULL or not. 'colorpal' puts together a color palette for maps
location_points <- NULL
colorpal <- c("red", "blue", "lime", "cyan", "magenta", "darkorange", "olive", "maroon")
#Some custom CSS for the appearance of the app (injected by the UI)
custom_css <- "
.main-header .logo {
font-family: 'Avenir Next' !important;
font-weight: bold;
font-size: 28px;
color: white;
background-color: rgba(58,128,167);
}
#maptitle {
font-weight: bold;
font-size: 30px;
letter-spacing: -1px;
}
#map {
height: calc(100vh - 80px) !important;
}
#searched_for {
color: rgba(58,128,167);
}
#locations_found {
color: rgba(58,128,167);
}
.irs-grid-pol.small {
height: 0px;
}
"
|
15baea8e492b7216519e8717d27e49e0081d9928
|
0fbe97c46ed2eb453f468ee9369445ef6c44771f
|
/r_dependencies.R
|
25fbc05bcc0ef53fe416982bf524220021cd69e5
|
[] |
no_license
|
SeanTomlinson30/bio-pipeline-dependencies
|
1eb8eca300c2605fa333d9a6aa0ff8a8837792a7
|
64c35634f026cd745f7cfb1370bf71b7d13516f6
|
refs/heads/master
| 2020-06-20T17:50:30.172599
| 2019-07-26T08:24:02
| 2019-07-26T08:24:02
| 197,198,810
| 0
| 0
| null | 2019-07-25T10:09:53
| 2019-07-16T13:24:57
|
Makefile
|
UTF-8
|
R
| false
| false
| 1,205
|
r
|
r_dependencies.R
|
# Restrict installation to the first library path only, then report it.
.libPaths(.libPaths()[1])
print(sprintf('Installing packages into %s', .libPaths()))
# TRUE when `mypkg` is already installed in any library on .libPaths().
is.installed <- function(mypkg) {
  mypkg %in% rownames(installed.packages())
}
# NOTE(review): biocLite.R has been retired upstream — Bioconductor >= 3.8
# uses BiocManager::install() instead; this source() call may fail today.
suppressMessages(source("https://bioconductor.org/biocLite.R"))
# Install a Bioconductor package unless it is already present.
bioc_package <- function(pkgname) {
  if (is.installed(pkgname)) {
    print(sprintf('%s already installed.', pkgname))
  } else {
    biocLite(pkgname)
  }
}
# Install a CRAN package unless it is already present.
cran_package <- function(pkgname) {
  if (is.installed(pkgname)) {
    print(sprintf('%s already installed.', pkgname))
  } else {
    install.packages(pkgname, repos='http://cran.us.r-project.org')
  }
}
# Install a package from a local source directory (via devtools) unless
# it is already present.
custom_package <- function(pkgname, pkgpath) {
  if (is.installed(pkgname)) {
    print(sprintf('%s already installed', pkgname))
  } else {
    library(devtools)
    install(pkgpath)
  }
}
# Pipeline dependencies:
bioc_package('copynumber')
bioc_package('signeR')
cran_package('nnls')
cran_package('dbscan')
cran_package('cowplot')
cran_package('deconstructSigs')
cran_package('mutSignatures')
cran_package('cancerTiming')
cran_package('rjags')
cran_package('coda')
cran_package('R.matlab')
cran_package('GenSA')
cran_package('rmutil')
custom_package('hrdtools', 'packages/hrdtools')
|
0f7e0ae080b8cacac66f2c60c2e02314fac34603
|
206c80dc11a2264d31f0b4d75bcddb37ab5576a1
|
/R/lbdnd_package.R
|
0ef80d17bb490f245f3b50a2508aee911e0e7b3e
|
[] |
no_license
|
lbraglia/lbdnd
|
0e54f47de4d9cb3e147e72a9ee43534c75007ed7
|
2c46ecb8edb18de6e899573f5fc4d690f4150032
|
refs/heads/master
| 2021-07-16T00:09:58.390914
| 2021-06-25T09:40:26
| 2021-06-25T09:40:26
| 207,334,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
lbdnd_package.R
|
#' lbdnd: An R package for dnd
#'
#' Package-level documentation stub; see the individual function help
#' pages for details on the exported functionality.
#'
#' @name lbdnd
#' @docType package
NULL
|
b7470158d901433079d98e81aeab94758a02a547
|
54546f9cb5c136e2fb16484a46771551af0f21cf
|
/postprocessing_scripts/print_split_enh_out.R
|
913ce76ad061509aa63da094080540032f184a7f
|
[] |
no_license
|
cboix/EPIMAP_ANALYSIS
|
bb409d32cd8bdb69393fc8522356e365c4cb4246
|
de98ff96695941056b28323ec138465a8852659c
|
refs/heads/master
| 2022-09-15T00:37:00.487847
| 2022-08-22T20:00:44
| 2022-08-22T20:00:44
| 218,806,299
| 29
| 13
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,153
|
r
|
print_split_enh_out.R
|
#!/usr/bin/R
# -----------------------------------------------------
# Write BED files of enhancer and promoter locations:
# Updated 05/11/21
# -----------------------------------------------------
library(rhdf5)

# Read in metadata:
metadata <- read.delim('../public_metadata_released/main_metadata_table.tsv', header = TRUE)
mmap <- read.delim('mnemonic_mapping.tsv', header = FALSE)
names(mmap) <- c('id','mn')
rownames(mmap) <- mmap$id

# Load DHS locations and the 1-indexed enhancer/promoter region indices:
dhsdf <- read.delim('masterlist_DHSs_733samples_WM20180608_all_coords_hg19.core.srt.txt', header = FALSE)
eind <- as.numeric(scan('ENH_masterlist_indices_0indexed.tsv', 'c')) + 1
pind <- as.numeric(scan('PROM_masterlist_indices_0indexed.tsv', 'c')) + 1
names(dhsdf) <- c('chr','start','end','name')

#' Slice an HDF5 sample-by-region matrix in chunks of `chunksize` samples,
#' restrict each sample's hit regions to `keep_ind`, and write one gzipped
#' BED-like table per sample into `out_dir` (file name: <id>_<mn>_<suffix>).
#' The enhancer and promoter passes below were previously two near-identical
#' copy-pasted loops; this helper removes the duplication.
write_region_lists <- function(sample_names, hd_file, out_dir, keep_ind,
                               suffix, chunksize = 25) {
    system(paste0('mkdir -p ', out_dir))
    # ceiling() fixes an off-by-one in the original chunk count:
    # (n %/% chunksize) + 1 produced an extra, out-of-bounds chunk whenever
    # n was an exact multiple of chunksize.
    nchunk <- ceiling(length(sample_names) / chunksize)
    for (i in seq_len(nchunk)) {
        cat(paste('Chunk', i, 'of', nchunk, '\n'))
        ind <- (chunksize * (i - 1) + 1):min(i * chunksize, length(sample_names))
        # Slice the hdf5 matrix for this chunk of samples:
        h5f <- H5Fopen(hd_file)
        h5d <- h5f & "matrix"
        mat <- h5d[ind, ]
        H5Dclose(h5d)
        H5Fclose(h5f)
        rownames(mat) <- sample_names[ind]
        mat <- t(mat)
        print(colSums(mat))
        # Write each sample out:
        for (j in seq_len(ncol(mat))) {
            cat('.')
            id <- colnames(mat)[j]
            mn <- as.character(mmap[id, 'mn'])
            # Regions present in this sample, restricted to keep_ind:
            hits <- which(mat[, id] != 0)
            hits <- hits[hits %in% keep_ind]
            df <- dhsdf[hits, ]
            write.table(df, gzfile(paste0(out_dir, id, '_', mn, '_', suffix)),
                        quote = FALSE, sep = "\t", row.names = FALSE)
        }
        cat('\n')
    }
}

# ----------------------------------------
# Make all of the calls for the enhancers:
# ----------------------------------------
enames <- scan('Enhancer_H3K27ac_intersect_matrix.names.tsv', 'c')
h5ls('Enhancer_H3K27ac_intersect_matrix.hdf5')
write_region_lists(enames, 'Enhancer_H3K27ac_intersect_matrix.hdf5',
                   'enhancers_bysample/', eind, 'hg19_enhancer_list.bed.gz')

# ----------------------------------------
# Make all of the calls for the promoters:
# ----------------------------------------
pnames <- scan('Promoter_H3K27ac_intersect_matrix.names.tsv', 'c')
h5ls('Promoter_H3K27ac_intersect_matrix.hdf5')
write_region_lists(pnames, 'Promoter_H3K27ac_intersect_matrix.hdf5',
                   'promoters_bysample/', pind, 'hg19_promoter_list.bed.gz')
|
25dd636999dd7f5bccecda831406b5ac80c9c018
|
5c873ac7d8116ed4045ab1b2b298a6c3cfe3fd48
|
/Matriz Origen - Destino/od-network.R
|
fb7541c2021cea8d16f332f004f2de6bfcd00a50
|
[] |
no_license
|
LeonardoCordoba/SSTYT
|
6c10134c415e60f3760c51f7a52ad53a3c74d21c
|
eecbc017ce8d7bb1a369c6d2f342778a5fdcbb0e
|
refs/heads/master
| 2021-01-22T09:27:18.285389
| 2017-06-23T14:46:00
| 2017-06-23T14:46:00
| 81,960,213
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,449
|
r
|
od-network.R
|
## Origin-destination (OD) matrix analysis
# NOTE(review): rm(list=ls()) wipes the user's workspace — avoid in scripts.
rm(list=ls())
library(RPostgreSQL) # for the database connection
library(spatstat)
library(sp)
library(rgdal) # for setting projections
library(postGIStools)
library(ggmap)
library(geomnet)
# NOTE(review): database password (and host IP below) are hard-coded in the
# source — move credentials to environment variables or a config file.
pw <- {
  "postgres"
}
# loads the PostgreSQL driver
drv <- dbDriver("PostgreSQL")
# creates a connection to the postgres database
# note that "con" will be used later in each connection to the database
con <- dbConnect(
  drv,
  dbname = "sube",
  host = "10.78.14.54",
  port = 5432,
  user = "postgres",
  password = pw
)
# OD trip legs and the two zonifications (with centroid geometries):
matriz_od_zona <- dbGetQuery(con, "select * from matriz_od.a2016_05_04_etapas")
zonificacion <- get_postgis_query(con, "select * from matriz_od.zonas", geom_name = 'centroid' )
zonificacion_mtb <- get_postgis_query(con, "select * from informacion_geografica.zonificacion_para_mtb_transversal", geom_name = 'centroid' )
# Extract centroid coordinates and flatten to plain data frames:
zonificacion$long <- coordinates(zonificacion)[,1]
zonificacion$lat <- coordinates(zonificacion)[,2]
zonificacion <- as.data.frame(zonificacion)
zonificacion_mtb$long <- coordinates(zonificacion_mtb)[,1]
zonificacion_mtb$lat <- coordinates(zonificacion_mtb)[,2]
zonificacion_mtb <- as.data.frame(zonificacion_mtb)
zona <- unique(zonificacion$id)
zona_mtb <- unique(zonificacion_mtb$id)
# NOTE(review): mixing which() (integer positions) with a logical vector via
# `|` on the next line looks unintended — the left operand should probably be
# the bare logical `matriz_od_zona$id_zona %in% zona_mtb`; verify.
matriz_od_zona <- matriz_od_zona[which(matriz_od_zona$id_zona %in% zona_mtb) | (matriz_od_zona$id_zona_destino_etapa %in% zona_mtb), ]
# NOTE(review): initialising with NA and then rbind-ing leaves an all-NA
# first row in matriz_od_zona_grafo — confirm this is intended.
matriz_od_zona_grafo <- NA
matriz_od_zona_grafo <- rbind(matriz_od_zona_grafo, matriz_od_zona[matriz_od_zona$id_zona %in% zona_mtb & matriz_od_zona$id_zona_destino_etapa %in% zona,])
matriz_od_zona_grafo <- rbind(matriz_od_zona_grafo, matriz_od_zona[matriz_od_zona$id_zona_destino_etapa %in% zona_mtb & matriz_od_zona$id_zona %in% zona,])
# Build the network edge list (only legs with > 100 transactions) joined to
# the zone centroids for plotting:
tripnet <- fortify(as.edgedf(matriz_od_zona_grafo[matriz_od_zona_grafo$q_trx > 100,c(2,3,4,8)]), zonificacion[,c(1,16,15)])
quantile(tripnet$q_trx, na.rm =TRUE)
# Base map centred on Buenos Aires (second call overrides the first):
map <- get_map(location = c("Balvanera, CABA, Argentina"), zoom = 12)
map <- get_map(location = c("long" = -58.416212, "lat" = -34.631330), zoom = 12, maptype = 'terrain')
# Draw the OD network over the map, edge width scaled by transactions:
ggmap(map) + geom_net(
  data = tripnet,
  layout.alg = NULL,
  singletons = FALSE,
  labelon = FALSE,
  vjust = -0.5,
  ealpha = 0.5,
  aes(
    from_id = from_id,
    to_id = to_id,
    x = long ,
    y = lat,
    linewidth = q_trx/1000)
) + scale_color_brewer() + theme_net() %+replace% theme(aspect.ratio =NULL, legend.position = "bottom") + coord_map()
|
a9f3606ec5ffd8a7ad1e7564a37616a858d501ed
|
53dd7a212c5caeb7280454c1e92af526c0b3ae3a
|
/.github/workflows/functions.R
|
fba2ce852a1d3975c70746b961f525e5ffaaa244
|
[] |
no_license
|
LiesaSalzer/MetClassNet_MetNet.R
|
32cc60d1882cd513422bd2841a2ff35e936a1a53
|
8f2acc2aed8a99aeafd52e14ac95827c3b2bdbba
|
refs/heads/master
| 2022-09-15T21:17:35.486065
| 2020-06-05T12:02:38
| 2020-06-05T12:02:38
| 269,279,227
| 0
| 0
| null | 2020-06-05T12:02:40
| 2020-06-04T06:33:09
|
R
|
UTF-8
|
R
| false
| false
| 27,922
|
r
|
functions.R
|
library(tidyverse)
library(MetNet)
library(igraph)
library(reshape2)
library(Hmisc)
# NOTE(review): hard-coded setwd() to a personal iCloud path makes this
# script non-portable; prefer project-relative paths.
setwd("/Users/Liesa4/Library/Mobile Documents/com~apple~CloudDocs/Promotion/R/MetClassNet/MetNet")
#' Changes to MetNet:
#' structural() has an additional list entry: a matrix containing the mass
#' values of the respective transformation matches.
#'
#' @param x data.frame-like object with a numeric column "mz" (feature m/z).
#' @param transformation data.frame with columns "group" (label) and
#'   "mass" (mass difference of the transformation).
#' @param ppm numeric, mass accuracy used to build the match window.
#' @param directed logical; if FALSE, matches are considered symmetrically.
#' @return list of three matrices: adjacency (0/1), transformation group
#'   labels, and transformation masses ("/"-separated on multiple matches).
structural <- function(x, transformation, ppm = 5, directed = FALSE) {
    if (!is.data.frame(transformation))
        stop("transformation is not a data.frame")
    if (!"group" %in% colnames(transformation))
        stop("transformation does not contain the column group")
    if (!"mass" %in% colnames(transformation))
        stop("transformation does not contain the column mass")
    if (!"mz" %in% colnames(x)) stop("x does not contain the column mz")
    if (!is.numeric(ppm)) stop("ppm is not numeric")
    mass <- x[, "mz"]
    mat <- matrix(0, nrow = length(mass), ncol = length(mass))
    rownames(mat) <- colnames(mat) <- mass
    ## create matrix which has rownames per row
    mat <- apply(mat, 1, function(x) as.numeric(mass))
    ## calculate ppm deviation
    mat_1 <- mat / abs(ppm / 10 ^ 6 + 1)
    mat_2 <- mat / abs(ppm / 10 ^ 6 - 1)
    ## calculate difference between rownames and colnames
    ## (difference between features)
    mat_1 <- mat - t(mat_1) ## max
    mat_2 <- mat - t(mat_2) ## min
    if (!directed) {
        ## undirected case: use absolute differences so that the direction
        ## of the transformation does not matter
        mat_1_abs <- abs(mat_1)
        mat_2_abs <- abs(mat_2)
        mat_1 <- ifelse(mat_1_abs <= mat_2_abs, mat_2_abs, mat_1_abs) ## max
        mat_2 <- ifelse(mat_1_abs > mat_2_abs, mat_2_abs, mat_1_abs) ## min
    }
    ## create three matrices to store result (additional to MetNet: mat_mass)
    mat <- matrix(0, nrow = length(mass), ncol = length(mass))
    mat_type <- matrix("", nrow = length(mass), ncol = length(mass))
    mat_mass <- matrix("", nrow = length(mass), ncol = length(mass))
    ## iterate through each column and check if the "mass" is in the interval
    ## defined by the m/z value and ppm
    for (i in seq_along(transformation[, "mass"])) {
        transformation_i <- transformation[i, ]
        ind_mat_1 <- which(mat_1 >= transformation_i[["mass"]])
        ind_mat_2 <- which(mat_2 <= transformation_i[["mass"]])
        ## get intersect from the two (indices where "mass" is in the interval)
        ind_hit <- intersect(ind_mat_1, ind_mat_2)
        ## write to these indices 1 and the "group"
        mat[ind_hit] <- 1
        mat_type[ind_hit] <- ifelse(nchar(mat_type[ind_hit]) != 0,
            yes = paste(mat_type[ind_hit], transformation_i[["group"]],
                sep = "/"),
            no = as.character(transformation_i[["group"]]))
        ## additional to MetNet:
        ## write to these indices 1 and the "mass"
        mat_mass[ind_hit] <- ifelse(nchar(mat_mass[ind_hit]) != 0,
            yes = paste(mat_mass[ind_hit], transformation_i[["mass"]],
                sep = "/"),
            no = as.numeric(transformation_i[["mass"]]))
    }
    rownames(mat) <- colnames(mat) <- rownames(x)
    rownames(mat_type) <- colnames(mat_type) <- rownames(x)
    rownames(mat_mass) <- colnames(mat_mass) <- rownames(x) #additional to MetNet
    return(list(mat, mat_type, mat_mass))
}
#' Changes to MetNet:
#' Additional argument `p`; the default FALSE reproduces the original MetNet
#' behaviour. If `p = TRUE` and the model contains "pearson"/"spearman", the
#' output for those models is a list holding the correlation values
#' (INCLUDING positive and negative values) and the corresponding p-values
#' (computed by correlation_p()).
#'
#' @param x numeric matrix (features x samples).
#' @param model character vector of adjacency models to compute.
#' @param p logical, see above; only affects "pearson"/"spearman".
#' @param ... forwarded to the individual model functions.
#' @return named list with one entry per requested model.
statistical <- function(x, model, p = FALSE, ...) {
    ## check if model complies with the implemented models and return error
    ## if not so
    if (!(all(model %in% c("lasso", "randomForest", "clr", "aracne",
        "pearson", "pearson_partial", "pearson_semipartial",
        "spearman", "spearman_partial", "spearman_semipartial", "bayes"))))
        stop("'model' not implemented in statistical")
    ## check if x is numeric matrix and return error if not so
    if (mode(x) != "numeric") stop("x is not a numerical matrix")
    ## z-scale x and transpose
    x_z <- apply(x, 1, function(y) {
        (y - mean(y, na.rm = TRUE)) / sd(y, na.rm = TRUE)
    })
    x_z <- t(x_z)
    l <- list()
    ## add entry for lasso if "lasso" is in model
    if ("lasso" %in% model) {
        lasso <- lasso(x = x_z, ...)
        diag(lasso) <- NaN
        l <- addToList(l, "lasso", lasso)
        print("lasso finished")
    }
    ## add entry for randomForest if "randomForest" is in model
    if ("randomForest" %in% model) {
        randomForest <- randomForest(x = x, ...)
        diag(randomForest) <- NaN
        l <- addToList(l, "randomForest", randomForest)
        print("randomForest finished.")
    }
    ## calculate mutual information if "clr" or "aracne" is in model
    if (any(c("clr", "aracne") %in% model)) {
        mi_x_z <- mpmi::cmi(t(x_z))$bcmi
        rownames(mi_x_z) <- colnames(mi_x_z) <- rownames(x)
    }
    ## add entry for clr if "clr" is in model
    if ("clr" %in% model) {
        clr <- threeDotsCall("clr", mi = mi_x_z, ...)
        diag(clr) <- NaN
        l <- addToList(l, "clr", clr)
        print("clr finished.")
    }
    ## add entry for aracne if "aracne" is in model
    if ("aracne" %in% model) {
        aracne <- threeDotsCall("aracne", mi = mi_x_z, ...)
        diag(aracne) <- NaN
        l <- addToList(l, "aracne", aracne)
        print("aracne finished.")
    }
    ## add entry for pearson if "pearson" is in model
    ## `p = FALSE` is the default and corresponds to the original MetNet
    ## behaviour. NOTE: scalar conditions use `&&` (was `&`).
    if ("pearson" %in% model && FALSE %in% p) {
        pearson <- threeDotsCall("correlation", x = x, type = "pearson", ...)
        diag(pearson) <- NaN
        l <- addToList(l, "pearson", pearson)
        print("pearson finished.")
    }
    ## add entry for pearson_partial if "pearson_partial" is in model
    if ("pearson_partial" %in% model) {
        pearson_partial <- threeDotsCall("correlation", x = x,
            type = "pearson_partial", ...)
        diag(pearson_partial) <- NaN
        l <- addToList(l, "pearson_partial", pearson_partial)
        print("pearson_partial finished.")
    }
    ## add entry for pearson_semipartial if "pearson_semipartial" is in model
    if ("pearson_semipartial" %in% model) {
        pearson_sp <- threeDotsCall("correlation", x = x,
            type = "pearson_semipartial", ...)
        diag(pearson_sp) <- NaN
        l <- addToList(l, "pearson_semipartial", pearson_sp)
        print("pearson_semipartial finished.")
    }
    ## add entry for spearman if "spearman" is in model
    ## `p = FALSE` is the default and corresponds to the original MetNet
    ## behaviour
    if ("spearman" %in% model && FALSE %in% p) {
        spearman <- threeDotsCall("correlation", x = x, type = "spearman", ...)
        diag(spearman) <- NaN
        l <- addToList(l, "spearman", spearman)
        print("spearman finished.")
    }
    ## add entry for spearman_partial if "spearman_partial" is in model
    if ("spearman_partial" %in% model) {
        spearman_partial <- threeDotsCall("correlation", x = x,
            type = "spearman_partial", ...)
        diag(spearman_partial) <- NaN
        l <- addToList(l, "spearman_partial", spearman_partial)
        print("spearman_partial finished.")
    }
    ## add entry for spearman_semipartial if "spearman_semipartial" is in model
    if ("spearman_semipartial" %in% model) {
        spearman_sp <- threeDotsCall("correlation", x = x,
            type = "spearman_semipartial", ...)
        diag(spearman_sp) <- NaN
        l <- addToList(l, "spearman_semipartial", spearman_sp)
        print("spearman_semipartial finished.")
    }
    ## add entry for pearson if "pearson" is in model and p = TRUE
    ## Changed to MetNet: both negative and positive correlation values are
    ## calculated, together with the corresponding p-values
    if ("pearson" %in% model && TRUE %in% p) {
        pearson <- threeDotsCall("correlation_p", x = x, type = "pearson", ...)
        diag(pearson[[1]]) <- NaN
        diag(pearson[[2]]) <- NaN
        l <- addToList(l, "pearson", pearson)
        print("pearson finished.")
    }
    ## add entry for spearman if "spearman" is in model and p = TRUE
    ## Changed to MetNet: both negative and positive correlation values are
    ## calculated, together with the corresponding p-values
    if ("spearman" %in% model && TRUE %in% p) {
        spearman <- threeDotsCall("correlation_p", x = x, type = "spearman", ...)
        diag(spearman[[1]]) <- NaN
        diag(spearman[[2]]) <- NaN
        l <- addToList(l, "spearman", spearman)
        print("spearman finished.")
    }
    ## add entry for bayes if "bayes" is in model
    if ("bayes" %in% model) {
        bayes <- threeDotsCall("bayes", x = x, ...)
        diag(bayes) <- NaN
        l <- addToList(l, "bayes", bayes)
        print("bayes finished.")
    }
    return(l)
}
#' correlation_p is an additional function (complementary to correlation()), and needed for positive/negative pearson/spearman
#' correlation value and p-value calculation
#'
#' @param x numeric matrix with features in rows and samples in columns
#'   (it is transposed before being handed to `rcorr`)
#' @param type correlation method, either "pearson" or "spearman"
#' @param use kept for interface compatibility; NOTE(review): currently
#'   unused -- `rcorr` always works on pairwise complete observations
#' @return list with two entries: "Correlation Value" (matrix of r values)
#'   and "p-Value" (matrix of p-values)
correlation_p <- function(x, type = "pearson", use = "pairwise.complete.obs") {
    ## fail early on unsupported types; previously an invalid `type` left
    ## `cor_list` undefined and triggered an obscure "object not found"
    ## error at the names() call below
    if (!type %in% c("pearson", "spearman")) {
        stop("type must be 'pearson' or 'spearman'", call. = FALSE)
    }
    cor_list <- rcorr(x = t(x), type = type)
    names(cor_list) <- c("Correlation Value", "n", "p-Value")
    ## drop the "n" (number of observations) entry, keep r and p matrices
    cor_list <- cor_list[-2]
    return(cor_list)
}
#' Append `object` to the list `l` under the entry name `name`.
#' (Changes to MetNet: `object` is no longer required to be a matrix,
#' since it may also be a list.)
#'
#' @param l list to extend
#' @param name character name for the new entry
#' @param object value to append (matrix or list)
#' @return `l` with `object` appended as its last, named element
addToList <- function(l, name, object) {
    ## validate inputs before touching the list
    if (!is.list(l)) {
        stop("l is not a list")
    }
    if (!is.character(name)) {
        stop("name is not a character")
    }
    ## append the object and label the freshly added slot
    l[[length(l) + 1]] <- object
    names(l)[length(l)] <- name
    return(l)
}
#' Call `fun` with only those `...` arguments that `fun` actually accepts.
#'
#' @param fun function (or function name) to call
#' @param ... candidate arguments; entries whose names are not formal
#'   arguments of `fun` are silently dropped
#' @return whatever `fun` returns
threeDotsCall <- function(fun, ...) {
    dots <- list(...)
    ## refuse ambiguous input: the same argument supplied twice
    if (any(duplicated(names(dots)))) stop("duplicated args in ...")
    ## keep only arguments that match a formal argument of `fun`
    accepted <- names(dots) %in% formalArgs(fun)
    do.call(fun, dots[accepted])
}
#' Changes to MetNet: new attribute for type: "threshold_p"
#' A list is created instead of a single matrix as output, containing 1/0
#' assigned values of the model matrices (e.g. pearson and spearman) and the
#' consensus matrix.
#'
#' If "threshold_p" is selected in 'type', values are assigned 1 if their
#' p-value is BELOW the defined threshold (defined in 'args').
#' If "threshold" is selected in 'type', values are assigned 1 if their
#' correlation value is ABOVE the defined threshold (defined in 'args').
#'
#' @param statistical named list of adjacency matrices, one per model
#' @param type one of "top1", "top2", "mean", "threshold", "threshold_p"
#' @param args named list: per-model thresholds plus a consensus "threshold"
#'   entry (threshold types) or a numeric "n" entry (top/mean types)
#' @param values one of "all", "min", "max" -- how the two symmetric matrix
#'   entries are combined for the top/mean types
#' @param ... forwarded to sna::consensus via threeDotsCall
#' @return list of binarized model matrices plus a "Consensus" matrix
threshold <- function(statistical, type, args,
    values = c("all", "min", "max"), ...) {
    l <- statistical
    ## args: either N for the top-ranking types, or a list of thresholds
    if (any(duplicated(names(args)))) {
        stop("names(args) contain duplicated entries")
    }
    ## Changes to MetNet: new attribute for type: "threshold_p"
    if (!type %in% c("top1", "top2", "mean", "threshold", "threshold_p"))
        stop("type not in 'top1', 'top2', 'mean', 'threshold', 'threshold_p'")
    ## check args for type "threshold"
    ## (an identical second copy of this block was removed -- it was dead,
    ## duplicated validation code)
    if (type %in% c("threshold")) {
        if (!(all(names(l) %in% names(args)))) {
            stop("'args' does not contain entries for all 'model's in ",
                "'statistical'")
        }
        if (!"threshold" %in% names(args) && length(args$threshold) != 1) {
            stop("'args' does not contain entry 'threshold' of length 1")
        }
    }
    ## complementary check for type "threshold_p"
    if (type %in% c("threshold_p")) {
        if (!(all(names(l) %in% names(args)))) {
            stop("'args' does not contain entries for all 'model's in ",
                "'statistical'")
        }
        ## NOTE(review): this tests for "threshold_p" in names(args) but
        ## measures length(args$threshold) -- confirm which entry is intended
        if (!"threshold_p" %in% names(args) && length(args$threshold) != 1) {
            stop("'args' does not contain entry 'threshold' of length 1")
        }
    }
    ## resolve `values` against its allowed set
    values <- match.arg(values)
    if (type %in% c("top1", "top2", "mean")) {
        if (!("n" %in% names(args) && length(args$n) == 1 &&
            is.numeric(args$n)))
            stop("args does not contain the numeric entry `n` of length 1")
    }
    if (type == "threshold" || type == "threshold_p") {
        ## iterate through the list, binarize each model matrix against its
        ## model-specific threshold, and write the result back to the list
        l <- lapply(seq_along(l), function(x) {
            ## find corresponding model in l
            name_x <- names(l)[x]
            ## get corresponding threshold in args
            threshold_x <- args[[names(l)[x]]]
            ## Changed to MetNet
            if ("threshold" %in% type) {
                if ("Correlation Value" %in% names(l[[name_x]][1])) {
                    ## adjacency matrix of correlation values; present when
                    ## the statistical step was run with p = TRUE
                    l_x <- l[[name_x]]$`Correlation Value`
                    ## only assign 1 to values above the threshold
                    ifelse(l_x > threshold_x, 1, 0)
                } else {
                    ## plain adjacency matrix (original MetNet behaviour);
                    ## for pearson/spearman (incl. partial/semi-partial),
                    ## lasso, randomForest, clr, aracne and bayes, higher
                    ## values correspond to higher confidence
                    l_x <- l[[name_x]]
                    ## only assign 1 to values above the threshold
                    ifelse(l_x > threshold_x, 1, 0)
                }
            } else if ("threshold_p" %in% type) {
                ## adjacency matrix of p-values; present when the
                ## statistical step was run with p = TRUE
                l_x <- l[[name_x]]$`p-Value`
                ## only assign 1 to values BELOW the threshold
                ifelse(l_x < threshold_x, 1, 0)
            }
        })
        ## calculate the consensus from the binary matrices
        ## (threeDotsCall forwards only arguments consensus() accepts)
        cons <- threeDotsCall(sna::consensus, dat = l, ...)
        ## threshold the consensus so that it is a binary matrix
        cons <- ifelse(cons >= args$threshold, 1, 0)
        rownames(cons) <- colnames(cons) <- colnames(l[[1]])
    } else { ## if type is in "top1", "top2" or "mean"
        l_df <- lapply(seq_along(l), function(x) {
            ## find corresponding model in l
            name_x <- names(l)[x]
            ## get corresponding adjacency matrix in l
            l_x <- l[[name_x]]
            ## take the respective minimum or maximum depending on `values`,
            ## do not do anything if `values` is equal to `all`
            if (values %in% c("min", "max")) {
                ## get values from the lower triangle
                lower_tri <- l_x[lower.tri(l_x)]
                ## get values from the upper triangle (requires transposing)
                l_x_t <- t(l_x)
                upper_tri <- l_x_t[lower.tri(l_x_t)]
                ## combine the two symmetric entries
                if (values == "min") {
                    values <- apply(rbind(lower_tri, upper_tri), 2, min)
                } else {
                    values <- apply(rbind(lower_tri, upper_tri), 2, max)
                }
                ## write back symmetrically to the matrix
                l_x[lower.tri(l_x)] <- values
                l_x <- t(l_x)
                l_x[lower.tri(l_x)] <- values
            }
            ## for lasso/randomForest/bayes a value of 0 means the link
            ## does not explain any variability: exclude those links
            if (grepl(name_x, pattern = "lasso|randomForest|bayes")) {
                res <- getLinks(l_x, exclude = "== 0")
            }
            if (grepl(name_x, pattern = "pearson|spearman|clr|aracne")) {
                res <- getLinks(l_x, exclude = NULL)
            }
            res
        })
        names(l_df) <- names(l)
        ## bind together the ranks of the models, stored in l_df
        ranks <- lapply(l_df, function(x) x$rank)
        ranks <- do.call("cbind", ranks)
        colnames(ranks) <- names(l_df)
        ## consensus information: first or second top rank per row, or the
        ## average across rows, depending on the type argument
        cons_val <- MetNet:::topKnet(ranks, type)
        ## bind row and col information with cons information
        row_col <- l_df[[1]][, c("row", "col")]
        ranks <- cbind(row_col, cons_val)
        ## get the top N features
        n <- args$n
        top_n <- sort(unique(cons_val))[1:n]
        ranks_top <- ranks[cons_val %in% top_n, ]
        ## write links in ranks_top to a binary adjacency matrix cons
        cons <- matrix(0, nrow = ncol(l[[1]]), ncol = ncol(l[[1]]))
        rownames(cons) <- colnames(cons) <- colnames(l[[1]])
        cons[as.numeric(rownames(ranks_top))] <- 1
    }
    ## Changes to MetNet: the output is a list containing the binarized
    ## model matrices (e.g. pearson and spearman) plus the consensus matrix
    names(l) <- names(statistical)
    l[["Consensus"]] <- cons
    ## NOTE(review): assumes at least three list entries exist and entry 3
    ## is coercible to numeric -- confirm for inputs with fewer models
    class(l[[3]]) <- "numeric"
    return(l)
}
#' Changes to MetNet: New attributes added; if model = "combined" (default) the result will be the same
#' as in MetNet, except the output list items were named "combined" and "Character".
#' If model = "pearson" or "spearman" then also the corresponding weighted statistical
#' adjacency matrix is required as attribute (weighted_statistical = XY).
#' The output in this case will be a list containing 4 list items, where combination relies on the
#' unweighted adjacency matrix of either Pearson or Spearman.
#' Moreover, the corresponding correlation and p-values will be displayed as list items.
#'
#' @param structural list of length 3: numeric adjacency matrix, character
#'   matrix of link types, and a third matrix (mass differences -- TODO confirm)
#' @param statistical list of binarized model matrices whose third entry is
#'   the numeric consensus matrix
#' @param threshold numeric; positions where structural + statistical exceeds
#'   this value become a link (1)
#' @param model "combined" (default), "pearson" or "spearman"
#' @param weighted_statistical weighted statistical result; only used when
#'   model is "pearson" or "spearman"
#' @return list with the combined binary matrix and the link-type matrix;
#'   for pearson/spearman additionally the correlation and p-value matrices
combine <- function(structural, statistical, threshold = 1, model = "combined", weighted_statistical) {
    ## changed since the structural list now has length 3
    if (!is.list(structural) | length(structural) != 3)
        stop("structural is not a list of length 3")
    if (!is.matrix(structural[[1]]) | !is.numeric(structural[[1]]))
        stop("structural[[1]] is not a numeric matrix")
    if (!is.matrix(structural[[2]]) | !is.character(structural[[2]]))
        stop("structural[[2]] is not a character matrix")
    ## Additional to MetNet: entry 3 of `statistical` must be the numeric
    ## consensus matrix
    if (!is.matrix(statistical[[3]]) | !is.numeric(statistical[[3]]))
        stop("statistical is not a numeric matrix")
    if (!all(rownames(structural[[1]]) == rownames(structural[[2]])))
        stop(c("rownames of structural[[1]] are not identical to rownames of ",
            "structural[[2]]"))
    if (!all(colnames(structural[[1]]) == colnames(structural[[2]])))
        stop(c("colnames of structural[[1]] are not identical to colnames of ",
            "structural[[2]]"))
    ## NOTE(review): `statistical` is a list here, so rownames(statistical)
    ## is NULL and these two checks are vacuously TRUE -- confirm whether
    ## statistical[[3]] was intended
    if (!all(rownames(structural[[1]]) == rownames(statistical)))
        stop("rownames are not identical")
    if (!all(colnames(structural[[1]]) == colnames(statistical)))
        stop("colnames are not identical")
    if (!is.numeric(threshold)) stop("threshold is not numeric")
    ## create list to store results
    res <- list()
    ## Changes to MetNet: distinguish between the default (model ==
    ## "combined") and model = "pearson" or "spearman"
    if (model == "pearson") {
        ## create the first entry of the list:
        ## sum the structural matrix and the binarized pearson matrix; if
        ## the value is above threshold then assign 1, otherwise 0
        cons_num <- structural[[1]] + statistical[["pearson"]]
        cons_num <- ifelse(cons_num > threshold, 1, 0)
        ## if p-values have previously been calculated (p = TRUE), keep the
        ## correlation and p-value of every retained link, blank the rest
        if ("Correlation Value" %in% names(weighted_statistical[["pearson"]][1])) {
            cons_corr <- ifelse(cons_num == 1, weighted_statistical[["pearson"]][["Correlation Value"]], "")
            cons_p <- ifelse(cons_num == 1, weighted_statistical[["pearson"]][["p-Value"]], "")}
        else {
            ## no p-values available: keep weighted correlation values only
            cons_corr <- ifelse(cons_num == 1, weighted_statistical[["pearson"]], "")
            cons_p <- NaN
        }}
    if (model == "spearman") {
        ## same as above, based on the binarized spearman matrix
        cons_num <- structural[[1]] + statistical[["spearman"]]
        cons_num <- ifelse(cons_num > threshold, 1, 0)
        ## if p-values have previously been calculated
        if ("Correlation Value" %in% names(weighted_statistical[["spearman"]][1])) {
            cons_corr <- ifelse(cons_num == 1, weighted_statistical[["spearman"]][["Correlation Value"]], "")
            cons_p <- ifelse(cons_num == 1, weighted_statistical[["spearman"]][["p-Value"]], "")}
        else {
            cons_corr <- ifelse(cons_num == 1, weighted_statistical[["spearman"]], "")
            cons_p <- NaN
        }}
    if (model == "combined") {
        ## original MetNet behaviour: combine structural with the third
        ## (consensus) statistical matrix; no correlation/p-values available
        cons_num <- structural[[1]] + statistical[[3]]
        cons_num <- ifelse(cons_num > threshold, 1, 0)
        cons_corr <- NaN
        cons_p <- NaN
    }
    ## create the second entry of the list:
    ## if element in cons_num is equal to 1, take the element in
    ## structural[[2]] (the type of link), otherwise ""
    cons_char <- ifelse(cons_num == 1, structural[[2]], "")
    ## assign to list
    ## compared to MetNet, names were assigned to the entries
    res[[model]] <- cons_num
    res[["Character"]] <- cons_char
    ## assign correlation and p-values if model is "pearson" or "spearman"
    if (model == "pearson" || model == "spearman") {
        res[["Correlation Value"]] <- cons_corr
        res[["p-Value"]] <- cons_p
    }
    return(res)
}
#' exportNet2gml is a function that exports adjacency matrices to gml using igraph.
#' Needs the following attributes:
#' x: adjacency matrix (or list of matrices) to be exported
#' from: which function the input originated from; possible values are
#' - "structural": produces a gml file with edge attributes containing mass difference values, saved as "structural_type.gml"
#' - "statistical+p": produces one gml file per model with edge attributes containing correlation values and p-values, saved as "statistical.'model'.gml"
#'   (TODO: add plain "statistical")
#' - "combine": produces a gml file with edge attributes containing correlation and p-values for pearson/spearman correlations,
#'   saved as "combined.gml"
exportNet2gml <- function (x, from, ...) {
    if ("structural" %in% from) {
        mat <- x[[1]]       # binary adjacency matrix
        mat_type <- x[[2]]  # link-type (character) matrix
        mat_mass <- x[[3]]  # mass-difference matrix, stored as character
        ## coerce the mass differences so igraph treats them as weights
        class(mat_mass) <- "numeric"
        net <-
            graph_from_adjacency_matrix(mat, mode = "undirected", weighted = T)
        net_type <-
            graph_from_adjacency_matrix(mat_type, mode = "undirected", weighted = T)
        net_mass <-
            graph_from_adjacency_matrix(mat_mass, mode = "undirected", weighted = T)
        ## graph union keeps each input's weights as separate edge attributes
        net_comb <- union(net, net_mass)
        names(edge_attr(net_comb))[1] <- "adj"
        names(edge_attr(net_comb))[2] <- "mass difference"
        ## NOTE(review): net_type is built but never exported -- confirm
        #net_plot <- plot(net_type, edge.width = 5, vertex.label.cex = 0.5, edge.color = "grey")
        write_graph(net_comb, "structural_type.gml", format = c("gml"))
    }
    else if ("statistical+p" %in% from) {
        ## write one .gml file per model entry in x
        for (i in 1:length(x)) {
            cor_list <- x[[i]]
            ## build graphs from the correlation and p-value matrices
            net_cor <-
                igraph::graph_from_adjacency_matrix(cor_list[[1]], mode = "undirected", weighted = T)
            net_p <-
                igraph::graph_from_adjacency_matrix(cor_list[[2]], mode = "undirected", weighted = T)
            net_comb <- union(net_cor, net_p)
            names(edge_attr(net_comb))[1] <- "correlation"
            names(edge_attr(net_comb))[2] <- "p"
            # #net_plot <- plot(net_type, edge.width = 5, vertex.label.cex = 0.5, edge.color = "grey")
            ## file name carries the model name, e.g. "statistical.pearson.gml"
            q <- names(x[i])
            write_graph(net_comb, file = sprintf('statistical.%s.gml', q), format = c("gml"))
        }
    }
    else if ("combine" %in% from) {
        ## pearson/spearman combine() output has correlation and p matrices
        ## at positions 3 and 4 (stored as character -> coerce to numeric)
        if ("pearson" %in% names(x[1]) | "spearman" %in% names(x[1]) ){
            class(x[[3]]) <- "numeric"
            class(x[[4]]) <- "numeric"
            net_cor <- graph_from_adjacency_matrix(x[[3]], mode = "undirected", weighted = T)
            net_p <- graph_from_adjacency_matrix(x[[4]], mode = "undirected", weighted = T)
            net <- union(net_cor, net_p)
            names(edge_attr(net))[1] <- "correlation"
            names(edge_attr(net))[2] <- "p"
        }
        else { # if "combined" or another model: export the binary matrix only
            net <-
                graph_from_adjacency_matrix(x[[1]], mode = "undirected", weighted = T)
        }
        write_graph(net, "combined.gml", format = c("gml"))
    }
}
#' adjacency_list is a function that creates an edge list (long format) from
#' an adjacency matrix (or list of matrices) x.
#' from: which function the input originated from; possible attributes are
#' "structural", "statistical", "combine"
adjacency_list <- function(x, from){
    if (!(all(from %in% c("structural", "statistical", "combine"))))
        stop("'from' not implemented in adjacency_list")
    if ("structural" %in% from) {
        ## blank the upper triangle so each undirected edge is listed once
        x[[2]][upper.tri(x[[2]])] <- ''
        x[[3]][upper.tri(x[[3]])] <- ''
        ## melt to long format, drop self-links and empty cells
        list_type <- melt(x[[2]]) %>% filter(Var1 != Var2) %>% filter(value != '')
        list_mass <- melt(x[[3]]) %>% filter(Var1 != Var2) %>% filter(value != '')
        ## NOTE(review): assumes both melted tables have identical row order
        ## (same matrices' dimnames and same filtering) -- confirm
        combine <- add_column(list_type, `mass difference`= list_mass$value) %>% as.data.frame()
        return(combine)
    }
    else if ("statistical" %in% from) {
        ## build one column per model matrix, feature pair columns come from
        ## the first matrix
        for (i in seq_along(x)) {
            if (i == 1) {
                x[[i]][upper.tri(x[[i]])] <- ''
                list_corr <- melt(x[[i]]) %>% filter(Var1 != Var2) %>% filter(value != '') %>%
                    select(Var1, Var2, value)
                colnames(list_corr) <- c("Feature1", "Feature2", names(x[i]))
                #return(list_corr)
            }
            if (i != 1){
                model = names(x[i])
                x[[i]][upper.tri(x[[i]])] <- ''
                list_corr2 <- melt(x[[i]]) %>% filter(Var1 != Var2) %>% filter(value != '')
                ## append this model's values as a new column and name it
                list_comb <- add_column(list_corr, list_corr2$value)
                list_comb <- as.data.frame(list_comb)
                colnames(list_comb)[i+2] <- c(names(x[i]))
                list_corr <- list_comb
            }
        }
        return(list_corr)
    }
    else if ("combine" %in% from){
        ## entries 2-4 of combine() output: link types, correlations, p-values
        x[[2]][upper.tri(x[[2]])] <- ''
        x[[3]][upper.tri(x[[3]])] <- ''
        x[[4]][upper.tri(x[[4]])] <- ''
        list_mass <- melt(x[[2]]) %>% filter(Var1 != Var2) %>% filter(value != '')
        list_corr <- melt(x[[3]]) %>% filter(Var1 != Var2) %>% filter(value != '')
        list_p <- melt(x[[4]]) %>% filter(Var1 != Var2) %>% filter(value != '')
        ## NOTE(review): relies on identical row order of the three melted
        ## tables -- confirm
        listed <- add_column(list_mass, `Correlation Value` = list_corr$value)
        listed <- add_column(listed, `p-Value` = list_p$value)
        return(listed)
    }
}
#' sum_mass summarises the adjacency list containing mass difference values,
#' i.e. either the adjacency list from structural or from combine may be used.
#' Returns a data frame of counts per reaction type (plus the associated mass
#' difference, when present) and plots a bar chart of the counts.
sum_mass <- function(adjacency_list){
    if("mass difference" %in% names(adjacency_list)){
        ## counts per mass-difference value
        sum_mass <- adjacency_list %>% group_by(`mass difference`) %>% summarise(count=n()) %>%
            as.data.frame()
        ## counts per reaction type, with the mass differences appended
        ## NOTE(review): assumes the group_by(`value`) and
        ## group_by(`mass difference`) summaries end up in the same row
        ## order, i.e. a one-to-one type<->mass mapping -- confirm
        sum_comb <- adjacency_list %>% group_by(`value`) %>% summarise(count=n()) %>%
            as.data.frame() %>% add_column(sum_mass$`mass difference`)
        colnames(sum_comb) <- c("Type", "Counts", "Mass Difference")
        sum_comb <- sum_comb %>% select(Type, `Mass Difference`, Counts)}
    else{
        ## no mass differences available: counts per reaction type only
        sum_comb <- adjacency_list %>% group_by(`value`) %>% summarise(count=n()) %>%
            as.data.frame()
        colnames(sum_comb) <- c("Type", "Counts")
    }
    ## bar chart of the per-type counts
    ## (the title's "destinct" typo is kept: runtime strings are unchanged)
    plot_list <- ggplot(sum_comb, aes(x=Type, y=Counts, fill=Type)) + geom_bar(stat = "identity") + theme_minimal() +
        labs(title = "Numbers of destinct type of a biochemical reaction")+ scale_fill_brewer(palette = "Blues") + theme(legend.position = "right")
    plot(plot_list)
    return(sum_comb)
}
|
94e659dd751955dc05b1a4ed8e626a9b3b51815c
|
9991cf135c447943bd7f7f1b58736ddee1b2fec1
|
/Rcode/PCA_based_training.R
|
35f75c27f8464dffdbdb135198d6e01715c481b1
|
[] |
no_license
|
madsherlock/SML-F16
|
2469d1f717c027a6ab084654bc803552d0f61ca5
|
b9c8df66a0214a13fd538dbaf9badc6bf98fc325
|
refs/heads/master
| 2021-01-21T04:44:49.768032
| 2016-06-06T07:26:08
| 2016-06-06T07:26:08
| 51,139,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,535
|
r
|
PCA_based_training.R
|
gc()
library(caret)
library(doParallel); cl <- makeCluster(4); registerDoParallel(cl)
## NOTE(review): setwd() ties the script to one machine; kept because all
## data paths below are relative to this directory.
setwd("~/Dropbox/Eksamen/SML/SML-F16/Rcode")
load("../data/data.RData")
load("../data/testClass_new.RData")

## Person/session identifiers of the 15 training sets to combine.
## Each file "data-<id>-100-1.5.RData" defines a list `trainingDigit`.
## (Replaces 15 copy-pasted load()/List_x_y assignments.)
digit_sets <- c("1-1", "1-2", "2-1", "2-2", "3-1", "4-1", "4-2", "4-3",
                "5-1", "5-2", "6-1", "7-1", "8-1", "12-1", "13-1")
digit_lists <- lapply(digit_sets, function(set_id) {
    ## load() inside the function env so only `trainingDigit` escapes
    load(sprintf("../data/data-%s-100-1.5.RData", set_id))
    trainingDigit
})

## Replicate each digit's label vector once per training set, so the labels
## line up with the row-bound training data below.
## (Replaces ten hand-written unlist(list(label[[d]], ... x15)) lines.)
bigLabel <- lapply(seq_len(10), function(d) {
    unlist(rep(list(label[[d]]), length(digit_sets)))
})
str(bigLabel)
bigLabel <- factor(c(bigLabel, recursive = TRUE)) # labels for multiple persons

## Stack all training sets into one data frame (rows = samples).
trainingDigit <- data.frame(do.call(rbind, do.call(c, digit_lists)))

## Candidate neighbourhood sizes for kNN.
k_list <- expand.grid(k = 1:20)
## Repeated 10-fold CV; PCA pre-processing keeps 80% of the variance.
control <- trainControl(method = "repeatedcv",
                        number = 10,
                        repeats = 10,
                        preProcOptions = list(thresh = 0.8),
                        p = 0.9)
str(label)
str(trainingDigit)
knnFit <- train(x = trainingDigit, # all persons' data (use a single List_* for one person)
                y = bigLabel,      # all persons' labels (use `label` for one person)
                method = "knn",
                tuneGrid = k_list,
                trControl = control,
                preProcess = "pca")
knnFit$results
|
494be8793c1c91faa78bef46a4e4baf8ee36bfd7
|
480480c0e26fd4df5a47a46734994fe3c67ae3dc
|
/R/fars_functions.R
|
5f6ec793e3cc3819b8d09ec10ebbd351851242fd
|
[] |
no_license
|
Alice-MacQueen/farsr
|
d7b90d4ee077b2314012281f4359e3e5870d1912
|
51ef23f965d8712ffb0793392ef294f180a0dab7
|
refs/heads/master
| 2020-05-03T06:46:02.421978
| 2019-04-03T20:52:16
| 2019-04-03T20:52:16
| 178,481,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,488
|
r
|
fars_functions.R
|
#' @title Read Fatality Analysis Reporting System data
#'
#' @description \code{fars_read} reads in Fatality Analysis Reporting System (FARS) data
#' for a given \code{filename}, if the file exists.
#'
#' @param filename The name of the FARS data file to read.
#' @param path The path to the FARS data file to read. The default is the path
#' to the three supplied raw data files.
#'
#' @return A table of FARS data.
#'
#' @examples
#' \dontrun{fars_read(filename = "accident_2013.csv.bz2")}
#' \dontrun{fars_read(filename = "accident_2014.csv.bz2"))}
#' \dontrun{fars_read(filename = "accident_2015.csv.bz2")}
#' \dontrun{fars_read(filename = "file_does_not_exist.csv", path = "bad/path")} # Results in an error
#'
#' @import readr
#'
#' @export
fars_read <- function(filename, path = "inst/extdata") {
  ## fail fast if the file is not present on the given path
  if(!file.exists(file.path(path, filename)))
    stop("file '", filename, "' does not exist on this path")
  if(path == "inst/extdata"){
    ## default path: read the file shipped with the package
    ## NOTE(review): system.file() is given "inst/extdata" here; for an
    ## installed package the "inst/" prefix is stripped at install time, so
    ## this would normally be system.file("extdata", ...) -- confirm
    data <- suppressMessages({
      readr::read_csv(system.file("inst/extdata", filename, package = "farsr"),
                      progress = FALSE)
    })
  } else{
    ## user-supplied path: read directly from disk
    data <- suppressMessages({
      readr::read_csv(file.path(path, filename), progress = FALSE)
    })
  }
  ## NOTE(review): dplyr::tbl_df() is deprecated in current dplyr;
  ## dplyr::as_tibble() is the modern equivalent
  dplyr::tbl_df(data)
}
#' @title Make a Fatality Analysis Reporting System file name.
#'
#' @description \code{make_filename} builds the canonical Fatality Analysis
#' Reporting System (FARS) data file name for a given four digit \code{year}.
#'
#' @param year The year to build a FARS filename for; coerced to integer.
#'
#' @return A character scalar, e.g. "accident_2014.csv.bz2".
#'
#' @examples
#' \dontrun{make_filename(year = 2014)} # Makes FARS filename for the year 2014.
#' \dontrun{make_filename(year = 14)} # Will not make a working FARS filename.
#'
#' @export
make_filename <- function(year) {
  ## coerce first so sprintf's %d always receives an integer
  sprintf("accident_%d.csv.bz2", as.integer(year))
}
#' @title Read in one or more years of FARS data.
#'
#' @description Reads in Fatality Analysis Reporting System data for one or more
#' years.
#'
#' @param years A year or vector of four-digit years for which you want to read
#' in FARS data.
#' @param path The path to the FARS data file to read. The default is the path
#' to the three supplied raw data files.
#'
#' @return A table or list of tables of FARS data (one element per year;
#' \code{NULL} with a warning for years whose file cannot be read).
#'
#' @examples
#' \dontrun{fars_read_years(years = 2014)} # Returns FARS data for the year 2014.
#' years <- c(2013, 2014, 2015)
#' \dontrun{fars_read_years(years = years)} # Returns a list of three tables of FARS data.
#' \dontrun{fars_read_years(years = 14)} # Results in an invalid year error.
#'
#' @export
fars_read_years <- function(years, path = "inst/extdata") {
  ## one list element per requested year
  lapply(years, function(year) {
    file <- make_filename(year)
    tryCatch({
      dat <- fars_read(file, path = path)
      ## keep only the month plus a year column for later summarising
      dplyr::mutate(dat, year = year) %>%
        dplyr::select(.data$MONTH, year)
    }, error = function(e) {
      ## a missing/unreadable file yields NULL plus a warning, so one bad
      ## year does not abort the whole request
      warning("invalid year: ", year)
      return(NULL)
    })
  })
}
#' @title FARS Monthly Fatality Summaries
#'
#' @description Creates summaries of monthly fatalities using Fatality Analysis
#' Reporting System data for a specified year or years.
#'
#' @param years The year or years to get monthly summaries of fatalities for.
#' @param path The path to the FARS data file to read. The default is the path
#' to the three supplied raw data files.
#'
#' @return A summary table of monthly fatalities for each year of FARS data
#' (one row per month, one column per year).
#'
#' @examples
#' \dontrun{fars_summarize_years(years = 2014)} # Monthly fatality summaries for 2014.
#'years <- c(2013, 2014, 2015)
#' \dontrun{fars_summarize_years(years = years)} # Summary table for 2013-2015.
#' \dontrun{fars_summarize_years(years = 14)} # Will return an error.
#'
#' @import readr tidyr dplyr
#' @importFrom magrittr %>%
#'
#' @export
fars_summarize_years <- function(years, path = "inst/extdata") {
  dat_list <- fars_read_years(years, path = path)
  ## stack the per-year tables, count records per year/month, then spread
  ## the years into columns
  ## NOTE(review): tidyr::spread() is superseded by pivot_wider()
  dplyr::bind_rows(dat_list) %>%
    dplyr::group_by(.data$year, .data$MONTH) %>%
    dplyr::summarize(n = n()) %>%
    tidyr::spread(.data$year, n)
}
#' @title Plot FARS fatalities for a state and year.
#'
#' @description Makes a plot of Fatality Analysis Reporting System (FARS)
#' data for a given state number and year.
#'
#' @param year The year to be plotted
#' @param state.num The integer number of the state to be plotted, from 1-56.
#' @param path The path to the FARS data file to read. The default is the path
#' to the three supplied raw data files.
#'
#' @return A maps object (invisibly \code{NULL} if there is nothing to plot).
#'
#' @examples
#' \dontrun{fars_map_state(state.num = 10, year = 2014)} # Returns a map for state 10.
#' \dontrun{fars_map_state(state.num = 100, year = 2014)} # Returns an error.
#'
#' @import dplyr readr maps graphics
#'
#' @export
fars_map_state <- function(state.num, year, path = "inst/extdata") {
  filename <- farsr::make_filename(year)
  data <- farsr::fars_read(filename, path = path)
  state.num <- as.integer(state.num)
  ## the requested state must occur in the data
  if(!(state.num %in% unique(data$STATE)))
    stop("invalid STATE number: ", state.num)
  data.sub <- dplyr::filter(data, .data$STATE == state.num)
  ## nothing to draw for a state with zero accident records
  if(nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  ## mark out-of-range coordinates as missing so they are excluded from the
  ## plot range (presumably FARS sentinel codes for unknown location --
  ## TODO confirm)
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    ## draw the state outline scaled to the accident coordinates, then add
    ## one dot (pch = 46, a pixel) per accident
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
|
0e406ed08e811964d5b3357246a2001ce88e358a
|
2d310fd545505bb0fb7396011cd0c4859e8f0927
|
/man/old_prs.Rd
|
83daea9d263f1ea5d232827905161d974fbb617d
|
[
"MIT"
] |
permissive
|
olladapunaresh/pRs
|
b871d5ccec03b074c2cdc97eacad8c7d14c549dc
|
e0e81c76c87897930086cbd0fd4a477d481d1f7d
|
refs/heads/master
| 2023-03-18T18:53:56.621307
| 2018-12-10T13:40:08
| 2018-12-10T13:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 549
|
rd
|
old_prs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{old_prs}
\alias{old_prs}
\title{Construct several polygenic risk scores from a matrix of weights.}
\usage{
old_prs(input, debug, n, weights)
}
\arguments{
\item{weights}{A matrix of weights with each row being beta corresponding to the association between SNP at that position and the outcome.}
\item{name}{Path to .ped file (or binary file, working on this now).}
}
\description{
Construct several polygenic risk scores from a matrix of weights.
}
|
37223fd5a3184debcaf27ee96960280af676c15f
|
aebfb7d9c03a2d349f66c1c335287e4e14d58071
|
/man/get.corresponding.ts.data.according.to.the.combination.months.Rd
|
1dfbbcc018b67df850d5fb22b88a3db91888f3ec
|
[] |
no_license
|
lixixibj/foss
|
d99cf1e9edc25bdfabf405922557b6c3858782cd
|
5c02c6758a628b08a2549aee4b9c53fe05d714a0
|
refs/heads/master
| 2023-06-14T11:02:08.176633
| 2021-07-09T01:24:36
| 2021-07-09T01:24:36
| 266,596,085
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 781
|
rd
|
get.corresponding.ts.data.according.to.the.combination.months.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forecasting.with.sub.seasons.R
\name{get.corresponding.ts.data.according.to.the.combination.months}
\alias{get.corresponding.ts.data.according.to.the.combination.months}
\title{get the corresponding ts value according to the combined months
test}
\usage{
get.corresponding.ts.data.according.to.the.combination.months(
ts,
combination.months,
formatted.date.of.the.ts
)
}
\arguments{
\item{ts}{train data of the ts, eg.}
\item{combination.months}{combined month, eg,c(1,2,3)}
\item{formatted.date.of.the.ts}{formatted date for the train data of ts}
}
\value{
new.ts : new ts constructed from the original ts
}
\description{
get the corresponding ts value according to the combined months
test
}
|
f53efa89ad9e9fa8cd35b4133be01b59926f8473
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pcadapt/examples/manhattan_plot.Rd.R
|
6c2cb11807ff2c79a43a1059349e9cc4140cd32e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 172
|
r
|
manhattan_plot.Rd.R
|
library(pcadapt)
### Name: manhattan_plot
### Title: Manhattan Plot
### Aliases: manhattan_plot
### Keywords: internal
### ** Examples
## see ?pcadapt for examples
|
ca3f4cb6168a8f629067ac2b235e6f5552f02e72
|
83e0c8f3a857bb8e4cb3322ae700021284edf85c
|
/FunGraph_0.1.0/man/get_biSAD1.Rd
|
56e5fc4ba4af198aa5704dfa6c07e716b30fd080
|
[] |
permissive
|
xiahui625649/FunGraph
|
b7fc5845362e5efcaefc3624de7ab7b98121ec39
|
181ee42e6eb5131b30103f602dbaacb9bb0a36a1
|
refs/heads/main
| 2023-08-13T01:30:46.162953
| 2021-10-13T08:40:39
| 2021-10-13T08:40:39
| 440,708,149
| 1
| 0
|
Apache-2.0
| 2021-12-22T02:22:11
| 2021-12-22T02:22:10
| null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
get_biSAD1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base.R
\name{get_biSAD1}
\alias{get_biSAD1}
\title{generate biSAD1 covariance matrix}
\usage{
get_biSAD1(par, n)
}
\arguments{
\item{par}{vector with four numbers, the first two for ck and the rest for stress}
\item{n}{scalar indicating the length of time d}
}
\value{
biSAD1 covariance matrix
}
\description{
generate biSAD1 covariance matrix
}
\examples{
get_biSAD1(par=c(2,0.5,2,0.1),n=14)
}
|
2935f4504eb78bf72bb7c5bfb02158b254c118e0
|
dc66e3a263d415824ebab4db7b46f24b9eac4272
|
/Analysis/processCSVs.R
|
fcc16df710beea4727c91a6a25d82ead6ca87de9
|
[
"MIT"
] |
permissive
|
cquijanoch/VRTrendVis
|
d79342f454bd1e25545b0783a8a63b05a8d3563f
|
81eda0fc739bdf0beffb0de0422479e0be2f44f5
|
refs/heads/main
| 2023-03-31T20:29:49.950428
| 2021-04-08T10:16:39
| 2021-04-08T10:16:39
| 353,451,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,568
|
r
|
processCSVs.R
|
#' Read one participant's result CSV and coerce every column to its
#' intended type.
#'
#' @param path path to a semicolon-separated result file (UTF-8 encoded)
#' @param id participant identifier, stored as factor column `ID`
#' @return data.frame with factor, character, numeric and timestamp columns
loadData <- function(path, id) {
  ## parse "YYYYMMDDhhmmss" timestamps
  ## NOTE(review): strptime returns POSIXlt -- kept for behavioural
  ## compatibility, although POSIXct is usually preferred in data frames
  parse_timestamp <- function(x) strptime(as.character(x), format = "%Y%m%d%H%M%S")
  ## parse numbers logged with a decimal comma (e.g. "1,5" -> 1.5)
  parse_decimal_comma <- function(x) as.numeric(gsub(",", ".", as.character(x)))

  data.df <- read.csv(path, sep = ";", header = TRUE, encoding = "UTF-8")
  data.df <- as.data.frame(data.df, stringsAsFactors = FALSE)
  ## categorical columns
  data.df$ID <- as.factor(id)
  data.df$Input <- as.factor(data.df$Input)
  data.df$HMD <- as.factor(data.df$HMD)
  data.df$VisModeID <- as.factor(data.df$VisModeID)
  data.df$TaskID <- as.factor(data.df$TaskID)
  data.df$InteractionMode <- as.factor(data.df$InteractionMode)
  data.df$Controller <- as.factor(data.df$Controller)
  ## timestamp columns (replaces seven copy-pasted as.character/strptime pairs)
  for (col in c("TimeInitTutorial", "TimeEndTutorial",
                "TimeInitPractice", "TimeEndPractice",
                "TimeInitTask", "TimeEndTask",
                "TimeEndExperiment", "TimeHMDRemoved",
                "TimeHMDRecovery", "TimeCloseApplication")) {
    data.df[[col]] <- parse_timestamp(data.df[[col]])
  }
  ## free-text answer columns stay character
  data.df$UserAnswerID <- as.character(data.df$UserAnswerID)
  data.df$CorrectAnswerID <- as.character(data.df$CorrectAnswerID)
  data.df$SubspacesName <- as.character(data.df$SubspacesName)
  ## numeric columns; TimeGrab/TimeScale are logged with a decimal comma
  data.df$NumAnswers <- as.numeric(data.df$NumAnswers)
  data.df$NumCorrectAnswers <- as.numeric(data.df$NumCorrectAnswers)
  data.df$TimeGrab <- parse_decimal_comma(data.df$TimeGrab)
  data.df$TimeScale <- parse_decimal_comma(data.df$TimeScale)
  data.df$Selects <- as.numeric(data.df$Selects)
  data.df$Q1 <- as.numeric(data.df$Q1)
  data.df$Q2 <- as.numeric(data.df$Q2)
  data.df$Q3 <- as.numeric(data.df$Q3)
  return(data.df)
}
# Load and stack the result files of all 18 participants, using the
# participant number both as file suffix and as ID column.
test.df <- do.call(rbind, lapply(1:18, function(i) {
  loadData(file.path("data", paste0("result_", i, ".csv")), i)
}))
# Subjective questionnaire answers (Q1-Q3): keep, per participant and
# visualization mode, the last recorded row only (plyr::ddply).
subjetive.df <- test.df[!is.na(test.df$VisModeID),c("ID","VisModeID","Q1","Q2","Q3")]
subjetive.df <- ddply(subjetive.df, .(ID, VisModeID), function(x) x[c(nrow(x)), ])
# Task rows = rows where the participant actually gave an answer.
task.df <- test.df[test.df$UserAnswerID != "",]
# Total grab/scale time per participant, visualization mode, task and
# interaction mode (hand vs ray).
aggregate_timegrab.df <- aggregate(task.df$TimeGrab, by=list(ID=task.df$ID, VM=task.df$VisModeID, t=task.df$TaskID, IM=task.df$InteractionMode), FUN=sum)
aggregate_timescale.df <- aggregate(task.df$TimeScale, by=list(ID=task.df$ID, VM=task.df$VisModeID, t=task.df$TaskID, IM=task.df$InteractionMode), FUN=sum)
# Pair the "hand" totals (suffix .x) with the "ray" totals (suffix .y)
# on a single row per (ID, VM, t).
aggregate_timegrab.df <- inner_join(aggregate_timegrab.df[aggregate_timegrab.df$IM=="hand",],aggregate_timegrab.df[aggregate_timegrab.df$IM=="ray",],by=c("ID","VM","t"))
aggregate_timescale.df <- inner_join(aggregate_timescale.df[aggregate_timescale.df$IM=="hand",],aggregate_timescale.df[aggregate_timescale.df$IM=="ray",],by=c("ID","VM","t"))
# Convert summed times by /60 — NOTE(review): assumed seconds -> minutes;
# confirm the unit used by the logger.
aggregate_timegrab.df$GrabHand <- aggregate_timegrab.df$x.x / 60
aggregate_timegrab.df$GrabRay <- aggregate_timegrab.df$x.y / 60
aggregate_timescale.df$ScaleHand <- aggregate_timescale.df$x.x / 60
aggregate_timescale.df$ScaleRay <- aggregate_timescale.df$x.y / 60
aggregate_test.df <- data.frame(aggregate_timegrab.df$ID,aggregate_timegrab.df$VM,aggregate_timegrab.df$t,aggregate_timegrab.df$GrabHand,aggregate_timegrab.df$GrabRay,aggregate_timescale.df$ScaleHand,aggregate_timescale.df$ScaleRay)
colnames(aggregate_test.df) <- c('ID','VisModeID','TaskID','GrabHand','GrabRay','ScaleHand', 'ScaleRay')
# Attach the interaction-time summaries back to one unique row per
# participant / visualization / task.
task.df <- unique(inner_join(aggregate_test.df,task.df[,c( "ID",
"VisModeID",
"TaskID",
"TimeInitTask",
"TimeEndTask",
"NumAnswers",
"NumCorrectAnswers",
"Selects",
"SubspacesName",
"UserAnswerID")
], by=c("ID","VisModeID","TaskID")))
# Derived per-task measures.
task.df$duration <- difftime(task.df$TimeEndTask, task.df$TimeInitTask, units="sec") %>% as.numeric()
# Fraction of wrong answers in the task.
task.df$error <- (task.df$NumAnswers - task.df$NumCorrectAnswers)/task.df$NumAnswers
# TRUE when at least one answer was wrong.
task.df$binary_error <- task.df$error %>% as.logical()
# 1 when the whole task was answered correctly, 0 otherwise.
task.df$accuracy <- 1 - task.df$binary_error %>% as.numeric()
# Number of correct answers: (1 - error rate) * number of answers.
task.df$points <- ((1 - task.df$error) * task.df$NumAnswers) %>% as.numeric()
task.df$points_factor <- task.df$points %>% as.factor()
# Human-readable labels for the four visualization conditions.
task.df$VisModeID <- revalue(task.df$VisModeID, c("1" = "Animation",
"2" = "Overlaid",
"3" = "SMultiples",
"4" = "Mix"
))
###Mix Subspaces###
# Split the strings in `x` successively on each pattern in `splits`,
# dropping empty fragments from the result.  Extra arguments are passed
# through to strsplit().
strsplits <- function(x, splits, ...) {
  pieces <- Reduce(function(acc, pat) unlist(strsplit(acc, pat, ...)),
                   splits, x)
  pieces[pieces != ""]  # discard empty fragments
}
# Mix condition only: each answer could come from any of the three
# embedded visualizations (animation / overlay / small multiples).
mix.df <- data.frame(task.df[task.df$VisModeID == "Mix",])
# Drop the trailing "-" separator from the concatenated lists.
mix.df$SubspacesName <- substr(mix.df$SubspacesName, 0 ,nchar(mix.df$SubspacesName)-1)
mix.df$UserAnswerID <- substr(mix.df$UserAnswerID, 0 ,nchar(mix.df$UserAnswerID)-1)
# Explode: one row per subspace used in the answer ("-" separated list).
mix.df <- data.frame(mix.df %>% mutate(SubspacesName=strsplit(SubspacesName, "-")) %>% unnest(SubspacesName))
mix_temp.df <- data.frame(mix.df)
mix_temp.df$ID <- mix_temp.df$ID %>% as.numeric()
# SA = the answer code left over after stripping the visualization-name
# prefix from each subspace label.
mix_temp.df$SA <- strsplits(mix_temp.df$SubspacesName, c( "ScatterplotOverlay1",
"ScatterplotAnimated1",
"SMallA",
"SMallB",
"SMallC",
"SMallD",
"SMallE",
"SMallF",
"SMallG",
"SMallH",
"SMallI",
"SMallJ",
"SMallK",
"SMallL",
"SMallM",
"SMallN",
"SMallO",
"SMallP"
))
# Keep, for each participant/task pair, exactly the `num` answer rows that
# correspond to the user's ordered answers A1..Anum, stripping the matched
# answer code from the end of `SubspacesName` in each kept row.
#
# df:  exploded Mix-condition data frame with columns ID (numeric),
#      TaskID, SubspacesName, SA and answer columns A1..Anum.
# num: number of answers given in these rows (value of NumAnswers).
#
# Returns a data frame with the filtered/cleaned rows (empty data frame
# if nothing matches).  Generalizes the previous triplicated num==1/2/3
# branches to any num.
filterRepeats <- function(df, num)
{
  ans_cols <- paste0("A", seq_len(num))   # answer columns, in answer order
  result <- list()
  for (id in seq_len(max(df$ID)))
  {
    for (tid in unique(df[df$ID == id, c("TaskID")]))
    {
      filter.df <- subset(df, df$ID == id & df$TaskID == tid)
      if (nrow(filter.df) == 0) next      # defensive: no rows for this pair
      if (nrow(filter.df) == num)
      {
        # Exactly one row per answer: strip answer j from the end of
        # row j's SubspacesName and keep the whole group.
        for (j in rev(seq_len(num)))
        {
          filter.df[j, "SubspacesName"] <-
            substr(filter.df[j, "SubspacesName"], 0,
                   nchar(filter.df[j, "SubspacesName"]) -
                     nchar(filter.df[j, ans_cols[j]]))
        }
        result[[length(result) + 1]] <- filter.df
      }
      else
      {
        # Duplicated rows: walk from the last row upwards, matching the
        # answers in reverse order (Anum first).  Keep only the row whose
        # subspace answer (SA) equals the answer currently sought.
        cnum <- num
        for (k in nrow(filter.df):1)
        {
          if (filter.df[k, ans_cols[cnum]] == filter.df[k, "SA"])
          {
            filter.df[k, "SubspacesName"] <-
              substr(filter.df[k, "SubspacesName"], 0,
                     nchar(filter.df[k, "SubspacesName"]) -
                       nchar(filter.df[k, ans_cols[cnum]]))
            result[[length(result) + 1]] <- filter.df[k, ]
            if (cnum == 1) break          # all answers matched
            cnum <- cnum - 1
          }
        }
      }
    }
  }
  if (length(result) == 0) return(data.frame())
  do.call(rbind, result)
}
# Split the concatenated user answer into one column per answer (max 3).
mix_temp.df <- mix_temp.df %>% separate(UserAnswerID, c("A1","A2","A3"),"-")
#mix_temp.df <- mix_temp.df[!mix_temp.df$SA %in% c("A1","A2","A3"),]
# Keep only rows whose subspace answer matches one of the given answers.
mix_temp.df <- mix_temp.df[mix_temp.df$SA == mix_temp.df$A1 | mix_temp.df$SA == mix_temp.df$A2 | mix_temp.df$SA == mix_temp.df$A3,]
mix_temp.df <- mix_temp.df[!is.na(mix_temp.df$SA),]
# Process tasks with 1, 2 and 3 answers separately: filterRepeats needs
# to know how many answers to match.
mix_temp1.df <- mix_temp.df[mix_temp.df$NumAnswers == "1",]
mix_temp2.df <- mix_temp.df[mix_temp.df$NumAnswers == "2",]
mix_temp3.df <- mix_temp.df[mix_temp.df$NumAnswers == "3",]
mix_clean.df <- rbind(filterRepeats(mix_temp1.df,1),filterRepeats(mix_temp2.df,2),filterRepeats(mix_temp3.df,3))
# Drop the trailing separator left after stripping the answer code.
mix_clean.df$SubspacesName <- substr(mix_clean.df$SubspacesName, 0 ,nchar(mix_clean.df$SubspacesName)-1)
mix_clean.df$ID <- mix_clean.df$ID %>% as.factor()
mix_clean.df$answers <- 1
# Map each task to the data characteristic it probes via a lookup table
# (replaces twelve repeated per-task assignments).
task_characteristic <- c("1"  = "Compare Axes",      "6"  = "Compare Axes",
                         "5"  = "Longer Trajectory", "11" = "Longer Trajectory",
                         "3"  = "Strict Compare",    "8"  = "Strict Compare",
                         "9"  = "Strict Compare",
                         "4"  = "Reversals",         "7"  = "Reversals",
                         "2"  = "Overlaid points",
                         "10" = "Similar",
                         "12" = "Placement")
mix_clean.df$Characteristic <- unname(task_characteristic[as.character(mix_clean.df$TaskID)])
# Order tasks for plotting (top-to-bottom after coord_flip()).
mix_clean.df$TaskID <- ordered(mix_clean.df$TaskID, levels = c("12","11","10","9","8","7","6","5","4","3","2","1"))
# Stacked bar charts of which embedded visualization participants used to
# answer in the Mix condition (task 12 excluded: placement task).
plot_Mix_by_ID <- ggplot(mix_clean.df[mix_clean.df$TaskID != "12",], aes(x=ID,y=answers,fill=SubspacesName)) + geom_col() + coord_flip() + labs(y = "Answers", x = "Participant") +
scale_fill_discrete(name = "Visualization", labels = c("Animation", "Overlaid", "Small Multiples"))
ggsave(plot = plot_Mix_by_ID, filename = "plot_Mix_by_ID.png", device="png", width = 3.75, height = 2.25, units = "in", dpi = 300)
plot_Mix_by_TaskID <- ggplot(mix_clean.df[mix_clean.df$TaskID != "12",], aes(x=TaskID,y=answers,fill=SubspacesName)) + geom_col() + coord_flip()+ labs(y = "Answers", x = "Task") +
#scale_fill_discrete(name = "Visualization", labels = c("Animation", "Overlaid", "Small Multiples")) +
# Red guide lines at multiples of 18 — NOTE(review): presumably one
# answer per participant (18 participants); confirm.
geom_hline(aes(yintercept=18),show.legend = F,color="red") +
geom_hline(aes(yintercept=36),show.legend = F,color="red") +
geom_hline(aes(yintercept=54),show.legend = F,color="red")
ggsave(plot = plot_Mix_by_TaskID, filename = "plot_Mix_by_TaskID.png", device="png", width = 3.75, height = 2.25, units = "in", dpi = 300)
plot_Mix_by_Characteristic <- ggplot(mix_clean.df, aes(x=Characteristic,y=answers,fill=SubspacesName)) + geom_col() + coord_flip()
# Per-participant / per-task breakdowns of durations and interaction time.
plot_duration2 <- ggplot(task.df, aes(x=ID,y=duration,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_hand_vis <- ggplot(task.df, aes(x=ID,y=GrabHand + ScaleHand,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Hand")
plot_time_ray_vis <- ggplot(task.df, aes(x=ID,y=GrabRay + ScaleRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Ray")
plot_time_hand_task <- ggplot(task.df, aes(x=TaskID,y=GrabHand + ScaleHand,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Hand")
plot_time_ray_task <- ggplot(task.df, aes(x=TaskID,y=GrabRay + ScaleRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Ray")
plot_time_grab_vis <- ggplot(task.df, aes(x=ID,y=GrabHand + GrabRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Grab")
plot_time_grab_task <- ggplot(task.df, aes(x=TaskID,y=GrabHand + GrabRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Grab")
plot_time_scale_vis <- ggplot(task.df, aes(x=ID,y=ScaleHand + ScaleRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Scale")
plot_time_scale_task <- ggplot(task.df, aes(x=TaskID,y=ScaleHand + ScaleRay,fill=VisModeID)) + geom_col() + coord_flip() + labs(y = "Scale")
plot_time_grab_hand_vis <- ggplot(task.df, aes(x=ID,y=GrabHand,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_grab_ray_vis <- ggplot(task.df, aes(x=ID,y=GrabRay,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_grab_hand_task <- ggplot(task.df, aes(x=TaskID,y=GrabHand,fill=VisModeID)) + geom_col() + coord_flip()##
plot_time_grab_ray_task <- ggplot(task.df, aes(x=TaskID,y=GrabRay,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_scale_hand_vis <- ggplot(task.df, aes(x=ID,y=ScaleHand,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_scale_ray_vis <- ggplot(task.df, aes(x=ID,y=ScaleRay,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_scale_hand_task <- ggplot(task.df, aes(x=TaskID,y=ScaleHand,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_scale_ray_task <- ggplot(task.df, aes(x=TaskID,y=ScaleRay,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_selects_vis <- ggplot(task.df, aes(x=ID,y=Selects,fill=VisModeID)) + geom_col() + coord_flip()
plot_time_selects_task <- ggplot(task.df, aes(x=TaskID,y=Selects,fill=VisModeID)) + geom_col() + coord_flip()
# Distribution of task durations per visualization, with mean (red) and
# mean + 3 SD (blue) reference lines.
plot_duration_final2 <- ggplot(task.df, aes(duration,fill=VisModeID)) + geom_histogram(binwidth = .25,show.legend = T) + facet_wrap(~ VisModeID, nrow = 2) +
geom_vline(aes(xintercept=mean(duration)),show.legend = F,color="red") +
geom_vline(aes(xintercept=(mean(duration) + 3 * sd(duration))),show.legend = F,color="blue")
# Colour scale for the number of points scored (0 = red, blues for 1-3).
cols <- c("0" = "#e31a1c", "1" = "#bdd7e7", "2" = "#6baed6", "3" = "#2171b5")
plot_points2 <- ggplot(task.df, aes(points, fill = points_factor)) +
geom_histogram(binwidth = 1) +
facet_grid(VisModeID ~ TaskID) +
scale_fill_manual(values = cols)
#data.df$TimeInitTutorial <- as.POSIXlt(data.df$TimeInitTutorial)
#data.df$TimeEndTutorial <- as.POSIXlt(data.df$TimeEndTutorial)
# Total time spent on tasks per participant (seconds).
time_task <- aggregate(task.df$duration, by=list(ID=task.df$ID), FUN=sum)
# Wall-clock experiment duration per participant: tutorial start to
# application close, in minutes.
time_experiment.df <- unique(test.df[!is.na(test.df$TimeCloseApplication),c("ID", "TimeInitTutorial", "TimeCloseApplication") ])
time_experiment.df$time_experiment <- difftime(time_experiment.df$TimeCloseApplication, time_experiment.df$TimeInitTutorial, units="min") %>% as.numeric()
# +5 minutes for time outside the application — NOTE(review): constant
# assumed from the experiment protocol; confirm.
time_experiment.df$time_total <- time_experiment.df$time_experiment + 5
#test.df$time_experiment <- difftime(test.df$TimeCloseApplication, test.df$TimeInitPractice, units="min") %>% as.numeric()
#time_experiment.df <- aggregate(test.df$time_experiment, by=list(ID=test.df$ID), FUN=sum)
|
7626f125bbe9d91d4f1b3d701492fc9bc1dc527b
|
856c5192f678eb7bf20a2e0638c6bd782ef8ed55
|
/docxRkey/ivypi/tmp.R
|
3bedf118b48fcc1444958821f62a90341cd9c47d
|
[] |
no_license
|
madjugglers/pattern-book
|
0ed1feca8a6f21366dda4e7979d7c31697f20b36
|
1475cd3135a3a1f6d8da4a9d7ffe91279b82033e
|
refs/heads/master
| 2023-06-05T01:11:08.094646
| 2023-05-22T20:56:48
| 2023-05-22T20:56:48
| 44,569,396
| 5
| 4
| null | 2023-05-22T20:56:50
| 2015-10-19T23:10:38
|
R
|
UTF-8
|
R
| false
| false
| 1,794
|
r
|
tmp.R
|
# Nov 5/17
# Draws the "ivy-pi" pattern diagram as a set of circular arcs on a blank
# base-graphics canvas (plotrix::draw.arc).
# NOTE(review): rm(list=ls()) wipes the caller's workspace — avoid in
# shared scripts.
rm( list=ls() )
library(igraph)
library(plotrix) ## use draw.arc function
# adjacency matrix for ivy-pi
A <- matrix(0, 10, 10 ) ## 10 positions
g <- graph.adjacency(A)
# give edge attributes I think
#ll <- rbind( c(-2,0)+c(2,0),
# c( -1/2, 1*0.866/2 )+c(2,0),
# c( 1/2, -1*.866/2)+c(2,0),
# # new 4
# # new 5
# c( 2, 0)+c(2,0),
# c( 1/2, 1*.866/2)+c(2,0),
# c( -1/2, -1*.866/2 )+c(2,0),
# # new 9
# # new 10
# )
#pdf( file="ivypi.pdf", height=5, width=5 )
# Thin margins all around.
par( mar=rep(1/2,4) )
#plot( g, layout=ll ,
# vertex.size=15, edge.arrow.size=1, edge.arrow.width=1,
# edge.lty=1, edge.color="blue",
# edge.width=2, vertex.color="magenta", margin=c(0,.4,0,.4), asp=0,
# rescale=FALSE,
# edge.curved=c(0,0), xlim=c( )
#symbols( -1/2, 0, circles=1/2, add=TRUE, lwd=2 , inches=FALSE )
#symbols( 1/2, 0, circles=1/2, add=TRUE , lwd=2, inches=FALSE )
# Empty canvas to draw the arcs on (no axes, fixed aspect region).
plot( 0,0, xlim=c(-3,3), ylim=c(-3,3), axes=FALSE, xlab="", ylab="" )
#symbols( -1, 0, circles=1, add=TRUE, lwd=2, inches=FALSE )
#symbols( 1, 0, circles=1, add=TRUE, lwd=2, inches=FALSE )
draw.arc( -1, 0, angle1=(-pi/3), angle2=pi , lwd=2, col="black" )
# the following attempt isn't working with curves...maybe just use lines/points
# Geometry of the second arc centre, derived from the 60-degree tangency.
b1 <- sin( pi/3 )/( 1 + cos(pi/3) )
phi <- atan( b1 ) ## which is pi/6
hh <- 2*( sin(pi/3) + cos(pi/3) )
y3 <- -hh*cos( pi/6 )
x3 <- 1+hh*sin(pi/6)
draw.arc( x3, y3, angle1=(pi/3), angle2=(pi) , lwd=2, col="black" )
draw.arc( 1, 0, angle1=0, angle2=(2*pi-2*pi/3) , lwd=2, col="black" )
# flip side
draw.arc( -x3, y3, angle1=(0), angle2=(2*pi/3) , lwd=2, col="black" )
#?
draw.arc( -x3/6, y3, angle1=(pi/3), angle2=(pi) , lwd=2, col="black" )
|
807f0b1292ff12c9a2a75e1a8c225f85b0eb19ee
|
10d4fa42467a509279ff2dacc25d423c4a7d4e50
|
/R/admixture proportion.r
|
8fdbb3e461ec87bd33bf566419bbfadc48abf291
|
[] |
no_license
|
YaliZhang98/Binp37_biogeographical_algorithm
|
e777280a2d03d0fc1c54f4fb3ccab460a7676db1
|
7b516fc1fb8d579578f52e98762d56025a2bd74a
|
refs/heads/main
| 2023-07-06T20:27:28.443080
| 2021-08-18T21:03:00
| 2021-08-18T21:03:00
| 397,408,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 824
|
r
|
admixture proportion.r
|
# This script is used to produce a stacked graph for ADMIXTURE proportions
# in gene pool.
library(RColorBrewer)
# Input: subregion labels plus per-component admixture proportions
# (first two columns are metadata, the rest are proportions).
Dataset <- read.csv(file="admixture_proportions.csv",header=TRUE)
# One distinct colour per ancestry component (36 components).
palette <- colorRampPalette(brewer.pal(12, "Paired"))(36)
png("admixture_proportion.png", width = 13,height = 8, units = 'in', res = 600)
# Extra bottom/right margin for the sample labels and the legend.
par(xpd = TRUE, mar = par()$mar + c(1,0,0,7), mgp = c(0,0.7,0), las=2)
bp <- barplot(t(as.matrix(Dataset[,-1:-2])),col = palette,
              ylab = "",
              border = NA)
mtext(text = c(Dataset$Continent_detail), side = 1, at = bp, line = 0, padj = 1, cex = 0.5,las = 2)
bp
# BUG FIX: the legend previously referenced an undefined variable `test`;
# label it with the component (column) names of the proportion matrix.
legend("topright",inset = c(-0.15,0.2),colnames(Dataset)[-1:-2],
       fill = palette, bty = 1, cex = 0.6)
par(mar=c(5, 4, 4, 2) + 0.1)
dev.off()
|
3b856aafb403a4d5ee8b188fc82a964742e955dd
|
fa5eb1a6e94be9be5d1bc19d1807c6ed2983b2d0
|
/libapi/R/pubmed.get.abstracts.R
|
c5c5cc8f84874e68e8a0886e6a17b156dab5df51
|
[] |
no_license
|
bereginyas/rlib
|
57c8a4f3548b34ba9a69dd3774ab127cbd4632be
|
f511254f1ed46f5a7d43eea7884cf31ef2cda9ca
|
refs/heads/master
| 2022-01-07T12:54:04.819043
| 2019-05-25T05:34:24
| 2019-05-25T05:34:24
| 67,099,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
pubmed.get.abstracts.R
|
# Download PubMed records for the given PMIDs (in batches of up to 200 via
# the NCBI E-utilities efetch endpoint) and return a data.frame with one
# row per record: pmid, title, abstract.
# NOTE(review): httpGET() comes from RCurl, which is not loaded here —
# confirm the caller attaches RCurl.
# NOTE(review): side effect — the raw XML tree is saved to ~/data.xml.RDS.
pubmed.get.abstracts = function(pmids) {
library(XML)
## divide pmids into groups so as not to exceed the maximum url length
max.pmids=200
num.pmids=length(pmids)
num.groups = ceiling(num.pmids/max.pmids)
pmid.groups = split(pmids,factor(1:num.pmids%%num.groups))
data.xml = xmlNode(name="pubmed")
i = 0
for (pmids in pmid.groups) {
# Progress counter (may overshoot num.pmids on the final batch).
i=i+max.pmids
print(paste(i,"/",num.pmids,sep=""))
pmid_string = paste(pmids,collapse=",")
xml.url = paste("https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&rettype=abstract&id=",pmid_string,sep="")
output = xmlRoot(xmlTreeParse(httpGET(xml.url)))
data.xml=append.xmlNode(data.xml,xmlChildren(output))
#output = xmlTreeParse(system(paste('wget -qO- --ignore-length "',xml.url,'"',sep=""),intern=T))
}
saveRDS(data.xml, "~/data.xml.RDS")
#journal=sapply(getNodeSet(data.xml,"//Journal/Title"),xmlValue),
#year=sapply(getNodeSet(data.xml,"//JournalIssue/PubDate/Year"),xmlValue),
# Extract one field per record via XPath.  NOTE(review): assumes every
# record carries a PMID, ArticleTitle and Abstract node — records without
# abstracts would misalign these columns; confirm against real data.
data = data.frame(
pmid=sapply(getNodeSet(data.xml,"//MedlineCitation/PMID"),xmlValue),
title=sapply(getNodeSet(data.xml,"//ArticleTitle"),xmlValue),
abstract=as.character(sapply(getNodeSet(data.xml,"//Abstract"),xmlValue))
)
data
}
|
a1bc11c354797635d67cb5c54c6fa6f7a5791090
|
20fb140c414c9d20b12643f074f336f6d22d1432
|
/man/NISTmilligramTOgrain.Rd
|
5c947180b56128718cfc3d67b479dbb8d6938e42
|
[] |
no_license
|
cran/NISTunits
|
cb9dda97bafb8a1a6a198f41016eb36a30dda046
|
4a4f4fa5b39546f5af5dd123c09377d3053d27cf
|
refs/heads/master
| 2021-03-13T00:01:12.221467
| 2016-08-11T13:47:23
| 2016-08-11T13:47:23
| 27,615,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
rd
|
NISTmilligramTOgrain.Rd
|
\name{NISTmilligramTOgrain}
\alias{NISTmilligramTOgrain}
\title{Convert milligram to grain }
\usage{NISTmilligramTOgrain(milligram)}
\description{\code{NISTmilligramTOgrain} converts from milligram (mg) to grain (gr) }
\arguments{
\item{milligram}{milligram (mg) }
}
\value{grain (gr) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTmilligramTOgrain(10)
}
\keyword{programming}
|
2914da6c21bfdc7cd218730de7013984dfd646a7
|
6da3b4d3cd66f532e8b399f08fc9ebca3af10723
|
/articles/YamaguchiQE2019/Code/estimation/CalcLikelihoodType.R
|
eb5fa7b472d3b1ec4904ac0e372f235962036160
|
[] |
no_license
|
murattasdemir/Archive-of-Empirical-Dynamic-Programming-Research
|
ffbcc137e137cad42ccf3e0a3bbb5227f70f21a0
|
324fedb49f8d0792dc1df2e8b1302e19cbbe78b7
|
refs/heads/main
| 2023-05-28T12:45:24.380684
| 2021-05-27T16:56:37
| 2021-05-27T16:56:37
| 367,003,588
| 1
| 0
| null | 2021-05-13T09:47:32
| 2021-05-13T09:47:31
| null |
UTF-8
|
R
| false
| false
| 684
|
r
|
CalcLikelihoodType.R
|
# Multinomial-logit type probabilities.  `param.type` is the stacked
# coefficient vector for the Gn.type - 1 non-reference types (Gn.type is a
# global), `x` the n x k covariate matrix.  Returns an n x Gn.type matrix
# of probabilities whose rows sum to one; the first column is the
# reference type (linear index fixed at 0, hence exp(0) = 1).
CalcLikelihoodType <- function(param.type, x){
  beta <- matrix(param.type, ncol = Gn.type - 1)
  weights <- cbind(1, exp(x %*% beta))   # unnormalised type weights
  weights / rowSums(weights)             # normalise rows to probabilities
}
# Negative weighted log-likelihood for the type model, with its analytic
# gradient attached as attribute "gradient".  `q` holds the posterior type
# weights (same dimension as the probability matrix).
CalcWeightedLogLikType <- function(param.type, x, q){
  # Type-membership probabilities for the current parameters.
  pi <- CalcLikelihoodType(param.type, x)
  # Negative weighted log-likelihood.
  wm.loglik <- -1 * sum(q * log(pi))
  # Score residuals for the non-reference types.
  resid <- q[,-1] - pi[,-1]
  if(Gn.type == 2) resid <- matrix(resid, ncol = 1)  # keep matrix shape
  attr(wm.loglik, "gradient") <- -1 * apply(resid, 2, function(w) colSums(w * x))
  wm.loglik
}
# Convenience wrapper: return only the gradient of the weighted
# log-likelihood (for optimisers that request it separately).
DiffWeightedLogLikType <- function(param.type, x, q){
  loglik <- CalcWeightedLogLikType(param.type, x, q)
  attr(loglik, "gradient")
}
|
4a86c9e4b2cd8223e26737216b5e7770c328edfb
|
5d4914933ebcf8035147875d4f0a25d4e7b109fd
|
/ONEL_Stan_MM.R
|
82fc8e9372c1b984c458b7f749e86ef9bafbe8bd
|
[] |
no_license
|
silvialiverani/GMCARMM
|
a28df135e92fcad01e649efd0eb8bcdede9fa730
|
1ea49922dfe8be9bd6ef045089549dcf2e0fb531
|
refs/heads/master
| 2023-03-01T03:38:59.193406
| 2021-02-04T23:16:53
| 2021-02-04T23:16:53
| 254,815,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,643
|
r
|
ONEL_Stan_MM.R
|
#### GENERAL SETUP ####
# NOTE(review): setwd() in a script is non-portable; prefer running from
# the project root.
path <- file.path("....../StanCode")
setwd(path)
# Load package
library(dplyr)
library(rstan)
# Cache compiled Stan models on disk and parallelise the chains.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Data load - North East: 4 boroughs coverage 90% (OLD)
load("./ONEL_Data.RData")
#
#### DATASET PREPARATION ####
# Covariates dataset and min-max normalization to [0, 1] per column
X_cent <- apply(cbind(sa_pop = sa_pop_data,
imd = imd_data),
2, function(x) (x-min(x))/diff(range(x)))
# Create datasets for RStan
# Data list without covariates.
sp_d_mm <- list(n = length(mortality), # number of MSOAs
m = length(prevalence), # number of GP
# Outcomes
y1 = mortality, # observed number of cases 1
y2 = prevalence, # observed number of cases 2#
# Offsets
log_offset1 = log(exp_mortality),
log_offset2 = log(exp_prevalence),
# Adjacency
W_n = sum(W) / 2, # number of neighbor pairs
W = W, # adjacency matrix
# Multiple membership
M_W = weight_ext)
# Data list with the min-max scaled covariates added.
sp_d_mm_cov <- list(n = length(mortality), # number of MSOAs
m = length(prevalence), # number of GP
# Outcomes
y1 = mortality, # observed number of cases 1
y2 = prevalence, # observed number of cases 2#
# Covariates
k = ncol(X_cent),
X = X_cent,
# Offsets
log_offset1 = log(exp_mortality),
log_offset2 = log(exp_prevalence),
# Adjacency
W_n = sum(W) / 2, # number of neighbor pairs
W = W, # adjacency matrix
# Multiple membership
M_W = weight_ext)
#
#### MODELS COMPILATION ####
# Compile each Stan model once; sampling below reuses the compiled objects.
# GMCAR
## NOCOV
mod_GMCAR_MM_NegBin <-
stan_model(file = "Models/GMCAR/GMCAR_MM_NegBin.stan")
## COV
mod_GMCAR_MM_NegBin_cov <-
stan_model(file = "Models/GMCAR/GMCAR_MM_NegBin_cov.stan")
# MCAR
## NOCOV
mod_MVCAR_MM_NegBin <-
stan_model(file = "Models/MCAR/MCAR_Gelfand_MM.stan")
## COV
mod_MVCAR_MM_NegBin_cov <-
stan_model(file = "Models/MCAR/MCAR_Gelfand_MM_Cov.stan")
#
#### HMC PARAMETERS ####
# 6000 iterations x 4 chains; adapt_delta / max_treedepth are raised in
# the sampling calls to reduce divergent transitions.
niter <- 6E3
nchains <- 4
#
#### SAMPLING ####
# GMCAR
## NOCOV
fit_gmcar_nocov_negbin <- sampling(mod_GMCAR_MM_NegBin, data = sp_d_mm,
iter = niter, chains = nchains,
control = list(adapt_delta = .99,
max_treedepth = 15))
## COV
fit_gmcar_cov_negbin <- sampling(mod_GMCAR_MM_NegBin_cov,
data = sp_d_mm_cov,
iter = niter, chains = nchains,
control = list(adapt_delta = .99,
max_treedepth = 15))
# MCAR
## NOCOV
fit_mvcar_nocov_negbin <- sampling(mod_MVCAR_MM_NegBin,
data = sp_d_mm,
iter = niter, chains = nchains,
control = list(adapt_delta = .99,
max_treedepth = 15))
## COV
fit_mvcar_cov_negbin <- sampling(mod_MVCAR_MM_NegBin_cov,
data = sp_d_mm_cov,
iter = niter, chains = nchains,
control = list(adapt_delta = .99,
max_treedepth = 15))
#
#### PRINT OUTPUTS ####
# Posterior summaries (2.5 / 50 / 97.5 percentiles) for key parameters.
# GMCAR
## NOCOV
pars_vec <- c('nought', 'alpha1', 'alpha2', 'eta0', 'eta1',
'tau1', 'tau2', 'v_sig1', 'v_sig2', 'ppp1', 'ppp2')
print(fit_gmcar_nocov_negbin, pars = pars_vec, probs = c(.025,.5,.975))
## COV
pars_vec <- c('nought', 'alpha1', 'alpha2', 'eta0', 'eta1', 'beta1', 'beta2',
'tau1', 'tau2', 'v_sig1', 'v_sig2', 'ppp1', 'ppp2')
print(fit_gmcar_cov_negbin, pars = pars_vec, probs = c(.025,.5,.975))
# MVCAR
## NOCOV
pars_vec <- c('nought', 'alpha', 'eta0',
'tau1', 'tau2', 'v_sig1', 'v_sig2', 'ppp1', 'ppp2')
print(fit_mvcar_nocov_negbin, pars = pars_vec, probs = c(.025,.5,.975))
## COV
pars_vec <- c('nought', 'alpha', 'eta0', 'beta1', 'beta2',
'tau1', 'tau2', 'v_sig1', 'v_sig2', 'ppp1', 'ppp2')
print(fit_mvcar_cov_negbin, pars = pars_vec, probs = c(.025,.5,.975))
#
|
e10447d94512c92533efbb4d7ba46597ecfdc4ed
|
d7d556c4ce38c2dec8dc94b8f5858879085ee9a6
|
/man/beeswarm.Rd
|
e9c9476ded2cff79728591026369efcfc2babafd
|
[] |
no_license
|
aroneklund/beeswarm
|
126788c86b8bd72e01e3eccadd20a021107cb4e2
|
d641db509e7a3f9c8e76147bf779fcb141cdba51
|
refs/heads/master
| 2023-06-07T23:04:06.996955
| 2023-01-21T21:58:03
| 2023-01-21T21:58:03
| 39,942,151
| 41
| 9
| null | 2023-05-28T17:38:58
| 2015-07-30T09:20:49
|
R
|
UTF-8
|
R
| false
| false
| 12,661
|
rd
|
beeswarm.Rd
|
\name{beeswarm}
\alias{beeswarm}
\alias{beeswarm.default}
\alias{beeswarm.formula}
\title{Bee swarm plot}
\description{
Create a bee swarm plot. A bee swarm plot is a one-dimensional scatter plot similar to \code{\link{stripchart}}, but with various methods to separate coincident points such that each point is visible. Also, \code{beeswarm} introduces additional features unavailable in \code{stripchart}, such as the ability to control the color and plotting character of each point.
}
\usage{
beeswarm(x, \dots)
\method{beeswarm}{formula}(formula, data = NULL, subset, na.action = NULL,
pwpch = NULL, pwcol = NULL, pwbg = NULL, pwcex = NULL, dlab, glab, \dots)
\method{beeswarm}{default}(x,
method = c("swarm", "compactswarm", "center", "hex", "square"),
vertical = TRUE, horizontal = !vertical,
cex = 1, spacing = 1, breaks = NULL,
labels, at = NULL,
corral = c("none", "gutter", "wrap", "random", "omit"),
corralWidth, side = 0L,
priority = c("ascending", "descending", "density", "random", "none"),
fast = TRUE,
pch = par("pch"), col = par("col"), bg = NA,
pwpch = NULL, pwcol = NULL, pwbg = NULL, pwcex = NULL,
do.plot = TRUE, add = FALSE, axes = TRUE, log = FALSE,
xlim = NULL, ylim = NULL, dlim = NULL, glim = NULL,
xlab = NULL, ylab = NULL, dlab = "", glab = "",
\dots)
}
\arguments{
\item{formula}{A formula, such as \code{y ~ grp}, where \code{y} is a
numeric vector of data values to be split into groups according to
the grouping variable \code{grp} (usually a factor).}
\item{data}{A data.frame (or list) from which the variables in
\code{formula} should be taken.}
\item{subset}{An optional vector specifying a subset of observations
to be used.}
\item{na.action}{A function which indicates what should happen
when the data contain \code{NA}s. The default is to quietly ignore missing
values in either the response or the group.}
\item{x}{ A numeric vector, or a data frame or list of numeric vectors, each of which is plotted as an individual swarm.}
\item{method}{ Method for arranging points (see Details). }
\item{vertical, horizontal}{ Orientation of the plot. \code{horizontal} takes precedence if both are specified. }
\item{cex}{ Size of points relative to the default given by \code{par("cex")}. Unlike other plotting functions, this must be a single value. (But see also the \code{pwcex} argument)}
\item{spacing}{ Relative spacing between points.}
\item{breaks}{ Breakpoints for data discretization (optional). Used only if \code{method} is \code{"square"}, \code{"hex"}, or \code{"center"}. If \code{NULL}, breakpoints are chosen automatically. If \code{NA}, data is not discretized at all (similar to \code{stripchart} with \code{method = "stack"}).}
\item{labels}{ Labels for each group. Recycled if necessary. By default, these are inferred from the data. }
\item{at}{ Numeric vector giving the locations where the swarms should be drawn; defaults to \code{1:n} where \var{n} is the number of groups. }
\item{corral}{ Method to adjust points that would be placed outside their own group region (see Details). }
\item{corralWidth}{ Width of the "corral" in user coordinates. If missing, a sensible value will be chosen. }
\item{side}{ Direction to perform jittering: 0: both directions; 1: to the right or upwards; -1: to the left or downwards.}
\item{priority}{ Order used to perform point layout when method is \code{"swarm"} or \code{"compactswarm"}; ignored otherwise (see Details).}
\item{fast}{ Use compiled version of algorithm? This option is ignored for all methods except \code{"swarm"} and \code{"compactswarm"}.}
\item{pch, col, bg}{ Plotting characters and colors, specified by group. Recycled if necessary (see Details). }
\item{pwpch, pwcol, pwbg, pwcex}{ \dQuote{Point-wise} plotting characteristics, specified for each data point (see Details). }
\item{do.plot}{ Draw a plot? }
\item{add}{ Add to an existing plot? }
\item{axes}{ Draw axes and box? }
\item{log}{ Use a logarithmic scale on the data axis? }
\item{xlim, ylim}{ Limits of the plot. }
\item{dlim, glim}{ An alternative way to specify limits (see Details). }
\item{xlab, ylab}{ Axis labels. }
\item{dlab, glab}{ An alternative way to specify axis labels (see Details). }
\item{\dots}{ Further arguments passed to \code{\link{plot}}. }
}
\details{
Several methods for placing the points are available; each method uses a different algorithm to avoid overlapping points.
The default method, \code{swarm}, places points in increasing order. If a point would overlap an existing point, it is shifted sideways (along the group axis) by a minimal amount sufficient to avoid overlap. With this method \code{breaks} is ignored.
The methods \code{square}, \code{hex}, and \code{center} do the same thing, but they first discretize the values along the continuous data axis, in order to enable more efficient packing: \code{square} places the points on a square grid, \code{hex} uses a hexagonal grid, and \code{center} uses a centered square grid. By default, the number of breakpoints for discretization is determined by a combination of the available plotting area and the plotting character size. The discretization of the data can be explicitly controlled using \code{breaks}. If \code{breaks} is set to \code{NA}, the data will not be grouped into intervals; this may be a sensible option if the data is already discrete. NOTE that these three methods adjust the data to fit into a grid, and therefore the resulting plots should be interpreted with this in mind.
In contrast to most other plotting functions, changing the size of the graphics device will often change the position of the points.
The plotting characters and colors can be controlled in two ways. First, the arguments \code{pch}, \code{col} and \code{bg} can specify plotting characters and colors in the same way as \code{\link{stripchart}} and \code{\link{boxplot}}: in short, the arguments apply to each group as a whole (and are recycled if necessary).
Alternatively, the \dQuote{point-wise} characteristics of each individual data point can be controlled using \code{pwpch}, \code{pwcol}, and \code{pwbg}, which override \code{pch}, \code{col} and \code{bg} if these are also specified. Likewise, \code{pwcex} controls the size of each point relative to the default (which may be adjusted by \code{cex}). Notably, the point layout algorithm is applied without considering the point-wise arguments; thus setting \code{pwcex} larger than 1 will usually result in partially overlapping points. These arguments can be specified as a list or vector. If supplied using the formula method, the arguments can be specified as part of the formula interface; i.e. they are affected by \code{data} and \code{subset}.
The \code{dlab} and \code{glab} labels may be used instead of \code{xlab} and \code{ylab} if those are not specified. \code{dlab} applies to the continuous data axis (the Y axis unless \code{horizontal} is \code{TRUE}); \code{glab} to the group axis. Likewise, \code{dlim} and \code{glim} can be used to specify limits of the axes instead of \code{xlim} or \code{ylim}.
This function is intended to be mostly compatible with calls to \code{\link{stripchart}} or \code{\link{boxplot}}. Thus, code that works with these functions should work with \code{beeswarm} with minimal modification.
By default, swarms from different groups are not prevented from overlapping. Thus, large data sets, or data sets with uneven distributions, may produce somewhat unpleasing beeswarms. If this is a problem, consider reducing \code{cex}. Another approach is to control runaway points (those that would be plotted outside a region allotted to each group) with the \code{corral} argument: The default, \code{"none"}, does not control runaway points. \code{"gutter"} collects runaway points along the boundary between groups. \code{"wrap"} implements periodic boundaries. \code{"random"} places runaway points randomly in the region. \code{"omit"} omits runaway points. See Examples below.
When using the \code{"swarm"} method, \code{priority} controls the order in which the points are placed; this generally has a noticeable effect on the resulting appearance. \code{"ascending"} gives the "traditional" beeswarm plot in which the points are placed in an ascending order. \code{"descending"} is the opposite. \code{"density"} prioritizes points with higher local density. \code{"random"} places points in a random order. \code{"none"} places points in the order provided.
Whereas the \code{"swarm"} method places points in a predetermined order, the \code{"compactswarm"} method uses a greedy strategy to determine which point will be placed next. This often leads to a more tightly-packed layout. The strategy is very simple: on each iteration, a point that can be placed as close as possible to the non-data axis is chosen and placed. If there are two or more equally good points, \code{priority} is used to break ties.
}
\value{ A data frame with plotting information, invisibly. }
\seealso{ \code{\link{stripchart}}, \code{\link{boxplot}} }
\examples{
## One of the examples from 'stripchart'
beeswarm(decrease ~ treatment,
data = OrchardSprays, log = TRUE,
pch = 16, col = rainbow(8))
## One of the examples from 'boxplot', with a beeswarm overlay
boxplot(len ~ dose, data = ToothGrowth,
main = "Guinea Pigs' Tooth Growth",
xlab = "Vitamin C dose mg",
ylab = "Tooth length")
beeswarm(len ~ dose, data = ToothGrowth, col = 2, add = TRUE)
## Compare the 5 methods
op <- par(mfrow = c(2,3))
for (m in c("swarm", "compactswarm", "center", "hex", "square")) {
beeswarm(len ~ dose, data = ToothGrowth, method = m, main = m)
}
par(op)
## Demonstrate the use of 'pwcol'
data(breast)
beeswarm(time_survival ~ ER, data = breast,
pch = 16, pwcol = 1 + as.numeric(event_survival),
xlab = "", ylab = "Follow-up time (months)",
labels = c("ER neg", "ER pos"))
legend("topright", legend = c("Yes", "No"),
title = "Censored", pch = 16, col = 1:2)
## The list interface
distributions <- list(runif = runif(200, min = -3, max = 3),
rnorm = rnorm(200),
rlnorm = rlnorm(200, sdlog = 0.5))
beeswarm(distributions, col = 2:4)
## Demonstrate 'pwcol' with the list interface
myCol <- lapply(distributions, function(x) cut(x, breaks = quantile(x), labels = FALSE))
beeswarm(distributions, pch = 16, pwcol = myCol)
legend("bottomright", legend = 1:4, pch = 16, col = 1:4, title = "Quartile")
## Demonstrate the 'corral' methods
par(mfrow = c(2,3))
beeswarm(distributions, col = 2:4,
main = 'corral = "none" (default)')
beeswarm(distributions, col = 2:4, corral = "gutter",
main = 'corral = "gutter"')
beeswarm(distributions, col = 2:4, corral = "wrap",
main = 'corral = "wrap"')
beeswarm(distributions, col = 2:4, corral = "random",
main = 'corral = "random"')
beeswarm(distributions, col = 2:4, corral = "omit",
main = 'corral = "omit"')
## Demonstrate 'side' and 'priority'
par(mfrow = c(2,3))
beeswarm(distributions, col = 2:4,
main = 'Default')
beeswarm(distributions, col = 2:4, side = -1,
main = 'side = -1')
beeswarm(distributions, col = 2:4, side = 1,
main = 'side = 1')
beeswarm(distributions, col = 2:4, priority = "descending",
main = 'priority = "descending"')
beeswarm(distributions, col = 2:4, priority = "random",
main = 'priority = "random"')
beeswarm(distributions, col = 2:4, priority = "density",
main = 'priority = "density"')
## Demonstrate 'side' and 'priority' for compact method
par(mfrow = c(2,3))
beeswarm(distributions, col = 2:4, method = "compactswarm",
main = 'Default')
beeswarm(distributions, col = 2:4, method = "compactswarm", side = -1,
main = 'side = -1')
beeswarm(distributions, col = 2:4, method = "compactswarm", side = 1,
main = 'side = 1')
beeswarm(distributions, col = 2:4, method = "compactswarm",
priority = "descending", main = 'priority = "descending"')
beeswarm(distributions, col = 2:4, method = "compactswarm",
priority = "random", main = 'priority = "random"')
beeswarm(distributions, col = 2:4, method = "compactswarm",
priority = "density", main = 'priority = "density"')
## Demonstrate pwcol, pwpch, pwbg, and pwcex
beeswarm(mpg ~ cyl, data = mtcars, cex = 3,
pwcol = gear, pwbg = am + 1, pwpch = gear + 18, pwcex = hp / 335)
}
\keyword{ hplot }
|
9687c876f984b5746524dd7e7e2d3a634b7c5955
|
ec99cf538b9a6dd25ba391254310829fb08790b1
|
/code/tidalHelpers/Helpers/event_based_helper.R
|
b1df628595360c1a910273205b902445f739d645
|
[] |
no_license
|
katerobsau/NWO_Project
|
61c0898dc31e4411ee8e3a2c0889ad116c7a5876
|
74f7f0652a7986bc0aceeee1fd6f6c278367d9ba
|
refs/heads/master
| 2021-08-08T01:52:28.623061
| 2020-07-12T12:52:05
| 2020-07-12T12:52:05
| 200,646,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,251
|
r
|
event_based_helper.R
|
# cluster code
# print("Build this into the package!!!")
# print("Only want the functions in here")
# Event-based verification of storm-surge ensemble forecasts: for each
# forecast date, compute peak and duration summaries from both the
# ensemble members and the observations, then compare them.
# NOTE(review): relies on helpers defined in the sourced script below
# (utils_init, get_ensemble_ref, combine_ensemble_data, NA_check,
# get_peak, get_duration); their exact contracts are not visible here.
source("../source/surge_exploratory_analyis.R")
# Sentinel value for missing data in the raw files -- TODO confirm usage.
na_value = 99
init_var = utils_init()
# One sub-directory of ENS forecast files per forecast date.
main_data_dir = "/Users/katesaunders/Documents/No_Back_Up_Data/ENS/"
dates_vec = list.files(main_data_dir)
member_ref = get_ensemble_ref(init_var$num_members)
# Water-level threshold defining a period "at risk" (units as in the data
# -- TODO confirm).
risk_level = -0.42
# -----------------------------------------------------------------------------
# Pre-allocate one list element per forecast date for each statistic,
# for both the ensemble (ens) and the observations (obs).
num_days = length(dates_vec)
peak_surge_ens <- peak_surge_obs <- vector("list", num_days)
peak_level_ens <- peak_level_obs <- vector("list", num_days)
duration_surge_ens <- duration_surge_obs <- vector("list", num_days)
duration_risk_ens <- duration_risk_obs <- vector("list", num_days)
for(i in 1:num_days){
  date_val = dates_vec[i] #"2011112900" # #"2012010200" #
  data_dir = paste(main_data_dir, date_val, "/", sep = "")
  # Get Ensemble data
  ensemble_data = combine_ensemble_data(date_val, lead_time = "00000",
                                        member_ref = member_ref,
                                        data_dir = data_dir)
  # Skip forecast dates with no usable ensemble files.
  if(is.null(ensemble_data)) next
  # Deal with NA data
  ensemble_data = ensemble_data %>%
    NA_check()
  # Get Observed data
  # Observations are repeated across ensemble members, so keep the
  # distinct rows only.
  obs_data <- ensemble_data %>%
    select(t, harm, sur, obs) %>%
    distinct()
  # "Question to self: Do I want to remove NA obs?" &
  # "How does this effect my analysis")
  ### EVENT BASED STATISTICS
  # peak surge levels (time and height)
  # Observed peaks are re-joined to the ensemble so the matching member
  # values at the observed peak time are available for comparison.
  peak_surge_obs[[i]] <- get_peak(df = obs_data, type = "obs",
                                  var_name = "sur") %>%
    left_join(ensemble_data) %>%
    mutate(date = date_val)
  peak_surge_ens[[i]] <- get_peak(df = ensemble_data, type = "ens",
                                  var_name = "wsur") %>%
    mutate(date = date_val)
  # peak water levels (time and height)
  peak_level_obs[[i]] <- get_peak(df = obs_data, type = "obs",
                                  var_name = "obs") %>%
    left_join(ensemble_data) %>%
    mutate(date = date_val)
  peak_level_ens[[i]] <- get_peak(df = ensemble_data, type = "ens",
                                  var_name = "wtot") %>%
    mutate(date = date_val)
  # longest surge period of positive surge
  duration_surge_ens[[i]] <- get_duration(df = ensemble_data, type = "ens",
                                          threshold_value = 0, var_name = "wsur",
                                          var_summary = "max") %>%
    mutate(date = date_val)
  duration_surge_obs[[i]] <- get_duration(df = obs_data, type = "obs",
                                          threshold_value = 0, var_name = "sur",
                                          var_summary = "max") %>%
    left_join(ensemble_data) %>%
    mutate(date = date_val)
  # period of risk
  # Total time the water level exceeds the risk threshold.
  duration_risk_ens[[i]] <- get_duration(df = ensemble_data, type = "ens",
                                         threshold_value = risk_level,
                                         var_name = "wtot",
                                         var_summary = "total") %>%
    mutate(date = date_val)
  duration_risk_obs[[i]] <- get_duration(df = obs_data, type = "obs",
                                         threshold_value = risk_level,
                                         var_name = "obs",
                                         var_summary = "total") %>%
    mutate(date = date_val)
}
### --------------------------------------------------------------------------------
# Stack the per-date observed peak-surge summaries into one data frame.
peak_surge_obs_all <- do.call("rbind", peak_surge_obs)
# NOTE(review): after summarise() only `member` and `mean_wsur` remain,
# yet the aes() below references wsur, sur and t -- this plot likely
# errors as written; confirm whether the group_by/summarise step should
# be dropped or the aesthetics updated.
ggplot(peak_surge_obs_all %>%
         group_by(member) %>%
         summarise(mean_wsur = mean(wsur)) %>%
         ungroup()) +
  geom_point(aes(x = wsur, y = sur, col = t)) +
  scale_color_distiller(palette = "Spectral") +
  geom_abline(slope = 1, intercept = 0, linetype = "dotted")
# for each of the g functions I want to pull out the data from the raw ensemble
# ie. the mean value
# Then I want to calculate the BSS so Sum (F - O)^2
# Could do this for different lead times (Not just the longest one)
# loop over my data
|
4fa2a8bb60f703ff8a2761b2b8fec772390f3c05
|
8ac06e475183e8519f543fce41e72ec0e7226309
|
/man/goldenRatioRolors.Rd
|
c5999dab6b31d851b64937398ba0885cabd1ed62
|
[] |
no_license
|
kashenfelter/Dmisc
|
0a43b7fbd83c874996501c83f54b2f46ca050af7
|
7e8ed7c1477f67376de6832fa1bfaf20170e5136
|
refs/heads/master
| 2020-03-13T22:06:00.018345
| 2017-08-21T17:08:19
| 2017-08-21T17:08:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
rd
|
goldenRatioRolors.Rd
|
\name{goldenRatioRolors}
\alias{goldenRatioRolors}
\title{Choose n colors using the golden ratio}
\usage{
goldenRatioRolors(n, s = 0.5, v = 1, alpha = 1)
}
\arguments{
\item{n}{Integer. The number of colors you want}
\item{s}{Numeric. Saturation - input into hsv}
\item{v}{Numeric. Value - input into hsv}
\item{alpha}{Numeric. The alpha blending value that is
input into hsv.}
}
\description{
This chooses n colors for you by breaking up the
hues according to a sequence generated by the golden
ratio.
}
\examples{
cols <- goldenRatioColors(5)
plot(1:5, 1:5, col = cols)
n <- 5
xs <- seq(1, n)
plot(c(1, n+1), c(0, 1), type = "n")
rect(xs, 0, xs + 1, 1, col = goldenRatioColors(n))
}
|
2c318135192e14a636adc8790b2bfc7ba7ea9749
|
466a14350411044a071faa1702294d06b1543edf
|
/R/survival.R
|
3a448870ee8dd938e5694228d119b7450d53bee2
|
[] |
no_license
|
cran/rattle
|
8fc67846c6dac6c282e905fe87ff38f0694056da
|
3875c10d0ae6c7a499d918bc501e121861067e06
|
refs/heads/master
| 2022-05-02T14:29:57.688324
| 2022-03-21T12:10:02
| 2022-03-21T12:10:02
| 17,699,048
| 18
| 33
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,251
|
r
|
survival.R
|
# Rattle Survival
#
# Time-stamp: <2017-09-10 10:23:43 Graham Williams>
#
# Copyright (c) 2009 Togaware Pty Ltd
#
# This files is part of Rattle.
#
# Rattle is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rattle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rattle. If not, see <https://www.gnu.org/licenses/>.
########################################################################
# GUI
setGuiDefaultsSurvival <- function()
{
  # Reset the Survival model tab of the GUI to its initial state: no
  # time/status variables chosen, coxph selected as the default builder,
  # and the plot buttons disabled (no model has been built yet).
  theWidget("model_survival_time_var_label")$setText(Rtxt("No time variable selected"))
  theWidget("model_survival_status_var_label")$setText(Rtxt("No status variable selected"))
  theWidget("model_survival_coxph_radiobutton")$setActive(TRUE)
  theWidget("model_survival_plots_label")$setSensitive(FALSE)
  theWidget("model_survival_plot_survival_button")$setSensitive(FALSE)
  theWidget("model_survival_plot_residual_button")$setSensitive(FALSE)
}
on_model_survival_coxph_radiobutton_toggled <- function(button)
{
  # GTK callback: keep the displayed model function name in sync with
  # the selected model builder radio button.
  #
  # 091114 coxph and survreg are currently the only two builders, so a
  # single callback handles both states.
  coxph.selected <- button$getActive()
  label <- if (coxph.selected) "coxph" else "survreg"
  theWidget("model_survival_function_label")$setText(label)
  # The survival/residual plot buttons only make sense for a coxph
  # model that has actually been built.
  sensitive <- coxph.selected && !is.null(crs$survival)
  for (w in c("model_survival_plots_label",
              "model_survival_plot_survival_button",
              "model_survival_plot_residual_button"))
    theWidget(w)$setSensitive(sensitive)
}
on_model_survival_plot_survival_button_clicked <- function(button)
{
  # GTK callback: plot the survival chart for the current model.
  plotSurvivalModel()
}
on_model_survival_plot_residual_button_clicked <- function(button)
{
  # GTK callback: plot the Schoenfeld residuals for the current model.
  plotResidualModels()
}
########################################################################
# Model Tab
# Build a survival model (survreg or coxph) and report it to the GUI.
# formula: character string giving the model formula.
# dataset: character string naming/selecting the training data; it is
#   substituted verbatim into the generated command.
# tv:      optional textview widget; when non-NULL the GUI and log are
#   updated as the model is built.
# method:  "para" for a parametric survreg model, otherwise coxph.
#   NOTE(review): the default c("para", "coxph") is never reduced with
#   match.arg(), so the ifelse() below is vectorised when the default is
#   used; callers appear to always supply a single value -- confirm.
# Returns the fitted model on success, NULL if the survival package is
# unavailable or an unclassified GUI error was raised, or FALSE on a
# handled model-building failure.
buildModelSurvival <- function(formula, dataset, tv=NULL, method=c("para", "coxph"))
{
  # If tv is not NULL, then we will be updating the textview object as
  # we proceed, as well as sending information to the log. The aim is
  # for this function to run totally independent of the GUI, but to
  # also support it. A developer can use this function, supply their
  # own textview object and their own implementations of resetTextview
  # and appendTextview for the modelling output, and startLog and
  # appendLog for a log of the commands, and setStatusBar for a
  # summary of what has been done.
  gui <- not.null(tv)
  if (gui) startLog(Rtxt("Survival Model"))
  # TRUE when a training subset is in use; controls whether the weights
  # expression is subscripted by crs$train.
  sampling <- not.null(crs$train)
  # Load the required package into the library.
  lib.cmd <- "library(survival, quietly=TRUE)"
  if (! packageIsAvailable("survival", Rtxt("build a Survival model"))) return(NULL)
  if (gui) appendLog(Rtxt("Require the survival package."), lib.cmd)
  eval(parse(text=lib.cmd))
  # Build a model. Map the GUI's "para" choice onto survreg.
  method <- ifelse(method=="para", "survreg", "coxph")
  # The model command is constructed as text so it can be echoed to the
  # log exactly as executed.
  model.cmd <- paste(method, "(", formula,
                     ",\n    data=", dataset,
                     if (! is.null(crs$weights))
                       sprintf(",\n    weights=(%s)%s",
                               crs$weights,
                               ifelse(sampling, "[crs$train]", "")),
                     ")", sep="")
  if (gui) appendLog(Rtxt("Build the Survival model."),
                     sprintf('crs$survival <- %s', model.cmd))
  # Note that this crs$survival is not the global crs$survival! We use
  # it here to be consistent in terms of the commands that are
  # reported to the log, but we return this value and in the outer
  # call we globally assign to crs$survival, at least in the context
  # of the Rattle GUI.
  start.time <- Sys.time()
  crs$survival <- try(eval(parse(text=model.cmd)), silent=TRUE)
  time.taken <- Sys.time()-start.time
  # Handle known failure modes with targeted advice dialogs; anything
  # unrecognised gets the generic error message.
  if (inherits(crs$survival, "try-error"))
  {
    msg <- errorMessageFun(method, crs$survival)
    if (any(grep(Rtxt("Invalid survival times for this distribution"), crs$survival)))
    {
      errorDialog(Rtxt("The building of the survival model failed.",
                       "The error indicates an invalid Time variable.",
                       "This can be the case when using survreg and there is",
                       "a zero time value (as might result from an imputation).",
                       "Please review the source data and ensure the Time values",
                       "are correct."))
      setTextview(tv)
    }
    else if (any(grep(Rtxt("NA/NaN/Inf in foreign function call"), crs$survival)))
    {
      errorDialog(Rtxt("Your data contains variables with too many categoric values.",
                       "Please reduce the number of categoric values or remove any",
                       "identifier variables from the input variables in order to",
                       "generate a survival model.",
                       "\n\nThe actual error message was:"),
                  "\n\n", paste(crs$survival, "\n"))
      setTextview(tv)
    }
    else
    {
      if (gui)
      {
        errorDialog(msg)
        return(NULL)
      }
    }
    return(FALSE)
  }
  # Print the results of the modelling.
  if (gui)
  {
    print.cmd <- "summary(crs$survival)"
    appendLog(Rtxt("Print the results of the modelling."), print.cmd)
    resetTextview(tv, tvsep=FALSE,
                  sprintf(Rtxt("Summary of the Survival model (built using %s):"),
                          method),
                  "\n\n",
                  collectOutput(print.cmd))
    # For coxph also report the proportional hazards assumption test.
    if (method=="coxph")
    {
      print.cmd <- paste(print.cmd, "cox.zph(crs$survival)", sep="; ")
      appendTextview(tv, tvsep=FALSE, "\n\n",
                     Rtxt("Test the proportional hazards ",
                          "assumption for a Cox regression model:"),
                     "\n\n",
                     collectOutput(print.cmd))
    }
  }
  # Finish up.
  if (gui)
  {
    time.msg <- sprintf("\nTime taken: %0.2f %s", time.taken,
                        attr(time.taken, "units"))
    appendTextview(tv, "\n", time.msg)
    appendLog(time.msg)
    setStatusBar(Rtxt("A survival model has been generated."), time.msg)
  }
  return(crs$survival)
}
# Update GUI sensitivities that depend on a survival model existing.
#
# If a survival model exists then make sensitive the relevant buttons
# that require the model to exist. For the Survival model this will be
# the plot functions, which only apply to a coxph model. A survreg model
# scores a predicted value ("class"); only a coxph model supports the
# relative risk ("probability") score.
#
# state: logical; whether a survival model is currently available.
showModelSurvivalExists <- function(state=!is.null(crs$survival))
{
  # Use inherits() rather than class(...) == "coxph": the class
  # attribute may be a vector (e.g. c("coxph.penal", "coxph")), in
  # which case == yields a logical vector and if() fails.
  is.coxph <- inherits(crs$survival, "coxph")

  if (state && is.coxph)
  {
    theWidget("model_survival_plots_label")$setSensitive(TRUE)
    theWidget("model_survival_plot_survival_button")$setSensitive(TRUE)
    theWidget("model_survival_plot_residual_button")$setSensitive(TRUE)
  }

  theWidget("score_class_radiobutton")$
    setActive(inherits(crs$survival, "survreg"))
  theWidget("score_probability_radiobutton")$
    setSensitive(is.coxph)
}
plotSurvivalModel <- function()
{
  # Plot the survival curve (survfit) of the current survival model on a
  # new plot device, logging the command that generates it.
  startLog(Rtxt("Survival chart."))
  # The command is built as text so it can be recorded in the log
  # exactly as executed.
  plot.cmd <- paste('plot(survfit(crs$survival), xlab=crs$target,',
                    'ylab="Survival Probability", col=3)\n',
                    genPlotTitleCmd('Survival Chart', crs$target, 'to',
                                    crs$risk), sep="")
  appendLog(Rtxt("Plot the survival chart for",
                 "the most recent survival model."), plot.cmd)
  newPlot()
  eval(parse(text=plot.cmd))
}
plotResidualModels <- function()
{
  # Plot the scaled Schoenfeld residuals (cox.zph) for each term of the
  # current coxph model, with a zero reference line and a linear fit on
  # each panel, paging according to the GUI's plots-per-page setting.
  startLog(Rtxt("Survival model residuals plot."))
  # 100417 Use the max number per page count from the plots page.
  pmax <- theWidget("plots_per_page_spinbutton")$getValue()
  # The plotting loop is built as text so it is logged exactly as
  # executed.
  plot.cmd <- paste('temp <- cox.zph(crs$survival)',
                    sprintf("pmax <- %d", pmax),
                    "pcnt <- 0",
                    'nr <- nrow(temp$var)',
                    "if (nr < pmax) pmax <- nr",
                    'for (vnum in 1:nr)',
                    '{',
                    '  if (pcnt %% pmax == 0) newPlot(pmax)',
                    '  pcnt <- pcnt + 1',
                    '  plot(temp, var=vnum)',
                    '  abline(0, 0, lty=3)',
                    '  # A linear fit.',
                    '  abline(lm(temp$y[,vnum] ~ temp$x)$coefficients, lty=4, col=3)',
                    '}',
                    sep="\n")
  appendLog(Rtxt("Plot the scaled Schoenfeld residuals of proportional hazards."),
            plot.cmd)
  eval(parse(text=plot.cmd))
}
########################################################################
# Export
exportSurvivalModel <- function()
{
  # Export the current survival model as PMML (XML), prompting the user
  # for a destination file name. Returns FALSE if no model exists or no
  # file name was chosen.
  # Make sure we have a model first!
  if (noModelAvailable(crs$survival, crv$SURVIVAL)) return(FALSE)
  startLog(Rtxt("Export survival model."))
  save.name <- getExportSaveName(crv$SURVIVAL)
  if (is.null(save.name)) return(FALSE)
  ext <- tolower(get.extension(save.name))
  # Generate appropriate code. Any variable transforms are included so
  # the PMML embeds the preprocessing.
  pmml.cmd <- sprintf("pmml(crs$survival%s)",
                      ifelse(length(crs$transforms) > 0,
                             ", transforms=crs$transforms", ""))
  if (ext == "xml")
  {
    appendLog(Rtxt("Export survival regression as PMML."),
              sprintf('saveXML(%s, "%s")', pmml.cmd, save.name))
    XML::saveXML(eval(parse(text=pmml.cmd)), save.name)
  }
  setStatusBar("The", toupper(ext), "file", save.name, "has been written.")
}
########################################################################
# Evaluate
genPredictSurvival <- function(dataset)
{
  # Generate a command to obtain the prediction results when applying
  # the model to new data.
  #
  # dataset: character string naming the data to predict on; it is
  #   substituted verbatim into the generated command.
  # Returns the command as a single character string.
  #
  # For the coxph model we return the median of the expected survival
  # time. 091115 Note that the code to extract this is somewhat
  # convoluted until I generate the actual formula that is used to
  # calculate the median. This will also be needed to convert to C code.
  #
  # Use inherits() rather than class(...) == "coxph": the class
  # attribute may be a vector (e.g. c("coxph.penal", "coxph")), in
  # which case == yields a logical vector and if() fails.
  if (inherits(crs$survival, "coxph"))
    cmd <- sprintf(paste("crs$pr <- survival:::survmean(survfit(crs$survival,",
                         '%s), scale=1, rmean="none")[[1]][,5]'), dataset)
  else
    cmd <- sprintf("crs$pr <- predict(crs$survival, %s)", dataset)
  return(cmd)
}
genResponseSurvival <- function(dataset)
{
  # Generate a command to obtain the response when applying the model
  # to new data. For survival models the response is simply the
  # prediction, so delegate to the prediction command generator.
  genPredictSurvival(dataset)
}
genProbabilitySurvival <- function(dataset)
{
  # Generate a command to obtain the probability when applying the
  # model to new data.
  #
  # dataset: character string naming the data to predict on; it is
  #   substituted verbatim into the generated command.
  # Returns the command as a single character string.
  #
  # For the coxph model we predict the risk. This is not a probability,
  # but is a number relative to 1, so that greater than 1 has a higher
  # risk than the average of the event occuring, and below 1 has a
  # lower risk of the event occuring than the average.
  #
  # Use inherits() rather than class(...) == "coxph": the class
  # attribute may be a vector (e.g. c("coxph.penal", "coxph")), in
  # which case == yields a logical vector rather than a scalar. A plain
  # scalar if/else also replaces the ifelse() call, which is intended
  # for vectors.
  is.coxph <- inherits(crs$survival, "coxph")
  return(sprintf("crs$pr <- predict(crs$survival, %s%s)", dataset,
                 if (is.coxph) ', type="risk"' else ""))
}
|
df5ff4c78358658d7ac183ce4403a70b69790c14
|
3500ffd0e4ad10570e4ecadd7e96dd50f366a481
|
/0_bin/5_table_functions.R
|
3c3ede1cc12fe4bfd91bc1d892cf920163a81df4
|
[
"MIT"
] |
permissive
|
boyercb/rmc-peru
|
cd48e044f2f9d58db4fcbcae8f8949f8a2098a32
|
65f1a5465dfbef2efdbf9aab955507b91ad81198
|
refs/heads/master
| 2023-08-30T11:15:09.128624
| 2023-08-18T17:20:14
| 2023-08-18T17:20:14
| 286,563,359
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,843
|
r
|
5_table_functions.R
|
# Build a LaTeX regression table for a set of fitted models with
# modelsummary + kableExtra, reporting treatment effects per outcome.
#
# models:         list of fitted models, expected two per outcome (first
#                 without, second with lasso-selected covariates).
# outcomes:       character vector of outcome column names in `data`.
# outcome_labels: display labels, one per outcome.
# treatment:      name of the treatment coefficient/column to report.
# title:          table caption.
# general_note:   footnote text describing the specifications.
# data:           data frame used to compute control-group means/SDs.
#
# Returns the finished LaTeX table as a kableExtra/character object.
# NOTE(review): depends on specd() and colSds() defined elsewhere in the
# project (colSds presumably matrixStats-style) -- confirm.
make_report_table <-
  function(models,
           outcomes,
           outcome_labels,
           treatment = "treatment",
           title,
           general_note = "Notes: First column for each outcome is design-based least squares estimator that includes fixed effects for randomization strata and batch. Second column adjusts for baseline covariates selected using double-post-selection lasso. Covariates are mean-centered and interacted with treatment. Heteroscedasticity-consistent robust standard errors (HC2) for all specifications are shown in parentheses and p-values in square brackets.",
           data) {
    # Control-group (treatment == 0) mean and SD for each outcome.
    dep_var_means <- colMeans(
      data[data[[treatment]] == 0, outcomes],
      na.rm = TRUE
    )
    dep_var_sd <- colSds(
      data[data[[treatment]] == 0, outcomes],
      na.rm = TRUE
    )
    # Extra table rows: covariate/FE indicators plus control mean/SD.
    # Values are wrapped in {} to protect them from the "d" (decimal
    # aligned) LaTeX columns used below.
    rows <- tibble(
      covariates = rep(c("No", "Yes"), length(outcomes)),
      FE = rep(c("Yes", "Yes"), length(outcomes)),
      dep_var_mean = rep(paste0("{", specd(dep_var_means, 3), "}"), each = 2),
      dep_var_sd = rep(paste0("{", specd(dep_var_sd, 3), "}"), each = 2),
    )
    # Transpose so each statistic becomes a row spanning model columns.
    rows <- as_tibble(t(rows))
    rows <- add_column(rows, c("Covariates", "Fixed Effects", "Control Mean", "Control SD"), .before = 1)
    # Insert the extra rows directly after the coefficient rows.
    attr(rows, 'position') <- 4:7
    # Goodness-of-fit rows to keep, with {}-protected formatting.
    gm <- tribble(
      ~raw, ~clean, ~fmt,
      "nobs", "Observations", function(x) paste0("{", x, "}"),
      "r.squared", "R$^2$", function(x) paste0("{", specd(x, 3), "}")
    )
    # Header: one blank cell, then each outcome label spanning its two
    # specification columns.
    header <- c(1, rep(2, length(outcomes)))
    names(header) <- c(" ", outcome_labels)
    # coef_omit keeps only the treatment coefficient (negative lookahead
    # drops everything else).
    ms <- modelsummary(
      models = models,
      output = 'latex',
      statistic = c('({std.error})','[{p.value}]'),
      coef_omit = paste0("^(?!", treatment, "$)"),
      coef_rename = c("remained_in_chat" = 'remained in chat'),
      gof_omit = "(AIC)|(BIC)|(RMSE)",
      add_rows = rows,
      align = paste0('l', str_flatten(rep('d', length(models)))),
      stars = c('*' = .1, '**' = .05, '***' = .01),
      title = title,
      gof_map = gm,
      escape = FALSE
    )
    ms <- ms|>
      kable_styling(latex_options = "hold_position") |>
      add_header_above(header) |>
      footnote(
        general = general_note,
        symbol = "* p $<$ 0.1, ** p $<$ 0.05, *** p $<$ 0.01",
        symbol_manual = "",
        general_title = "",
        threeparttable = TRUE,
        escape = FALSE
      )
    # Post-process the raw LaTeX: drop stray empty {} groups and the
    # auto-generated significance-stars footnote line (the width of the
    # \multicolumn varies with the number of model columns).
    ms <- gsub(" \\{\\}", " ", ms)
    ms <- gsub("\\multicolumn{3}{l}{\\rule{0pt}{1em}* p $<$ 0.1, ** p $<$ 0.05, *** p $<$ 0.01}\\\\", "", ms, fixed = TRUE)
    ms <- gsub("\\multicolumn{5}{l}{\\rule{0pt}{1em}* p $<$ 0.1, ** p $<$ 0.05, *** p $<$ 0.01}\\\\", "", ms, fixed = TRUE)
    ms <- gsub("\\multicolumn{7}{l}{\\rule{0pt}{1em}* p $<$ 0.1, ** p $<$ 0.05, *** p $<$ 0.01}\\\\", "", ms, fixed = TRUE)
    ms <- gsub("\\multicolumn{9}{l}{\\rule{0pt}{1em}* p $<$ 0.1, ** p $<$ 0.05, *** p $<$ 0.01}\\\\", "", ms, fixed = TRUE)
    ms
  }
|
80b04bf4e97b9c761103e14c6779be9e485d2c82
|
36a876c6de4fa152e2e59dbe592c9d2d81cbd2eb
|
/man/scratchings.Rd
|
1519d21741b7be18a0219eea720918a450ba8532
|
[] |
no_license
|
jkadcav/competitor
|
9255d878d51249537e444c0aea437a023cb9b6ce
|
9283296cc7de07248cd9e558098a407235137225
|
refs/heads/master
| 2021-01-12T12:15:59.769629
| 2016-11-18T12:27:06
| 2016-11-18T12:27:06
| 72,398,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 372
|
rd
|
scratchings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{scratchings}
\alias{scratchings}
\title{Retrieve competitor scratched property}
\usage{
scratchings(eventId)
}
\arguments{
\item{eventId}{Database ID of the event}
}
\description{
Retrieve competitor scratched property
}
\examples{
scratchings( 1175769 )
}
\keyword{competitors}
|
0d69e8cdf056b3512628e137de89c84c9f7459e7
|
8078d61b576fc31a7ff3c59cf83688042f8660db
|
/qExponential/man/ptsal.Rd
|
a6b6e3ee0ba84e3dec70fac1a9c361f8a5912f63
|
[] |
no_license
|
Alessandra23/q-Exponential-mfmm
|
ac6704459e5c91e51e9c9a783db022dafc49cde8
|
16789a8653836231908335f890ec236999d009fc
|
refs/heads/master
| 2023-07-12T23:42:13.588015
| 2021-08-17T19:20:07
| 2021-08-17T19:20:07
| 231,951,866
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 681
|
rd
|
ptsal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qexp_functions.R
\name{ptsal}
\alias{ptsal}
\title{Calculate the cumulative distribution function
Input: vector of data values, distributional parameters, left-censoring
threshold, usual flags
Output: vector of (log) probabilities}
\usage{
ptsal(
x,
shape = 1,
scale = 1,
q = tsal.q.from.shape(shape),
kappa = tsal.kappa.from.ss(shape, scale),
xmin = 0,
lower.tail = TRUE,
log.p = FALSE
)
}
\description{
Calculate the cumulative distribution function
Input: vector of data values, distributional parameters, left-censoring
threshold, usual flags
Output: vector of (log) probabilities
}
|
a12a8207b5fc169aed3fda9fa3c464c30bf69ec2
|
822c34d52c71b6cad5d3c582c7c7148e9d7582cb
|
/4week4/plot2.r
|
82cf586491e792452dc348e75178de99e4d17901
|
[] |
no_license
|
WeiHanLer/Data-Science-Specialization-Exploratory-Data-Analysis
|
1335b7fffd3d6958149419cbb9ec97e34cb271ba
|
5f1f22e3a487b9bc3ebdeb08e44c3572f6d6c8d1
|
refs/heads/master
| 2020-03-20T21:36:48.066251
| 2019-05-06T05:08:12
| 2019-05-06T05:08:12
| 137,750,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 439
|
r
|
plot2.r
|
library(dplyr)
library(ggplot2)
# Exploratory Data Analysis course plot 2: total PM2.5 emissions in
# Baltimore City (fips == "24510") by year, from the NEI data.
# NOTE(review): dplyr and ggplot2 are loaded but only base functions are
# used below.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep Baltimore City records only.
baltimore<- subset(NEI, fips=="24510")
# Sum emissions within each year.
aggregatevalue<-aggregate(Emissions~year,baltimore,sum)
png('plot2.png')
barplot(height=aggregatevalue$Emissions, names.arg=aggregatevalue$year, xlab="Years",
        ylab=expression('Total Emissions'),
        main=expression('Total Emissions in Baltimore by Years'))
dev.off()
|
e5cd149e6360e635069f3c94fd9057309a4d8829
|
71bc62be599664aa98d3318cadaf015ff270db08
|
/R10c-DataManipulation.R
|
964d413bb850280e3f41a95746b63bef4234ec96
|
[] |
no_license
|
soonyoungcheon/R
|
94db30e75d6bd2a495fcf397a2e41ed89892681a
|
00e840cfbca09a53ea251e0e716bdb40f11b79f1
|
refs/heads/master
| 2020-04-11T21:22:28.883897
| 2018-12-17T09:14:12
| 2018-12-17T09:14:12
| 159,113,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 357
|
r
|
R10c-DataManipulation.R
|
# aggregate - base R aggregation function
# Allows comparatively simpler code than dplyr's group_by
library(dplyr)
data('diamonds',package = "ggplot2")
diamonds
# aggregate(column to summarise ~ grouping, data, function to apply)
# Mean price by cut
aggregate(price~cut, diamonds, mean)
# Mean price by cut/color
aggregate(price~cut + color, diamonds, mean)
|
960faad51b033cd14c6c698b5557f7698c04bb17
|
ec8464fb698d72afada518f80df9a450d23a57fa
|
/man/offset-set.Rd
|
d20d52d875973efdd12efa09f5274f2a509a7e63
|
[] |
no_license
|
planetMDX/BioQC
|
96cdc0b0c2aa6b8d2d0055ee278011df0d9c0e64
|
93531776b8e98debf93309cf06a0857a3907fdaf
|
refs/heads/master
| 2021-01-01T19:35:37.467470
| 2017-08-02T08:23:10
| 2017-08-02T08:23:10
| 98,616,169
| 0
| 0
| null | 2017-08-02T07:28:33
| 2017-07-28T06:26:36
|
R
|
UTF-8
|
R
| false
| true
| 949
|
rd
|
offset-set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllMethods.R
\docType{methods}
\name{offset<-}
\alias{offset<-}
\alias{offset-set}
\alias{offset<-,IndexList,numeric-method}
\alias{offset<-,SignedIndexList,numeric-method}
\title{Set the offset of an \code{IndexList} or a \code{SignedIndexList} object}
\usage{
`offset<-`(object, value)
\S4method{offset}{IndexList,numeric}(object) <- value
\S4method{offset}{SignedIndexList,numeric}(object) <- value
}
\arguments{
\item{object}{An \code{IndexList} or a \code{SignedIndexList} object}
\item{value}{The value that the offset of \code{object} is set to. If it
isn't an integer, it's coerced into an integer.}
}
\description{
Set the offset of an \code{IndexList} or a \code{SignedIndexList} object
}
\examples{
myIndexList <- IndexList(list(1:5, 2:7, 3:8), offset=1L)
offset(myIndexList)
offset(myIndexList) <- 3
offset(myIndexList)
}
|
c27df250f608faf0fa99e771e7432f8de2d250e6
|
96ef0481a0f4baa05237718907608e31dcf10304
|
/MLPACK2.R
|
12e4622459d1748e8e1425ac2dd065be04fb0a41
|
[] |
no_license
|
yuhenghuang/Rcpp
|
8ba67c65b4a5e6f32375e251b1170e3b172d455c
|
5c6fb11c347fc2e72bb8d98c29a0ae4552ce8d31
|
refs/heads/master
| 2023-01-09T00:40:39.292823
| 2020-10-23T13:25:35
| 2020-10-23T13:25:35
| 282,114,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
MLPACK2.R
|
library(RcppMLPACK)
# Compile and load C++ wrappers (presumably around MLPACK routines --
# their definitions live in MLPACK2.cpp, not visible here).
Rcpp::sourceCpp("MLPACK2.cpp")
# Logistic regression on a toy design matrix with binary labels; data is
# laid out with one column per sample, as MLPACK expects.
logistic_regression(matrix(c(1, 2, 3, 1, 2, 3), nrow=2, byrow=TRUE), matrix(c(1L, 1L, 0L), nrow = 1))
data(trainSet)
trainmat <- t(trainSet[, -5]) ## train data
trainlab <- trainSet[, 5]
linear_regression(trainmat, trainlab)
# Two-class naive Bayes; first fit only, then fit + classify test data.
naive_bayes_classifier(trainmat, trainlab, 2L)
# NOTE(review): testSet is used without a data(testSet) call -- confirm
# it is lazily loaded by the package.
testmat <- t(testSet[, -5]) ## test data
testlab <- testSet[, 5]
res <- naive_bayes_classifier(trainmat, trainlab, 2L, testmat) ## also classify
res
# Compare predicted labels against the known test labels.
all.equal(res[[4]][1,], testlab)
# nn_ml on the randu data -- second argument's meaning (neighbours?) is
# defined in MLPACK2.cpp; TODO confirm.
res <- nn_ml(t(randu), 2L)
res$clusters
res$result
# k-means with 20 clusters (the sourceCpp'd kmeans, judging by the
# $centroids/$assignments fields, not stats::kmeans).
res <- kmeans(t(randu), 20L)
dim(res$centroids)
length(res$assignments)
|
9a6e741d87aa44653cde5760fc4d2bbbaab8406d
|
3a4b61726631bcce875ab36bde6177cd771b8cdf
|
/covid.R
|
d63201e28f46f5f5f9e9b0b518cfdebf4046470a
|
[] |
no_license
|
grtvishnu/Covid_visualization
|
0a292a1a82dff7d964f94552822c295a2ea2bc80
|
3495f3dfcb589826cbade2d015cd1a038e4a092d
|
refs/heads/master
| 2021-05-23T01:06:07.854440
| 2021-01-19T15:33:31
| 2021-01-19T15:33:31
| 253,165,878
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,574
|
r
|
covid.R
|
# Load libraries
library(tidyverse)
library(scales)
library(lubridate)
library(gganimate)
library(gifski)
# Combined COVID-19 time series: rows per region and date with
# cumulative Confirmed/Recovered/Deaths counts.
covid <- read_csv("time-series-19-covid-combined.csv")
# Remove unnecessary Columns and Rename
covid <- covid %>%
  select(Date, country = `Country/Region`, Confirmed, Recovered, Deaths)
# Create a useful data frame
# Counts are cumulative, so per-country totals to date are the maxima.
covid_stat <- covid %>%
  group_by(country) %>%
  summarise(Death = max(Deaths), Confirmed = max(Confirmed), Recovered = max(Recovered)) %>%
  mutate(Active_case = Confirmed - Recovered)
# top 10 Confirmed
# NOTE(review): top_n() without a wt argument ranks by the LAST column
# (Active_case here), not the arranged column -- confirm intent; the
# same applies to the next three plots.
covid_stat %>%
  arrange(desc(Confirmed)) %>%
  top_n(10) %>%
  ggplot(aes(x = reorder(country, -Confirmed), y = Confirmed, fill = country)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = comma) +
  ggtitle("Top 10 Total Confirmed")
# Top 10 Deaths
covid_stat %>%
  arrange(desc(Death)) %>%
  top_n(10) %>%
  ggplot(aes(x = reorder(country, -Death), y = Death, fill = country)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = comma) +
  ggtitle("Top 10 Total Deaths")
# Top 10 Recovered
covid_stat %>%
  arrange(desc(Recovered)) %>%
  top_n(10) %>%
  ggplot(aes(x = reorder(country, -Recovered), y = Recovered, fill = country)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = comma) +
  ggtitle("Top 10 Total Recovered")
# Top 10 Active Cases
covid_stat %>%
  top_n(10) %>%
  ggplot(aes(x = reorder(country, Active_case), y = Active_case, fill = country)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  scale_y_continuous(labels = comma) +
  ggtitle("Top 10 Total Active Case")
# animated graph Total death India (Line Graph)
# transition_reveal() animates the line being drawn as the cumulative
# death count grows.
p1 <- covid %>%
  group_by(country, Date) %>%
  summarise(Death = max(Deaths), Confirmed = max(Confirmed), Recovered = max(Recovered)) %>%
  arrange(desc(Death)) %>%
  filter(country == "India") %>%
  ggplot(aes(Date, Death)) +
  ggtitle("Total Death In India") +
  geom_line(color = "blue") +
  scale_y_continuous(labels = comma) +
  geom_point(size = 1.5) +
  transition_reveal(Death)
animate(p1, height = 600, width = 800, fps = 30, duration = 10, end_pause = 60, res = 100)
anim_save("india.gif")
# Animated Graph Multiple Countries (Line Graph)
p2 <- covid %>%
  group_by(country, Date) %>%
  summarise(Death = max(Deaths), Confirmed = max(Confirmed), Recovered = max(Recovered)) %>%
  arrange(desc(Confirmed)) %>%
  filter(country == "India" |
           country == "US" |
           country == "Brazil" |
           country == "Russia") %>%
  ggplot(aes(Date, Confirmed, color = country)) +
  ggtitle("Confirmed Cases") +
  geom_line() +
  scale_y_continuous(labels = comma) +
  geom_point(size = 1.5) +
  transition_reveal(Confirmed)
animate(p2, height = 600, width = 800, fps = 30, duration = 10, end_pause = 60, res = 100)
anim_save("all confirmed.gif ")
# Animated Graph total death multiple Countries (Bar plot)
p3 <- covid %>%
group_by(country, Date) %>%
summarise(Death = max(Deaths), Confirmed = max(Confirmed), Recovered = max(Recovered)) %>%
arrange(desc(Confirmed)) %>%
filter(country == "India" |
country == "US" |
country == "Brazil" |
country == "Russia") %>%
ggplot(aes(x = reorder(country, -Death), y = Death, fill = country)) +
geom_bar(stat = "identity") +
scale_y_continuous(labels = comma) +
transition_time(Date) +
labs(
title = "Animated Bar Graph",
subtitle = "Day : {frame_time}"
)
animate(p3, height = 600, width = 800, fps = 30, duration = 10, end_pause = 60, res = 100)
anim_save("bar.gif")
|
608a34b36c53b40b6c9ea4dfe3ba70200cd7b09a
|
f90f184082d6096b87c56b5e0011a7ef87ef023c
|
/CleanData/Week4Quiz/Exercise4.R
|
21c98f8939536ac98aa47503fa6b7556edd35fd1
|
[] |
no_license
|
pig4tti/r-assignments-resolutions
|
bbbc0d9df460c0c95a07b6993d19a05ce16ecf4e
|
588feb95cd5e5be7ee83294e19332e9c43e88472
|
refs/heads/master
| 2021-01-10T06:18:07.902312
| 2016-02-25T11:02:50
| 2016-02-25T11:02:50
| 48,855,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
Exercise4.R
|
source("Utils.R")
fileName <- "clean_data_w4_ex2.csv"
ensureFileExists(fileName, "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv")
# loads the csv
colClasses = c("character", rep("NULL", 2), rep("character", 2), rep("NULL", 5)) # read only the columns that matters
gdp <- read.csv(file.path("./data/", fileName), skip = 5, nrows = 190, blank.lines.skip = TRUE, header = FALSE, colClasses = colClasses)
colnames(gdp) <- c("CountryCode", "Economy", "GDP2012")
educationalDataFileName <- "clean_data_w4_ex4.csv"
ensureFileExists(educationalDataFileName, "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv")
educationalData <- read.csv(file.path("./data/", educationalDataFileName))
mergedData <- merge(gdp, educationalData, by.x = "CountryCode", by.y = "CountryCode")
nrow(mergedData[grepl("^Fiscal year end: June", mergedData$Special.Notes) , ]) # 13
|
a03d1324ed3f25e636bd784a62695cb38f0cec37
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-r/tests/testdir_jira/runit_pubdev_1240.R
|
d4cefbf6a353108d4a384a516be1056671228fcc
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,163
|
r
|
runit_pubdev_1240.R
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
test.merge.examples <- function() {
census_path <- locate("smalldata/chicago/chicagoCensus.csv")
Log.info("Import Chicago census data...")
census_raw <- h2o.importFile(census_path, parse=FALSE)
census_setup <- h2o.parseSetup(census_raw)
census_setup$column_types[2] <- "Enum" # change from String -> Enum
census <- h2o.parseRaw(census_raw, col.types=census_setup$column_types)
Log.info("Set column names to be syntactically valid for R")
names(census) <- make.names(names(census))
names(census)[names(census) == "Community.Area.Number"] <- "Community.Area"
print(summary(census))
Log.info("Create a small R dataframe and push to H2O")
crimeExamples.r <- data.frame(IUCR = c(1811, 1150),
Primary.Type = c("NARCOTICS", "DECEPTIVE PRACTICE"),
Location.Description = c("STREET", "RESIDENCE"),
Domestic = c("false", "false"),
Beat = c(422, 923),
District = c(4, 9),
Ward = c(7, 14),
Community.Area = c(46, 63),
FBI.Code = c(18, 11),
Day = c(8, 8),
Month = c(2, 2),
Year = c(2015, 2015),
WeekNum = c(6, 6),
WeekDay = c("Sun", "Sun"),
HourOfDay = c(23, 23),
Weekend = c(1, 1),
Season = c(1, 1))
crimeExamples <- as.h2o(crimeExamples.r)
names(crimeExamples) <- make.names(names(crimeExamples))
print(head(crimeExamples))
Log.info("Merge created crime examples with Chicago census data")
crimeExamplesMerge <- h2o.merge(crimeExamples, census, all.x=TRUE, all.y=FALSE)
print(summary(crimeExamplesMerge))
}
doTest("Merging H2O H2OFrames causes IllegalArgumentException", test.merge.examples)
|
0fc3397c35cbb59374e1ec80d4c6106074801332
|
f463aa07156de93a453bed1c532cdc0f193b4649
|
/week_011/week11-boardgames.R
|
ecf1c4de27503c85d63f42110e939d1cda748a23
|
[] |
no_license
|
mehmetalivarol/tidytuesday
|
f984e07384ce9a96291eba1cf11353c2ad390f0d
|
caf8bfb1ea597c5c3fe4e022d9b7fdfe5b5b2087
|
refs/heads/master
| 2020-04-28T21:03:50.222823
| 2019-03-14T10:03:12
| 2019-03-14T10:03:12
| 175,370,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 551
|
r
|
week11-boardgames.R
|
library(tidyverse)
library(ggwordcloud)
proj_set("C:/Users/mali/Documents/R/tidytuesday")
board_games <-
readr::read_csv(
"https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-03-12/board_games.csv"
)
wc <- board_games %>%
group_by(name) %>%
summarise(UsrRating = sum(users_rated)) %>%
filter(UsrRating > 10000)
wc %>%
ggplot(aes(
label = name,
size = UsrRating,
color = UsrRating
)) + geom_text_wordcloud() + scale_color_gradient(low = "#FF6347", high = "#40E0D0") +
theme_light()
|
0cd35fbdd7960485c53ca482a53e8b369140ed89
|
aa0e96ff0e39b8b35ff343ac7db7f7c954f3a741
|
/R/utility.R
|
c5873f852dc8e680abfdde29e45ed001e599ac90
|
[] |
no_license
|
mahagadorn/nacdb
|
906573b393f0a249423b6e839c0ce3d1cec3164b
|
e5c5c562c914f49ca0f1e22dbe3a8dd7f30793ac
|
refs/heads/master
| 2020-03-15T12:42:40.513363
| 2018-05-04T22:55:53
| 2018-05-04T22:55:53
| 132,150,242
| 1
| 0
| null | 2018-05-04T14:25:21
| 2018-05-04T14:25:21
| null |
UTF-8
|
R
| false
| false
| 9,509
|
r
|
utility.R
|
#' Takes a matrix of data for a species, checks if its numeric, then puts the table into a long-format dataframe
#'
#' @param x a matrix of data, generally species in the columns and sites in the row
#' @param row.metadata metadata for the sites; in long format, it will be stored in each row with with the site pertaining to the data
#' @param col.metadata metadata for the species; will be stored in every 'n'th row, where 'n' is the number of rows in the original table
#' @param total.metadata metadata for table; will include publishing information
#' @importFrom reshape2 melt
#' @return data set in long format, with all metadata included
.matrix.melt <- function(x, study.metadata=data.frame(units=NA, other=NA),
site.metadata=data.frame(id=NA,year=NA,name=NA,lat=NA,long=NA,address=NA,area=NA,other=NA),
species.metadata=data.frame(species=NA, taxonomy=NA, other=NA)){
#######################
# Argument handling ###
#######################
for(i in seq_along(names(study.metadata)))
if(is.factor(study.metadata[,i]))
study.metadata[,i] <- as.character(study.metadata[,i])
for(i in seq_along(names(site.metadata)))
if(is.factor(site.metadata[,i]))
site.metadata[,i] <- as.character(site.metadata[,i])
for(i in seq_along(names(species.metadata)))
if(is.factor(species.metadata[,i]))
species.metadata[,i] <- as.character(species.metadata[,i])
if(!is.numeric(x))
stop("'value' is not numeric")
if(!is.matrix(x))
stop("'x' is not a matrix")
if(length(dim(x))!=2)
stop("'x' is not a two-dimensional matrix")
if(!identical(rownames(x), site.metadata$id))
stop("Mismatch between site (names?) and site meta-data")
if(!identical(colnames(x), species.metadata$species))
stop("Mismatch between species (names?) and species meta-data")
######################
# Dispatch ########
# to .df.melt ########
# and return ########
######################
site.id <- rownames(x)[as.numeric(row(x))]
species <- colnames(x)[as.numeric(col(x))]
value <- as.numeric(x)
return(.df.melt(species, site.id, value, study.metadata, site.metadata, species.metadata))
}
.df.melt <- function(species, site.id, value,
study.metadata=data.frame(units=NA, other=NA),
site.metadata=data.frame(id=NA,year=NA,name=NA,lat=NA,long=NA,address=NA,area=NA,other=NA),
species.metadata=data.frame(species=NA, taxonomy=NA, other=NA)){
#######################
# Argument handling ###
#######################
if(!is.numeric(value))
stop("'value' is not numeric")
if(any(is.na(value)))
stop("No NAs in 'value'")
if(any(is.na(species)))
stop("No NAs in 'species'")
if(any(is.na(site.id)))
stop("No NAs in 'site.id'")
species <- as.character(species)
site.id <- as.character(site.id)
######################
# Meta-data ##########
######################
.create.other <- function(metadata, columns){
if(!all(columns %in% names(metadata))){
other <- metadata[,!names(metadata) %in% columns, drop=FALSE]
metadata <- metadata[,names(metadata) %in% columns, drop=FALSE]
other <- sapply(seq_along(names(other)), function(y) paste(names(other)[y],other[,y],sep=":"))
if(nrow(metadata) > 1)
other <- paste(other, collapse=";") else other <- apply(other, 1, paste, collapse=";")
metadata$other <- other
} else {
metadata$other <- NA
}
return(metadata[,c(columns,"other")])
}
# Study
if(nrow(study.metadata) > 1)
stop("Only one row of meta-data per study")
if(!all("units" %in% names(study.metadata)))
stop("Incorrectly formatted study meta-data")
if(is.na(study.metadata$units))
stop("Study must have units of measurement")
study.metadata <- .create.other(study.metadata, "units")
# Site
if(!all(c("id","year","name","lat","long","address","area") %in% names(site.metadata)))
stop("Incorrectly formatted site meta-data")
if(length(intersect(unique(site.id), site.metadata$id)) != nrow(site.metadata))
stop("Site meta-data must contain information about all sites")
if(length(intersect(site.metadata$id,unique(site.id))) != nrow(site.metadata))
stop("Site meta-data must only contain information about present sites")
site.metadata <- .create.other(site.metadata, c("id","year","name","lat","long","address","area"))
# Species
if(!all(c("species","taxonomy") %in% names(species.metadata)))
stop("Incorrectly formatted species meta-data")
if(length(intersect(unique(species), species.metadata$species)) != nrow(species.metadata))
stop("Species meta-data must contain information about all species")
if(length(intersect(species.metadata$species,unique(species))) != nrow(species.metadata))
stop("Species meta-data must only contain information about present species")
species.metadata <- .create.other(species.metadata, c("species","taxonomy"))
######################
# Format and return ##
######################
# Reformat data
output <- list(
data=data.frame(site.id, species, value),
spp.metadata=species.metadata,
site.metadata=site.metadata,
study.metadata=study.metadata
)
for(i in seq_along(output))
for(j in seq_len(ncol(output[[i]])))
if(is.factor(output[[i]][,j]))
output[[i]][,j] <- as.character(output[[i]][,j])
class(output) <- "nacdb"
return(output)
}
# Takes a data already in long format that will be converted to a string of metadata. Each row will be a single string, and the
# function will return the list of these strings
#
# @param data a dataframe exclusively containing the columns of metadata
# @return a list of metadata strings
.make.metadata <- function(data){
sapply(1:nrow(data), function(y) {
char.list <- c(rbind(colnames(data), "=", as.character(data[y,]), ", "))
char.list <- head(char.list, -1)
metadata <- paste(char.list, collapse="")
return(metadata)
})
}
# Unzips a file from a downloaded zip file
# param file name of file to be extracted from zip
# param zip location and name of zip file (e.g.,
# ~/Downlaods/a_file.zip)
# param to.save.dir directory to save resulting file (DEFAULT: a new
# temporary directory will be used)
# param to.save.name name to save the file as (DEFAULT: it will be
# named paste(zip,file, sep='_'))
# return Complete path to unzipped file
#' @importFrom utils unzip download.file
#' @importFrom reshape2 melt
#' @importFrom httr GET
#' @importFrom stats setNames
.unzip <- function(file, zip, to.save.dir, to.save.name){
if(missing(to.save.dir))
to.save.dir <- tempdir()
if(missing(to.save.name))
to.save.name <- file
files <- unzip(zip, list=TRUE)
if(!file %in% files$Name)
stop("Required file not in zipfile ", zip)
file <- unzip(zip, file)
file.rename(file, file.path(to.save.dir, to.save.name))
return(file.path(to.save.dir, to.save.name))
}
.fac.sim <- function(x){
x <- Filter(Negate(is.na), x)
x <- x[x != "" & x != " "]
x <- unique(x)
return(paste(x,collapse="_"))
}
#' @importFrom stats model.matrix
.expand.factor <- function(factor_to_expand, name){
names <- rep(name, length(unique(factor_to_expand)))
output <- model.matrix(~factor_to_expand-1)
colnames(output) <- paste(names, gsub("factor_to_expand", "", colnames(output)), sep="_")
return(as.data.frame(output))
}
.download <- function(url, dir, save.name, cache=TRUE){
destination <- file.path(dir, save.name)
suffix <- .file.suffix(url, 4)
if(cache==TRUE & file.exists(destination)){
if(!is.na(suffix))
attr(destination, "suffix") <- suffix
return(destination)
}
result <- download.file(url, destination, quiet=TRUE)
if(result != 0)
stop("Error code", result, " downloading file; file may not exist")
if(!is.na(suffix))
attr(destination, "suffix") <- suffix
return(destination)
}
.save.name <- function(doi, save.name, file){
if(is.na(save.name)){
save.name <- paste(doi,file, sep="_")
save.name <- gsub(.Platform$file.sep, "_", save.name, fixed=TRUE)
}
return(save.name)
}
.grep.url <- function(url, regexp, which=1){
html <- as.character(GET(url))
return(.grep.text(html, regexp, which))
}
.grep.text <- function(text, regexp, which=1){
links <- gregexpr(regexp, text)
if(which > length(links[[1]]))
stop("SI number '", which, "' greater than number of detected SIs (", length(links[[1]]), ")")
pos <- as.numeric(links[[1]][which])
return(substr(text, pos, pos+attr(links[[1]], "match.length")[which]-1))
}
.file.suffix <- function(text, max.length=4){
suffix <- .grep.text(text, "[a-zA-Z]+$")
if(nchar(suffix) <= max.length & nchar(suffix) > 0)
return(suffix)
return(NA)
}
prog.bar <- function(x, y){
if(y < 100){
cat(".")} else {
z <- Filter(function(z) z>=0, seq(1,y,length.out=100)-x)
if(length(z) > 0)
tryCatch(if(z[1] < 1) if((length(z) %% 10)==0) cat("|") else cat("."), error=function(z) cat("."))
}
}
|
47406233e2be42fdc90075ec66f054fbc07c437d
|
ca4bb0db52b6756e52e007c3d41e9643064b1825
|
/scripts/max_mean_zscore_VS_rhythms.R
|
7f31d32b451f5fd98f450ae9e13a39722faf504b
|
[] |
no_license
|
laloumdav/cost_noise_conservation_rhythmicity
|
a894da59af7aeab76ce4656fc6c41d7096476e31
|
27c93a9d59cbde0b960db5896509ac1f15a4e133
|
refs/heads/main
| 2022-12-21T02:23:11.556945
| 2022-06-27T12:58:54
| 2022-06-27T12:58:54
| 299,549,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,954
|
r
|
max_mean_zscore_VS_rhythms.R
|
library(ggplot2)
library(ggridges)
library(officer)
library(magrittr)
library(ggpubr)
species.list <- c("arabidopsis", "cyanobacteria", "ostreococcus", "mouse")
tissue.list <- c("leaves", NA, NA, "liver")
main.dir <- gsub("/NA", "", paste("~/Documents/cost_theory_workingspace/DATA", species.list, tissue.list, sep="/"))
percent.rhythmic = 0.15
##############
transcriptome.data <- list()
proteome.data <- list()
tot.data <- data.frame(max.expr.value=NA,
mean.expr.value=NA,
rhythm.pvalue=NA,
nb.genes=NA,
data=NA,
species=NA,
rhythmic.order=NA,
rhythmic=NA,
random.group=NA)
for (i in 1:length(species.list)) {
tot.data.tmp <- read.table(paste(main.dir[i], "tot_data.txt", sep="/"), sep="\t", head=TRUE, fill=TRUE, check.names = FALSE)
transcriptome.data[[i]] <- tot.data.tmp[, grep("RNA", colnames(tot.data.tmp))]
transcriptome.data[[i]] <- unique(subset(transcriptome.data[[i]], !is.na(RNA_mean.level)))
proteome.data[[i]] <- tot.data.tmp[, grep("protein", colnames(tot.data.tmp))]
proteome.data[[i]] <- unique(subset(proteome.data[[i]], !is.na(protein_mean.level)))
tot.data.transcripts <- data.frame(max.expr.value = transcriptome.data[[i]]$RNA_max.level,
mean.expr.value = transcriptome.data[[i]]$RNA_mean.level,
rhythm.pvalue = transcriptome.data[[i]][, grep("^RNA_rhythm.pvalue$|^RNA_rhythm.pvalue_Brown$", colnames(transcriptome.data[[i]]))],
nb.genes = nrow(transcriptome.data[[i]]),
data = rep("transcripts", nrow(transcriptome.data[[i]])),
species = rep(species.list[i], nrow(transcriptome.data[[i]])))
tot.data.proteins <- data.frame(max.expr.value = proteome.data[[i]]$protein_max.level,
mean.expr.value = proteome.data[[i]]$protein_mean.level,
rhythm.pvalue = proteome.data[[i]]$protein_rhythm.pvalue,
nb.genes = nrow(proteome.data[[i]]),
data = rep("proteins", nrow(proteome.data[[i]])),
species = rep(species.list[i], nrow(proteome.data[[i]])))
# order genes by their rhythm p-value
row.nb <- order(tot.data.transcripts$rhythm.pvalue)
tot.data.transcripts[row.nb, "rhythmic.order"] <- 1:nrow(tot.data.transcripts)
row.nb <- order(tot.data.proteins$rhythm.pvalue)
tot.data.proteins[row.nb, "rhythmic.order"] <- 1:nrow(tot.data.proteins)
# Lets consider the first n percent (percent.rhythmic) as rhythmic genes
tot.data.transcripts$rhythmic <- (tot.data.transcripts$rhythmic.order/nrow(tot.data.transcripts) <= percent.rhythmic)
rhythmic.group.size.transcripts = length(which(tot.data.transcripts$rhythmic == TRUE))
tot.data.proteins$rhythmic <- (tot.data.proteins$rhythmic.order/nrow(tot.data.proteins) <= percent.rhythmic)
rhythmic.group.size.proteins = length(which(tot.data.proteins$rhythmic == TRUE))
# Lets take a random group of genes of size equal to rhythmic genes group size and affect TRUE randomly (size of TRUE genes = rhythmic.group.size)
random.true <- sample(1:nrow(tot.data.transcripts), size=rhythmic.group.size.transcripts, replace = FALSE)
tot.data.transcripts[random.true, "random.group"] <- TRUE
random.true <- sample(1:nrow(tot.data.proteins), size=rhythmic.group.size.proteins, replace = FALSE)
tot.data.proteins[random.true, "random.group"] <- TRUE
tot.data.tmp <- rbind(tot.data.transcripts, tot.data.proteins)
tot.data <- rbind(tot.data, tot.data.tmp)
}
tot.data <- tot.data[-1, ]
proteome.data[[i]] <- read.table(paste(main.dir[i], "tot_data.txt", sep="/"), head=TRUE, fill=TRUE, check.names = FALSE)
tot.data.proteins <- data.frame(max.expr.value = proteome.data[[i]]$protein_max.level,
mean.expr.value = proteome.data[[i]]$protein_mean.level,
aa.synthesis.average.cost = proteome.data[[i]]$aa.synthesis.average.cost,
protein_rhythm.pvalue = proteome.data[[i]]$protein_rhythm.pvalue,
RNA_rhythm.pvalue = proteome.data[[i]]$RNA_rhythm.pvalue,
nb.genes = nrow(proteome.data[[i]]),
data = rep("proteins", nrow(proteome.data[[i]])),
species = rep(species.list[i], nrow(proteome.data[[i]])))
row.nb <- order(tot.data.proteins$rhythm.pvalue)
tot.data.proteins[row.nb, "rhythmic.order"] <- 1:nrow(tot.data.proteins)
# Lets consider the first n percent (percent.rhythmic) as rhythmic genes
tot.data.proteins$rhythmic <- (tot.data.proteins$rhythmic.order/nrow(tot.data.proteins) <= percent.rhythmic)
rhythmic.group.size.proteins = length(which(tot.data.proteins$rhythmic == TRUE))
# Lets take a random group of genes of size equal to rhythmic genes group size and affect TRUE randomly (size of TRUE genes = rhythmic.group.size)
random.true <- sample(1:nrow(tot.data.proteins), size=rhythmic.group.size.proteins, replace = FALSE)
tot.data.proteins[random.true, "random.group"] <- TRUE
tot.data.proteins <- subset(tot.data.proteins, (!is.na(tot.data.proteins[, "aa.synthesis.average.cost"])))
t.test(tot.data.proteins[tot.data.proteins$protein_rhythm.pvalue<=0.02 &
tot.data.proteins$RNA_rhythm.pvalue>0.1, "mean.expr.value"],
tot.data.proteins[tot.data.proteins$protein_rhythm.pvalue>0.1 |
tot.data.proteins$RNA_rhythm.pvalue<=0.02, "mean.expr.value"])
wilcox.test(tot.data.proteins[tot.data.proteins$protein_rhythm.pvalue<=0.02 &
tot.data.proteins$RNA_rhythm.pvalue>0.1, "mean.expr.value"],
tot.data.proteins[tot.data.proteins$protein_rhythm.pvalue>0.1 |
tot.data.proteins$RNA_rhythm.pvalue<=0.02, "mean.expr.value"])
nrRNA-rProt
nrRNA-nrProt
### PLOTS ###
subset.rhythmic.data <- subset(tot.data, rhythmic == TRUE)
subset.rhythmic.data$median.mean.expr.value <- NA
subset.rhythmic.data$median.max.expr.value <- NA
subset.random.data <- subset(tot.data, random.group == TRUE)
for (i in 1:length(species.list)) {
subset.data.transcripts.tmp <- subset(subset.random.data, species == species.list[i] & data == "transcripts")
subset.data.proteins.tmp <- subset(subset.random.data, species == species.list[i] & data == "proteins")
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "transcripts", "median.mean.expr.value"] <- median(subset.data.transcripts.tmp$mean.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "proteins", "median.mean.expr.value"] <- median(subset.data.proteins.tmp$mean.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "transcripts", "median.max.expr.value"] <- median(subset.data.transcripts.tmp$max.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "proteins", "median.max.expr.value"] <- median(subset.data.proteins.tmp$max.expr.value, na.rm = TRUE)
}
subset.tot.data <- rbind(subset.random.data, subset.rhythmic.data)
subset.tot.data$rhythmic.vs.random <- c( rep("random", nrow(subset.random.data)), rep("rhythmic", nrow(subset.rhythmic.data)))
### 1. max ###
##############
maxExprHistogramsPerGroup <- ggplot(subset.tot.data, aes(x=log10(max.expr.value), fill=rhythmic.vs.random)) +
geom_histogram(aes(y = ..count..), position = 'identity', alpha=0.6, na.rm = TRUE, bins = 40) +
scale_fill_manual(values = c("#00AFBB", "#E7B800")) +
scale_color_manual(values = c("#00AFBB", "#E7B800")) +
facet_wrap(species ~ data, scales = "free", ncol = 2) +
theme_bw(base_size = 11, base_rect_size = 0.1) +
scale_color_grey() +
labs(x = "max expression (log)", fill="genes group", caption=paste("proportion of rhythmic or random genes group = ", percent.rhythmic*100, "%", sep="")) +
theme(axis.title.x = element_text(size=12),
axis.title.y = element_text(size=12),
plot.caption = element_text(size=11, face = "italic", hjust=0))
### 2. mean ###
##############
meanExprHistogramsPerGroup <- ggplot(subset.tot.data, aes(x=log10(mean.expr.value), fill=rhythmic.vs.random)) +
geom_histogram(aes(y = ..count..), position = 'identity', alpha=0.6, na.rm = TRUE, bins = 40) +
scale_fill_manual(values = c("#00AFBB", "#E7B800")) +
scale_color_manual(values = c("#00AFBB", "#E7B800")) +
facet_wrap(species ~ data, scales = "free", ncol = 2) +
theme_bw(base_size = 11, base_rect_size = 0.1) +
scale_color_grey() +
labs(x = "mean expression (log)", fill="genes group", caption=paste("proportion of rhythmic or random genes group = ", percent.rhythmic*100, "%", sep="")) +
theme(axis.title.x = element_text(size=12),
axis.title.y = element_text(size=12),
plot.caption = element_text(size=11, face = "italic", hjust=0))
##############
# Add the plots to the word document:
read_docx(path = "~/Documents/cost_theory_workingspace/figures_rmd.docx") %>%
cursor_begin() %>%
cursor_reach(keyword = "##max_expr_histograms_per_group##") %>%
body_add_gg(value = maxExprHistogramsPerGroup, width = 5, height = 7) %>%
cursor_forward() %>%
body_remove() %>%
cursor_reach(keyword = "##mean_expr_histograms_per_group##") %>%
body_add_gg(value = meanExprHistogramsPerGroup, width = 5, height = 7) %>%
cursor_forward() %>%
body_remove() %>%
cursor_end() %>%
print(target = "~/Documents/cost_theory_workingspace/figures_rmd.docx")
####################################################################################
### Comparison of mean values of expression between both groups of genes ###########
####################################################################################
alternative.wilcox.option <- "two.sided" # choice between "two.sided", "less", "greater"
caption.text <- paste("Box-plot log scaled",
#"\n",
paste("Wilcox test (alternative='", alternative.wilcox.option, "'):", sep=""),
"ns: p > 0.05",
"*: p <= 0.05",
"**: p <= 0.01",
"***: p <= 0.001",
"****: p <= 0.0001",
#"\n",
paste("proportion of rhythmic or random genes group = ", percent.rhythmic*100, "%", sep=""),
sep="\n")
### PLOTS ###
subset.rhythmic.data <- subset(tot.data, rhythmic == TRUE)
subset.rhythmic.data$median.mean.expr.value <- NA
subset.rhythmic.data$median.max.expr.value <- NA
subset.random.data <- subset(tot.data, random.group == TRUE)
for (i in 1:length(species.list)) {
subset.data.transcripts.tmp <- subset(subset.random.data, species == species.list[i] & data == "transcripts")
subset.data.proteins.tmp <- subset(subset.random.data, species == species.list[i] & data == "proteins")
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "transcripts", "median.mean.expr.value"] <- median(subset.data.transcripts.tmp$mean.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "proteins", "median.mean.expr.value"] <- median(subset.data.proteins.tmp$mean.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "transcripts", "median.max.expr.value"] <- median(subset.data.transcripts.tmp$max.expr.value, na.rm = TRUE)
subset.random.data[subset.random.data$species == species.list[i] & subset.random.data$data == "proteins", "median.max.expr.value"] <- median(subset.data.proteins.tmp$max.expr.value, na.rm = TRUE)
}
subset.tot.data <- rbind(subset.random.data, subset.rhythmic.data)
subset.tot.data$rhythmic.vs.random <- c( rep("random", nrow(subset.random.data)), rep("rhythmic", nrow(subset.rhythmic.data)))
### 1. max ###
##############
maxMeanExprComparedPerGroup <- ggboxplot(subset.tot.data, x = "rhythmic.vs.random",
y = "max.expr.value", yscale = "log10",
color = "rhythmic.vs.random", palette = c("#00AFBB", "#E7B800"),
width = 0.4,
#add = "jitter",
short.panel.labs = T) +
geom_hline(aes(yintercept = median.max.expr.value), linetype = 2, color="grey39") +
facet_wrap(species ~ data, scales = "free", ncol = 2) +
stat_compare_means(method = "wilcox.test", method.args = list(alternative = alternative.wilcox.option),
label="p.signif" , label.x.npc = 0.4, label.y.npc = 0.9, na.rm = TRUE) +
labs(y = "max expression level", color="genes group:", caption=caption.text) +
theme(axis.title.x = element_blank(),
axis.text.y = element_text(size=6),
axis.text.x = element_text(size=10),
axis.title.y = element_text(size=12),
plot.caption = element_text(size=11, face = "italic", hjust=0),
legend.position = "right",
strip.background = element_rect(fill="gray44", colour="black", size = 0.1),
strip.text = element_text(size=10, colour="white"))
### 2. mean ###
##############
meanMeanExprComparedPerGroup <- ggboxplot(subset.tot.data, x = "rhythmic.vs.random",
y = "mean.expr.value", yscale = "log10",
color = "rhythmic.vs.random", palette = c("#00AFBB", "#E7B800"),
width = 0.4,
#add = "jitter",
short.panel.labs = T) +
geom_hline(aes(yintercept = median.mean.expr.value), linetype = 2, color="grey39") +
facet_wrap(species ~ data, scales = "free", ncol = 2) +
stat_compare_means(method = "wilcox.test", method.args = list(alternative = alternative.wilcox.option),
label="p.signif" , label.x.npc = 0.4, label.y.npc = 0.9, na.rm = TRUE) +
labs(y = "mean expression level", color="genes group:", caption=caption.text) +
theme(axis.title.x = element_blank(),
axis.text.y = element_text(size=6),
axis.text.x = element_text(size=10),
axis.title.y = element_text(size=12),
plot.caption = element_text(size=11, face = "italic", hjust=0),
legend.position = "right",
strip.background = element_rect(fill="gray44", colour="black", size = 0.1),
strip.text = element_text(size=10, colour="white"))
##############
# Add the plots to the word document:
read_docx(path = "~/Documents/cost_theory_workingspace/figures_rmd.docx") %>%
cursor_begin() %>%
cursor_reach(keyword = "##max_expr_compared_between_groups##") %>%
body_add_gg(value = maxMeanExprComparedPerGroup, width = 5.5, height = 8.5) %>%
cursor_forward() %>%
body_remove() %>%
cursor_reach(keyword = "##mean_expr_compared_between_groups##") %>%
body_add_gg(value = meanMeanExprComparedPerGroup, width = 5.5, height = 8.5) %>%
cursor_forward() %>%
body_remove() %>%
cursor_end() %>%
print(target = "~/Documents/cost_theory_workingspace/figures_rmd.docx")
#rhythm.data <- subset(tot.data, rhythmic == TRUE)
#random.data <- subset(tot.data, random.group == TRUE)
#random.data <- random.data[sample(1:nrow(random.data), replace = F, size = nrow(rhythm.data)), ]
#shapiro.test(rhythm.data$mean.expr.value[sample(1:length(rhythm.data$mean.expr.value), replace = FALSE, size = 5000)])
##############
# Also save it as an R object:
caption.text <- paste("Box-plot log scaled",
paste("Wilcox test (alternative='", alternative.wilcox.option, "'):", sep=""),
sep="\n")
### 1. max ###
##############
maxMeanExprComparedPerGroup <- ggboxplot(subset.tot.data, x = "rhythmic.vs.random",
y = "max.expr.value", yscale = "log10", outlier.size=0.1,
fill = "rhythmic.vs.random", palette = c("seagreen4", "#E7B800"),
width = 0.4, alpha=0.8, size=0.2,
#add = "jitter",
short.panel.labs = T) +
geom_hline(aes(yintercept = median.max.expr.value), linetype = 2, color="grey39", size=0.2) +
facet_wrap(species ~ data, scales = "free", ncol = 2) +
stat_compare_means(method = "wilcox.test", method.args = list(alternative = alternative.wilcox.option),
label="p.signif" , label.x.npc = 0.4, label.y.npc = 0.9, na.rm = TRUE, size=2.2) +
labs(y = "max expression level", fill="genes group:" #, caption=caption.text
) +
theme(axis.text.y = element_text(size=5),
axis.title.y = element_text(size=7),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
plot.caption = element_text(size=5, face = "italic", hjust=0),
legend.position = "right",
legend.text = element_text(size=5),
legend.title = element_text(size=5),
strip.background = element_blank(),
panel.background = element_rect(fill = "grey93"),
panel.spacing = unit(1, "lines"),
strip.text = element_text(size=6, face = "bold", margin = margin(0)),
axis.ticks.x = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_line(size = 0.1))
### 2. mean ###
##############
# Same comparison as the "max" plot above, but on the MEAN expression value.
# Layout and styling are kept identical to maxMeanExprComparedPerGroup.
meanMeanExprComparedPerGroup <- ggboxplot(subset.tot.data, x = "rhythmic.vs.random",
                                          y = "mean.expr.value", yscale = "log10", outlier.size = 0.1,
                                          fill = "rhythmic.vs.random", palette = c("seagreen4", "#E7B800"),
                                          width = 0.4, alpha = 0.8, size = 0.2,
                                          #add = "jitter",
                                          short.panel.labs = TRUE) +  # TRUE, not T: T is reassignable
  # size = 0.2 added for consistency with the max plot (it was previously the
  # only median line drawn at the default thickness).
  geom_hline(aes(yintercept = median.mean.expr.value), linetype = 2, color = "grey39", size = 0.2) +
  facet_wrap(species ~ data, scales = "free", ncol = 2) +
  stat_compare_means(method = "wilcox.test", method.args = list(alternative = alternative.wilcox.option),
                     label = "p.signif", label.x.npc = 0.4, label.y.npc = 0.9, na.rm = TRUE, size = 2.2) +
  labs(y = "mean expression level", fill = "genes group:" #, caption=caption.text
  ) +
  theme(axis.text.y = element_text(size = 5),
        axis.title.y = element_text(size = 7),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        plot.caption = element_text(size = 5, face = "italic", hjust = 0),
        legend.position = "right",
        legend.text = element_text(size = 5),
        legend.title = element_text(size = 5),
        strip.background = element_blank(),
        panel.background = element_rect(fill = "grey93"),
        panel.spacing = unit(1, "lines"),
        strip.text = element_text(size = 6, face = "bold", margin = margin(0)),
        axis.ticks.x = element_blank(),
        axis.line.x = element_blank(),
        axis.line.y = element_line(size = 0.1))
# Persist both comparison plots as RDS objects so they can be re-used
# without re-running the plotting code.  file.path() replaces the old
# paste(..., sep = "/") and produces identical paths.
out.rds.name <- file.path("~/Documents/cost_theory_workingspace/rds_objects", "maxMeanExprComparedPerGroup.rds")
saveRDS(maxMeanExprComparedPerGroup, file = out.rds.name)
out.rds.name <- file.path("~/Documents/cost_theory_workingspace/rds_objects", "meanMeanExprComparedPerGroup.rds")
saveRDS(meanMeanExprComparedPerGroup, file = out.rds.name)
|
c7808547eccb2218b5b467e0806448c064174963
|
f2ff7459d16c3c238c54481f9c1d96e5b2c2a780
|
/clusterization/kappa.R
|
fd62c71ed9be227ceb311c1a17ee9cdda20e7f5b
|
[] |
no_license
|
nevmenandr/Zhukovsky
|
c987c80fdefb61db72bed30c37cc90e6eee7aaf1
|
73fdcc5460b02f5f61d7d294787503f0b636ddab
|
refs/heads/master
| 2021-01-10T07:08:55.917091
| 2015-10-09T12:09:53
| 2015-10-09T12:09:53
| 43,612,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
kappa.R
|
# Inter-rater agreement (Cohen's kappa, irr::kappa2) on the annotation table.
library(irr)  # install.packages("irr") if the library() call errors
# Space-separated ratings, one rater per column, no header row.
# header = FALSE spelled out: the original `h=FALSE` relied on partial
# argument matching, which lintr flags and R may warn about.
p <- read.table('forms_no_stopwords', header = FALSE, sep = ' ')
pd <- data.frame(p)
kappa2(pd)
|
95e8197d55fe9f7091db1f537d9083bad9c062be
|
66018934e63468130a003b44d5ac3261c393856c
|
/man/kobo_unhcr_style_histo_big.Rd
|
f27a425ae910c04bd932890162104b915f66379c
|
[] |
no_license
|
unhcr/koboloadeR
|
e0e5fb10950d0500a7d5a614db6dc097be2ea0ec
|
7758385b9b5c13b4652161f7c127d162ff5df2e9
|
refs/heads/master
| 2023-03-10T08:23:51.511979
| 2023-02-28T15:52:07
| 2023-02-28T15:52:07
| 110,141,421
| 28
| 31
| null | 2022-02-05T14:14:47
| 2017-11-09T16:47:24
|
R
|
UTF-8
|
R
| false
| true
| 456
|
rd
|
kobo_unhcr_style_histo_big.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kobo_unhcr_style_histo_big.R
\name{kobo_unhcr_style_histo_big}
\alias{kobo_unhcr_style_histo_big}
\title{UNHCR ggplot2 theme}
\usage{
kobo_unhcr_style_histo_big()
}
\value{
A ggplot2 theme/styling object implementing the UNHCR style for histograms.
}
\description{
Return ggplot2 styling for histogram with big labels for powerpoints
}
\examples{
kobo_unhcr_style_histo_big()
}
\author{
Edouard Legoupil - with inspiration from bbc
}
|
45e60496f5c79eb27f614e90bbba90479cc160fb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/emuR/examples/buildtrack.Rd.R
|
2287a4cf655537b2274dba1b327ac7ccc4aacf62
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
buildtrack.Rd.R
|
library(emuR)
### Name: buildtrack
### Title: Build trackdata objects from the output of by()
### Aliases: buildtrack
### Keywords: manip

### ** Examples

# vowlax.fdat is a trackdata object of formants for the vowlax segment list.
# Compute the difference between adjacent formant values.
# `<-` replaces `=` for assignment, per the tidyverse/Google style guides.
p <- by(vowlax.fdat[1, 2], INDICES = NULL, diff)
p
# Rebuild a trackdata object from the by() output.
m <- buildtrack(p)
m
|
31293e6158ddc3d1271cdae19bdde1180b5f9456
|
2f6d7a99ce3155d2c635c39013a0da1418208b40
|
/man/getRandomNames.Rd
|
1d74ed45d95b2420c5579561acd4b54c2f0b168e
|
[
"MIT"
] |
permissive
|
oganm/ogbox
|
c75eb1d8f4df00be214731e085e6c19e141992cc
|
ba99a46487836af5ab4fb5b013bc92cf35ad8e95
|
refs/heads/master
| 2020-04-04T07:07:27.383911
| 2019-07-29T23:00:12
| 2019-07-29T23:00:12
| 37,562,559
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 863
|
rd
|
getRandomNames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomNames.R
\name{getRandomNames}
\alias{getRandomNames}
\title{Generate random names}
\usage{
getRandomNames(n = 1, alliterate = FALSE)
}
\description{
Generates \code{n} random two-part names (two words joined by an
underscore). Set \code{alliterate = TRUE} to restrict the output to
alliterating word pairs. The roxygen source for this help page was
malformed (a function body was dumped into the title/description);
this text reconstructs the intended documentation.
}
|
b53e83e81017309131d25db8fae42c44ea86417c
|
d4cd3909b5c5ff996e405a9dbcdb830a9f18599f
|
/market.edu.R
|
6cd95e0361c2fefb5eac1a8f8f73527b2d23998d
|
[] |
no_license
|
jevzimite/Projects.compiled
|
6bb39df27ed44871c240fea4408967248f76293d
|
df1fdcaa12bf8d339a2ca782e28c425a44c12409
|
refs/heads/main
| 2023-05-01T13:30:24.549058
| 2021-05-25T22:11:07
| 2021-05-25T22:11:07
| 332,641,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,518
|
r
|
market.edu.R
|
library(readxl)
marketing_data <- read_excel("Desktop/R/marketing_data.xlsm")
#### libraries ####
# library(ggbiplot)
library(psych)
library(ggplot2)
library(reshape2)
library(cowplot)
library(heatmaply)
library(factoextra)
library(readxl)
library(ggfortify)
library(ggpubr)
#### data ####
df<- marketing_data
dfN<- df
dfN[,c(1:3,23)]=NULL  # drop columns 1:3 and 23 before scaling
dfS<-scale(dfN)       # z-scored numeric matrix: input to the global PCA / k-means below
dfN<- normalize(df)   # NOTE(review): this overwrites the column-dropped dfN with a
                      # normalized copy of the FULL df (normalize presumably from
                      # heatmaply) -- confirm the reassignment is intentional
set.seed(1)           # fixed seed so the k-means cluster assignments are reproducible
#### kmeans/pca data ####
# Global PCA on the scaled customer data; the scree plot is used to pick a
# cluster count (annotated 2-5 below).
pca<-prcomp(dfS)
scree1<-fviz_screeplot(pca) #2-5 groups
# Two-cluster k-means on the same scaled matrix, plotted in PC space.
k1<-kmeans(dfS,2)
cluster<-autoplot(k1, dfS, frame=1, size=2, alpha=.33)+theme_bw()
# PCA biplots coloured by customer attributes (education / teens / kids at home).
countries<-autoplot(pca, data= df, loadings=1, loadings.label = 1, alpha=.0, size=10, colour = "Education", loadings.colour = "darkred", loadings.label.colour= 1, frame=1)+theme_bw()
teen<-autoplot(pca, data= df, loadings=1, loadings.label = 1, alpha=.05, size=10, colour = "Teenhome", loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
kid<- autoplot(pca, data= df, loadings=1, loadings.label = 1, alpha=.05, size=10, colour = "Kidhome", loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
s1<-autoplot(pca, alpha=.33)+theme_bw()  # plain score plot, no colouring
#### amount ####
# PCA / k-means restricted to the "amount spent" columns (7:12), scaled first
# so each product category contributes equally.
mntDf<-df[,c(7:12)]
mntDf<- scale(mntDf)
pca.mnt<- prcomp(mntDf)
scree2<- fviz_screeplot(pca.mnt) #2-3
k2<- kmeans(mntDf, 2)
cluster.mnt<-autoplot(k2, data= mntDf, loadings.label = 0, alpha=.33, size=2, frame=1)+theme_bw()
# Biplots of the amount-spent PCA coloured by education / kids / teens at home.
mnt1<-autoplot(pca.mnt, data= df, loadings=1, loadings.label = 1, alpha=.5, colour="Education", size=1, loadings.colour = "darkred", loadings.label.colour= 1, frame=1)+theme_bw()
mnt2<-autoplot(pca.mnt, data= df, loadings=1, loadings.label = 1, alpha=.1, colour="Kidhome", size=10, loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
mnt3<-autoplot(pca.mnt, data= df, loadings=1, loadings.label = 1, alpha=.1, colour="Teenhome", size=10, loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
s2<-autoplot(pca.mnt, alpha=.33)+theme_bw()  # plain score plot
#### number ####
# PCA / k-means on the "number of purchases" columns (13:17), mirroring the
# amount section above.
numDf<- df[,c(13:17)]
# BUG FIX: the original read `numDF <- scale(numDf)` (capital F), so the
# scaled copy was never used and, unlike the mnt section, PCA and k-means ran
# on UNSCALED counts.  Assign back to numDf so downstream code uses the
# scaled matrix.
numDf<- scale(numDf)
pca.num<- prcomp(numDf)
scree3<- fviz_screeplot(pca.num) #3-5 . . . 4
k3<- kmeans(numDf, 2)
# CONSISTENCY FIX: plot the k-means fit against the matrix it was fit on
# (numDf), matching cluster.mnt above; the original passed dfS here.
cluster.num<- autoplot(k3, numDf, frame=1, size=2, alpha=.33)+theme_bw()
# Biplots of the purchase-count PCA coloured by teens / kids / education.
num1<-autoplot(pca.num, data= df, loadings=1, loadings.label = 1, alpha=.1, size=10, colour = "Teenhome", loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
num2<-autoplot(pca.num, data= df, loadings=1, loadings.label = 1, alpha=.1, size=10, colour = "Kidhome", loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
num3<-autoplot(pca.num, data= df, loadings=1, loadings.label = 1, alpha=.0, size=10, colour = "Education", loadings.colour = "darkred", loadings.label.colour= 1, frame=1)+theme_bw()
s3<-autoplot(pca.num, alpha=.33)+theme_bw()  # plain score plot
#### plots ####
# The three scatter plots below shared an identical, copy-pasted layout:
# points, a loess confidence band (line hidden, fill only), and a dashed
# linear fit.  Build them with one helper instead of triplicating the chain.
# aes() captures the helper's environment, so passing the vectors directly
# works exactly as the original dfN$... mappings did.
plot_spend_pair <- function(xv, yv, xlab, ylab) {
  ggplot() +
    geom_point(mapping = aes(x = xv, y = yv), size = 3, alpha = .15) +
    geom_smooth(mapping = aes(x = xv, y = yv), color = "black", size = 0,
                alpha = .15, fill = "darkred") +
    geom_smooth(mapping = aes(x = xv, y = yv), method = "lm", color = "black",
                size = .5, alpha = 0, fill = "darkred", linetype = 8) +
    theme_bw() + labs(x = xlab, y = ylab)
}
wineNsweets  <- plot_spend_pair(dfN$MntWines,        dfN$MntSweetProducts, "Wine",          "Sweet products")
fishNmeats   <- plot_spend_pair(dfN$MntFishProducts, dfN$MntMeatProducts,  "Fish products", "Meat products")
sweetNfruits <- plot_spend_pair(dfN$MntFruits,       dfN$MntSweetProducts, "Fruits",        "Sweet products")
#### plot grids ####
# Summary grids: PCA, k-means cluster, scree plot, and attribute-coloured
# biplots for each of the three analyses above.
a<-plot_grid(s1, cluster, scree1, teen, kid, countries, labels = c("PCA","k-means Cluster", "","PCA (kid in household","PCA (Teen in household)", "PCA (Based on education)"))
b<-plot_grid(s2, cluster.mnt, scree2, mnt3, mnt2, mnt1, labels = c("PCA","k-means Cluster", "","PCA (kid in household","PCA (Teen in household)", "PCA (Based on education)"))
b1<-plot_grid( fishNmeats, wineNsweets,sweetNfruits)
c<-plot_grid(s3, cluster.num, scree3, num2, num1, num3, labels = c("PCA","k-means Cluster", "","PCA (kid in household","PCA (Teen in household)", "PCA (Based on education)"))
# Global-PCA score plots coloured by individual variables of interest.
sweets<-autoplot(pca, data= dfS, loadings=0, loadings.label = 0, alpha=.01, size=15, colour = "MntSweetProducts", loadings.colour = "darkred", loadings.label.colour= 0, frame=0)+theme_bw()
teens <- autoplot(pca, data= dfS, loadings=0, loadings.label = 0, alpha=.01, size=15, colour = "Teenhome", loadings.colour = "darkred", loadings.label.colour= 0, frame=0)+theme_bw()
fruits <- autoplot(pca, data= dfS, loadings=0, loadings.label = 0, alpha=.01, size=15, colour = "MntFruits", loadings.colour = "darkred", loadings.label.colour= 0, frame=0)+theme_bw()
catalog <- autoplot(pca, data= dfS, loadings=0, loadings.label = 0, alpha=.01, size=15, colour = "NumCatalogPurchases", loadings.colour = "darkred", loadings.label.colour= 0, frame=0)+theme_bw()
indexes<-plot_grid(cluster, sweets, teens, fruits, catalog)
# NOTE(review): k3 is refit here with 4 clusters, replacing the 2-cluster fit
# from the "number" section; only `aa` below uses this refit.
k3<-kmeans(numDf, 4)
# sales<-autoplot(pca.num, data= dfS, loadings=1, loadings.label = 1, alpha=.1, size=15, colour = "NumStorePurchases", loadings.colour = "darkred", loadings.label.colour= 1, frame=0)+theme_bw()
# cl.sales<- autoplot(k3, dfS, frame=1, size=3, alpha=.33, loadings=1)+theme_bw()
# plot_grid(store,cl.sales)
# a<-autoplot(pca, data= numDf, loadings=1, loadings.label = 1, alpha=.01, size=15, colour = "MntSweetProducts", loadings.colour = "darkred", loadings.label.colour= 0, frame=0)+theme_bw()
# NOTE(review): `a` below OVERWRITES the plot_grid assigned to `a` at the top
# of this section -- confirm that grid is no longer needed, or rename one.
a<-autoplot(pca.num, numDf, colour="NumStorePurchases", size=15,alpha=0.07, loadings=TRUE, loadings.label=1,loadings.label.colour=1, loadings.colour="darkred")
aa<-autoplot(k3, numDf, size=15, alpha=.05, frame=1, loadings=TRUE, loadings.label=1,loadings.label.colour=1, loadings.colour="darkred")
ab<-autoplot(pca.num, numDf, colour="NumCatalogPurchases", size=15,alpha=0.07, loadings=TRUE, loadings.label=1,loadings.label.colour=1, loadings.colour="darkred")
plot_grid(a,ab)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.