blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf2d530a32196991dc5896d285d10cc361f60ab1
|
9262e777f0812773af7c841cd582a63f92d398a4
|
/inst/userguide/figures/Covar--Covar_sec6_01_set-up-seasonal-dat.R
|
ec6b474e5e5fe30e80e18d653878dd4825db0fee
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nwfsc-timeseries/MARSS
|
f0124f9ba414a28ecac1f50c4596caaab796fdd2
|
a9d662e880cb6d003ddfbd32d2e1231d132c3b7e
|
refs/heads/master
| 2023-06-07T11:50:43.479197
| 2023-06-02T19:20:17
| 2023-06-02T19:20:17
| 438,764,790
| 1
| 2
|
NOASSERTION
| 2023-06-02T19:17:41
| 2021-12-15T20:32:14
|
R
|
UTF-8
|
R
| false
| false
| 478
|
r
|
Covar--Covar_sec6_01_set-up-seasonal-dat.R
|
###################################################
### code chunk number 11: Covar_sec6_01_set-up-seasonal-dat
###################################################
# Logical mask selecting the 1965-1974 observations from fulldat
years <- fulldat[, "Year"] >= 1965 & fulldat[, "Year"] < 1975
# Phytoplankton groups used as the response time series
phytos <- c(
"Diatoms", "Greens", "Bluegreens",
"Unicells", "Other.algae"
)
# Transpose so each row holds one taxon's time series
# (variables-in-rows layout — presumably the MARSS convention; confirm)
dat <- t(fulldat[years, phytos])
# z.score data again because we changed the mean when we subsampled
dat <- zscore(dat)
# number of time periods/samples
TT <- ncol(dat)
|
267de738fbff8a82401188dec5a3f9431af94aa5
|
82d9da6f33a3e8165850e05ab8fb3296b5623bba
|
/scripts/pipeline/import_plates.R
|
9333d196be41b4378f01ca6068b0f8120a24dd17
|
[] |
no_license
|
tanaylab/tet-gastrulation
|
6d7da1145f4d3b36ae27a17bfd8f3d7d9d16e83f
|
12360727690611199a6b680ca86facb380524ca1
|
refs/heads/main
| 2023-04-07T01:02:55.146723
| 2022-06-24T09:51:34
| 2022-06-24T09:51:34
| 500,408,241
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,985
|
r
|
import_plates.R
|
#' Import MARS-seq plates into a metacell mat object and apply basic QC.
#'
#' Writes a temporary dataset key file, imports the UMI tables via
#' mcell_import_multi_mars(), marks technical/unwanted genes and
#' out-of-range cells as ignored, merges per-cell metadata, and stores the
#' resulting mat back in scdb under `mat_nm`.
#'
#' @param mat_nm name under which the mat is stored in scdb.
#' @param metadata data.frame of plate metadata; must contain a `plate` column.
#' @param base_dir directory holding the UMI tables.
import_plates <- function(mat_nm, metadata, base_dir = "data/umi.tables/") {
    # metacell expects these three batch columns; all derive from the plate id
    metadata$Amp.Batch.ID <- metadata$plate
    metadata$Seq.Batch.ID <- metadata$plate
    metadata$Batch.Set.ID <- metadata$plate
    # Build the key-file path once; it is written, read and removed below
    key_fn <- paste0("config/key_", mat_nm, ".txt")
    write.table(x = metadata, file = key_fn, quote = FALSE, sep = "\t", row.names = FALSE)
    mcell_import_multi_mars(
        mat_nm = mat_nm,
        dataset_table_fn = key_fn,
        base_dir = base_dir,
        patch_cell_name = FALSE,
        force = TRUE
    )
    mat <- scdb_mat(mat_nm)
    nms <- c(rownames(mat@mat), rownames(mat@ignore_gmat))
    # mitochondrial genes, ERCC spike-ins and a fixed list of unwanted genes
    bad_genes <- c(
        grep("^mt\\-", nms, value = TRUE), "Neat1",
        grep("ERCC", nms, value = TRUE),
        "Atpase6", "Xist", "Malat1", "Cytb", "AK018753", "AK140265", "AK163440", "DQ539915"
    )
    mat <- scm_ignore_genes(scmat = mat, ig_genes = bad_genes)
    # ignore small and large cells (total UMI count outside [2000, 12000])
    small_cells <- colnames(mat@mat)[colSums(mat@mat) < 2000]
    large_cells <- colnames(mat@mat)[colSums(mat@mat) > 12000]
    mat <- scm_ignore_cells(scmat = mat, ig_cells = c(small_cells, large_cells))
    # read metadata and join the per-cell annotation table by cell id
    mat_md <- mat@cell_metadata
    mat_md$cell <- rownames(mat@cell_metadata)
    drop_cols <- colnames(mat_md) %in% c("molecule", "spike_count")
    mat_md <- mat_md[, !drop_cols]
    metadata_cells <- read.table(
        file = "data/metadata_cells_scrna_sequencing.txt",
        header = TRUE, sep = "\t", stringsAsFactors = FALSE
    )
    metadata_cells <- metadata_cells[colnames(metadata_cells) != "plate"]
    mat_md <- left_join(mat_md, metadata_cells, by = "cell")
    rownames(mat_md) <- mat_md$cell
    mat@cell_metadata <- mat_md
    # ignore empty wells and wells with duplicate cells
    new_ignore_cells <- unique(c(mat_md$cell[mat_md$embryo %in% c("empty", "duplicate")], mat@ignore_cells))
    mat <- scm_ignore_cells(scmat = mat, ig_cells = new_ignore_cells)
    scdb_add_mat(id = mat_nm, mat = mat)
    file.remove(key_fn)
}
|
5b1912d097181dc092f2bed141cd3d9a91408f1c
|
b49fb76ade4a6bfcc163436857833b2fe9bc29c3
|
/man/plot.apc.Rd
|
c1a38aa1f6d77e51b2595d5baecf5522b63d0e04
|
[] |
no_license
|
volkerschmid/bamp
|
8659109d5f399e8609cea05475261871a6bfd249
|
f89888f9874f83a8fe6046d4ca4fbd8624d6f07f
|
refs/heads/master
| 2023-02-23T18:11:51.839627
| 2023-02-15T09:39:17
| 2023-02-15T09:39:17
| 116,521,258
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 848
|
rd
|
plot.apc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_apc.R
\name{plot.apc}
\alias{plot.apc}
\title{Plot apc object}
\usage{
\method{plot}{apc}(x, quantiles = c(0.05, 0.5, 0.95), ...)
}
\arguments{
\item{x}{apc object}
\item{quantiles}{quantiles to plot. Default: \code{c(0.05,0.5,0.95)} is median and 90\% credible interval.}
\item{...}{Additional arguments will be ignored}
}
\value{
plot
}
\description{
Plot apc object
}
\details{
Plot of age, period and cohort effects from apc objects. If covariates have been used for period/cohort, a second plot with covariate, absolute effect and relative effect is created. Absolute effect is relative effect times covariate.
}
\examples{
\dontrun{
data(apc)
model <- bamp(cases, population, age="rw1", period="rw1", cohort="rw1", periods_per_agegroup = 5)
plot(model)
}
}
|
cf23b1c672297c55b8b36358dcf24f5a24c5b10d
|
7539525778c356cafc44b7ac1f1b412004812ac1
|
/r/scripts/probit_logit.R
|
0632c116142e04f136492675c42edb5147f3fc8b
|
[] |
no_license
|
rotabori/project_ad_analisis_datos
|
c4fa834fdb12eff351f4389b261b7e874ee3a06d
|
d92f09bc0f2d09e86748118657dd33ca4f13d2d5
|
refs/heads/master
| 2023-09-01T20:04:25.312240
| 2023-08-24T23:05:40
| 2023-08-24T23:05:40
| 195,309,809
| 3
| 3
| null | 2021-02-03T21:46:35
| 2019-07-04T23:11:41
|
Stata
|
UTF-8
|
R
| false
| false
| 2,941
|
r
|
probit_logit.R
|
## PROJECT: ANALISIS DE DATOS (data analysis course)
## PROGRAM: probit_logit.r
## PROGRAM TASK: LOGIT REGRESSION AND MARGINAL EFFECTS
## AUTHOR: RODRIGO TABORDA
## AUTHOR: JUAN PABLO MONTENEGRO
## DATE CREATED: 2020/06/02
## DATE REVISION 1:
## DATE REVISION #:
####################################################################;
## #0 PROGRAM SETUP
####################################################################;
# NOTE(review): clearing the global workspace inside a script is generally
# discouraged; kept to preserve the script's original behavior.
rm(list = ls())
## #0.1 ## SET PATH FOR READING/SAVING DATA;
# setwd()
### #0.2 ## INSTALL PACKAGES (only needed once);
#
# install.packages("margins")      # marginal effects
#
# install.packages("ggplot2")      # plotting
#
# install.packages("readstata13")  # reading Stata 13 files
#
## #0.3 ## CALL PACKAGES (needed in every session);
library(margins)      # marginal effects
library(ggplot2)      # plotting
library(readstata13)  # reading Stata 13 files
####################################################################;
## #20 ## IEFIC DATA;
####################################################################;
## #20.1 ## DATA-IN;
iefic <- read.dta13("http://www.rodrigotaborda.com/ad/data/iefic/2016/iefic_2016_s13.dta")
# iefic <- na.omit(iefic)
# REMOVE N.A. OBSERVATIONS
# keep only the outcome (p2540) and the income regressor, drop missing rows
iefic01 <- iefic[c("p2540", "ingreso")]
iefic01 <- na.omit(iefic01)
## #20.2 ## LINEAR REGRESSION;
## #20.3 ## LOGIT REGRESSION;
logit01 <- glm(data = iefic01,
               formula = p2540 ~ ingreso,
               family = "binomial")
summary(logit01)
## #20.3.1 ## PREDICTED VALUES;
logit01_b <- coef(logit01)
# income grid for prediction: 0 to 20,000,000 in steps of 500,000
ingreso_pred <- seq(from = 0, to = 20000000, by = 500000)
# Linear predictor x'b for each income value.  Vectorized: this replaces
# the original element-by-element for loop (same values, same n x 1 shape).
logit01_xb <- matrix(logit01_b[1] + logit01_b[2] * ingreso_pred, ncol = 1)
# logistic transformation: predicted probability
logit01_pred <- exp(logit01_xb) / (1 + exp(logit01_xb))
plot(logit01_pred, type = "l")
## #20.3.2 ## MARGINAL EFFECT;
# d p / d ingreso via the logistic density: p * (1 - p) * b
logit01_mgeff <- (exp(logit01_xb) / (1 + exp(logit01_xb))^2) * logit01_b[2]
plot(logit01_mgeff, type = "l")
## #20.3.4 ## MARGINAL EFFECT + MARGINS;
summary(margins(logit01, variables = "ingreso"))
logit01_mgeff01 <- margins(logit01, at = list(ingreso = seq(from = 0, to = 20000000, by = 500000)))
cplot(logit01, "ingreso", what = "prediction", main = "Probabilidad predicha")
# predicted probability
cplot(logit01, "ingreso", what = "effect", main = "Efecto marginal", draw = TRUE)
# marginal effect
####################################################################;
## #99 CLEAN
####################################################################;
rm(list = ls())
|
823397a87da547764ce581e7969ba436db217490
|
3da3895c22be687f0a079877e1c52e9dea283e96
|
/R Programming/Week 4/Week 4.R
|
95ba920999091f014d6f374d31c414243b3b8834
|
[] |
no_license
|
meethariprasad/Data-Science
|
b165a04031a5efb266e9e79e0074547e3930e062
|
70abd81773b78bf4e597fb6afd1ed879af5a28d6
|
refs/heads/master
| 2021-01-17T23:23:09.036056
| 2020-03-30T08:38:30
| 2020-03-30T08:38:30
| 84,219,764
| 0
| 0
| null | 2017-03-07T16:10:42
| 2017-03-07T16:10:42
| null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
Week 4.R
|
# Week 4: simulation (rnorm/rbinom), simulated linear models, and profiling.
# Generating random variables from normal distribution
x = rnorm(10) #generate random numbers from normal distribution
x
summary(x)
x = rnorm(10, 20, 2) #generate random numbers from normal distribution with
# mean = 20, sd = 2
x
summary(x)
# Generate random numbers from a linear model, where x, the predictor, is a normal distribution
set.seed(20)
x = rnorm(100) # standard normal distribution; mean=0, sd=1
e = rnorm(100, 0, 2) # noise term: normal with mean=0, sd=2
y = 0.5 + 2 * x + e # the linear model
summary(y)
plot(x,y)
# Generate random numbers from a linear model, where x, the predictor, is a binomial distribution
set.seed(20)
x = rbinom(100, 1, 0.5) # binomial distribution
e = rnorm(100, 0, 2)
y = 0.5 + 2 * x + e
summary(y)
plot(x,y)
# R profiling
# time a network read (requires internet access)
system.time(readLines("http://www.jhsph.edu"))
# Hilbert-like matrix: entry (i, j) = 1 / (i + j - 1)
hilbert = function(n)
{
i = 1:n
1/ outer (i-1, i, "+")
}
x = hilbert(1000)
system.time(svd(x))
hilbert = function(n) # R profiling using the Rprof (same definition, redefined for the demo)
{
i = 1:n
1/ outer (i-1, i, "+")
}
x = hilbert(1000)
# Rprof writes profiling samples to a file named "hilbert" in the working dir
Rprof("hilbert")
y = (svd(x))
Rprof(NULL)
summaryRprof("hilbert")
|
7236f9f55bb37c0ee14b499644b55dc31e7c35a6
|
ab73d60d1734a8d08eec9c410744b4441750ad95
|
/man/checkYearValidity.Rd
|
46e7793ff2f0ac9468115f144b76dfa5b2cf4c2c
|
[] |
no_license
|
gsimchoni/yrbss
|
d608ef13375c3a4a5901b4aef99709e534e0c476
|
c181dc14e7429ef37c65da4ca8db5903e3fa4d37
|
refs/heads/master
| 2020-09-05T05:25:36.680035
| 2017-06-18T16:14:41
| 2017-06-18T16:14:41
| 94,413,209
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
checkYearValidity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkYearValidity.R
\name{checkYearValidity}
\alias{checkYearValidity}
\title{Check the validity of a year}
\usage{
checkYearValidity(year)
}
\arguments{
\item{year}{a four-digit number representing the desired year}
}
\value{
an ERROR if the year is not in the yrbss_data_binary dataset,
the year itself if it is valid, and 2015 if it is NULL.
}
\description{
Helper method to check the validity of a year
}
\examples{
checkYearValidity(2015)
}
|
654c13d1a1f5d67253e946c4c6116f897f40b1fa
|
263646b68232338dc2c2a637de9e7f6a5f9e1195
|
/plot4.r
|
adf373613ab43fc1681499670525c8da54e6d540
|
[] |
no_license
|
antekai/ExploratoryDataAnalysis
|
0572e0748c7133ac4feb4bfbc7efaffd5b3004d5
|
7e12c470c570b73f9941b1c78bd9e13d9b37d3af
|
refs/heads/master
| 2021-01-22T14:24:39.795944
| 2014-07-12T16:03:55
| 2014-07-12T16:03:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,888
|
r
|
plot4.r
|
## plot4.r — build a 2x2 panel of household power-consumption plots for
## 2007-02-01/02 and save it as plot4.png.
## Getting data (reading the txt file data as powerDF)
## NOTE(review): hard-coded Windows path; adjust for other machines.
powerDF <- read.table("c:/household_power_consumption.txt", header = TRUE, sep = ";",
                      na.strings = "?", stringsAsFactors = FALSE,
                      colClasses = c("character", "character", "numeric", "numeric",
                                     "numeric", "numeric", "numeric", "numeric", "numeric"))
## creating the DateTime variable from the Date and Time text columns
powerDF$DateTime <- strptime(paste(powerDF$Date, powerDF$Time), format = "%d/%m/%Y %H:%M:%S")
## Cleaning Data (subsetting on required dates)
powerDF$Date <- as.Date(powerDF$Date, format = "%d/%m/%Y")
power2plot <- subset(powerDF, powerDF$Date == "2007-02-01" | powerDF$Date == "2007-02-02")
## Exploratory Data Analysis (creating the 2x2 panel)
par(mfrow = c(2, 2), ps = 12, bg = NA)
with(power2plot, {
  plot(power2plot$DateTime, power2plot$Global_active_power, type = "l",
       ylab = "Global Active Power", xlab = "")
  plot(power2plot$DateTime, power2plot$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  # third panel overlays the three sub-metering series with a legend
  plot(power2plot$DateTime, power2plot$Sub_metering_1, type = "l", col = "black", ylab = "Energy sub metering", xlab = "")
  lines(power2plot$DateTime, power2plot$Sub_metering_2, type = "l", col = "red")
  lines(power2plot$DateTime, power2plot$Sub_metering_3, type = "l", col = "blue")
  legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1,
         pt.cex = .5, cex = .5, box.lty = 1, box.lwd = 1, bty = "n")
  plot(power2plot$DateTime, power2plot$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
## saving the plot as .png file & configuring dimensions (same as example plots)
dev.copy(png, "plot4.png", units = "px", width = 504, height = 504)
dev.off()
|
76e439b8b09a7c59b914fec79bd11b07bdc73ce9
|
28615c76fdbaf72c3e5ffefa89c413fa57f554af
|
/steam/scripts/colourspace_transform.r
|
48a923d7ef71be9b0b739a313fe3cb98deebfaf8
|
[] |
no_license
|
emcake/emcake.github.io
|
a1e7472dc7093a90be578d9dc8dc61550c622c9c
|
780aad391886ba3a6eb3f78f35ffe7ea55c4af66
|
refs/heads/master
| 2020-12-30T15:22:09.610945
| 2017-05-12T22:43:41
| 2017-05-12T22:43:41
| 91,133,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,956
|
r
|
colourspace_transform.r
|
# sRGB inverse companding: linearize a gamma-encoded channel value in [0, 1]
# and scale to [0, 100].
# Generalized: ifelse() makes this work elementwise on whole vectors;
# scalar behavior is unchanged.
xyzpiv <- function( n )
{
  x <- ifelse(n > 0.04045, ((n + 0.055) / 1.055)^2.4, n / 12.92)
  return (x * 100)
}
# Convert gamma-encoded sRGB components to CIE XYZ:
# linearize each channel, then apply the sRGB-to-XYZ matrix elementwise.
rgb2xyz2 <- function( r, g, b )
{
  lr <- xyzpiv( r )
  lg <- xyzpiv( g )
  lb <- xyzpiv( b )
  c(lr * 0.4124 + lg * 0.3576 + lb * 0.1805,   # X
    lr * 0.2126 + lg * 0.7152 + lb * 0.0722,   # Y
    lr * 0.0193 + lg * 0.1192 + lb * 0.9505)   # Z
}
# Vector form of rgb2xyz2(): i is a length-3 (r, g, b) vector.
rgb2xyz <- function( i )
{
return (rgb2xyz2( i[1], i[2], i[3]));
}
# Apply rgb2xyz() to every pixel of an H x W x 3 array, returning an array
# of the same shape.
# Fix: seq_len() replaces 1:d[n], which would wrongly iterate over c(1, 0)
# for a zero-sized dimension.
rgb2xyzM <- function( M )
{
  retM <- M
  d <- dim( M )
  for ( x in seq_len(d[1]) )
  {
    for( y in seq_len(d[2]) )
    {
      retM[x,y,] <- rgb2xyz( M[x,y,] )
    }
  }
  return (retM )
}
# CIE L*a*b* pivot function: cube root above the epsilon threshold,
# linear segment below (constants eps = 216/24389, kap = 24389/27).
# Generalized: ifelse() makes this work elementwise on whole vectors;
# scalar behavior is unchanged.
labpiv <- function( n )
{
  eps <- 216/24389
  kap <- 24389/27
  p <- ifelse(n < eps, (kap * n + 16) / 116, n^(1/3))
  return(p)
}
# Convert XYZ to CIE L*a*b*, normalising against the white point obtained
# by pushing rgb(1, 1, 1) through the same sRGB pipeline.
xyz2lab2 <- function( x, y, z )
{
  white <- rgb2xyz2(1, 1, 1)
  fx <- labpiv(x / white[1])
  fy <- labpiv(y / white[2])
  fz <- labpiv(z / white[3])
  c(max(0, 116 * fy - 16),   # L: clamped at 0
    500 * (fx - fy),         # a
    200 * (fy - fz))         # b
}
# Vector form of xyz2lab2(): i is a length-3 (x, y, z) vector.
xyz2lab <- function( i )
{
return (xyz2lab2( i[1], i[2], i[3] ));
}
# Convert gamma-encoded sRGB components straight to CIE L*a*b*.
# BUG FIX: the original called rgb2xyz(r, g, b), but rgb2xyz() accepts a
# single (r, g, b) vector, so a three-argument call fails with
# "unused arguments".  Delegate to the three-argument rgb2xyz2() instead.
rgb2lab <- function( r, g, b )
{
  xyz <- rgb2xyz2( r, g, b )
  return (xyz2lab( xyz ))
}
# Apply the full RGB -> XYZ -> L*a*b* conversion to every pixel of an
# H x W x 3 array, returning an array of the same shape.
# Fix: seq_len() replaces 1:d[n], which would wrongly iterate over c(1, 0)
# for a zero-sized dimension.
rgb2labM <- function( M )
{
  retM <- M
  d <- dim(M)
  for ( x in seq_len(d[1]) )
  {
    for( y in seq_len(d[2]) )
    {
      retM[x,y,] <- xyz2lab( rgb2xyz( M[x,y,] ) )
    }
  }
  return( retM )
}
# Collapse an H x W x C array to an H x W intensity matrix by averaging
# over the channel axis (simple unweighted mean of the channels).
# rowMeans(..., dims = 2) replaces the original double loop: same values,
# one vectorized call.
rgb2i <- function( M )
{
  out <- rowMeans(M, dims = 2)
  dimnames(out) <- NULL  # match the original's bare, unnamed matrix output
  return (out)
}
# Convert an RGB Image object (Image/Color come from an image package,
# presumably EBImage — confirm) to XYZ, preserving its dimensions.
# NOTE(review): rgb2xyz() is defined above to take a single (r, g, b)
# vector, yet it is called here with three channel matrices — this call
# fails with "unused arguments" as written; likely rgb2xyz2() was intended.
rgb2xyz.Image <- function( i )
{
lab = rgb2xyz( i[,,1], i[,,2], i[,,3] );
d=dim( i );
return (Image( lab, dim=d, colormode=Color ));
}
# Convert an RGB Image object to L*a*b*, preserving its dimensions.
# NOTE(review): passes the three channel matrices to rgb2lab(r, g, b);
# verify rgb2lab's internal conversion chain accepts matrix channels.
rgb2lab.Image <- function( i )
{
lab = rgb2lab( i[,,1], i[,,2], i[,,3] );
d=dim( i );
return (Image( lab, dim=d, colormode=Color ));
}
|
a794f642a4567dce489e910c6e51661d06b2ec97
|
61048c416c7b2e1a6750536f30b6a0c6c0025b92
|
/r-packages/omop-utils/man/typedTbl.Rd
|
5d0da14c88d35a89aaee00ecddce33428d9c3bf2
|
[] |
no_license
|
terminological/etl-analysis
|
81481af7cc2c0c02e4228bdcd2b26dc25968e652
|
4978f1211ffe33231b1edf1fdc4c487d9fe17163
|
refs/heads/master
| 2020-06-18T05:07:38.117358
| 2020-03-12T23:44:13
| 2020-03-12T23:44:13
| 196,173,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,611
|
rd
|
typedTbl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilityFunctions.R
\name{typedTbl}
\alias{typedTbl}
\title{#' Normalise an omop dataframe to a consistent format to construct a single feature set accross
#'
#' with the following columns:
#'
#' * cohort_person_id,
#' * cohort_entry_datetime,
#' * feature_source, e.g. measurement, observation, etc...
#' * feature_name,
#' * feature_concept_id,
#' * feature_value_as_number,
#' * feature_value_as_date,
#' * feature_value_as_name,
#' * feature_value_as_concept_id,
#' * feature_days_offset,
#' * feature_display
#'
#' or the equivalent with observation_ prefix
#'
#' @param omopDf - a df which may be a dbplyr table
#' @param outcome - is the
#' @return a dbplyr dataframe
normaliseColumns = function(omopDf, prefix = "feature") {
omopDf = omopDf %>% omop$inferType()
if (""class(omopDf)
omopDf = omopDf %>% mutate(prefix_source = ) %>%
rename(
prefix_datetime = any_of(c("")),
prefix_concept_id = any_of(c("")),
prefix_value_as_number = any_of(c("")),
prefix_value_as_number_min = any_of(c("")),
prefix_value_as_number_max = any_of(c("")),
prefix_value_as_number_unit = any_of(c("")),
prefix_value_as_date = any_of(c("")),
prefix_value_as_name = any_of(c("")),
prefix_value_as_concept_id = any_of(c("")),
prefix_days_offset = days_offset,
prefix_display = c()
)
replaceList = colnames(omopDf) %>% stringr::str_replace("prefix",prefix)
replaceList = replaceList[replaceList != colnames(omopDf)]
names(replaceList) = colnames(omopDf)
omopDf = omopDf %>% rename(replaceList)
return(omopDf)
#x = tibble(y=c(1,2,3),w=c(3,2,1))
#x %>% select(y,z=any_of(c("a","c")))#,"w","d","y")))
}}
\usage{
typedTbl(con, tableName)
}
\arguments{
\item{con}{- an omop database connection}
\item{tableName}{- the table name}
}
\value{
the dbplyr table with addition class information
}
\description{
c("care_site","cdm_source","concept","concept_ancestor","concept_class","concept_relationship","concept_synonym","condition_era",
"condition_occurrence","cost","device_exposure","domain","dose_era","drug_era","drug_exposure","drug_strength",
"fact_relationship","location","location_history","measurement","metadata","note","note_nlp","observation","observation_period",
"payer_plan_period","persist","persistance","person","procedure_occurrence","provider","relationship","source_to_concept_map",
"specimen","survey_conduct","visit_detail","visit_occurrence","vocabulary")
}
\details{
a dbplyr table with addition class information
}
|
9b32d17cdaca4077c5a1346cd2e63d25429ebf4d
|
1f666e790464c93210443ff9c1456391aae7bc9b
|
/שיעור2.R
|
4228a82858755eb9d1101c99ccf49cf147822f3b
|
[] |
no_license
|
yosefLuzon/EXmacine
|
470a9652b8d4db0bc42db06f41b998963ef49468
|
0510e8a5dcc31aaf93bafced9cb0fe08416acb75
|
refs/heads/master
| 2020-03-10T19:12:34.135798
| 2018-09-14T13:48:19
| 2018-09-14T13:48:19
| 129,543,158
| 0
| 0
| null | null | null | null |
WINDOWS-1255
|
R
| false
| false
| 2,425
|
r
|
שיעור2.R
|
# Lesson 2 — matrices, random numbers, plotting, functions, lists and
# data frames.  (Hebrew comments translated to English.)
v1 <- c(1, 2, 3, 4, 5, 6)
v2 <- c(7, 8, 9, 10, 11, 12)
mat1 <- rbind(v1, v2)  # bind the vectors as rows
mat2 <- cbind(v1, v2)  # bind the vectors as columns
mat1
# BUG FIX: the original `class<-(mat1)` parsed as `class <- (mat1)`, i.e.
# it assigned mat1 to a variable named `class` instead of inspecting it.
class(mat1)
#random numbers
# normal distribution
dnorm(0, 0, 0)
qnorm(0.5, mean = 0, sd = 1)
# probability that the outcome is less than or equal to 1
pnorm(1, 0, 1)
# sampling from a normal distribution
rnorm(10, 0, 1)
# 10 draws from a normal distribution with mean 0
mean(rnorm(1000000, 0, 1)) # law of large numbers
# sample from a uniform distribution (defaults: min 0, max 1)
e <- runif(5) * 10
# plots
data <- matrix(c(1:6, c(10, 11, 15, 17, 55, 78)), 6, 2)
# plot column 1 against column 2
plot(data[, 1], data[, 2])
data
# to draw a line add type = "l"; to start from a given point set ylim
plot(data[, 1], data[, 2], type = "l", ylim = c(0, 60))
# histogram
# draws from a uniform distribution
u <- runif(1000, 0, 100)
# build the histogram
hist(u, ylim = c(1, 50))
n <- rnorm(1000, 0, 100)
hist(n)
# two plots one above the other: run par() first
par(mfrow = c(2, 1))
plot(seq(-4, 4, 0.01), dnorm(seq(-4, 4, 0.01)), type = "l")
plot(seq(-4, 4, 0.1), dnorm(seq(-4, 4, 0.1), mean = 0, sd = 0.5), type = "l")
# defining a function
toFar <- function(cel) {
  x <- cel * 1.8
  x + 32
}
# after writing the function, run it so it becomes available
toFar(33)
toFar1 <- function(cel) {
  x <- cel * 1.8 + 32
  return(x)
}
toFar1(33)
# numerical differentiation
# BUG FIX: the original stepped by 0.1 but divided by 0.01, scaling the
# derivative by 10; the step and the divisor now agree.
derivf <- function(x) {
  (f(x + 0.1) - f(x)) / 0.1
}
f <- function(x) {
  # BUG FIX: the original used a capital X, an undefined object
  x^3 + x + 10
}
derivf(5)
#lists
l <- list(owner = 'jack', sum = 3000)
l[[1]]
#$notation
l$sum
# data frame: a DB-like structure where each column can hold a different
# type, but all values within a column share one type
brands <- c('Ford', 'Mazda', 'Fiat')
from <- c('us', 'japan', 'italy')
rank <- c(3, 2, 1)
cars <- data.frame(brands, from, rank)
cars$brands
# levels? are these part of a fixed set of 3 members, or just three values?
cars$rank
typeof(cars$from)
#filters
brands.filter <- cars$brands == 'Fiat'
cars[brands.filter, 1:2]
# information about the data frame
summary(cars)
# str() shows the labels given to the values in the data frame
str(cars)
# how do we import an external file?
worms <- read.table("worms.txt", header = TRUE)
worms$Damp
typeof(worms$Damp)
|
7f3592159f6f3f9fb63554cdd615def0ef776cd4
|
8097fb5b06d4a1b8fbc53359c2c2d676cb5c3b4a
|
/man/coalesce_values.Rd
|
cfa8ea83e08ef96550a6bcda1bb646e72c14c0bf
|
[] |
no_license
|
ying14/yingtools2
|
03e2e64f04754a842ee7684d6605d6e0ef1c67ee
|
d3056b6fe8a906311639c75c0dd456f18fe0ff7c
|
refs/heads/master
| 2023-08-16T21:14:22.291830
| 2023-08-14T20:02:58
| 2023-08-14T20:02:58
| 54,286,753
| 40
| 13
| null | 2020-07-01T16:55:13
| 2016-03-19T20:54:56
|
R
|
UTF-8
|
R
| false
| true
| 915
|
rd
|
coalesce_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yingtools2.R
\name{coalesce_values}
\alias{coalesce_values}
\title{Coalesce values into one summary variable.}
\usage{
coalesce_values(..., sep = "=", collapse = "|", omit.na = FALSE)
}
\arguments{
\item{...}{variables to coalesce together.}
\item{sep}{character string separating variable name and value. Default is "="}
\item{collapse}{character string separating variable/value pairs. Default is "|"}
\item{omit.na}{whether or not to remove variable in the case of NA.}
}
\value{
A character vector of same length as the variables, displaying variable names plus values.
}
\description{
Summarize the value of several variables in a single character vector by concatenating variable name and values.
}
\examples{
cid.patients \%>\%
mutate(demographics=coalesce_values(agebmt>60,sex,race)) \%>\%
count(demographics,sort=TRUE)
}
|
7403c442c7a984b86e6656ad6d876029ff767c48
|
50fadb4afb4eab0c32165b3fff0b1a0f2048e9f4
|
/inst/templates/bs4Dash/boilerplates/controlbar.R
|
28f54aff3c75288611d9fe479f8b897f241b668d
|
[] |
no_license
|
RinteRface/RinteRfaceVerse
|
65ce34771754dd613b34712196ba97f7365337b2
|
70c7f618db45d129dff4390bed0cb4d51ef2c3b7
|
refs/heads/master
| 2020-05-06T14:07:12.335036
| 2019-04-10T14:18:44
| 2019-04-10T14:18:44
| 180,173,469
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
controlbar.R
|
# bs4Dash controlbar boilerplate: dark skin, no title, 250px wide.
controlbar <- bs4DashControlbar(skin = "dark", title = NULL, width = 250)
|
b14586677da2ca644bb0b7dc82baa7f5a51b06c5
|
6a0170ee4cfcf02221cfc483df49c4523f4dbb5f
|
/scratchpad/cluster_old.R
|
f09dba49ab85ff3354d3845edf3363037b8c1b18
|
[] |
no_license
|
emilieea88/beer-data-science
|
0f85cf00259b5465d6b027025814bab9640c0677
|
b9992760195f85559bb88aa66792c06c006c3639
|
refs/heads/master
| 2022-01-08T05:21:04.217954
| 2018-06-04T10:06:58
| 2018-06-04T10:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,668
|
r
|
cluster_old.R
|
# source("./run_it.R")
# source("./read_from_db.R")
source("./most_popular_styles.R")
library(NbClust)
# ------------------- kmeans ------------
# only using top beer styles
# select only predictor and outcome columns, take out NAs, and scale the data
# Keep id/outcome columns plus the three numeric predictors, drop NAs,
# and trim ABV/IBU outliers before clustering.
beer_for_clustering <- popular_beer_dat %>%
select(name, style, styleId, style_collapsed,
abv, ibu, srm) %>% # not very many beers have SRM so may not want to omit based on it...
na.omit() %>%
filter(
abv < 20 & abv > 3
) %>%
filter(
ibu < 200
)
# z-score the three predictors; columns renamed *_scaled to flag the transform
beer_for_clustering_predictors <- beer_for_clustering %>%
select(abv, ibu, srm) %>%
rename(
abv_scaled = abv,
ibu_scaled = ibu,
srm_scaled = srm
) %>% scale() %>%
as_tibble()
# # take out outliers
# beer_for_clustering <- beer_for_clustering_w_scaled %>%
# filter(
# abv_scaled < 5 & abv_scaled > -2 # take out the nonalcoholic beers
# ) %>%
# filter(
# ibu_scaled < 5
# )
# beer_for_clustering <- bind_cols(beer_for_clustering, beer_for_clustering_w_scaled)
# beer_for_clustering_predictors <- beer_for_clustering %>%
# select(
# abv_scaled, ibu_scaled, srm_scaled
# )
# # separate into predictors and outcomes and scale the predictors
# beer_for_clustering_predictors_w_outliers <- beer_for_clustering %>% select(abv, ibu, srm) %>% rename(
# abv_scaled = abv,
# ibu_scaled = ibu,
# srm_scaled = srm
# ) %>% scale() %>%
# as_tibble()
# take out some abv and ibu outliers from the clustered beer data
# filter(
# !(ibu > 300) # take out outliers
# ) %>%
# filter(
# !(abv > 20)
# )
# Identifier/outcome columns carried alongside the cluster assignment
beer_for_clustering_outcome <- beer_for_clustering %>% select(name, style, styleId, style_collapsed)
# what's the optimal number of clusters?
# nb <- NbClust(beer_for_clustering_predictors, distance = "euclidean",
# min.nc = 2, max.nc = 15, method = "kmeans")
# hist(nb$Best.nc[1,], breaks = max(na.omit(nb$Best.nc[1,])))
# do clustering (fixed seed so assignments are reproducible)
set.seed(9)
clustered_beer_out <- kmeans(x = beer_for_clustering_predictors, centers = 10, trace = TRUE)
# combine cluster labels, identifiers, scaled predictors and raw values
clustered_beer <- as_tibble(data.frame(cluster_assignment = factor(clustered_beer_out$cluster),
beer_for_clustering_outcome, beer_for_clustering_predictors,
beer_for_clustering %>% select(abv, ibu, srm)))
# the three pairwise combinations of predictor plots, coloured by cluster
clustered_beer_plot_abv_ibu <- ggplot(data = clustered_beer, aes(x = abv, y = ibu, colour = cluster_assignment)) +
geom_jitter() + theme_minimal() +
ggtitle("k-Means Clustering of Beer by ABV, IBU, SRM") +
labs(x = "ABV", y = "IBU") +
labs(colour = "Cluster Assignment")
clustered_beer_plot_abv_ibu
clustered_beer_plot_abv_srm <- ggplot(data = clustered_beer, aes(x = abv, y = srm, colour = cluster_assignment)) +
geom_jitter() + theme_minimal() +
ggtitle("k-Means Clustering of Beer by ABV, IBU, SRM") +
labs(x = "ABV", y = "SRM") +
labs(colour = "Cluster Assignment")
clustered_beer_plot_abv_srm
clustered_beer_plot_ibu_srm <- ggplot(data = clustered_beer, aes(x = ibu, y = srm, colour = cluster_assignment)) +
geom_jitter() + theme_minimal() +
ggtitle("k-Means Clustering of Beer by ABV, IBU, SRM") +
labs(x = "IBU", y = "SRM") +
labs(colour = "Cluster Assignment")
clustered_beer_plot_ibu_srm
# take a look at individual clusters
cluster_1 <- clustered_beer %>% filter(cluster_assignment == "1")
cluster_1
cluster_6 <- clustered_beer %>% filter(cluster_assignment == "6")
cluster_6
cluster_9 <- clustered_beer %>% filter(cluster_assignment == "9")
cluster_9
# see how styles clustered themselves
# table of counts: collapsed style x cluster assignment
cluster_table_counts <- table(style = clustered_beer$style_collapsed, cluster = clustered_beer$cluster_assignment)
# cb_spread <- clustered_beer %>% select(
# cluster_assignment, style
# ) %>% group_by(cluster_assignment) %>%
# spread(key = cluster_assignment, value = style, convert = TRUE)
# tsne
# library(tsne)
#
# cb <- clustered_beer %>% sample_n(100)
#
# colors = rainbow(length(unique(cb$style)))
# names(colors) = unique(cb$style)
#
# ecb = function (x,y) {
# plot(x,t='n');
# text(x, labels=cb$style, col=colors[cb$style]) }
#
# tsne_beer = tsne(cb[,4:6], epoch_callback = ecb, perplexity=20)
#
#
# ---------- functionize --------
source("./most_popular_styles.R")
library(NbClust)
# only using top beer styles
# select only predictor and outcome columns, take out NAs, and scale the data
# Cluster a beer data frame with k-means.
#
# df        - data frame of beers (may be a dbplyr table upstream)
# preds     - predictor column names to cluster on
# to_scale  - subset of preds to z-score before clustering
# resp      - outcome/identifier columns carried through to the result
# n_centers - number of k-means centers
#
# Returns a tibble with the cluster assignment (as a factor), the outcome
# columns, the scaled predictors and the raw abv/ibu/srm values.
cluster_it <- function(df, preds, to_scale, resp, n_centers) {
  # BUG FIX: the original selected c(response_vars, cluster_on) — globals —
  # instead of the function's own resp/preds arguments.
  df_for_clustering <- df %>%
    select_(.dots = c(resp, preds)) %>%  # select_ is deprecated; kept for compatibility
    na.omit() %>%
    filter(
      abv < 20 & abv > 3  # drop nonalcoholic and implausibly strong beers
    ) %>%
    filter(
      ibu < 200           # drop IBU outliers
    )
  df_all_preds <- df_for_clustering %>%
    select_(.dots = preds)
  # NOTE(review): the rename below hard-codes abv/ibu/srm, so to_scale is
  # effectively assumed to contain those three columns.
  df_preds_scale <- df_all_preds %>%
    select_(.dots = to_scale) %>%
    rename(
      abv_scaled = abv,
      ibu_scaled = ibu,
      srm_scaled = srm
    ) %>%
    scale() %>%
    as_tibble()
  # re-attach any predictors that were not scaled
  df_preds <- bind_cols(df_preds_scale, df_all_preds[, (!names(df_all_preds) %in% to_scale)])
  df_outcome <- df_for_clustering %>%
    select_(.dots = resp) %>%
    na.omit()
  set.seed(9)  # fixed seed so cluster assignments are reproducible
  clustered_df_out <- kmeans(x = df_preds, centers = n_centers, trace = TRUE)
  clustered_df <- as_tibble(data.frame(
    cluster_assignment = factor(clustered_df_out$cluster),
    df_outcome, df_preds,
    df_for_clustering %>% select(abv, ibu, srm)))
  return(clustered_df)
}
# ----------- main clustering into 10 clusters -------
cluster_on <- c("abv", "ibu", "srm")
to_scale <- c("abv", "ibu", "srm")
response_vars <- c("name", "style", "styleId", "style_collapsed")
clustered_beer <- cluster_it(df = popular_beer_dat,
preds = cluster_on,
to_scale = to_scale,
resp = response_vars,
n_centers = 10)
# ----------------- pared styles -----------------
# restrict to five hand-picked collapsed styles
styles_to_keep <- c("Blonde", "India Pale Ale", "Stout", "Tripel", "Wheat")
bn_certain_styles <- beer_ingredients_join %>%
filter(
style_collapsed %in% styles_to_keep
)
# add hop/malt totals to the predictor set; only abv/ibu/srm get scaled
cluster_on <- c("abv", "ibu", "srm", "total_hops", "total_malt")
to_scale <- c("abv", "ibu", "srm")
response_vars <- c("name", "style", "style_collapsed")
certain_styles_clustered <- cluster_it(df = bn_certain_styles,
preds = cluster_on,
to_scale = to_scale,
resp = response_vars,
n_centers = 5)
# cross-tabulate collapsed style against the 5 cluster assignments
table(style = certain_styles_clustered$style_collapsed, cluster = certain_styles_clustered$cluster_assignment)
# Overlay: cluster assignment (shape) vs collapsed style (colour), with
# per-style mean ABV/IBU centers marked in black and labelled
ggplot() +
geom_point(data = certain_styles_clustered,
aes(x = abv, y = ibu,
shape = cluster_assignment,
colour = style_collapsed), alpha = 0.5) +
geom_point(data = style_centers_certain_styles,
aes(mean_abv, mean_ibu), colour = "black") +
geom_text_repel(data = style_centers_certain_styles,
aes(mean_abv, mean_ibu, label = style_collapsed),
box.padding = unit(0.45, "lines"),
family = "Calibri",
label.size = 0.3) +
ggtitle("Selected Styles (colors) matched with Cluster Assignments (shapes)") +
labs(x = "ABV", y = "IBU") +
labs(colour = "Style") +
theme_bw()
# **
# Prepare predictors/outcome for clustering without running kmeans:
# same filtering and scaling as cluster_it(), but returns the pieces so the
# caller can choose the clustering step.
#
# df       - data frame of beers
# preds    - predictor column names
# to_scale - subset of preds to z-score
# resp     - outcome/identifier columns
#
# Returns list(preds = <scaled predictor tibble>, outcome = <outcome df>).
cluster_prep <- function(df, preds, to_scale, resp) {
  # browser()
  # BUG FIX: the original selected c(response_vars, cluster_on) — globals —
  # instead of the function's own resp/preds arguments.
  df_for_clustering <- df %>%
    select_(.dots = c(resp, preds)) %>%  # select_ is deprecated; kept for compatibility
    na.omit() %>%
    filter(
      abv < 20 & abv > 3 # Only keep beers with ABV between 3 and 20 and an IBU less than 200
    ) %>%
    filter(
      ibu < 200
    )
  df_all_preds <- df_for_clustering %>%
    select_(.dots = preds)
  # NOTE(review): rename hard-codes abv/ibu/srm; any other to_scale columns
  # keep their original names after scaling.
  df_preds_scale <- df_all_preds %>%
    select_(.dots = to_scale) %>%
    rename(
      abv_scaled = abv,
      ibu_scaled = ibu,
      srm_scaled = srm
    ) %>%
    scale() %>%
    as_tibble()
  # re-attach any predictors that were not scaled
  df_preds <- bind_cols(df_preds_scale, df_all_preds[, (!names(df_all_preds) %in% to_scale)])
  df_outcome <- df_for_clustering %>%
    select_(.dots = resp) %>%
    na.omit()
  cluster_prep_out <- list(preds = df_preds, outcome = df_outcome)
  return(cluster_prep_out)
}
# Prepare (but do not run) clustering on beer_totals, scaling all five predictors
cluster_on <- c("abv", "ibu", "srm", "total_hops", "total_malt")
to_scale <- c("abv", "ibu", "srm", "total_hops", "total_malt")
response_vars <- c("name", "style", "style_collapsed")
cp <- cluster_prep(df = beer_totals,
preds = cluster_on,
to_scale = to_scale,
resp = response_vars)
|
ad412cd707a04637bc2f1686e2fa05400535abf2
|
157252491600a136fff59d5c5d4f3e00003cddfe
|
/resources/geneAnnotations/Exome/b37/getCanonicalTargets.R
|
90ed397eb6b3a3074b3ad87967771f1010caa419
|
[] |
no_license
|
soccin/seqCNA
|
76137d35dc6500d0424a2114bb5ed358e135b423
|
8cc0cb28c45b3345f2029a08ba9951e6ec2ab804
|
refs/heads/master
| 2023-09-03T08:45:57.762043
| 2023-08-18T23:34:59
| 2023-08-18T23:34:59
| 72,948,490
| 0
| 3
| null | 2022-10-15T19:52:57
| 2016-11-05T19:26:50
|
R
|
UTF-8
|
R
| false
| false
| 1,669
|
r
|
getCanonicalTargets.R
|
# Build a per-exon annotation table (chrom/start/stop/gene/transcript/exon)
# restricted to "canonical" transcripts, and write it gzip-compressed.
library(tidyverse)
library(data.table)
library(magrittr)
library(stringr)
# Curated transcript-override lists shipped with vcf2maf (hard-coded
# cluster paths; this script is environment-specific).
ISOFORM0="/opt/common/CentOS_6-dev/vcf2maf/v1.6.12/data/isoform_overrides_at_mskcc"
ISOFORM1="/opt/common/CentOS_6-dev/vcf2maf/v1.6.12/data/isoform_overrides_uniprot"
isoform0=read_tsv(ISOFORM0,
    col_names=c("TID","gene_name","refseq_id","ccds_id"),skip=1)
isoform1=read_tsv(ISOFORM1,
    col_names=c("TID","gene_name","refseq_id","ccds_id"),skip=1)
# MSKCC overrides take precedence: first occurrence per gene wins.
isoforms=bind_rows(isoform0,isoform1) %>% distinct(gene_name,.keep_all=T)
canonicalTranscripts = isoforms %>% distinct(TID) %>% pull
# Earlier mouse (mm10/UCSC) approach, kept for reference:
# canonical=read_tsv("ucsc_mm10_knownCanonical.txt.gz")
# canonicalTranscriptsUCSC=canonical %>% distinct(transcript) %>% pull
# canonicalTranscripts=read_tsv("ucsc_mm10_knownToEnsembl.txt.gz") %>%
#     rename(ID=`#name`) %>%
#     filter(ID %in% canonicalTranscriptsUCSC) %>%
#     distinct(value) %>%
#     pull
GTFFILE="/ifs/depot/annotation/H.sapiens/ensembl/v75/Homo_sapiens.GRCh37.75.gtf"
# X1 forced to character so chromosome names like "X"/"MT" do not coerce.
gtf=read_tsv(GTFFILE,col_names=F,comment="#",col_types=list(X1 = col_character()))
# Pull transcript id / gene name / exon number out of the GTF attribute
# column (X9), keep exon records on canonical transcripts, strip any "chr"
# prefix, and rename to the output schema.
iList=gtf %>%
    mutate(transcript_id=str_match(X9,'transcript_id "([^;]*)";')[,2]) %>%
    mutate(gene_name=str_match(X9,'gene_name "([^;]*)";')[,2]) %>%
    mutate(exon_number=str_match(X9,'exon_number "(\\d+)";')[,2]) %>%
    select(-X9) %>%
    filter(X3=="exon") %>%
    filter(transcript_id %in% canonicalTranscripts) %>%
    mutate(X1=gsub("^chr","",X1)) %>%
    select(X1,X4,X5,gene_name,transcript_id,exon_number) %>%
    rename(chrom=X1,start=X4,stop=X5,gene=gene_name,transcript=transcript_id,exon=exon_number)
# Write gzip-compressed TSV and close the connection explicitly.
ozfile=gzfile("gene_annotations.txt.gz","w",compression=9)
write_tsv(iList,ozfile)
close(ozfile)
|
5f42d446eca9db61ab35fc63557364bd01cb4e10
|
adc2f9e770264b77610c8a974c1e947aebfecd3c
|
/run_analysis.R
|
74ebb42e4448fe0f74f489a84b79ad1fb2263d41
|
[] |
no_license
|
sdronava/data_cleaning_proj
|
1ebdb3ab90987941830a3f2e0891c6dac646c388
|
b5ba8c8d09e8330ef1ce6ebe4bc0f2c440f22de7
|
refs/heads/master
| 2020-05-31T06:54:17.592793
| 2015-02-22T14:50:58
| 2015-02-22T14:50:58
| 31,162,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,876
|
r
|
run_analysis.R
|
library(dplyr)
# Top-level driver: combine the UCI HAR train and test sets (mean/std
# features only), average each feature per subject per activity, and
# write the result to results/avg_mean_and_stds_all_data.txt.
runAnalysis <- function() {
  # Load both partitions and stack them into one data set.
  combined <- rbind(createTrainDataSet(), createTestDataSet())
  # Per-subject, per-activity feature averages.
  averages <- createAverages(combined)
  # Ensure the output directory exists before writing.
  if (!file.exists("./results")) {
    dir.create("./results")
  }
  write.table(averages, file = "results/avg_mean_and_stds_all_data.txt",
              row.names = FALSE, na = "", sep = ",", quote = FALSE)
}
# Average every selected feature per subject per activity.
#
# Args:
#   data: combined data frame whose column 2 is Subject, column 3 is
#         Activity, and remaining columns are the mean()/std() features.
# Returns: one row per (subject, activity) pair with the feature means.
#
# NOTE(review): the result frame is seeded with a dummy character row that
# is removed at the end; because of rbind coercion the feature columns
# presumably end up as character, not numeric -- verify before reuse.
createAverages <- function(data) {
  activities <- getActivityNames()
  subjects <- unique(getAllSubjects())
  features <- getAnalysisFeatures()
  #######################################
  ##result <- merge(activities , subjects, by=NULL) ## cross join - 6X30 = 180.
  ##result$V1.x = NULL
  ##result <- rename(result, Activity=V2, Subject=V1.y)
  #################################
  ##Create Result Data frame.
  # Seed the result with a placeholder row holding the column names as
  # values; it is dropped after the loops (see result[-c(1), ] below).
  colNames <- c("subject", "activity")
  result <- as.data.frame(c("subject"))
  result["Activity"] <- c("activity")
  for(featIdx in 1:length(features[, 2]))
  {
    iFeature <- toString(features[featIdx, 2])
    colNames <- c(colNames, iFeature)
    result[toString(iFeature)] <- c(iFeature)
  }
  names(colNames) <- colNames
  names(result) <- colNames
  ## Split by subjects (30 data sets)
  dataBySubj <- split(data, data[, 2])
  numOfSubjs <- length(subjects[,1])
  for(subjIdx in 1 : numOfSubjs )
  {
    iSubj <- toString(subjIdx)
    subjData <- dataBySubj[[ iSubj ]];
    # Within one subject, split by activity and average each feature.
    dataByActivity <- split(subjData , subjData [, 3])
    numOfActivities <- length(activities[, 1])
    for(activityIdx in 1: numOfActivities )
    {
      iActivity <- activities[activityIdx, 2]
      activityData <- dataByActivity[[ iActivity ]]
      # One output row: subject, activity, then each feature's mean.
      newrow <- as.data.frame(c(iSubj))
      newrow["Activity"] <- toString(iActivity)
      for(featIdx in 1:length(features[, 2]))
      {
        iFeature <- toString(features[featIdx, 2])
        iValue <- mean(activityData [[ toString(iFeature) ]])
        newrow[toString(iFeature)] <- as.numeric(iValue)
      }
      names(newrow) <- colNames
      # Grows the frame row-by-row (O(n^2)); acceptable for 180 rows.
      result <- rbind(result, newrow)
    }
  }
  # Drop the placeholder seed row.
  result <- result[-c(1), ]
  #Rename the columns to meet tidy data standards.
  ##newColNames(colNames)
  #Send the results
  result
}
# Stub: was intended to normalise column names to tidy-data conventions.
# It currently walks the names without transforming anything and returns
# NULL (the value of the for loop), exactly as before.
newColNames <- function(oldNames) {
  for (pos in seq_along(oldNames)) {
    current <- oldNames[pos]
    ## print(current)
  }
}
# Load the UCI HAR *test* partition and return a data frame of
# id / Subject / Activity plus the selected mean()/std() feature columns.
# Reads y_test.txt (activity ids), subject_test.txt, and X_test.txt from
# the working directory; activity ids 1-6 are replaced by their labels.
createTestDataSet <- function() {
  ### Get Features to be analysed.
  analysisFeatures <- getAnalysisFeatures()
  #### Read activity names.
  activityNames <- getActivityNames()
  #### Read y_test
  testActivities <- read.csv("UCI HAR Dataset/test/y_test.txt", sep=" ", header=FALSE)
  testActivities <- as.data.frame(testActivities$V1)
  # Replace each numeric activity id with its label, one id at a time.
  # NOTE(review): order matters here only because each id occurs alone on
  # its row; substitution is on the whole value.
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("1", activityNames[1,2], s, fixed=TRUE)))
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("2", activityNames[2,2], s, fixed=TRUE)))
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("3", activityNames[3,2], s, fixed=TRUE)))
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("4", activityNames[4,2], s, fixed=TRUE)))
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("5", activityNames[5,2], s, fixed=TRUE)))
  testActivities <- as.data.frame(apply(testActivities, 1, function(s) sub("6", activityNames[6,2], s, fixed=TRUE)))
  ##print(dim(testActivities ))
  testSubjects <- getTestSubjects()
  #### X_test.txt
  testReadings <- read.csv("UCI HAR Dataset/test/X_test.txt", sep="", header=FALSE)
  ##print(dim(testReadings))
  # Join the columns to create a data set for the test data.
  # NOTE: 2947 is the known row count of the test partition (hard-coded).
  temp <- data.frame(id=1:2947)
  temp[["Subject"]] = testSubjects[ , 1]
  temp[["Activity"]] = testActivities [ , 1]
  ##print(dim(temp))
  # Copy only the mean()/std() feature columns, by original column index.
  sz <- length(analysisFeatures[, 1])
  for( index in 1:sz ) {
    columnName <- analysisFeatures[index, 2]
    columnNum <- analysisFeatures[index, 1]
    temp[[toString(columnName)]] = testReadings [, columnNum]
  }
  ##print(dim(temp))
  temp
}
# Load the UCI HAR *train* partition; mirror of createTestDataSet() but
# reading y_train.txt / subject_train.txt / X_train.txt (7352 rows).
createTrainDataSet <- function() {
  ##### Read features.txt, rename and identify columns in the data set of interest.
  analysisFeatures = getAnalysisFeatures()
  #### Read activity names.
  activityNames <- getActivityNames()
  #### Read y_train
  activities <- read.csv("UCI HAR Dataset/train/y_train.txt", sep=" ", header=FALSE)
  activities <- as.data.frame(activities$V1)
  # Replace each numeric activity id (1-6) with its label.
  activities <- as.data.frame(apply(activities , 1, function(s) sub("1", activityNames[1,2], s, fixed=TRUE)))
  activities <- as.data.frame(apply(activities , 1, function(s) sub("2", activityNames[2,2], s, fixed=TRUE)))
  activities <- as.data.frame(apply(activities , 1, function(s) sub("3", activityNames[3,2], s, fixed=TRUE)))
  activities <- as.data.frame(apply(activities , 1, function(s) sub("4", activityNames[4,2], s, fixed=TRUE)))
  activities <- as.data.frame(apply(activities , 1, function(s) sub("5", activityNames[5,2], s, fixed=TRUE)))
  activities <- as.data.frame(apply(activities , 1, function(s) sub("6", activityNames[6,2], s, fixed=TRUE)))
  ##print(dim(activities ))
  #### Read subject_test
  subjects <- getTrainSubjects()
  ##print(dim(subjects))
  #### X_test.txt
  readings <- read.csv("UCI HAR Dataset/train/X_train.txt", sep="", header=FALSE)
  ##print(dim(readings))
  # Join the columns to create a data set for the test data.
  # NOTE: 7352 is the known row count of the train partition (hard-coded).
  data <- data.frame(id=1:7352)
  data [["Subject"]] = subjects[ , 1]
  data [["Activity"]] = activities [ , 1]
  ##print(dim(data ))
  # Copy only the mean()/std() feature columns, by original column index.
  sz <- length(analysisFeatures[, 1])
  for( index in 1:sz ) {
    columnName <- analysisFeatures[index, 2]
    columnNum <- analysisFeatures[index, 1]
    data [[toString(columnName)]] = readings [, columnNum]
  }
  ##print(dim(data ))
  data
}
# Read the activity lookup table (column V1 = numeric id, V2 = label)
# from activity_labels.txt and return it as a data frame.
getActivityNames <- function() {
  read.csv("UCI HAR Dataset/activity_labels.txt", sep = " ", header = FALSE)
}
# Read the subject id column for the *test* partition.
# Returned invisibly (the original ended on an assignment), one row per
# observation in X_test.
getTestSubjects <- function() {
  path <- "UCI HAR Dataset/test/subject_test.txt"
  invisible(as.data.frame(read.csv(path, sep = " ", header = FALSE)))
}
# Read the subject id column for the *train* partition (returned
# invisibly, matching the original's assignment-as-last-expression).
getTrainSubjects <- function() {
  path <- "UCI HAR Dataset/train/subject_train.txt"
  invisible(as.data.frame(read.csv(path, sep = " ", header = FALSE)))
}
# Stack test then train subject ids into one data frame (returned
# invisibly, matching the original's assignment-as-last-expression).
getAllSubjects <- function() {
  invisible(rbind(getTestSubjects(), getTrainSubjects()))
}
# Read features.txt (column index + feature name) and keep only the
# mean() / std() measurements. Returns a two-column data frame named
# number / name, rows filtered by the literal "-mean()" / "-std()" suffix.
getAnalysisFeatures <- function() {
  feats <- read.csv("UCI HAR Dataset/features.txt", sep = " ", header = FALSE)
  names(feats) <- c("number", "name")
  keep <- grepl("-mean[(][)]|-std[(][)]", feats$name)
  feats[keep, ]
}
|
292a21f1318eff73ac19d3f8ae06ead4ae99c7e6
|
ebcb5cdc14c62b6009e73e5abfb27b13e301e63a
|
/dlt.caret.smda.R
|
ba1bac8def0c8f78920e0b791699a6fc7a608c3a
|
[] |
no_license
|
dlt-lee/dlt
|
06cb40dd4cfad26a7052b58a48502a5231ce16ac
|
242f9cf53720474d39d1997174e1a395731aa7f7
|
refs/heads/master
| 2021-12-24T12:07:20.073163
| 2021-11-18T05:50:41
| 2021-11-18T05:50:41
| 135,522,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,978
|
r
|
dlt.caret.smda.R
|
# Fit a sparse mixture discriminant analysis (smda) model via caret on the
# `dlt` data set, predicting each response from the two previous time
# steps' features. `dlt` must already exist in the workspace.
data<-dlt
count<-dim(dlt)[1]
#dlt.mda <- function(data,count) {
library(caret)
library(sparseLDA)
library(mda)
library(rda)
# Three lagged views of the data: trains_k is the series shifted by k,
# so row i of trains_1/2/3 are three consecutive observations, and
# `results` holds the observation that follows them.
trains_1 <-tail(data,count)[1:(count-3),]
trains_2 <-tail(data,count)[2:(count-2),]
trains_3 <-tail(data,count)[3:(count-1),]
results<-tail(data,(count-3))
tests_1<-tail(data,count)[1:(count-2),]
tests_2<-tail(data,count)[2:(count-1),]
tests_3<-tail(data,(count-2))
#A: predictor columns a1..a5 at each of the three lags, plus the
# response columns (resa1..resa5) taken from the following observation.
trn1<-trains_1$n
trn2<-trains_2$n
trn3<-trains_3$n
a1.1<-trains_1$a1
a2.1<-trains_1$a2
a3.1<-trains_1$a3
a4.1<-trains_1$a4
a5.1<-trains_1$a5
a1.2<-trains_2$a1
a2.2<-trains_2$a2
a3.2<-trains_2$a3
a4.2<-trains_2$a4
a5.2<-trains_2$a5
a1.3<-trains_3$a1
a2.3<-trains_3$a2
a3.3<-trains_3$a3
a4.3<-trains_3$a4
a5.3<-trains_3$a5
resa1<-results$a1
resa2<-results$a2
resa3<-results$a3
resa4<-results$a4
resa5<-results$a5
#B: same scheme for the b1/b2 columns.
b1.1<-trains_1$b1
b2.1<-trains_1$b2
b1.2<-trains_2$b1
b2.2<-trains_2$b2
b1.3<-trains_3$b1
b2.3<-trains_3$b2
resb1<-results$b1
resb2<-results$b2
# One training frame per target; all share the same predictors and
# differ only in the final response column. (data.frame() names the
# last column after the variable, e.g. "resa1".)
trains.a1<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resa1)
trains.a2<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resa2)
trains.a3<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resa3)
trains.a4<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resa4)
trains.a5<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resa5)
trains.b1<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resb1)
trains.b2<-data.frame(trn1,trn2,trn3,
                      a1.1,a2.1,a3.1,a4.1,a5.1,
                      a1.2,a2.2,a3.2,a4.2,a5.2,
                      a1.3,a2.3,a3.3,a4.3,a5.3,
                      b1.1,b2.1,
                      b1.2,b2.2,
                      b1.3,b2.3,
                      resb2)
# Repeated train/test splits (LGOCV), ROC as the selection metric;
# class probabilities are required for twoClassSummary.
set.seed(100)
ctrl<-trainControl(method = "LGOCV",
                   summaryFunction = twoClassSummary,
                   classProbs = TRUE,
                   #index = list(trainset = trains.a1),
                   savePredictions = TRUE)
# Fit resa1 from lag-2 and lag-3 predictors only (lag-1 terms are
# deliberately commented out), tuning the number of subclasses 1..14.
smdaFit.a1<-train(resa1~
                  #a1.1+a2.1+a3.1+a4.1+a5.1+
                  a1.2+a2.2+a3.2+a4.2+a5.2+
                  a1.3+a2.3+a3.3+a4.3+a5.3+
                  #b1.1+b2.1+
                  b1.2+b2.2+
                  b1.3+b2.3,
                  data = trains.a1,
                  method = "smda",
                  metric = "ROC",
                  tuneGrid = expand.grid(.subclasses = 1:14),
                  trControl = ctrl
                  )
|
5fee8ce0b1b7710bc3f296852f7b0223b7b3bc2b
|
3b3674cc7cf9a06c1926533f532ccc091bac2f14
|
/30_mirna_seq/02_r_code/02_comparisons/03_literature_mirs.R
|
e32d6475a7e1e3f1c6277375273515425a1a349d
|
[] |
no_license
|
slobentanzer/integrative-transcriptomics
|
8618c6eef9b58da9c31a188e34ff527f3f9f8d04
|
e9e0a7b6f7ed7687f40fbea816df9d094ba293a2
|
refs/heads/master
| 2022-07-18T10:32:51.331439
| 2021-01-19T15:17:52
| 2021-01-19T15:17:52
| 214,313,249
| 2
| 2
| null | 2022-06-29T17:42:35
| 2019-10-11T00:57:06
|
R
|
UTF-8
|
R
| false
| false
| 5,469
|
r
|
03_literature_mirs.R
|
#LITERATURE MIRS IN SCZ AND BD####
# Compare differentially expressed miRNAs from this study against
# literature lists for schizophrenia (Beveridge & Cairns) and bipolar
# disorder (Fries et al.), write a combined citation table, and draw a
# Venn diagram of the overlaps.
rm(list=ls())
home= '~/GitHub/'
rootdir = paste(home, "integrative-transcriptomics", sep="")
setwd(rootdir)
library(ggplot2)
library(venn)
library(RColorBrewer)
library(RNeo4j)
# Local Neo4j instance; `graph` is not used in this visible chunk.
graph <- startGraph("http://localhost:7474/db/data/")
# DE matrix: rows = miRNAs, columns 1-8 = DE flags across contrasts.
mir.matrix <- readRDS(file = "working_data/mir_de_matrix_countchange.rds")
mir.matrix <- mir.matrix[rowSums(mir.matrix[,1:8]) != 0,]
nrow(mir.matrix) #DE mirs
nrow(mir.matrix[rowSums(mir.matrix[,1:4]) != 0,]) #DE mirs
nrow(mir.matrix[rowSums(mir.matrix[,5:8]) != 0,]) #DE mirs
# Collapse arm-level names (e.g. hsa-miR-1-5p) to the stem (hsa-miR-1)
# so they can be matched against the literature lists.
long_term <- rownames(mir.matrix)
long_term <- unlist(lapply(strsplit(long_term, "-"), function(x) paste(x[1:3], collapse = "-")))
long_term <- gsub("r", "R", long_term)
long_term_lookup <- data.frame(shortname = long_term, name = rownames(mir.matrix))
long_term <- unique(long_term)
#literature mirs####
#SCZ####
# Clean up the SCZ table; the source file uses a Unicode hyphen (U+2010)
# which is normalised to ASCII "-" below, and arm suffixes are stripped.
{
  bc2011mirs <- read.table("./raw_data/bc2011scz_mirs.txt", encoding = "UTF-8")
  bc2011mirs <- bc2011mirs[,1]
  bc2011mirs <- bc2011mirs[c(grep("miR‐", bc2011mirs, fixed = T), grep("let‐", bc2011mirs, fixed = T))]
  bc2011mirs <- bc2011mirs[-grep("SNP", bc2011mirs, fixed = T)]
  bc2011mirs <- gsub("(", "", bc2011mirs, fixed = T)
  bc2011mirs <- gsub(")", "", bc2011mirs, fixed = T)
  bc2011mirs <- gsub("*", "", bc2011mirs, fixed = T)
  bc2011mirs <- gsub("‐", "-", bc2011mirs, fixed = T)
  bc2011mirs <- gsub("-3p", "", bc2011mirs, fixed = T)
  bc2011mirs <- gsub("-5p", "", bc2011mirs, fixed = T)
  bc2011mirs[2] <- "miR-128"
  bc2011mirs <- unique(bc2011mirs)
}
# Exact-stem matches of literature SCZ miRNAs among our DE stems.
idx <- lapply(bc2011mirs, function(x) grep(paste0(x, "$"), long_term))
scz_found <- unique(unlist(lapply(idx, function(x) long_term[x])))
length(bc2011mirs)
length(long_term)
length(scz_found)
length(scz_found)/length(long_term)
#BD####
# Same cleanup for the BD list (plain text, ASCII hyphens).
{
  fries2018mirs <- scan("./raw_data/fries2018bd_mirs.txt", what = "")
  fries2018mirs <- fries2018mirs[c(grep("miR-", fries2018mirs, fixed = T), grep("let-", fries2018mirs, fixed = T))]
  fries2018mirs <- gsub("*", "", fries2018mirs, fixed = T)
  fries2018mirs <- gsub(",", "", fries2018mirs, fixed = T)
  fries2018mirs <- gsub(".", "", fries2018mirs, fixed = T)
  fries2018mirs <- gsub(";", "", fries2018mirs, fixed = T)
  fries2018mirs <- gsub("-3p", "", fries2018mirs, fixed = T)
  fries2018mirs <- gsub("-5p", "", fries2018mirs, fixed = T)
  fries2018mirs <- unique(fries2018mirs)
}
idx <- lapply(fries2018mirs, function(x) grep(paste0(x, "$"), long_term))
bd_found <- unique(unlist(lapply(idx, function(x) long_term[x])))
length(fries2018mirs)
length(bd_found)
length(bd_found)/length(long_term)
# Overlap between the two literature lists / between the two hit sets.
intersect_lit <- unique(fries2018mirs[fries2018mirs %in% bc2011mirs])
intersect_found <- bd_found[bd_found %in% scz_found]
#save####
save(bc2011mirs, fries2018mirs, long_term, file = "./working_data/mirna_seq/mir_disease_literature.RData")
#table####
# Supplementary table: every literature miRNA with its citation(s).
both_mirs <- unique(c(bc2011mirs, fries2018mirs))
both_mirs <- both_mirs[order(both_mirs)]
both_mirs <- data.frame(name = both_mirs)
both_mirs$BD <- both_mirs$name %in% fries2018mirs
both_mirs$SCZ <- both_mirs$name %in% bc2011mirs
both_mirs$citation <- NA
for(i in 1:nrow(both_mirs)){
  BD <- as.numeric(both_mirs$BD[i])
  SCZ <- as.numeric(both_mirs$SCZ[i])
  if(BD+SCZ == 2){
    both_mirs$citation[i] <- "Fries et al., 2018 & Beveridge and Cairns, 2012"
  } else if (BD == 1) {
    both_mirs$citation[i] <- "Fries et al., 2018"
  } else {
    both_mirs$citation[i] <- "Beveridge and Cairns, 2012"
  }
}
both_mirs$name <- paste0("hsa-", both_mirs$name)
write.table(both_mirs, file = "./out/DataS4.csv", row.names = F, quote = T, sep = ";")
#predicted overlap####
# Check which predicted-target miRNA sets also appear in the literature.
primate_pred <- readRDS(file = "./working_data/mirna_seq/predicted_mirs_primate_overlap.rds")
conserved_pred <- readRDS(file = "./working_data/mirna_seq/predicted_mirs_conserved_overlap.rds")
tfm_pred <- readRDS(file = "./working_data/mirna_seq/predicted_mirs_tf_mir_overlap.rds")
# Strip the "hsa-" prefix to match the literature naming.
primate_pred <- unlist(lapply(strsplit(primate_pred, "-"), function(x) paste(x[2:3], collapse = "-")))
conserved_pred <- unlist(lapply(strsplit(conserved_pred, "-"), function(x) paste(x[2:3], collapse = "-")))
tfm_pred <- unlist(lapply(strsplit(tfm_pred, "-"), function(x) paste(x[2:3], collapse = "-")))
primate_pred[primate_pred %in% bc2011mirs]
primate_pred[primate_pred %in% fries2018mirs]
conserved_pred[conserved_pred %in% bc2011mirs]
conserved_pred[conserved_pred %in% fries2018mirs]
tfm_pred[tfm_pred %in% bc2011mirs]
tfm_pred[tfm_pred %in% fries2018mirs]
#venn####
length(bc2011mirs[bc2011mirs %in% gsub("hsa-", "", long_term)])
length(fries2018mirs[fries2018mirs %in% gsub("hsa-", "", long_term)])
bc_fries_mirs <- fries2018mirs[fries2018mirs %in% bc2011mirs]
length(bc_fries_mirs)
length(bc_fries_mirs[bc_fries_mirs %in% gsub("hsa-", "", long_term)])
length(long_term)
length(bc2011mirs)
length(fries2018mirs)
# Membership matrix (rows = miRNAs, cols = DE/SCZ/BD flags) for venn().
allmirs <- unique(c(gsub("hsa-", "", long_term), bc2011mirs, fries2018mirs))
venn_frame <- matrix(ncol = 3, nrow = length(allmirs), rep(0))
rownames(venn_frame) <- allmirs
venn_frame <- data.frame(venn_frame)
colnames(venn_frame) <- c("DE", "SCZ", "BD")
#countchange
for(i in 1:3) {
  temp <- switch(i, gsub("hsa-", "", long_term), bc2011mirs, fries2018mirs)
  venn_frame[, i] <- as.numeric(rownames(venn_frame) %in% temp)
}
svg("img/literature_venn.svg")
venn(venn_frame,
     zcolor = brewer.pal(4, "Set1"), cexsn = .7, cexil = 1)
dev.off()
# Inspect triple and pairwise overlaps, then persist the matrix.
venn_frame[rowSums(venn_frame) == 3,]
venn_frame[rowSums(venn_frame) == 2,]
saveRDS(venn_frame, file = "working_data/literature_de_overlap_frame.rds")
|
b8652f2225407965c0070b6a70b0f2a3afa66602
|
187842c58b7690395eb7405842ac28bc4cafd718
|
/man/vcov.nlstac.Rd
|
f000f36b02975c798472d4193f8633334e26df1e
|
[] |
no_license
|
cran/nlstac
|
d5e38b819795e2862e1b8c7e3e94d0a9af8fbc2f
|
298e5206c29e091929eb76091ba2cb67a22e8316
|
refs/heads/master
| 2023-04-13T16:09:33.706071
| 2023-04-11T14:20:02
| 2023-04-11T14:20:02
| 310,516,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,050
|
rd
|
vcov.nlstac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcov.nlstac.R
\name{vcov.nlstac}
\alias{vcov.nlstac}
\title{Calculate Variance-Covariance Matrix for a nlstac Fitted Model Object}
\usage{
\method{vcov}{nlstac}(object, ...)
}
\arguments{
\item{object}{An object of class \code{"nlstac"} obtained by the \code{nls_tac} function.}
\item{...}{Ignored, for compatibility issues.}
}
\value{
A matrix of the estimated covariances between the parameter estimates.
}
\description{
Returns the variance-covariance matrix of the main parameters of a fitted model object.
The “main” parameters of the model correspond to those returned by \code{coef}.
}
\author{
\strong{Mariano Rodríguez-Arias} (\email{arias@unex.es}).
\emph{Department of Mathematics}
\strong{Juan Antonio Fernández Torvisco} (\email{jfernandck@alumnos.unex.es}).
\emph{Department of Mathematics}
University of Extremadura (Spain)
\strong{Rafael Benítez} (\email{rafael.suarez@uv.es}).
\emph{Department of Business Mathematics}
University of Valencia (Spain)
}
|
c24caa6675b880736c866fe9c568d0c33033db57
|
678ea8ca41e724e5bc9f7fbfeeeeb4278565784e
|
/inst/util/mkRPPATumorDataset.R
|
067902c993afc5463fa5575f6ca81a5cf319db0e
|
[] |
no_license
|
rmylonas/SuperCurvePAF
|
3b5bddd6c86ecfee161e1da4790f856352d4e0d9
|
d55a7b5017ae96704b11dba0a671baef79a65e99
|
refs/heads/master
| 2021-01-23T22:37:10.698784
| 2015-06-10T11:40:11
| 2015-06-10T11:40:11
| 38,314,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,306
|
r
|
mkRPPATumorDataset.R
|
###
### $Id: mkRPPATumorDataset.R 947 2015-01-21 17:44:54Z proebuck $
### (Re)creates 'rppaTumor' dataset object found in 'data' directory.
###
local({
  ##-------------------------------------------------------------------------
  ## Create an RPPA object for one antibody/slide and assign it in the
  ## local() frame under a syntactic name derived from the antibody
  ## (lowercased by default). Returns the variable name that was assigned.
  makeRPPAs <- function(antibody,
                        filename,
                        datadir,
                        xform=function(x) tolower(x)) {
    ## Check argumments
    stopifnot(is.character(antibody) && length(antibody) == 1)
    stopifnot(is.character(filename) && length(filename) == 1)
    stopifnot(is.character(datadir) && length(datadir) == 1)
    stopifnot(is.function(xform))
    ## Begin processing
    ## NOTE: environment(makeRPPAs) is the local() frame, so every RPPA
    ## lands there and is visible to get()/save() below.
    assign(varname <- make.names(xform(antibody)),
           RPPA(filename,
                path=datadir,
                antibody=antibody),
           envir=environment(makeRPPAs))
    return(varname)
  }
  ##
  ## Tumor data with 3 antibodies
  ##
  ## Slide metadata lives in the sample-data package; one RPPA per row.
  extdata.dir <- system.file("extdata", package="SuperCurveSampleData")
  rawdata.dir <- file.path(extdata.dir, "rppaTumorData")
  proteinassayfile <- file.path(rawdata.dir, "proteinAssay.tsv")
  proteinassay.df <- read.delim(proteinassayfile)
  rppas <- apply(proteinassay.df,
                 1,
                 function(proteinassay, datadir) {
                     makeRPPAs(proteinassay["Antibody"],
                               proteinassay["Filename"],
                               datadir)
                 },
                 rawdata.dir)
  ## :BUG: last two lines of layout info file look hinky.
  layoutinfofile <- "layoutInfo.tsv"
  slidedesignfile <- "slidedesign.tsv"
  ## Design built from the first slide; "tDesign" is saved alongside
  ## the per-antibody RPPA objects.
  assign(design <- "tDesign",
         RPPADesign(rppa <- get(rppas[1]),
                    grouping="blockSample",
                    center=TRUE,
                    aliasfile=layoutinfofile,
                    designfile=slidedesignfile,
                    path=rawdata.dir))
  ## Update package data directory
  ## e.g. "rppaTumorData" -> "rppaTumor.RData" in SuperCurve's data dir.
  filename <- sprintf("%s.RData", sub("Data$", "", basename(rawdata.dir)))
  dataset <- file.path(system.file("data", package="SuperCurve"), filename)
  save(list=c(rppas, design),
       file=dataset,
       compress="xz",
       compression_level=9)
})
|
18d6146200463cdfd05fc7d5a066bbbef35960a4
|
3100f891537e474960e9c1f6e986a811eec7efe0
|
/cachematrix.R
|
9854f5f8b2d1fd0a66488249789de066891bbc1b
|
[] |
no_license
|
jleonard7/datasciencecoursera
|
492741cdf2b5b103499b36769b848b96a5555043
|
d059034dd8e5d774370bcc05cffb58da2b112397
|
refs/heads/master
| 2021-01-17T09:11:08.913244
| 2017-05-06T19:22:14
| 2017-05-06T19:22:14
| 83,981,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
cachematrix.R
|
## The purpose of these functions are to allow the user to
## cache the inverse of an invertible matrix. This is useful
## to reduce execution time of programs and calculations
## The function below defines 4 functions which are then
## stored in a list. The list is used in second function to
## cache inverse of a matrix or retrieve the cached inverse
# Wrap a matrix together with a cache slot for its inverse. Returns a
# list of four accessors: set/get for the matrix itself and
# setInverse/getInverse for the cached inverse. Replacing the matrix
# via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # New matrix: store it and drop any stale inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(solve) cached_inverse <<- solve,
    getInverse = function() cached_inverse
  )
}
## Return the inverse of the matrix held by `x` (a list produced by
## makeCacheMatrix). The cached inverse is reused when present;
## otherwise it is computed with solve(), stored back into the cache,
## and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("Getting cached inverse of this matrix")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
84ae5c47b9af8dfb04e0f4e042cf6466df459878
|
6c6e0a5f91591b5fd0a0bfc194a36ae6ba846112
|
/plot6.R
|
207d8bcb5adbddf63d862719bda3cc98abf3fb8f
|
[] |
no_license
|
TarushiRMittal/Exploratory-Data-Analysis-Week-4
|
9209a2a5fa55f991df1976343be7b988350cdd91
|
0a0c390fd46da3d331e84bdd217b63656d108bf9
|
refs/heads/master
| 2020-06-16T18:58:27.132626
| 2019-07-07T22:47:24
| 2019-07-07T22:47:24
| 195,671,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
plot6.R
|
# plot6.R -- compare annual motor-vehicle (ON-ROAD) PM2.5 emissions in
# Baltimore City (fips 24510) vs. Los Angeles County (fips 06037).
emiss_stats <- readRDS("summarySCC_PM25.rds")
classification_code_source <- readRDS("Source_Classification_Code.rds")
# BUGFIX: the original condition `fips=="24510" | fips=="06037" & type=="ON-ROAD"`
# applied the ON-ROAD filter only to the LA rows, because `&` binds more
# tightly than `|`. The plot's axis label ("Total Emissions from Motor
# Vehicles") shows both cities were meant to be restricted to ON-ROAD.
baltimoreLA_cars <- subset(emiss_stats,
                           (emiss_stats$fips=="24510" | emiss_stats$fips=="06037") &
                             emiss_stats$type=="ON-ROAD")
# Total emissions per city per year.
baltimoreLA_cars_annual <- aggregate(baltimoreLA_cars$Emissions,
                                     by=list(baltimoreLA_cars$fips, baltimoreLA_cars$year),
                                     FUN=sum)
colnames(baltimoreLA_cars_annual) <- c("City", "Year", "Emissions")
library(ggplot2)
qplot(Year, Emissions, data = baltimoreLA_cars_annual, color = City, geom = "line") + ggtitle("Emissions of PM2.5 in Baltimore City (24510) and LA County (06037)") + ylab("Total Emissions from Motor Vehicles (tons)") + xlab("Year")
|
8040ac0f28693b6d810c305a739715f8a869a882
|
6caaaa04f0227d61c3a3795050e0e358cd49cf5b
|
/R/altitudeSysDef.R
|
0f2ee477b399ad1adc6a9dc5e89a2969ca59e8ae
|
[
"MIT"
] |
permissive
|
cboettig/build.eml
|
29df6528845efc9a0347ff0ca8f2838db6d7a41e
|
1195dc7f109c75448200047c2918e226adbbe75a
|
refs/heads/master
| 2020-04-08T17:53:24.909984
| 2018-12-03T19:57:42
| 2018-12-03T19:57:42
| 159,584,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
altitudeSysDef.R
|
#' altitudeSysDef
#'
#' altitudeSysDef
#'
#'
#' @inheritParams common_attributes
#' @param altitudeDatumName The identification given to the surface taken as the surface of reference from which altitudes are measured. See [altitudeDatumName()]
#' @param altitudeResolution The minimum distance possible between two adjacent altitude values, expressed in Altitude Distance Units of measure. See [altitudeResolution()]
#' @param altitudeDistanceUnits Units in which altitude is measured. See [altitudeDistanceUnits()]
#' @param altitudeEncodingMethod The means used to encode the altitudes. See [altitudeEncodingMethod()]
#'
#' @return a altitudeSysDef list object
#'
#' @export
altitudeSysDef <- function(altitudeDatumName = NULL,
                           altitudeResolution = NULL,
                           altitudeDistanceUnits = NULL,
                           altitudeEncodingMethod = NULL) {
  # Gather every field, then keep only those the caller actually supplied
  # (i.e. drop the NULL defaults). Name order is preserved.
  fields <- list(
    altitudeDatumName = altitudeDatumName,
    altitudeResolution = altitudeResolution,
    altitudeDistanceUnits = altitudeDistanceUnits,
    altitudeEncodingMethod = altitudeEncodingMethod)
  fields[!vapply(fields, is.null, logical(1))]
}
|
d9830b64055735236a3ae412ab6d9ff928040d7b
|
b694dcea2899a28c6d23cfd0539c17bba84b2bc1
|
/octane_blog_01_eda.R
|
771a45bba6167be303c22663fcb99d9a0a5a26e6
|
[] |
no_license
|
jeffgriesemer/r-project
|
78fee3b8d17b3956e6ffe2a78ca628e74e310cd9
|
ad4c5724396c8de904d14d7f8514c187e7ee1f8a
|
refs/heads/master
| 2020-12-24T10:57:06.852179
| 2019-05-03T02:28:36
| 2019-05-03T02:28:36
| 73,116,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,175
|
r
|
octane_blog_01_eda.R
|
# Exploratory analysis of PGA golf statistics pulled from Google Sheets
# (googlesheets) and, later, ESPN-derived stats. Interactive/EDA script:
# it reads from the clipboard, prints to the console, and clears the
# workspace partway through -- not intended for unattended runs.
library(tidyverse)
library(dplyr)
library(lubridate)
# visual
library(ggplot2)
library(ggrepel)
library(scales)
library(themes)
#install.packages("googlesheets")
library(googlesheets)
rm(list=ls())
# --- Tiger Woods driving accuracy across three snapshots -----------------
df_stats <- gs_title("golf_stats")
df_2018_drive <- df_stats %>% gs_read(ws = "2018 driving accur")
df_2018 <- df_2018_drive %>% select(RANK=RANK_2018,PLAYER_NAME,ROUNDS,FAIRWAY_PCT) %>% filter(PLAYER_NAME == 'Tiger Woods') %>%
  mutate(YEARMO='2018-12', CATEGORY='Driving Accuracy')
df_tiger_2019 <- df_stats %>% gs_read(ws = "2019 driving accr")
df_tiger_2019_03 <- df_tiger_2019 %>% filter(PLAYER_NAME == 'Tiger Woods') %>% select(RANK=RANK_LAST_WEEK_2019,PLAYER_NAME,ROUNDS,FAIRWAY_PCT) %>%
  mutate(YEARMO='2019-03', CATEGORY='Driving Accuracy')
df_tiger_2019_04 <- df_tiger_2019 %>% filter(PLAYER_NAME == 'Tiger Woods') %>% select(RANK=RANK_NOW,PLAYER_NAME,ROUNDS,FAIRWAY_PCT) %>%
  mutate(YEARMO='2019-04', CATEGORY='Driving Accuracy')
df_tiger_drives <- rbind(df_2018,df_tiger_2019_03)
df_tiger_drives <- rbind(df_tiger_drives, df_tiger_2019_04)
df_tiger_drives
##### gir
# Greens-in-regulation snapshots, same three time points.
df_2018_gir <- df_stats %>% gs_read(ws = "2018 gir")
df_tiger_2018_gir <- df_2018_gir %>% filter(PLAYER_NAME == 'Tiger Woods') %>%
  select(RANK=RANK_LAST_WEEK,PLAYER_NAME,ROUNDS,PCT_HIT) %>%
  mutate(YEARMO='2018-12', CATEGORY='GIR')
df_2019_gir <- df_stats %>% gs_read(ws = "2019 gir")
df_tiger_2019_03_gir <- df_2019_gir %>% filter(PLAYER_NAME == 'Tiger Woods') %>%
  select(RANK=RANK_LAST_WEEK,PLAYER_NAME,ROUNDS,PCT_HIT) %>%
  mutate(YEARMO='2019-03', CATEGORY='GIR')
df_tiger_2019_04_gir <- df_2019_gir %>% filter(PLAYER_NAME == 'Tiger Woods') %>%
  select(RANK=RANK_THIS_WEEK,PLAYER_NAME,ROUNDS,PCT_HIT) %>%
  mutate(YEARMO='2019-04', CATEGORY='GIR')
df_tiger_gir <- rbind(df_tiger_2018_gir, df_tiger_2019_03_gir)
df_tiger_gir <- rbind(df_tiger_gir, df_tiger_2019_04_gir)
df_tiger_gir
### putting ###
# NOTE(review): this "putting" section re-reads the GIR worksheet --
# looks like an unfinished copy/paste, to be replaced with putting data.
df_2018_gir <- df_stats %>% gs_read(ws = "2018 gir")
df_tiger_2018_gir <- df_2018_gir %>% filter(PLAYER_NAME == 'Tiger Woods') %>%
  select(RANK=RANK_LAST_WEEK,PLAYER_NAME,ROUNDS,PCT_HIT) %>%
  mutate(YEARMO='2018-12', CATEGORY='GIR')
###############
df_2019 <- df_2019_drive %>% filter(RANK_NOW <= 130) %>% mutate(tiger_flag= ifelse(PLAYER_NAME == 'Tiger Woods', T, F)) %>% arrange(RANK_NOW)
df_2019$PLAYER_NAME <- as.factor(df_2019$PLAYER_NAME)
# NOTE(review): bare fct_reorder() call with no arguments will error if
# executed -- presumably a leftover from interactive experimentation.
fct_reorder()
ggplot() + geom_point(data=df_2019, aes(x=reorder(PLAYER_NAME,RANK_NOW), y=RANK_NOW, color=tiger_flag)) +
  geom_point(data=df_2018, aes(x=PLAYER_NAME, y=RANK_2018, color=tiger_flag)) +coord_flip()
# Manual data grab from the clipboard (interactive only, Windows-style).
df_2018 <- read.table("clipboard", header = T, sep = "\t",comment="&", fill = T)
names(df$X.) <- "HIT PCT"
df_mrg <- merge(df,df_2018, by="PLAYER.NAME", all.x=T)
df_mrg_final <- df_mrg %>% select(PLAYER.NAME, RANK_NOW = RANK.THIS.WEEK.x, PCT_NOW = X..x)
df_2019 <- df %>% select(1:7)
# NOTE(review): placeholder plot -- empty color=/label= aesthetics and
# dummy labs() text suggest this chart was never finished.
ggplot( df_mrg, aes(x = PLAYER.NAME, y = X., color=)) +
  geom_point(size = 2, alpha = .6) +
  geom_smooth(size = 1.5, color = "darkgrey") +
  scale_y_continuous(label = scales::dollar, limits = c(50000, 250000)) +
  scale_x_continuous(breaks = seq(0, 60, 10), limits = c(0, 60)) +
  labs(x = "x", y = "y", title = "title", subtitle = "subtitle") +
  #theme_minimal() +
  geom_text_repel(aes(label= ))
### espn stats
# --- Second, independent analysis on ESPN statistics ---------------------
# (workspace is wiped; everything above is discarded here)
library(ggrepel)
library(ggplot2)
library(dplyr)
library(viridis)
rm(list=ls())
library(tidyverse)
library(googlesheets)
df_stats <- gs_title("golf_stats_espn")
df_2019 <- df_stats %>% gs_read(ws = "2019_stats")
df_2019$DRVE_TOTAL <- NULL
# data wrangling - cleanup
str(df_2019)
describe(df_2019$AGE)
# Keep only rows whose AGE parses as a number, then convert the column.
df_2019_filtered <- df_2019 %>%
  mutate(AGE_numeric = !(is.na(as.numeric(AGE)))) %>%
  filter(AGE_numeric == TRUE) %>%
  mutate(AGE = as.numeric(AGE))
df_2019_filtered <- rename(df_2019_filtered, "RANK_DRV_ACC" = "RK")
# EDA
library(DataExplorer)
#introduce(df_2019_filtered)
plot_intro(df_2019_filtered)
plot_missing(df_2019)
plot_bar(df_2019)
plot_histogram(df_2019_filtered)
plot_boxplot(df_2019_filtered, by = "YDS_DRIVE")
plot_boxplot(df_2019_filtered %>% select(-1), by = "AGE")
#plot_boxplot(df_2019, by = "PLAYER")
plot_correlation(na.omit(df_2019), maxcat = 5L)
plot_correlation(na.omit(df_2019), type = "c")
plot_correlation(na.omit(df_2019), type = "d")
library(funModeling)
library(Hmisc)
# NOTE(review): eda_func mostly ignores its `data` argument and operates
# on the global df_2019 (only plot_num(data) uses the parameter); the
# call below also passes `df`, which was removed by rm(list=ls()) above.
eda_func <- function(data)
{
  glimpse(df_2019)
  df_status(df_2019)
  freq(df_2019)
  profiling_num(df_2019)
  plot_num(data)
  describe(df_2019$AGE)
}
eda_func(df)
sink("file")
describe(df)
sink()
# cor(df_2019)
# error - must be numeric
library(ggcorrplot)
# Correlation heatmaps on the numeric columns (drop rank + player name).
corr <- cor(df_2019_filtered %>% select(-1,-2))
ggcorrplot(corr, type = "lower", outline.col = "black",
           lab=TRUE,
           ggtheme = ggplot2::theme_gray,
           colors = c("#6D9EC1", "white", "#E46726"))
# visualize as a circle indicating significance (circle size) and coorelation
ggcorrplot(corr, type = "lower", outline.col = "black",
           method="circle",
           ggtheme = ggplot2::theme_gray,
           colors = c("#6D9EC1", "white", "#E46726"))
######## create ntile
# Decile ranks per statistic (negated where lower raw values are better).
df_2019_filtered <- df_2019_filtered %>% mutate(RNK_GIR = ntile(GREENS_REG, 10),
                                                RNK_PUT = ntile(-PUTT_AVG,10),
                                                RNK_SAVE = ntile(SAVE_PCT,10),
                                                RNK_AGE = ntile(AGE,10),
                                                RNK_YDS = ntile(YDS_DRIVE,10),
                                                RNK_ACC = ntile(DRIVING_ACC,10))
#df_2019_filtered <- df_2019_filtered %>% mutate(RNK_INDEX = RNK_GIR + RNK_PUT + RNK_SAVE + RNK_AGE + RNK_YDS + RNK_ACC)
#df_2019_filtered <- df_2019_filtered %>% mutate(RNK_INDEX_GRP = ntile(RNK_INDEX, 10))
library(ggrepel)
#ggplot(df_2019_filtered %>% filter(RNK_INDEX_GRP > 5), aes(x=PLAYER, y=RNK_INDEX_GRP,color=as.factor(RNK_INDEX_GRP))) +
# geom_point() +
# coord_flip() +
# geom_label_repel(aes(label=PLAYER))
# Long format over the six RNK_* columns (positions 10:15) for plotting.
df_rnk <- gather(df_2019_filtered, "VARIABLE", "RANK", 10:15)
df_rnk_tiger <- df_rnk %>% filter(grepl("Tiger",PLAYER))
ggplot(df_rnk_tiger, aes(x=VARIABLE, y=RANK)) + geom_point()
##### outliers
df_2019_filtered %>% filter(AGE > 50)
df_2019_filtered %>% filter(GREENS_REG > 73)
### regression line - age vs driving distance
# Progressive build-up of the scatter + fit plots used in the blog post.
ggplot(df_2019_filtered, aes(x=AGE, y=YDS_DRIVE)) +
  geom_point(size=3, color="forest green")
ggplot(df_2019_filtered, aes(x=AGE, y=YDS_DRIVE)) +
  geom_point(size=3, color="forest green") +
  geom_smooth(method = "lm")
ggplot(df_2019_filtered, aes(x=AGE, y=YDS_DRIVE)) +
  geom_point(size=3, color="forest green") +
  geom_smooth(method = "loess")
ggplot(df_2019_filtered, aes(x=DRIVING_ACC, y=YDS_DRIVE, color=AGE)) +
  geom_point(size=3) +
  geom_smooth(method = "lm")
library(viridis)
ggplot(df_2019_filtered, aes(x=DRIVING_ACC, y=YDS_DRIVE, color=AGE)) +
  geom_point(size=3) +
  geom_smooth(method = "lm") +
  scale_color_viridis()
library(ggrepel)
# Final labelled chart: label the extreme drivers and the most accurate.
ggplot(df_2019_filtered, aes(x=DRIVING_ACC, y=YDS_DRIVE, color=AGE)) +
  geom_point(size=4, alpha=.8) +
  geom_smooth(method = "lm") +
  scale_color_viridis() +
  scale_y_continuous(limits=c(250,325)) +
  geom_label_repel(data=df_2019_filtered %>% filter((YDS_DRIVE >= 305 | YDS_DRIVE < 275) | DRIVING_ACC > 70),
                   aes(label=PLAYER), size=3.5) +
  labs(title="PGA statistics - 2019", subtitle = "Yards Per Drive vs. Driving Accuracy by Age",
       caption="Stats from http://www.espn.com/golf/statistics",
       x="Driving Accuracy",y="Yards Per Drive")
################################################################
# Derived rank/decile columns on the unfiltered frame.
df_2019$YDS_RND <- round(df_2019$YDS_DRIVE, -1)
df_2019$YDS_DECILE <- cut(df_2019$YDS_RND, 10, labels=c('10','20','30','40','50','60','70','80','90','100'))
df_2019$RNK_YDS <- rank(-df_2019$YDS_DRIVE)
df_2019$RNK_ACC <- rank(df_2019$DRIVING_ACC)
df_2019$RNK_GIR <- rank(-df_2019$GREENS_REG)
df_2019$RNK_PUT <- rank(df_2019$PUTT_AVG)
ggplot(df_2019, aes(x=YDS_DRIVE, y=PLAYER, fill=YDS_RND)) +
geom_tile(size=3) +
geom_tile(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(color="tiger")) +
geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=paste(PLAYER, ' / Rank #', RNK_YDS, sep=''), color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Driving Distance", ylab="") +
scale_fill_viridis(discrete = F, option="D", guide=guide_legend(title="Driving Yards"))
ggplot(df_2019, aes(x=DRIVING_ACC, y=PLAYER, fill=DRIVING_ACC)) +
geom_tile(size=3) +
geom_tile(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(color="tiger")) +
geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=paste(PLAYER, ' / Rank #',RNK_ACC,sep=''), color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Driving Accuracy", ylab="") +
scale_fill_viridis(discrete = F, option="D", guide=guide_legend(title="Driving Accuracy"))
ggplot(df_2019, aes(x=GREENS_REG, y=PLAYER, fill=GREENS_REG)) +
geom_tile(size=3) +
geom_tile(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(color="tiger")) +
geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=PLAYER, color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Greens in Regulation", ylab="") +
scale_fill_viridis(discrete = F, option="D", guide=guide_legend(title="Greens in Reg"))
ggplot(df_2019, aes(x=PUTT_AVG, y=PLAYER, fill=PUTT_AVG)) +
geom_tile(size=3) +
geom_tile(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'), aes(color="tiger")) +
#geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=paste(PLAYER, ' / Rank #',RNK_PUT,sep=''), color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Putting Average", ylab="") +
scale_fill_viridis(discrete = F, option="D", guide=guide_legend(title="Putting"))
ggplot(df_2019, aes(x=reorder(PLAYER,DRIVING_ACC), y=DRIVING_ACC, fill=YDS_DRIVE)) +
geom_bar(stat="identity",size=3) +
geom_bar(stat="identity",data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(color="tiger")) +
geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=paste(PLAYER, ' / Rank #',RNK_ACC,sep=''), color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Driving Accuracy", ylab="") +
scale_fill_viridis(discrete = F, option="D", guide=guide_legend(title="Driving Distance"))
ggplot(df_2019, aes(x=reorder(PLAYER,DRIVING_ACC), y=DRIVING_ACC, color=YDS_DRIVE)) +
geom_point(size=4) +
geom_point(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(color="tiger")) + coord_flip() +
geom_text_repel(data=df_2019 %>% filter(PLAYER == 'Tiger Woods'),aes(label=paste(PLAYER, ' / Rank #', RNK_ACC,sep=''), color="tiger"),size=4) +
labs(title="Tiger Woods - 2019", subtitle = "Driving Accuracy", ylab="") +
scale_color_viridis(discrete = F, option="D", guide=guide_legend(title="Driving Distance"))
df_2019
# Label the player at driving-distance rank 107 on the distance-vs-player plot.
# Fix: subset() was called with `RNK_YDS = 107` — a named argument, which does
# not filter at all; the comparison operator `==` is required.
ggplot(df_2019, aes(x=YDS_DRIVE, y=PLAYER, fill=RNK_YDS, label=PLAYER)) + geom_point() +
  # gghighlight(PLAYER == 'Tiger Woods', label_key = PLAYER)
  geom_text_repel(data=subset(df_2019, RNK_YDS == 107), aes(x=YDS_DRIVE, y=PLAYER))
|
a221fc6d9d200266948c2336878f9381de1b667f
|
5490e86ba63d4123a2686ee36121f718f883a11f
|
/R/OUwieAvg.R
|
c7067a0629266781b32c064d30ff74e737e4d624
|
[] |
no_license
|
willgearty/pcmtools
|
359bab4dc30167e075ab076cdba97048a03cd2dd
|
14fb49ce08d1c677971a756bb699554070482253
|
refs/heads/master
| 2020-08-07T12:22:04.595425
| 2019-10-29T22:52:05
| 2019-10-29T22:52:05
| 213,449,197
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,941
|
r
|
OUwieAvg.R
|
#' Calculate AIC weights
#'
#' Converts a vector of AIC (Akaike Information Criterion) values into Akaike
#' weights following Burnham and Anderson (2002):
#' \eqn{w_i = exp(-0.5 \Delta_i) / \sum_j exp(-0.5 \Delta_j)}.
#'
#' If \code{na.rm = FALSE} and any values in \code{AIC} are \code{NA}, all
#' returned values will be \code{NA}.
#'
#' @param AIC A vector of AIC values.
#' @param na.rm Whether to remove NA values.
#' @return A named numeric vector of weights; names inherited from \code{AIC}.
#' @export
#' @examples
#' AIC <- c(NA, 5, 10, 20, 25)
#' #ignore NAs
#' AICweights(AIC)
#'
#' #should return all NAs
#' AICweights(AIC, na.rm = FALSE)
AICweights <- function(AIC, na.rm = TRUE) {
  # Relative likelihood of each model, based on its distance from the best AIC.
  rel.lik <- exp(-0.5 * deltaAIC(AIC, na.rm = na.rm))
  # Normalize so the retained weights sum to 1.
  w <- rel.lik / sum(rel.lik, na.rm = na.rm)
  names(w) <- names(AIC)
  w
}
#' Calculate deltaAIC
#'
#' Computes deltaAIC (Akaike Information Criterion): each value's difference
#' from the smallest AIC in the vector.
#'
#' If \code{na.rm = FALSE} and any values in \code{AIC} are \code{NA}, all
#' returned values will be \code{NA}.
#'
#' @param AIC A vector of AIC values.
#' @param na.rm Whether to remove NA values.
#' @return A numeric vector of AIC differences (best model gets 0).
#' @export
#' @examples
#' AIC <- c(NA, 5, 10, 20, 25)
#' deltaAIC(AIC)
#'
#' #should return all NAs
#' deltaAIC(AIC, na.rm = FALSE)
deltaAIC <- function(AIC, na.rm = TRUE) {
  # Difference from the best (lowest) AIC; NA inputs stay NA.
  AIC - min(AIC, na.rm = na.rm)
}
#' Extract parameters from OUwie results
#'
#' Extract various parameter values from the results of (many) OUwie analyses and maps the parameter values to different regimes based on the inputted regime map.
#' Returns the parameters in a 4-D array, where the first dimension is the models, the second dimension is the parameters (including AICc), the third dimension is the regimes, and the fourth dimension is the replicates.
#'
#' The \code{regime.mat} is the most important component, as it indicates which parameters should be mapped to which regime for each model.
#' For example, in an OU1 model, the user would likely want the parameters mapped to all regimes, whereas in an OUM model, the user would likely want the parameters for each regime mapped exclusively to that regime.
#' In more complex scenarios, the user may have multiple OUM models in which regimes are split or combined in different manners, such that the parameters for one regime in one OUM model may map to multiple regimes in the overall dataset.
#' The \code{rownames} of this matrix should identify names for the regimes and the \code{colnames} should identify the models.
#' It is assumed that the order of the models/\code{colnames} in \code{regime.mat} matches the order of the models in \code{ou.results}.
#'
#' Valid options for \code{params} are "Alpha", "Sigma.sq", "Theta", "Theta.se", "Halflife" (phylogenetic half-life), "Stat.var" (stationary variance), "AIC", "AICc", and "BIC".
#'
#' @param ou.results A list of lists (or just a list) of unmodified results from an OUwie analysis
#' @param regime.mat A data frame mapping regimes to total regime options for each model (see details)
#' @param params A vector specifying which parameter should be calculated/returned (see details)
#' @return An \code{ouwiepars} object. Basically a 4-D array (models x params x regimes x replicates) that can be passed to other functions for further analysis/visualization.
#' @export
#' @examples
#' \dontrun{
#' library(OUwie)
#' data(tworegime)
#' ou.results <- list()
#' ou.results[[1]] <- OUwie(tree,trait,model=c("BM1"))
#' ou.results[[2]] <- OUwie(tree,trait,model=c("BMS"), root.station = FALSE)
#' ou.results[[3]] <- OUwie(tree,trait,model=c("OUM"))
#' ou.results[[4]] <- OUwie(tree,trait,model=c("OUMV"))
#'
#' #Both regimes have same parameters for BM1 model. Both regimes have different parameters for other models.
#' regime.mat <- data.frame(BM1 = c(1, 1), BMS = c(1,2), OUM = c(1,2), OUMV = c(1,2), row.names = c(1,2))}
#'
#' OUwieParSumm(ou.results, regime.mat)
OUwieParSumm <- function(ou.results, regime.mat, params = c("Alpha","Sigma.sq","Theta","Theta.se", "AICc")){
  # A single replicate (a plain list of OUwie fits) is wrapped so that the
  # rest of the function can always index ou.results[[replicate]][[model]].
  if(is(ou.results[[1]], "OUwie")) ou.results <- list(ou.results)
  nruns <- length(ou.results)
  regimes <- rownames(regime.mat)
  nregs <- length(regimes)
  mods <- colnames(regime.mat)
  nmods <- length(mods)
  nparams <- length(params)
  # 4-D output: models x parameters x regimes x replicates.
  # dimnames is shorter than dim on purpose: array() pads it with NULLs,
  # leaving the replicate dimension unnamed.
  ou.parameters <- array(NA, dim=c(nmods, nparams, nregs, nruns), dimnames=list(mods, params, regimes))
  for(j in 1:nruns){
    for(i in 1:nmods){
      # Regimes actually estimated by model i (columns of OUwie's solution
      # matrix, whose row 1 is alpha and row 2 is sigma^2).
      reg.temp <- colnames(ou.results[[j]][[i]]$solution)
      nreg.temp <- length(reg.temp)
      #loop through parameters
      for(param in params){
        #Record AICc values
        # Information criteria are model-level scalars: broadcast across all
        # regimes so any regime slice carries the model's score.
        if(param == "AIC"){
          ou.parameters[i, "AIC", , j] <- ou.results[[j]][[i]]$AIC
        } else if(param == "AICc"){
          ou.parameters[i, "AICc", , j] <- ou.results[[j]][[i]]$AICc
        } else if(param == "BIC"){
          ou.parameters[i, "BIC", , j] <- ou.results[[j]][[i]]$BIC
        } else if(param == "Alpha"){
          # Brownian-motion models have no attraction parameter; store 0.
          if(ou.results[[j]][[i]]$model=="BMS" | ou.results[[j]][[i]]$model=="BM1"){
            ou.parameters[i, param, , j] <- 0
          } else {
            # Map each estimated regime onto the global regimes via column i
            # of regime.mat (assumes column order matches ou.results order).
            for(k in 1:nreg.temp){
              ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- ou.results[[j]][[i]]$solution[1,k]
            }
          }
        } else if(param == "Sigma.sq"){
          for(k in 1:nreg.temp){
            ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- ou.results[[j]][[i]]$solution[2,k]
          }
        } else if(param == "Theta"){
          # Single-optimum models report one theta (row 1, col 1 of $theta).
          if(ou.results[[j]][[i]]$model=="BM1" | ou.results[[j]][[i]]$model=="OU1" | ou.results[[j]][[i]]$model=="BMS"){
            ou.parameters[i, param, , j] <- ou.results[[j]][[i]]$theta[1,1]
          } else {
            for(k in 1:nreg.temp){
              ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- ou.results[[j]][[i]]$theta[k,1]
            }
          }
        } else if(param == "Theta.se"){
          # Column 2 of OUwie's $theta holds the standard error of theta.
          if(ou.results[[j]][[i]]$model=="BM1" | ou.results[[j]][[i]]$model=="OU1" | ou.results[[j]][[i]]$model=="BMS"){
            ou.parameters[i, param, , j] <- ou.results[[j]][[i]]$theta[1,2]
          } else {
            for(k in 1:nreg.temp){
              ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- ou.results[[j]][[i]]$theta[k,2]
            }
          }
        } else if(param == "Halflife"){
          # Phylogenetic half-life ln(2)/alpha; undefined for BM models.
          if(ou.results[[j]][[i]]$model=="BMS" | ou.results[[j]][[i]]$model=="BM1"){
            ou.parameters[i, param, , j] <- NA
          } else {
            for(k in 1:nreg.temp){
              ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- log(2)/ou.results[[j]][[i]]$solution[1,k]
            }
          }
        } else if(param == "Stat.var"){
          # Stationary variance sigma^2 / (2 * alpha); undefined for BM models.
          if(ou.results[[j]][[i]]$model=="BMS" | ou.results[[j]][[i]]$model=="BM1"){
            ou.parameters[i, param, , j] <- NA
          } else {
            for(k in 1:nreg.temp){
              ou.parameters[i, param, which(regime.mat[,i] == reg.temp[k], useNames = FALSE), j] <- ou.results[[j]][[i]]$solution[2,k]/(2 * ou.results[[j]][[i]]$solution[1,k])
            }
          }
        }
      }
    }
  }
  # Tag the array so downstream functions can recognize it.
  class(ou.parameters) <- "ouwiepars"
  return(ou.parameters)
}
#' Model average the parameters across (many) OUwie results using AICc
#'
#' Internally calculates AICc weights and uses them to model average the parameter values output from \code{OUwieParSumm}.
#'
#' \code{na.rm = TRUE} will remove any models where \code{AICc == NA}, but will use other models for model averaging;
#' \code{na.rm = FALSE} will remove any replicates (rows of the output) with any models where \code{AICc == NA}.
#'
#' @param ou.parameters An object of class \code{ouwiepars} as output from \code{OUwieParSumm} or \code{CleanOUwieParameters}
#' @param OU.only Whether any Brownian motion models should be dropped before model averaging
#' @param na.rm Whether to ignore model results with \code{NA} AICc values
#' @return A 3-D array (parameters x regimes x replicates) of AICc-weighted,
#'   model-averaged parameter values. (The previous documentation describing a
#'   Weights/Counts list applied to \code{OUwieAICSumm}, not this function.)
#' @export
#' @examples
#' ou.parameters <- OUwieParSumm(ou.results, regime.mat)
#' OUwieModelAvg(ou.parameters)
OUwieModelAvg <- function(ou.parameters, OU.only = FALSE, na.rm = TRUE){
  nmods <- dim(ou.parameters)[1]
  mods <- dimnames(ou.parameters)[[1]]
  # Average every stored parameter except the AICc scores themselves.
  params <- dimnames(ou.parameters)[[2]][!(dimnames(ou.parameters)[[2]] == "AICc")]
  nparams <- length(params)
  nregs <- dim(ou.parameters)[3]
  regs <- dimnames(ou.parameters)[[3]]
  nruns <- dim(ou.parameters)[4]
  # 3-D output: parameters x regimes x replicates (replicate dim unnamed;
  # array() pads the short dimnames list with NULL).
  ou.avg <- array(NA, dim=c(nparams, nregs, nruns), dimnames=list(params, regs))
  # Mask of models retained for averaging; optionally drop BM-family models.
  use <- vector(mode = "logical", length = nmods)
  if(OU.only) use <- !(grepl("BM", mods)) else use <- rep(TRUE, nmods)
  for(i in 1:nruns){
    # Weights come from AICc of the retained models in this replicate
    # (regime index 1 is arbitrary: AICc is broadcast across regimes).
    aicc.weights <- AICweights(ou.parameters[use, "AICc", 1, i], na.rm = na.rm)
    #loop through parameters
    for (param in params){
      # Weighted average per regime: weights recycle down the model dimension.
      # NOTE(review): the [use, param, , i] slice assumes >= 2 retained models
      # and >= 2 regimes; a length-1 slice drops to a vector and colSums()
      # would error — confirm upstream guarantees.
      ou.avg[param, , i] <- colSums(aicc.weights * ou.parameters[use, param, , i], na.rm = na.rm)
    }
    #need to calculate model-averaged theta.se values differently
    #from page 162 of Burnham and Anderson 2002:
    # se_avg = sum_i w_i * sqrt(var_i + (theta_i - theta_avg)^2),
    # i.e. conditional variance plus model-selection uncertainty.
    if("Theta.se" %in% params & "Theta" %in% params){
      for (j in 1:nregs){
        variance <- (ou.parameters[use, "Theta.se", j, i])^2
        theta.avg <- ou.avg["Theta", j, i]
        ou.avg["Theta.se", j, i] <- sum(aicc.weights * sqrt(variance + (ou.parameters[use, "Theta", j, i] - theta.avg)^2), na.rm = na.rm)
      }
    }
  }
  return(ou.avg)
}
#' Clean extracted parameters from OUwie results
#'
#' Cleans the parameters that are extracted by \code{OUwieParSumm} based on
#' user-specified upper and lower bounds.
#'
#' Parameter estimates outside of these bounds will result in the AICc being
#' changed to NA, which will affect downstream model averaging (see
#' \code{OUwieModelAvg}). Bound names that do not match a stored parameter
#' raise an error.
#'
#' @param ou.parameters An object of class \code{ouwiepars} as output from \code{OUwieParSumm}
#' @param lower A named list of lower bounds for model parameters
#' @param upper A named list of upper bounds for model parameters
#' @return An \code{ouwiepars} object with the AICc of out-of-bounds models set to NA
#' @export
#' @examples
#' ou.parameters <- OUwieParSumm(ou.results, regime.mat)
#'
#' #Sets the AICc for the BM1 model to NA, so it wouldn't be included in downstream model averaging
#' OUwieCleanPar(ou.parameters, upper = list("AICc" = 45))
OUwieCleanPar <- function(ou.parameters, lower = list(), upper = list()){
  nruns <- dim(ou.parameters)[4]
  params <- dimnames(ou.parameters)[[2]]
  # Fix: these checks were wrapped in try(), which caught the stop() condition
  # and let execution continue with invalid bound names. Validation now halts.
  if(!all(names(lower) %in% params))
    stop(paste("Incorrect lower bound parameter(s) specified: ",paste0(names(lower)[!(names(lower) %in% params)],collapse=", ")))
  if(!all(names(upper) %in% params))
    stop(paste("Incorrect upper bound parameter(s) specified: ",paste0(names(upper)[!(names(upper) %in% params)],collapse=", ")))
  for(i in 1:nruns){
    # drop = FALSE keeps the slice an array so rowSums() also works when there
    # is a single regime (the unqualified slice collapsed to a vector before).
    # rowSums() then counts, per model, how many regime values violate the
    # bound; any violation invalidates the model's AICc for this replicate.
    # NA parameter values yield NA counts and are left untouched by which().
    for(param in names(lower)){
      viol <- rowSums(ou.parameters[ , param, , i, drop = FALSE] < lower[[param]])
      ou.parameters[which(viol > 0), "AICc", , i] <- NA
    }
    for(param in names(upper)){
      viol <- rowSums(ou.parameters[ , param, , i, drop = FALSE] > upper[[param]])
      ou.parameters[which(viol > 0), "AICc", , i] <- NA
    }
  }
  return(ou.parameters)
}
#' Summarize the model fit across (many) OUwie results using AICc
#'
#' Computes per-replicate AICc weights for every model, and counts how often
#' each model is the best-supported one, for results processed with
#' \code{OUwieParSumm}.
#'
#' \code{na.rm = TRUE} will remove any models where \code{AICc == NA}, but will
#' use other models for AIC weight calculation; \code{na.rm = FALSE} will
#' remove any replicates (rows of the output) with any models where
#' \code{AICc == NA}.
#'
#' @param ou.parameters An object of class \code{ouwiepars} as output from \code{OUwieParSumm} or \code{CleanOUwieParameters}
#' @param na.rm Whether to ignore model results with \code{NA} AICc values
#' @return A list with two elements:
#' \item{Weights}{A data.frame of the AICc weights for each model across the replicates}
#' \item{Counts}{A named vector giving the number of replicates in which each model has the highest AICc weight}
#' @export
#' @examples
#' ou.parameters <- OUwieParSumm(ou.results, regime.mat)
#' OUwieAICSumm(ou.parameters)
OUwieAICSumm <- function(ou.parameters, na.rm = TRUE){
  mods <- dimnames(ou.parameters)[[1]]
  nruns <- dim(ou.parameters)[4]
  # One row per replicate, one column per model.
  weights <- as.data.frame(matrix(NA, nrow = nruns, ncol = length(mods),
                                  dimnames = list(seq_len(nruns), mods)))
  for(run in seq_len(nruns)){
    # AICc is broadcast across regimes, so regime index 1 is representative.
    weights[run, ] <- AICweights(ou.parameters[ , "AICc", 1, run], na.rm = na.rm)
  }
  # Best-supported model per replicate; all-NA rows yield integer(0) from
  # which.max and are silently dropped by unlist().
  best <- unlist(apply(weights, 1, which.max))
  counts <- table(factor(best, levels = seq_along(mods)))
  names(counts) <- mods
  list(Weights = weights, Counts = counts)
}
|
350a4040aa194e0277a79fdd274aeded541cade6
|
4dfae026a7c16a91e0eee543fbc1404009246db2
|
/tests/testthat/test-order_cells.R
|
7da8873efefdf53aa10f06b915d14507dfc46b61
|
[
"MIT"
] |
permissive
|
cole-trapnell-lab/monocle3
|
2d32dddb777ba384470f3842b0fd7d27b857cd5b
|
2b17745d949db1243e95e69e39d2b4b1aa716c09
|
refs/heads/master
| 2023-09-03T07:06:43.428228
| 2023-08-18T22:50:49
| 2023-08-18T22:50:49
| 167,440,342
| 280
| 110
|
NOASSERTION
| 2023-01-24T21:25:37
| 2019-01-24T21:26:18
|
R
|
UTF-8
|
R
| false
| false
| 11,896
|
r
|
test-order_cells.R
|
context("test-order_cells")
skip_not_travis <- function ()
{
  # Run the enclosing test only on Travis CI: skip() everywhere else,
  # return invisible(TRUE) when the TRAVIS env var is exactly "true".
  if (!identical(Sys.getenv("TRAVIS"), "true")) {
    skip("Not on Travis")
  }
  invisible(TRUE)
}
# Fixture: small CellDataSet bundled with the package (presumably the A549
# cell line — confirm against load_a549()'s definition).
cds <- load_a549()
# Fixed seed so clustering/graph results are reproducible across runs.
set.seed(100)
test_that("order_cells error messages work", {
  # This variant runs only OFF Travis; a near-identical copy below uses
  # skip_not_travis() for the Travis environment.
  skip_on_travis()
  # order_cells() must refuse to run until each prerequisite step
  # (reduce_dimension -> cluster_cells -> learn_graph) has been performed.
  expect_error(order_cells(cds), "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP, cluster_cells, and learn_graph before running order_cells." )
  cds <- estimate_size_factors(cds)
  cds <- preprocess_cds(cds, num_dim = 20)
  cds <- reduce_dimension(cds)
  expect_error(order_cells(cds), "No cell clusters for UMAP calculated. Please run cluster_cells with reduction_method = UMAP and run learn_graph before running order_cells.")
  cds <- cluster_cells(cds)
  expect_error(order_cells(cds), "No principal graph for UMAP calculated. Please run learn_graph with reduction_method = UMAP before running order_cells.")
  cds <- learn_graph(cds)
  # Argument validation: mutually exclusive roots, unknown cells/nodes,
  # missing roots in non-interactive mode, unsupported reduction method.
  expect_error(order_cells(cds, root_cells = c("G07_B02_RT_587"), root_pr_nodes = c("Y_1")), "Please specify either root_pr_nodes or root_cells, not both.")
  expect_error(order_cells(cds, root_cells = c("hannah")), "All provided root_cells must be present in the cell data set.")
  expect_error(order_cells(cds, root_pr_nodes = c("hannah")), "All provided root_pr_nodes must be present in the principal graph.")
  # Regex alternation: either of the two historical messages is accepted.
  expect_error(order_cells(cds), "(When not in interactive mode, either root_pr_nodes or root_cells must be provided.|No root node was chosen!)")
  expect_error(order_cells(cds, reduction_method = "tSNE"), "Currently only 'UMAP' is accepted as a reduction_method.")
})
cds <- estimate_size_factors(cds)
cds <- preprocess_cds(cds, num_dim = 20)
cds <- reduce_dimension(cds, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds, cluster_method = "louvain")
cds <- learn_graph(cds)
test_that("order_cells works", {
skip_on_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 11.9, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.0538, tol = 1e-3)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_10"))
expect_equal(max(pseudotime(cds)), 6.34, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.0538, tol = 1e-3)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 13.2, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 1.5, tol = 1e-1)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 7.26, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 1.5, tol = 1e-1)
})
cds <- reduce_dimension(cds, max_components = 3, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds, cluster_method = "louvain")
cds <- learn_graph(cds)
test_that("order_cells works 3d", {
skip_on_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 10.0, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_10"))
expect_equal(max(pseudotime(cds)), 8.64, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 10.4, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 10.2, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
})
cds <- cluster_cells(cds, random_seed = 100)
cds <- learn_graph(cds)
test_that("order_cells works leiden", {
skip_on_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 9.94, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.49, tol = 1e-2)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_2"))
expect_equal(max(pseudotime(cds)), 4.72, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.35, tol = 1e-2)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 8.03, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121 , tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 5.85, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121 , tol = 1e-3)
})
cds <- reduce_dimension(cds, max_components = 3, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds)
cds <- learn_graph(cds)
test_that("order_cells works leiden 3d", {
skip_on_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 9.94, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.49, tol = 1e-2)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_2"))
expect_equal(max(pseudotime(cds)), 4.72, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.35, tol = 1e-2)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 8.03, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 5.85, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
})
#### TRAVIS ####
cds <- load_a549()
set.seed(100)
test_that("order_cells error messages work", {
skip_not_travis()
expect_error(order_cells(cds), "No dimensionality reduction for UMAP calculated. Please run reduce_dimension with reduction_method = UMAP, cluster_cells, and learn_graph before running order_cells." )
cds <- estimate_size_factors(cds)
cds <- preprocess_cds(cds, num_dim = 20)
cds <- reduce_dimension(cds)
expect_error(order_cells(cds), "No cell clusters for UMAP calculated. Please run cluster_cells with reduction_method = UMAP and run learn_graph before running order_cells.")
cds <- cluster_cells(cds)
expect_error(order_cells(cds), "No principal graph for UMAP calculated. Please run learn_graph with reduction_method = UMAP before running order_cells.")
cds <- learn_graph(cds)
expect_error(order_cells(cds, root_cells = c("G07_B02_RT_587"), root_pr_nodes = c("Y_1")), "Please specify either root_pr_nodes or root_cells, not both.")
expect_error(order_cells(cds, root_cells = c("hannah")), "All provided root_cells must be present in the cell data set.")
expect_error(order_cells(cds, root_pr_nodes = c("hannah")), "All provided root_pr_nodes must be present in the principal graph.")
expect_error(order_cells(cds), paste("When not in interactive mode, either",
"root_pr_nodes or root_cells must be",
"provided."))
expect_error(order_cells(cds, reduction_method = "tSNE"), "Currently only 'UMAP' is accepted as a reduction_method.")
})
cds <- estimate_size_factors(cds)
cds <- preprocess_cds(cds, num_dim = 20)
cds <- reduce_dimension(cds, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds, cluster_method = "louvain")
cds <- learn_graph(cds)
test_that("order_cells works", {
skip_not_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 11.9, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.0538, tol = 1e-4)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_10"))
expect_equal(max(pseudotime(cds)), 6.34, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.0538, tol = 1e-4)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 13.2, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 1.5, tol = 1e-1)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 7.26, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 1.5, tol = 1e-1)
})
cds <- reduce_dimension(cds, max_components = 3, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds, cluster_method = "louvain")
cds <- learn_graph(cds)
test_that("order_cells works 3d", {
skip_not_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 10.4, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_10"))
expect_equal(max(pseudotime(cds)), 8.64, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 10.4, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 10.4, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.664, tol = 1e-3)
})
cds <- cluster_cells(cds, random_seed = 100)
cds <- learn_graph(cds)
test_that("order_cells works leiden", {
skip_not_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 9.94, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.49, tol = 1e-2)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_2"))
expect_equal(max(pseudotime(cds)), 4.72, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.35, tol = 1e-2)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 8.03, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 6.1, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
})
cds <- reduce_dimension(cds, max_components = 3, umap.fast_sgd=FALSE)
cds <- cluster_cells(cds)
cds <- learn_graph(cds)
test_that("order_cells works leiden 3d", {
skip_not_travis()
cds <- order_cells(cds, root_pr_nodes = "Y_1")
expect_equal(max(pseudotime(cds)), 9.94, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.49, tol = 1e-2)
cds <- order_cells(cds, root_pr_nodes = c("Y_1", "Y_2"))
expect_equal(max(pseudotime(cds)), 4.72, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 4.35, tol = 1e-2)
cds <- order_cells(cds, root_cells = "G07_B02_RT_587")
expect_equal(max(pseudotime(cds)), 8.03, tol = 1e-2)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
cds <- order_cells(cds, root_cells = c("G07_B02_RT_587", "F06_A01_RT_598"))
expect_equal(max(pseudotime(cds)), 6.1, tol = 1e-1)
expect_equal(min(pseudotime(cds)), 0)
expect_equal(as.numeric(pseudotime(cds)[1]), 0.121, tol = 1e-3)
})
|
b34cf74a262617329b73d6662888cd9cea0c2e7c
|
4ab888da78d52fcacb6a22affa53f09f9e0da9a8
|
/inst/doc/Francais.R
|
84b676387582701af53971a9c2b9b56c62660348
|
[] |
no_license
|
MarionLi0/antaresFlowbased
|
a291ead418fe29f99baa0cad9dbb181b2e9ff0b8
|
9207cd7564b4f821f4d25acf30ba7d1f09d5e286
|
refs/heads/master
| 2021-01-21T18:24:48.240411
| 2017-05-16T12:25:39
| 2017-05-16T12:25:39
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 599
|
r
|
Francais.R
|
## ---- eval=FALSE---------------------------------------------------------
# antaresRead::setSimulationPath("D:/exemple_test", 0)
#
# # initialisation de l'étude flowbased
# initFlowBased()
## ---- eval=FALSE---------------------------------------------------------
# # chemin du solver antares
# setSolverAntares(path = "C:/Program Files/RTE/Antares/5.0.9/bin/antares-5.0-solver.exe")
#
# affichage du solver renseigné
# getSolverAntares()
## ---- eval=FALSE---------------------------------------------------------
# res_fb <- runSimulationFB(simulationName = "flowBased-Tuto")
|
714b8712934f4e07ff098fbdc1bf1a7ec742e1cc
|
4b8dddc2ced41524396dd1b035fbcdebf08cdef8
|
/final/zhihu_tfidf_score.R
|
fb005cb1af79ef89da35bfb63a22ba9891351938
|
[] |
no_license
|
OOmegaPPanDDa/shiny_dsr_zhihu
|
8ecfecff3f9e79382c9d66ff9d7187c06b610517
|
5e728b560d9da8a6baca54f8d4623564c9ff14e0
|
refs/heads/master
| 2021-01-11T22:50:07.659968
| 2017-01-13T04:49:39
| 2017-01-13T04:49:39
| 78,510,048
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,533
|
r
|
zhihu_tfidf_score.R
|
# source('~/dsr/script/zhihu_preprocessing.R')
# library(dplyr)
# library(rJava)
# library(tm)
# library(tmcn)
# library(SnowballC)
# library(slam)
# library(XML)
# library(RCurl)
# library(Rwordseg)
# library(Matrix)
# if (!require('tmcn')) {
# install.packages('tmcn',repos = 'http://R-Forge.R-project.org')
# }
# library(tmcn)
# if (!require('Rwordseg')) {
# install.packages("Rwordseg",repos = 'http://R-Forge.R-project.org')
# }
# library(Rwordseg)
space_tokenizer <- function(x){
  # Tokenize the first element of `x`: split on runs of whitespace and
  # flatten the strsplit() result into a plain character vector.
  pieces <- strsplit(as.character(x[[1]]), '[[:space:]]+')
  unlist(pieces)
}
#text_filter <- function(data_frame) {
# Keep only the columns with text
# data_frame <- data.frame(data_frame$question_title, data_frame$question_detail, data_frame$ans)
# colnames(data_frame) <- c("question_title","question_detail", "ans")
# Remove symbols
# data_frame$question_title <- clean_text(data_frame$question_title)
# data_frame$question_detail <- clean_text(data_frame$question_detail)
# data_frame$ans <- clean_text(data_frame$ans)
# data_frame$question_title[is.na(data_frame$question_title)] <- ''
# data_frame$question_detail[is.na(data_frame$question_detail)] <- ''
# data_frame$ans[is.na(data_frame$ans)] <- ''
# Remove empty rows or NA
# return(data_frame)
#}
#stop_word_vector <- function(df) {
# df$question <- paste(df$question_title, df$question_detail)
#View(df)
# document <- c(unique(df$question),unique(df$ans))
# stop_word <- get_stop_word(document)
# return(stop_word)
#}
# Rank answers by TF-IDF cosine similarity against their question.
#
# `df` must provide: ans_seg (pre-segmented answer text, space-separated
# tokens), question_title and question_detail. Only the FIRST row's question
# is used as the query; every answer column is scored against it.
#
# NOTE(review): seg_worker, stop_words and filter_segment() are free
# variables — their definitions are commented out above, so they must exist
# in the calling environment (presumably a jiebaR segmenter worker and a
# stopword vector); confirm before reuse.
#
# Returns a numeric vector with one cosine score per answer; NaN scores
# (zero-norm document or query) are replaced by 0.
tf_idf_score <- function(df){
  # df <- text_filter(df)
  # stop_words <- stop_word_vector(df)
  # df$ans_seg <- sapply(df$ans, function(x) paste(seg_worker[x], collapse = ' '))
  # Transform the entire answer column into a corpus.
  d_corpus <- VCorpus(VectorSource(as.vector(df$ans_seg)))
  # Remove punctuation.
  d_corpus <- tm_map(d_corpus, removePunctuation)
  # Remove numbers.
  d_corpus <- tm_map(d_corpus, removeNumbers)
  # inspect(d_corpus)
  # print(toTrad(stopwordsCN()))
  # Stopword removal is currently disabled:
  # d_corpus <- tm_map(d_corpus, removeWords, toTrad(stopwordsCN()))
  # d_corpus <- tm_map(d_corpus, removeWords, stop_words)
  # Collapse runs of whitespace.
  d_corpus = tm_map(d_corpus, stripWhitespace)
  # Re-wrap as a plain Corpus so the whitespace tokenizer sees flat strings.
  d_corpus <- Corpus(VectorSource(d_corpus))
  # Tokenize on whitespace; keep only terms of length >= 2.
  control_list=list(wordLengths=c(2,Inf),tokenize=space_tokenizer)
  tdm <- TermDocumentMatrix(Corpus(VectorSource(d_corpus)), control = control_list)
  # TF-IDF: tf = per-document term total; idf = log2(#docs / (doc freq + 1)).
  tf <- apply(tdm, 2, sum) # term frequency
  idf <- function(word_doc){ log2( (length(word_doc)) / (nnzero(word_doc)+1)) }
  idf <- apply(tdm, 1, idf)
  dic_tfidf <- as.matrix(tdm)
  # Weight every cell: (count / document total) * idf of the term.
  for(i in 1:nrow(tdm)){
    for(j in 1:ncol(tdm)){
      dic_tfidf[i,j] <- (dic_tfidf[i,j] / tf[j]) * idf[i]
    }
  }
  # Build the query from the first question (title + detail): segment,
  # drop stopwords, and tabulate term frequencies.
  q = paste(df$question_title[1], df$question_detail[1])
  q_seg <- filter_segment(seg_worker[q], stop_words)
  query_frame <- as.data.frame(table(q_seg))
  query_frame <- query_frame %>% na.omit()
  # Restrict the TF-IDF matrix to the query's terms.
  # NOTE(review): if exactly one term matches, dic_tfidf[loc,] drops to a
  # vector and the apply() below would fail — consider drop = FALSE.
  all_term <- rownames(dic_tfidf)
  loc <- which(is.element(all_term, query_frame$q_seg))
  s_tdm <- dic_tfidf[loc,]
  query_frame <- query_frame[is.element(query_frame$q_seg, rownames(s_tdm)),]
  s_tdm[is.na(s_tdm)]=0
  # Cosine similarity between each document column and the query frequencies.
  cos_tdm <- function(x, y){ x%*%y / sqrt(x%*%x * y%*%y) }
  # print(s_tdm)
  # print(query_frame)
  doc_cos <- apply(s_tdm, 2, cos_tdm, y = query_frame$Freq)
  doc_cos[is.nan(doc_cos)] <- 0
  return(doc_cos)
}
|
7178133a620f8fd91c5c6f3cd2f7a040468866ec
|
3733c7ac7146a6cbd6454d7af07b6265ae70e8e8
|
/tidytextapp_applications.R
|
815e35b00b7fe45c4c1fcde67d196ecee0908ebc
|
[] |
no_license
|
aashishkpandey/tidytextapp
|
830556b5edcda663665038aa5f1ab5a25f6110e7
|
acf75037587b14b856f7efb97b0a66432b73abde
|
refs/heads/master
| 2021-04-26T23:54:19.237943
| 2018-05-29T13:25:36
| 2018-05-29T13:25:36
| 123,877,983
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,335
|
r
|
tidytextapp_applications.R
|
source('https://raw.githubusercontent.com/aashishkpandey/tidytextapp/master/tidytextapp_functions.R')
#--------------------------------------------------------#
# Create DTM of BD
#--------------------------------------------------------#
# Build and save document-term matrices (DTMs) for one 10-K section.
#
# The original script duplicated this loop verbatim for the "bd" (business
# description) and "rf" (risk factor) sections; the only differences were the
# file prefix and the name of the text column, so the loop is factored into a
# single helper. create_DTM() comes from tidytextapp_functions.R, sourced at
# the top of this file.
#
# section: file prefix, "bd" or "rf"; the text column is "<section>.text".
# years:   vector of filing years to process.
# Side effect: writes dtm.<section>.<year>.Rds next to the input files.
build_section_dtm <- function(section, years = 2015:2016) {
  for (year in years) {
    section_df <- readRDS(paste0("D:\\31127 Aashish\\10KTech\\clean_data\\",
                                 section, ".df.", year, ".Rds"))
    # Strip the boilerplate "table of contents" phrase before tokenizing.
    section_text <- gsub('table of contents|table of content', ' ',
                         section_df[[paste0(section, ".text")]])
    section_dtm <- create_DTM(
      text = section_text,
      docID = section_df$file,
      replace_ngrm = TRUE,
      rm_stop_words = TRUE,
      textcleaning = TRUE,
      lower = TRUE,
      alphanum = TRUE,
      drop_num = TRUE,
      stop_custom = c('will', 'was', 'can'),
      smart_stop_words = TRUE,
      tfidf = FALSE,
      bi_gram_pct = 0.02,
      min_freq = 5,
      filter = 'pct',
      py.sent_tknzr = TRUE
    )
    saveRDS(section_dtm,
            paste0('D:\\31127 Aashish\\10KTech\\clean_data\\dtm.',
                   section, '.', year, '.Rds'))
  }
  invisible(NULL)
}
#--------------------------------------------------------#
# Create DTM of BD (business description) and RF (risk factors),
# timing each section's run.
#--------------------------------------------------------#
system.time(build_section_dtm("bd"))
system.time(build_section_dtm("rf"))
|
c55d66a9a6e03e3305877e524125a3dbd4af27b4
|
60491b8d44eaa4ee02c7ae9d90d9d6991febbcd6
|
/code/24_7_study/cgm/cgm_data_overview.R
|
d45a921bddec9368d2244d8e05dc37b4eec770f8
|
[
"MIT"
] |
permissive
|
jaspershen/microsampling_multiomics
|
ae2be38fe06679f43b980b76ea152109bbdd8fce
|
dea02f1148e5aad3243c057a98f565f889be302f
|
refs/heads/main
| 2023-04-14T17:44:20.010840
| 2022-09-05T23:23:26
| 2022-09-05T23:23:26
| 469,214,924
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,906
|
r
|
cgm_data_overview.R
|
## Continuous glucose monitoring (CGM): data-overview figures.
# no_function() presumably aborts when this analysis script is source()d by
# accident — defined outside this file (masstools / code/tools.R); TODO confirm.
no_function()
library(tidyverse)
### Set up project root, helper functions, and the preprocessed objects.
masstools::setwd_project()
# NOTE(review): wipes the whole global environment — consistent with this
# project's other analysis scripts, but destructive when run interactively.
rm(list = ls())
source("code/tools.R")
# Preprocessed CGM objects: measurement matrix, sample and variable metadata.
load("data/24_7_study/cgm/data_preparation/expression_data")
load("data/24_7_study/cgm/data_preparation/sample_info")
load("data/24_7_study/cgm/data_preparation/variable_info")
# Study-wide helpers: day/night intervals and the full sampling time window.
load("data/24_7_study/summary_info/day_night_df")
load("data/24_7_study/summary_info/all_accurate_time")
# All outputs of this script are written under data_overview/.
setwd("data/24_7_study/cgm/data_overview")
# Augment the day/night table: clock-time versions of the interval bounds and
# a per-day label like "Wed-5-1" (weekday-month-day), kept in chronological
# order as factor levels.
day_night_df =
  day_night_df %>%
  dplyr::mutate(
    start_time = as.POSIXct(hms::as_hms(start)),
    end_time = as.POSIXct(hms::as_hms(end)),
    week = format(day, "%a")
  ) %>%
  dplyr::mutate(week = paste(
    week,
    lubridate::month(day),
    lubridate::day(day),
    sep = "-"
  )) %>%
  dplyr::mutate(week = factor(week, unique(week)))
## CGM long table: one row per measurement; row 1 of expression_data holds the
## glucose values. NOTE(review): assumes expression_data columns align with
## sample_info rows — confirm against data_preparation.
temp_data_cgm =
  data.frame(accurate_time = sample_info$accurate_time,
             day = as.character(sample_info$day),
             hour = sample_info$hour,
             time = sample_info$time,
             value = as.numeric(expression_data[1,])) %>%
  dplyr::mutate(
    time = as.POSIXct(time),
    week = format(accurate_time, "%a")
  ) %>%
  dplyr::mutate(week = paste(
    week,
    lubridate::month(day),
    lubridate::day(day),
    sep = "-"
  )) %>%
  dplyr::mutate(week = factor(week, unique(week)))
# plyr is attached only for dlply(); note it masks several dplyr verbs.
library(plyr)
temp =
  temp_data_cgm %>% plyr::dlply(.variables = .(day))
# Print each day's sampling window: absolute time, then clock time.
temp %>%
  lapply(function(x){
    as.character(range(x$accurate_time))
  }) %>%
  do.call(rbind, .)
temp %>%
  lapply(function(x){
    as.character(range(x$time))
  }) %>%
  do.call(rbind, .)
library(scales)
# plot_cgm1: glucose vs. absolute time across the whole study, with the
# day/night intervals shaded and a tight loess smooth overlaid in red.
plot_cgm1 =
  ggplot() +
  # Shade each day/night interval behind the data.
  geom_rect(
    mapping = aes(
      xmin = start,
      xmax = end,
      ymin = -Inf,
      ymax = Inf
    ),
    fill = "lightyellow",
    data = day_night_df,
    # alpha = 0.5,
    show.legend = FALSE
  ) +
  # Raw CGM trace as one connected line.
  geom_line(aes(x = accurate_time,
                y = value,
                group = 1),
            data = temp_data_cgm,
            show.legend = FALSE) +
  labs(y = "Continuous glucose monitoring", x = "") +
  # Tick every 4 h, limited to the study's overall sampling window.
  scale_x_datetime(
    breaks = date_breaks("4 hour"),
    date_labels = "%a %H:%M",
    limits = c(min(all_accurate_time),
               max(all_accurate_time)),
    timezone = "America/Los_Angeles"
  ) +
  # Very small span -> the smooth closely tracks the raw trace.
  geom_smooth(aes(x = accurate_time,
                  y = value),
              method = "loess",
              se = FALSE,
              span = 0.01,
              color = "red",
              data = temp_data_cgm) +
  scale_y_continuous(expand = expansion(mult = c(0, 0))) +
  base_theme +  # base_theme is defined in code/tools.R, sourced above
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1, size = 10),
        axis.line.x = element_blank(),
        # axis.ticks.x = element_blank(),
        panel.grid = element_blank(),
        panel.background = element_rect(fill = alpha("grey", 0.2)),
        plot.margin = margin(t = 0, r = 0, b = 0, l = 0, unit = "pt"))
plot_cgm1
# plot_cgm2: clock time on x, one facet row per study day, colored by day.
plot_cgm2 =
  ggplot() +
  geom_rect(
    mapping = aes(
      xmin = start_time,
      xmax = end_time,
      ymin = -Inf,
      ymax = Inf
    ),
    fill = "lightyellow",
    data = day_night_df,
    show.legend = FALSE
  ) +
  geom_line(aes(x = time,
                y = value,
                group = 1,
                color = day),
            data = temp_data_cgm,
            show.legend = FALSE) +
  # Individual measurements on top of the trace.
  geom_point(aes(x = time,
                 y = value,
                 color = day),
             size = 0.3,
             alpha = 0.5,
             data = temp_data_cgm,
             show.legend = FALSE) +
  ggsci::scale_color_lancet() +
  labs(y = "Continuous glucose monitoring", x = "") +
  scale_x_datetime(
    breaks = scales::date_breaks("6 hour"),
    date_labels = "%H:%M",
    expand = expansion(mult = c(0, 0))
    # timezone = "America/Los_Angeles"
  ) +
  scale_y_continuous(expand = expansion(mult = c(0, 0))) +
  base_theme +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1, size = 10),
        axis.line.x = element_blank(),
        # axis.ticks.x = element_blank(),
        panel.grid = element_blank(),
        panel.background = element_rect(fill = alpha("grey", 0.2)),
        plot.margin = margin(t = 0, r = 0, b = 0, l = 0, unit = "pt")) +
  # One row per day; free y so each day's own range is visible.
  facet_grid(rows = vars(week), scales = "free_y")
plot_cgm2
# Disabled: loess overlay on the faceted plot (kept for reference).
# plot_cgm2 +
#   geom_smooth(aes(x = accurate_time,
#                   y = value),
#               method = "loess",
#               se = FALSE,
#               span = 0.05,
#               color = "red",
#               data = temp_data_cgm)
# plot_cgm3: all days overlaid on one 24-h axis, one colored line per day;
# the day/night shading from a single representative day (2019-05-01).
plot_cgm3 =
  ggplot() +
  geom_rect(
    mapping = aes(
      xmin = start_time,
      xmax = end_time,
      ymin = -Inf,
      ymax = Inf
    ),
    fill = "lightyellow",
    data = day_night_df %>%
      dplyr::filter(day == "2019-05-01"),
    show.legend = FALSE
  ) +
  geom_line(aes(x = time,
                y = value,
                group = week,
                color = week),
            data = temp_data_cgm,
            show.legend = TRUE) +
  labs(y = "Continuous glucose monitoring", x = "") +
  # week_color is defined in code/tools.R, sourced above.
  scale_color_manual(values = week_color) +
  scale_x_datetime(
    breaks = scales::date_breaks("2 hour"),
    date_labels = "%H:%M",
    expand = expansion(mult = c(0, 0))
  ) +
  scale_y_continuous(expand = expansion(mult = c(0, 0))) +
  base_theme +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1, size = 10),
        axis.line.x = element_blank(),
        legend.position = "top",
        # axis.ticks.x = element_blank(),
        panel.grid = element_blank(),
        panel.background = element_rect(fill = alpha("grey", 0.2)),
        plot.margin = margin(t = 0, r = 0, b = 0, l = 0, unit = "pt")) +
  # Single-row legend on top.
  guides(color = guide_legend(nrow = 1))
plot_cgm3
# Figure export (disabled):
# ggsave(plot_cgm1, filename = "plot_cgm1.pdf", width = 14, height = 3)
# ggsave(plot_cgm2, filename = "plot_cgm2.pdf", width = 7, height = 14)
# ggsave(plot_cgm3, filename = "plot_cgm3.pdf", width = 14, height = 7)
|
87a30a1beb7f7e43862df1db4a4bacd430ba88d1
|
b5dcfcde6c991b0e1272562d95ac1d8bb3eff9b8
|
/man/parameters.Rd
|
602f29636c591be25c272ab673d913a66bbc93f4
|
[] |
no_license
|
jkennel/aquifer
|
080b0b25c77ebe3c09bd4c1719fefc072e2c7649
|
8a5b812906953c046443035196f89912c7048545
|
refs/heads/master
| 2022-08-09T04:43:35.876031
| 2022-07-25T18:09:20
| 2022-07-25T18:09:20
| 80,688,849
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,849
|
rd
|
parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters.R
\name{parameters}
\alias{parameters}
\title{parameters}
\usage{
parameters(frequency, period, omega, alpha_w, storage_aquifer,
storage_confining, specific_yield, transmissivity_aquifer,
diffusivity_vadose, diffusivity_aquifer, diffusivity_confining,
thickness_vadose, thickness_aquifer, thickness_confining, height_water,
radius_well, radius_casing, loading_efficiency, attenuation, inverse,
gravity)
}
\arguments{
\item{frequency}{the frequency in cycles per time (example units: cycles per day)}
\item{period}{the period of signal (example units: days)}
\item{omega}{the angular frequency \eqn{2 \pi \omega} (example units: radians / time)}
\item{alpha_w}{the dimensionless frequency}
\item{storage_aquifer}{the aquifer specific storage}
\item{storage_confining}{the confining layer specific storage}
\item{specific_yield}{the specific yield at the water table}
\item{transmissivity_aquifer}{the aquifer transmissivity}
\item{diffusivity_vadose}{pneumatic diffusivity of vadose zone}
\item{diffusivity_aquifer}{aquifer diffusivity}
\item{diffusivity_confining}{confining layer diffusivity}
\item{thickness_vadose}{the thickness of the vadose zone}
\item{thickness_aquifer}{the aquifer thickness}
\item{thickness_confining}{the confining layer thickness}
\item{height_water}{the depth from the water table}
\item{radius_well}{the radius at the screened portion of the well}
\item{radius_casing}{the radius at the location of the water level}
\item{loading_efficiency}{static loading efficiency}
\item{attenuation}{the attenuation factor of the capillary fringe}
\item{inverse}{if true water level follows the inverse of a barometric pressure change}
\item{gravity}{the acceleration due to gravity}
}
\description{
parameters
}
|
c46e31d252910d588d52c6d43ab4d75825755315
|
b588e0a4df002a71bc1948f660b0f033bab57858
|
/ag.R
|
a2c9949dc7d581c2dbbf716835673e9b8524ea47
|
[] |
no_license
|
ccqa86/Proyecto-Maestria
|
2eacb09dee422c255d722c22f6922106e6eead64
|
29bf33516abc74280b763c70c473a1e7defd2fcd
|
refs/heads/master
| 2023-01-03T10:12:34.574582
| 2020-11-02T00:30:02
| 2020-11-02T00:30:02
| 294,197,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,601
|
r
|
ag.R
|
##### Algoritmo genético ######
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/crearpob.R")
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/fitnessind.R")
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/fitnesspob.R")
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/seleccionarind.R")
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/cruzarind.R")
source("C:/Users/Carmen C/Documents/R/Proyecto-Maestria/mutarind.R")
# Genetic-algorithm optimizer for a real-valued objective function.
#
# Relies on the helper functions source()d at the top of this file:
# crear_poblacion(), calcular_fitness_poblacion(), seleccionar_individuo(),
# cruzar_individuos() and mutar_individuo().
#
# ARGUMENTS
# =============================================================================
# funcion_objetivo: function to optimize (must already be defined).
# n_variables: number of decision variables per individual.
# nmax, miu, escenario, des: problem-specific settings forwarded to the
#   population / crossover / mutation helpers (semantics live in the sourced
#   files — see crearpob.R and friends).
# optimizacion: "maximizar" or "minimizar"; fitness is the objective value or
#   its negation accordingly.
# limite_inf / limite_sup: per-variable lower/upper bounds; NA entries are
#   replaced by defaults. NOTE(review): the warning texts claim the defaults
#   are [-10^3, 10^3], but the code actually uses 1 and 100.
# n_poblacion: population size.
# n_generaciones: number of generations (an upper bound when early stopping
#   is enabled).
# elitismo: fraction of best individuals copied unchanged into the next
#   generation.
# prob_mut: per-position mutation probability.
# distribucion: distribution of the mutation factor ("normal", "uniforme" or
#   "aleatoria"), parameterized by media_/sd_/min_/max_distribucion.
# metodo_seleccion: "ruleta", "rank" or "tournament".
# metodo_cruce: "uniforme" or "punto_simple".
# parada_temprana / rondas_parada / tolerancia_parada: stop early when the
#   best fitness changes by less than tolerancia_parada for rondas_parada
#   consecutive generations.
# verbose: 0 (silent), 1 (per-generation summary) or 2 (full detail).
#
# RETURNS
# =============================================================================
# A list of class "optimizacion_ga": best individual/value overall, best
# fitness and individual per generation, absolute between-generation
# differences, a tidy results data frame, all populations, and the objective
# function itself. Run metadata is attached as attributes (consumed by
# print.optimizacion_ga()).
optimizar_ga <- function(
    funcion_objetivo,
    n_variables,
    nmax,
    miu,
    escenario,
    des,
    optimizacion,
    limite_inf = NULL,
    limite_sup = NULL,
    n_poblacion = 20,
    n_generaciones = 50,
    elitismo = 0.1,
    prob_mut = 0.5,
    distribucion = "aleatoria",
    media_distribucion = 1,
    sd_distribucion = 1,
    min_distribucion = -1,
    max_distribucion = 1,
    metodo_seleccion = "ruleta",
    metodo_cruce = "uniforme",
    parada_temprana = FALSE,
    rondas_parada = NULL,
    tolerancia_parada = NULL,
    verbose = 1,
    ...) {
  # (Disabled adaptive-mutation experiment, kept for reference.)
  # prob_mut_total=c(0.05,0.1,0.15,0.2,0.25)
  # a<-1
  start_time <- Sys.time()
  # INITIAL CHECKS
  # ----------------------------------------------------------------------------
  # Early stopping requires both control parameters.
  # NOTE(review): scalar context — && / || would be more idiomatic than & / |.
  if (isTRUE(parada_temprana) &
      (is.null(rondas_parada) | is.null(tolerancia_parada)) ) {
    stop(paste(
      "Para activar la parada temprana es necesario indicar un valor",
      "de rondas_parada y de tolerancia_parada."
    ))
  }
  # DEFAULT SEARCH BOUNDS when the caller did not provide them
  # ----------------------------------------------------------------------------
  if (is.null(limite_sup) | is.null(limite_inf)) {
    warning(paste(
      "Es altamente recomendable indicar los límites dentro de los",
      "cuales debe buscarse la solución de cada variable.",
      "Por defecto se emplea: [-10^3, 10^3]."
    ))
  }
  if (any(
    is.null(limite_sup), is.null(limite_inf), any(is.na(limite_sup)),
    any(is.na(limite_inf))
  )) {
    warning(paste(
      "Los límites empleados por defecto cuando no se han definido son:",
      " [-10^3, 10^3]."
    ))
    cat("\n")
  }
  # Missing lower bounds default to 1 per variable.
  if (is.null(limite_inf)) {
    limite_inf <- rep(x = 1, times = n_variables)
  }
  # Missing upper bounds default to 100 per variable.
  if (is.null(limite_sup)) {
    limite_sup <- rep(x = 100, times = n_variables)
  }
  # NA entries inside user-supplied bounds receive the same defaults.
  if (!is.null(limite_inf)) {
    limite_inf[is.na(limite_inf)] <- 1
  }
  if (!is.null(limite_sup)) {
    limite_sup[is.na(limite_sup)] <- 100
  }
  # RESULT STORAGE
  # ----------------------------------------------------------------------------
  # Per generation: population, best fitness, best individual and the absolute
  # difference vs. the previous generation.
  poblaciones <- vector(mode = "list", length = n_generaciones)
  resultados_fitness <- vector(mode = "list", length = n_generaciones)
  resultados_individuo <- vector(mode = "list", length = n_generaciones)
  diferencia_abs <- vector(mode = "list", length = n_generaciones)
  # GENERATION LOOP
  # ----------------------------------------------------------------------------
  for (i in 1:n_generaciones) {
    if (verbose %in% c(1,2)) {
      cat("-------------------", "\n")
      cat("Generación:", paste0(i, "\\", n_generaciones), "\n")
      cat("-------------------", "\n")
    }
    if (i == 1) {
      # Initial population (generation 1 only).
      poblacion <- crear_poblacion(
        n_poblacion = n_poblacion,
        n_variables = n_variables,
        nmax = nmax,
        miu = miu,
        escenario = escenario,
        des = des,
        limite_inf = limite_inf,
        limite_sup = limite_sup,
        verbose = verbose %in% c(2)
      )
    }
    # Values are kept at three-decimal resolution throughout the run.
    poblacion<-round(poblacion,3)
    # Fitness of every individual in the current population.
    fitness_ind_poblacion <- calcular_fitness_poblacion(
      poblacion = poblacion,
      funcion_objetivo = funcion_objetivo,
      optimizacion = optimizacion,
      verbose = verbose %in% c(2)
    )
    # Record the population and its best individual.
    poblaciones[[i]] <- poblacion
    fitness_mejor_individuo <- max(fitness_ind_poblacion)
    mejor_individuo <- poblacion[which.max(fitness_ind_poblacion), ]
    resultados_fitness[[i]] <- fitness_mejor_individuo
    resultados_individuo[[i]] <- mejor_individuo
    # Absolute change vs. the previous generation (defined from gen 2 on).
    if (i > 1) {
      diferencia_abs[[i]] <- abs(resultados_fitness[[i - 1]] - resultados_fitness[[i]])
    }
    # NEXT POPULATION
    # --------------------------------------------------------------------------
    nueva_poblacion <- matrix(
      data = NA,
      nrow = nrow(poblacion),
      ncol = ncol(poblacion)
    )
    # ELITISM: copy the top `elitismo` fraction straight through, so the next
    # generation can never be worse than the current one.
    if (elitismo > 0) {
      n_elitismo <- ceiling(nrow(poblacion) * elitismo)
      posicion_n_mejores <- order(fitness_ind_poblacion, decreasing = TRUE)
      posicion_n_mejores <- posicion_n_mejores[1:n_elitismo]
      nueva_poblacion[1:n_elitismo, ] <- poblacion[posicion_n_mejores, ]
    } else {
      n_elitismo <- 0
    }
    # CROSSOVER + MUTATION fill the remaining slots.
    # --------------------------------------------------------------------------
    for (j in (n_elitismo + 1):nrow(nueva_poblacion)) {
      # Select two parents (roulette, rank or tournament).
      indice_parental_1 <- seleccionar_individuo(
        vector_fitness = fitness_ind_poblacion,
        metodo_seleccion = metodo_seleccion,
        verbose = verbose %in% c(2)
      )
      indice_parental_2 <- seleccionar_individuo(
        vector_fitness = fitness_ind_poblacion,
        metodo_seleccion = metodo_seleccion,
        verbose = verbose %in% c(2)
      )
      parental_1 <- poblacion[indice_parental_1, ]
      parental_2 <- poblacion[indice_parental_2, ]
      # Cross the parents to obtain one offspring.
      descendencia <- cruzar_individuos(
        parental_1 = parental_1,
        parental_2 = parental_2,
        metodo_cruce = metodo_cruce,
        verbose = verbose %in% c(2),
        escenario = escenario
      )
      # Mutate the offspring within the search bounds.
      descendencia <- mutar_individuo(
        individuo = descendencia,
        prob_mut = prob_mut,
        limite_inf = limite_inf,
        limite_sup = limite_sup,
        distribucion = distribucion,
        media_distribucion = media_distribucion,
        sd_distribucion = sd_distribucion,
        min_distribucion = min_distribucion,
        max_distribucion = max_distribucion,
        verbose = verbose %in% c(2),
        escenario = escenario
      )
      nueva_poblacion[j, ] <- descendencia
    }
    # (Disabled adaptive-mutation experiment, kept for reference.)
    # if (poblacion==nueva_poblacion ){
    #   prob_mut=prob_mut_total[a]
    #   print(prob_mut)
    #   if (a<length(prob_mut_total) ){
    #     a=a+1
    #   }
    #}
    poblacion <- round(nueva_poblacion,3)
    # EARLY STOPPING: if the best fitness has not changed by at least
    # tolerancia_parada for the last rondas_parada generations, stop.
    # --------------------------------------------------------------------------
    if (parada_temprana && (i > rondas_parada)) {
      ultimos_n <- tail(unlist(diferencia_abs), n = rondas_parada)
      if (all(ultimos_n < tolerancia_parada)) {
        cat(
          "Algoritmo detenido en la generacion", i,
          "por falta cambio mínimo de", tolerancia_parada,
          "durante", rondas_parada,
          "generaciones consecutivas.",
          "\n"
        )
        break()
      }
    }
  }
  # BEST INDIVIDUAL OVER THE WHOLE RUN
  # ----------------------------------------------------------------------------
  indice_mejor_individuo_global <- which.max(unlist(resultados_fitness))
  mejor_fitness_global <- resultados_fitness[[indice_mejor_individuo_global]]
  mejor_individuo_global <- resultados_individuo[[indice_mejor_individuo_global]]
  # Undo the sign flip used to express minimization as maximization.
  if (optimizacion == "maximizar") {
    mejor_valor_global <- mejor_fitness_global
  } else {
    mejor_valor_global <- -1*mejor_fitness_global
  }
  # RESULTS
  # ----------------------------------------------------------------------------
  # Flatten the per-generation lists into equal-length vectors for the summary
  # data frame (the first diferencia_abs is undefined, hence the leading NA).
  resultados_fitness <- unlist(resultados_fitness)
  diferencia_abs <- c(NA, unlist(diferencia_abs))
  # Early stopping may leave unused slots; drop them.
  resultados_individuo <- resultados_individuo[!sapply(resultados_individuo, is.null)]
  poblaciones <- poblaciones[!sapply(poblaciones, is.null)]
  # Collapse each best individual into a single "v1, v2, ..." string.
  variables <- sapply(
    X = resultados_individuo,
    FUN = function(x) {
      paste(x, collapse = ", ")
    }
  )
  df_resultados <- data.frame(
    generacion = seq_along(resultados_fitness),
    fitness = resultados_fitness,
    predictores = variables,
    diferencia_abs = diferencia_abs
  )
  resultados <- list(
    mejor_individuo_global = mejor_individuo_global,
    mejor_valor_global = mejor_valor_global,
    mejor_fitness_por_generacion = resultados_fitness,
    mejor_individuo_por_generacion = resultados_individuo,
    diferencia_abs = diferencia_abs,
    df_resultados = df_resultados,
    poblaciones = poblaciones,
    funcion_objetivo = funcion_objetivo
  )
  end_time <- Sys.time()
  # RUN METADATA stored in attributes (consumed by print.optimizacion_ga).
  # ----------------------------------------------------------------------------
  attr(resultados, "class") <- "optimizacion_ga"
  attr(resultados, 'fecha_creacion') <- end_time
  attr(resultados, 'duracion_optimizacion') <- paste(
    difftime(end_time, start_time, "secs"),
    "secs"
  )
  attr(resultados, 'optimizacion') <- optimizacion
  attr(resultados, 'lim_inf') <- limite_inf
  attr(resultados, 'lim_sup') <- limite_sup
  attr(resultados, 'n_poblacion') <- n_poblacion
  attr(resultados, 'generaciones') <- i
  attr(resultados, 'valor_variables') <- mejor_individuo_global
  attr(resultados, 'mejor_fitness') <- mejor_fitness_global
  attr(resultados, 'optimo_encontrado') <- mejor_valor_global
  # NOTE(review): 'n_poblacion' is assigned twice; harmless but redundant.
  attr(resultados, 'n_poblacion') <- n_poblacion
  attr(resultados, 'elitismo') <- elitismo
  attr(resultados, 'prob_mut') <- prob_mut
  attr(resultados, 'metodo_seleccion') <- metodo_seleccion
  attr(resultados, 'metodo_cruce') <- metodo_cruce
  attr(resultados, 'parada_temprana') <- parada_temprana
  attr(resultados, 'rondas_parada') <- rondas_parada
  attr(resultados, 'tolerancia_parada') <- tolerancia_parada
  # FINAL SUMMARY (verbose)
  # ----------------------------------------------------------------------------
  if (verbose %in% c(1,2)) {
    cat("-----------------------", "\n")
    cat("Optimización finalizada", "\n")
    cat("-----------------------", "\n")
    cat("Fecha finalización =", as.character(Sys.time()), "\n")
    cat("Duración selección = ")
    print(difftime(end_time, start_time))
    cat("Número generaciones =", i, "\n")
    cat("Límite inferior =", paste(limite_inf, collapse = ", "), "\n")
    cat("Límite superior =", paste(limite_sup, collapse = ", "), "\n")
    cat("Optimización =", optimizacion,"\n")
    cat("Óptimo encontrado =", mejor_valor_global,"\n")
    cat("Valor variables =", mejor_individuo_global, "\n")
    cat("\n")
  }
  return(resultados)
}
# S3 print method for objects returned by optimizar_ga().
#
# All summary information lives in attributes set by optimizar_ga(); this
# method only formats them. User-facing strings are kept in Spanish to match
# the rest of the package's messages.
#
# obj: object of class "optimizacion_ga".
# ...: ignored; accepted for compatibility with the print() generic.
# Returns obj invisibly, as print methods conventionally do.
print.optimizacion_ga <- function(obj, ...){
  cat("----------------------------------------------", "\n")
  cat("Resultados optimización por algoritmo genético", "\n")
  cat("----------------------------------------------", "\n")
  # 'fecha_creacion' is a POSIXct; cat() would print it as raw seconds since
  # the epoch, so format it first.
  cat("Fecha creación =", format(attr(obj, 'fecha_creacion')), "\n")
  cat("Duración selección = ", attr(obj, 'duracion_optimizacion'), "\n")
  cat("Número generaciones =", attr(obj, 'generaciones'), "\n")
  cat("Límite inferior =", attr(obj, 'lim_inf'), "\n")
  cat("Límite superior =", attr(obj, 'lim_sup'), "\n")
  cat("Optimización =", attr(obj, 'optimizacion'), "\n")
  cat("Óptimo encontrado =", attr(obj, 'optimo_encontrado'), "\n")
  cat("Valor variables =", attr(obj, 'valor_variables'), "\n")
  cat("Función objetivo =", "\n")
  cat("\n")
  print(obj$funcion_objetivo)
  invisible(obj)
}
|
97a57701fce1713d589eb8ec146f95b43a795d00
|
6fe2e5bc5971de72c47e61f0702753d6803f8af4
|
/man/add_clusters.Rd
|
4577c83711ac2520d16b685aafba2cd5cce25648
|
[
"MIT"
] |
permissive
|
babasaraki/gggenomes
|
19946a9c4c3a1803b4d75d923046fba7e63c7d27
|
92dd85720b185f78680a1f4989496b2933616e8a
|
refs/heads/master
| 2023-02-24T08:03:59.381942
| 2020-07-13T17:22:33
| 2020-07-13T17:22:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 238
|
rd
|
add_clusters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sublinks.R
\name{add_clusters}
\alias{add_clusters}
\title{Add gene clusters}
\usage{
add_clusters(x, parent_track_id, ...)
}
\description{
Add gene clusters
}
|
ceb28ff943ece6aff7ab3dca4df231dbbc0bc0db
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DMRMark/examples/MakeGSoptions.Rd.R
|
43dc99b827ff639f68299878cfab7efd0c4a2b8a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
MakeGSoptions.Rd.R
|
# Example script extracted from the DMRMark manual page for MakeGSoptions().
library(DMRMark)
### Name: MakeGSoptions
### Title: Encapsulate prior parameters and Gibbs Sampler (GS) control
###   parameters
### Aliases: MakeGSoptions
### ** Examples
# Build the default prior / Gibbs-sampler control object; see ?MakeGSoptions
# for the tunable fields.
opts <- MakeGSoptions()
|
5033b09639056fa3e24c461aa04cc7702bf53b3c
|
c6a6b77f3b71ea68f1281b043dd60f17dd85381c
|
/inst/unitTests/test_conversions.R
|
0232772d66ee6a3db8dede4bffa43d8d980e5660
|
[] |
no_license
|
benilton/oligoClasses
|
df76a4ee4d755342ae32b07c9acb5355153e3f4f
|
be0e1088c52ee8827c86f061e80ffe9b44982a88
|
refs/heads/master
| 2021-01-10T21:40:35.903511
| 2019-11-23T12:22:08
| 2019-11-23T12:22:08
| 1,779,156
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
r
|
test_conversions.R
|
# p2i() must reproduce the reference probability -> integer encoding
# as.integer(-1000 * log(1 - p)).
test_conversions <- function(){
  p <- matrix(runif(20), nc=2)
  integerRepresentation <- as.integer(-1000*log(1-p))
  int2 <- p2i(p)
  # all.equal() returns a character description (not FALSE) on mismatch, and
  # RUnit's checkTrue() expects a length-one logical — wrap in isTRUE() so a
  # mismatch fails the check cleanly instead of erroring on a character value.
  checkTrue(isTRUE(all.equal(integerRepresentation, int2)))
}
# Coercing the example oligoSnpSet to SnpSet2 must yield a valid S4 object.
test_oligoSnpSet <- function(){
  data(oligoSetExample)
  checkTrue(validObject(as(oligoSet, "SnpSet2")))
}
# makeFeatureGRanges() must build a valid GRanges from the featureData, and
# the convenience dispatch on the full object must return an identical result.
test_makeFeatureRanges <- function(){
  data(oligoSetExample)
  gr <- makeFeatureGRanges(featureData(oligoSet), genome=genomeBuild(oligoSet))
  checkTrue(validObject(gr))
  gr2 <- makeFeatureGRanges(oligoSet)
  checkIdentical(gr, gr2)
}
##test_RangedDataHMM2GRanges <- function(){
## if(require(VanillaICE)){
## data(hmmResults, package="VanillaICE")
## checkTrue(validObject(as(hmmResults, "GRanges")))
## obj <- as(hmmResults, "GRangesList")
## checkTrue(validObject(obj))
## checkEquals(names(obj), unique(sampleNames(hmmResults)))
## }
##}
|
5abf744989f857531f304e80eee5bee6aa357be6
|
a61f32d6d17b43240abe8d0e7424c13cd9ada27f
|
/exercise_10_3.R
|
b7af3d5ef93db67758fc05021e1b04c4012d4b50
|
[] |
no_license
|
synflyn28/r-lessons
|
a393a16724c6fe574a78bc83e8a1003d1083f90d
|
6f6294c1414cfb78af77da34ca15fcdcd769f0e7
|
refs/heads/master
| 2020-05-23T08:02:11.738606
| 2018-10-23T01:40:39
| 2018-10-23T01:40:39
| 80,488,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
exercise_10_3.R
|
# Question A: foo[i, j] = loopvec1[i] * loopvec2[j].
# outer() builds the multiplication table directly, replacing the row-fill loop.
loopvec1 <- 5:7
loopvec2 <- 9:6
foo <- outer(loopvec1, loopvec2)
# Question B: look up a number for each character name via switch();
# names without an entry fall through to NA.
chars <- c("Peter","Homer","Lois","Stewie","Maggie","Bart")
num_vals <- vapply(
  chars,
  function(who) {
    switch(EXPR = who, Homer = 12, Marge = 34, Bart = 56, Lisa = 78, Maggie = 90, NA_real_)
  },
  numeric(1),
  USE.NAMES = FALSE
)
# Question C
# (i) A named list mixing vectors, matrices, a string, and nested containers.
mylist <- list(
  aa = c(3.4, 1),
  bb = matrix(1:4, 2, 2),
  cc = matrix(c(TRUE, TRUE, FALSE, TRUE, FALSE, FALSE), 3, 2),
  dd = "string here",
  ee = list(c("hello", "you"), matrix(c("hello", "there"))),
  ff = matrix(c("red", "green", "blue", "yellow"))
)
# (ii) Overwritten: a string plus a matrix flattened back to a vector.
mylist <- list("tricked you", as.vector(matrix(1:6, 3, 2)))
# (iii) Overwritten again: nested lists plus one top-level matrix.
mylist <- list(
  list(1, 2, 3),
  list(c(3, 2), 2),
  list(c(1, 2), matrix(c(1, 2))),
  rbind(1:10, 100:91)
)
# Count matrices at the top level and one level down inside sub-lists
# (same two-level scan as the original nested for-loops).
matrix_count <- sum(vapply(
  mylist,
  function(member) {
    if (is.matrix(member)) {
      1
    } else if (is.list(member)) {
      sum(vapply(member, is.matrix, logical(1)))
    } else {
      0
    }
  },
  numeric(1)
))
|
9880eeeae4e98e9e3dd27917568f3e3736b6df54
|
3763e5f2b164e831fd870c1ddd1cf6ff33b2c1a0
|
/yapay-sinir-aglari.R
|
baa8aef901b4c60a73f8efa3fe7da1646eabfd50
|
[] |
no_license
|
alzey73/R-Programming-for-Data-Science
|
d27f85784f7650a97431b63e92d6e0adc6d8f68c
|
ed385c3bcfcfebdb66d09f522f1ea257482c52c9
|
refs/heads/master
| 2022-10-20T00:49:18.923517
| 2020-05-27T17:48:33
| 2020-05-27T17:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,754
|
r
|
yapay-sinir-aglari.R
|
# ARTIFICIAL NEURAL NETWORKS
### First, clear everything from the workspace
# NOTE(review): rm(list = ls()) in a script is generally discouraged; kept as-is.
rm(list = ls())
## Libraries
library(caret)
library(tidyverse)
library(AppliedPredictiveModeling)
library(pls) # for partial least squares and PCR
library(elasticnet)
library(broom) # for tidy models
library(glmnet)
library(MASS)
library(ISLR)
library(PerformanceAnalytics)
library(funModeling)
library(Matrix)
library(kernlab) # SVM
library(e1071) # for SVM
library(rpart) # for CART
library(pgmm) # for the olive data set
library(dslabs)
library(rpart.plot) # for rpart plots
library(partykit) # for decision-tree visuals
library(ipred) # for bagging
library(randomForest)
library(gbm)
library(nnet)
library(neuralnet)
library(GGally)
library(NeuralNetTools) # for the garson function
library(FNN)
library(dplyr)
## Load the data set (yacht hydrodynamics, UCI ML repository)
# http://archive.ics.uci.edu/ml/datasets/Yacht+Hydrodynamics
dff <- read_table(
file = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data',
col_names = c(
'longpos_cob',
'prismatic_coeff',
'len_disp_ratio',
'beam_draut_ratio',
'length_beam_ratio',
'froude_num',
'residuary_resist'
)
)
# Exploratory data analysis
glimpse(dff)
summary(dff)
profiling_num(dff)
ggpairs(dff)
chart.Correlation(dff, histogram = T, pch=19)
# Min-max scaling helper: maps a numeric vector onto [0, 1].
olcekleme <- function(x) {
(x - min(x)) / (max(x)-min(x))
}
dff <- na.omit(dff)
# NOTE(review): the result of this sapply() is discarded, so the scaling is
# never actually applied to dff -- probably intended as
# dff <- as.data.frame(sapply(dff, olcekleme)). Verify before relying on it.
sapply(dff, FUN = olcekleme)
# 80/20 train/test split on the response variable.
train_indeks <- createDataPartition(dff$residuary_resist, p = 0.8, times = 1)
head(train_indeks)
train <- dff[train_indeks$Resample1, ]
test <- dff[-train_indeks$Resample1, ]
train_x <- train %>% dplyr::select(-residuary_resist)
train_y <- train %>% dplyr::select(residuary_resist)
test_x <- test %>% dplyr::select(-residuary_resist)
test_y <- test %>% dplyr::select(residuary_resist)
training <- data.frame(train_x, residuary_resist = train_y)
names(training)
# We will use neuralnet, which requires the formula to be written explicitly.
ysa_formul <- residuary_resist ~ longpos_cob + prismatic_coeff + len_disp_ratio +
beam_draut_ratio + length_beam_ratio + froude_num
# Formula built from the dependent and the independent variables.
neuralnet(formula = ysa_formul, data = training)
# Fit with two hidden layers (2 and 1 neurons) and plot the best repetition.
plot(neuralnet(formula = ysa_formul, data = training, hidden = c(2,1), stepmax = 100),
rep="best")
# Alternative fit with nnet (single hidden layer of 2 units, weight decay).
mynn <-
nnet(
residuary_resist ~ longpos_cob + prismatic_coeff + len_disp_ratio + beam_draut_ratio + length_beam_ratio + froude_num,
data = training,
size = 2,
decay = 1.0e-5,
maxit = 5000
)
ysa_formul <- residuary_resist ~ longpos_cob + prismatic_coeff + len_disp_ratio + beam_draut_ratio + length_beam_ratio + froude_num
ysa1 <- neuralnet(ysa_formul, data = training)
plot(ysa1)
ysa1$result.matrix
|
0e12d55ed6633ee628b187cd408112b73afb6cd6
|
079921b991ba463dc449bf5cd42cece21e2cb022
|
/R/est.R0.AR.R
|
d1709625f1c3253d00ba4c1040ca9f4c6885e271
|
[] |
no_license
|
cran/R0
|
6356959c0252ebd3d535a6e1b5605246a8337418
|
ba0053a2b1c3feda26e946202c35735ea8b117f1
|
refs/heads/master
| 2022-09-26T11:50:43.225873
| 2022-09-05T14:10:07
| 2022-09-05T14:10:07
| 17,681,766
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,453
|
r
|
est.R0.AR.R
|
# Name   : est.R0.AR
# Desc   : Estimation of basic Reproduction Number using Attack Rate method
#          (derived from SIR model), as presented by Dietz.
# Date   : 2011/11/09
# Author : Boelle, Obadia
###############################################################################
# Function declaration
est.R0.AR <- function#Estimate R0 from attack rate of an epidemic
### Estimate R0 from attack rate of an epidemic.
##details<< For internal use. Called by est.R0.
##details<< In the simple SIR model, the relation between R0 and the Attack Rate is in the form \eqn{R0 = -ln((1-AR)/S0) / (AR - (1-S0))}.
##note<< This is the implementation of the formula by Dietz (1993).
##references<<Dietz, K. "The Estimation of the Basic Reproduction Number for Infectious Diseases." Statistical Methods in Medical Research 2, no. 1 (March 1, 1993): 23-41.
(AR=NULL, ##<< Attack rate as a percentage from total population
incid=NULL, ##<< Sum of incident cases, possibly in the form of a vector of counts.
pop.size=NULL, ##<< Population size in which the incident cases were observed.
##details<< If the population size is provided, the variance of R0 is estimated using the delta method.
## The hypothesis are that of homogeneous mixing, no more transmission (epidemic ended), no change in transmission or interventions during the epidemic. This estimate may be correct in closed populations, and may be less valid in other cases.
S0=1, ##<< Initial proportion of the population considered susceptible.
##details<< The correction for incomplete susceptibility is based on the SIR model equations.
checked=FALSE, ##<< Internal flag used to check whether integrity checks were ran or not.
... ##<< parameters passed to inner functions
)
# Code
{
# Various class and integrity checks
if (checked == FALSE) {
# NOTE(review): `epid` and `t` are not defined at this point in the function
# (epid is only assigned below). This call can only work if integrity.checks()
# never forces those two promise arguments on this code path -- verify against
# the integrity.checks() implementation.
integrity.checks(epid, t, GT=NULL, begin=NULL, end=NULL, date.first.obs=NULL, time.step=NULL, AR, S0, methods="AR")
}
# Normalize the incidence input (check.incid handles vectors/data frames).
if (!is.null(incid)) {
epid <- check.incid(incid)
}
else {
epid <- NULL
}
#Required : either (AR, incidence) or (AR, pop.size) to start simulation
if (is.null(AR) & any(c(is.null(incid),is.null(pop.size)))) {
stop("Either 'AR' alone or both 'AR / incid' and 'pop.size' must be provided")
}
#If Attack Rate is not provided, it's computed as sum(incid)/pop.size
if (is.null(AR)) {
#if incid provided as a series of incident cases, first sum
if (length(incid) > 1) {
incid = sum(incid)
}
if (any(c(incid,pop.size) <= 0 )){
stop(paste("'incid'=",incid," and 'pop.size'=",pop.size," must be nonnegative"))
}
if (pop.size < incid){
stop(paste("'pop.size'=",pop.size," must be greater than 'incid'=",incid))
}
#Actual AR is now computed
AR <- incid/pop.size
}
#AR could also be provided
else {
#Obviously AR is between 0 and 1
if (AR <=0 | AR >= 1) {
stop(paste("'AR'=",AR," must be between 0 and 1"))
}
# pop.size stays NA when unknown: the CI and variance below then come out NA.
if (is.null(pop.size)) {
pop.size <- NA
}
}
#R0 is derived from Attack Rate based on SIR model (see Dietz)
R0.from.AR = function(AR, S0) {-log((1-AR)/S0)/(AR - (1-S0))}
R0 = R0.from.AR(AR,S0)
##details<< CI is computed for the attack rate considering the population size (\eqn{CI(AR) = AR +/- 1.96*sqrt(AR*(1-AR)/n)}),
## and so the CI for the reproduction number is computed with this extreme values.
CI95 <- c(R0.from.AR(AR-1.96*sqrt(AR *(1-AR)/pop.size),S0),R0.from.AR(AR+1.96*sqrt(AR *(1-AR)/pop.size),S0))
# variance of R0 is estimated using Delta method.
var.R0 <- ((-((-1 + AR + S0)/(-1 + AR)) + log((1 - AR)/S0))/(-1 + AR + S0)^2) * AR *(1-AR)/pop.size
# NOTE(review): end.nb uses length(incid); when only AR was supplied, incid is
# NULL and end.nb is 0 -- presumably only relevant for plot.fit. Verify.
return(structure(list(epid=epid, R=R0, var=var.R0, conf.int = CI95, begin.nb=1, end.nb=length(incid), AR=AR, method="Attack Rate", method.code="AR"),class="R0.R"))
##value<<
## A list with components:
## \item{epid}{The vector of incidence, after being correctly formated by check.incid. Used only by plot.fit.}
## \item{R}{The estimate of the reproduction ratio.}
## \item{conf.int}{The 95% confidence interval for the R estimate.}
## \item{AR}{Original attack rate.}
## \item{begin.nb}{First date of incidence record. Used only by plot.fit.}
## \item{end.nb}{Last date of incidence record. Used only by plot.fit.}
## \item{method}{Method used for the estimation.}
## \item{method.code}{Internal code used to designate method.}
}
|
b23ea94353e2d780290ecc99352e0bda21570a70
|
9125098458ffb2c97389767fc021369ba6bc417e
|
/R/transDT.R
|
1a06c31f50e661052a2946a82153852fbeecea78
|
[] |
no_license
|
Dave-Clark/ecolFudge
|
d22dd02ee5fd094a7775e58334c1d8b7ce22e29f
|
1f15001229acad3bba31cc1bf185661190aeca9e
|
refs/heads/master
| 2021-07-05T09:52:57.486820
| 2020-07-29T10:19:32
| 2020-07-29T10:19:32
| 146,623,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 813
|
r
|
transDT.R
|
#' A function for quickly transposing data.table objects
#'
#' Transpose a data.table while pivoting on one column: the values of
#' `transCol` become the column names of the result, and the original
#' column names (minus the pivot column) are stored in a new column named
#' by `rowID`.
#'
#' @param dt The data.table object you wish to transpose.
#' @param transCol The name of the column that you wish to pivot on. Values
#'   in this column will become the new colnames.
#' @param rowID This will be the name of the new rownames column.
#' @keywords data.table
#' @export
#' @examples
#' transDT()
transDT <- function(dt, transCol, rowID) {
  # Fix: spell out FALSE rather than the reassignable shorthand F.
  origColNames <- colnames(dt)
  # Values of the pivot column become the new column names.
  newColNames <- dt[, transCol, with = FALSE]
  # Transpose everything except the pivot column itself.
  transposedDt <- transpose(dt[, !colnames(dt) %in% transCol, with = FALSE])
  colnames(transposedDt) <- unlist(newColNames)
  # Record the original column names (minus the pivot column) as row ids.
  transposedDt[, rowID] <- origColNames[origColNames != transCol]
  return(transposedDt)
}
|
e2c8e1a12566f625099272671fea6c4a19e51428
|
d746fef241f9a0e06ae48cc3b1fe72693c43d808
|
/tesseract/rotate/d7js34-015.r
|
9dbed9fa0485308d67d09678dcc592dfaf672d65
|
[
"MIT"
] |
permissive
|
ucd-library/wine-price-extraction
|
5abed5054a6e7704dcb401d728c1be2f53e05d78
|
c346e48b5cda8377335b66e4a1f57c013aa06f1f
|
refs/heads/master
| 2021-07-06T18:24:48.311848
| 2020-10-07T01:58:32
| 2020-10-07T01:58:32
| 144,317,559
| 5
| 0
| null | 2019-10-11T18:34:32
| 2018-08-10T18:00:02
|
JavaScript
|
UTF-8
|
R
| false
| false
| 195
|
r
|
d7js34-015.r
|
r=0.84
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7js34/media/images/d7js34-015/svc:tesseract/full/full/0.84/default.jpg Accept:application/hocr+xml
|
378cd3d340aee43dcef9499182e91375329121d0
|
ee5e3fcc38b89b49ac6de735ad9e7c6ead222758
|
/R/MSEdata.R
|
2cb64c8d7337018c0e22cf54582c20e6eb03603b
|
[] |
no_license
|
OlivierBinette/dgaFast
|
2dea5aaa89578a4bf981c8eb13975db3656edc3c
|
82a7d7599814d04f0a116f1ed52fc1838b8fb9e5
|
refs/heads/master
| 2022-12-29T18:51:46.501827
| 2020-10-20T15:08:38
| 2020-10-20T15:08:38
| 299,384,279
| 1
| 0
| null | 2020-10-05T14:12:43
| 2020-09-28T17:33:26
|
R
|
UTF-8
|
R
| false
| false
| 4,290
|
r
|
MSEdata.R
|
#' MSE data format
#'
#' The function \code{MSEdata()} transforms an existing dataframe to the "MSE" format,
#' ensuring it contains a "count" column and that the other columns refer to
#' inclusion (1) or exclusion (0) on a set of lists.
#'
#' Zero counts of unobserved capture patterns are added and duplicates capture patterns
#' are aggregated.
#'
#' @param data Original MSE dataframe. It should contain a column named "count"
#' with the observed counts of capture patterns, as well as columns representing
#' the different lists, as follows:
#'\preformatted{ c1 c2 count
#'  0  1     7
#'  1  0     3
#'  1  1     4}
#'
#' @seealso \code{\link{plotMSE}}
#' @export
MSEdata <- function(data) {
# NOTE(review): assert() is not base R; it appears to be a package-level
# helper (supports a msg= argument) -- defined elsewhere in this package.
assert(inherits(data, "data.frame"))
# Validate count column: non-negative integers only.
assert("count" %in% names(data),
msg = "A column named 'count' should be specified.")
assert(is.numeric(data$count),
all(data$count >= 0),
all((data$count %% 1) == 0),
msg = "Count column should only contain non-negative integers.")
# Validate other columns: each list-membership column must be 0/1 valued.
listnames = base::setdiff(names(data), "count")
for (list in listnames) {
assert(is.numeric(data[,list, drop=TRUE]))
assert(all(data[,list, drop=TRUE] %in% c(0,1)),
msg="List columns can only contain zeros and ones.")
}
# Canonicalize (aggregate duplicates, add zero counts) and tag the class.
data = clean_MSE_data(data)
attr(data, "class") <- c("MSEdata", attr(data, "class"))
return(data)
}
#' Standardize MSE data format
#'
#' Aggregate duplicated capture patterns into a single count, add zero
#' counts for unobserved capture patterns, and order the rows canonically.
#'
#' @param data MSE dataframe to be cleaned up.
#'
#' @importFrom dplyr %>%
clean_MSE_data <- function(data) {
  nlists = ncol(data) - 1
  # Collapse duplicated capture patterns into a single aggregated count.
  data = data %>%
    group_by_at(vars(-count)) %>%
    count(wt=count, name="count") %>%
    ungroup()
  # Binary table of all 2^nlists - 1 observable capture patterns (the
  # all-zero pattern is unobservable and excluded by construction).
  # Fix: the previous version first built this table via
  # eval(parse(text = "table(...)")) and then immediately overwrote it with
  # the integer.base.b() expression below -- the eval/parse block was dead
  # code and has been removed.
  X = data.frame(integer.base.b(1:(2^nlists - 1), 2))
  # Match column names of the data to those of the binary matrix
  listnames = setdiff(names(data), "count")
  colnames(X) = listnames
  # Join the binary table with the observed counts
  result = left_join(X, data, by=listnames)
  # Reorder observations
  o1 = order(rowApply(result, function(x) paste0(x, collapse="")))
  result = result[rev(o1),]
  o2 = order(rowSums(result[, listnames]))
  result = result[o2,]
  # Set NA counts (patterns never observed) to zero
  result[is.na(result[,"count"]), "count"] = 0
  rownames(result) = 1:nrow(result)
  return(result)
}
#' Inheritance check
#'
#' Test whether an object carries the "MSEdata" class.
#'
#' @param data MSE dataframe.
is.MSEdata <- function(data) inherits(data, "MSEdata")
#' Get list names
#'
#' @param mse_data object of class `MSEdata`.
#' @return names of the MSE lists, i.e. every column name except "count".
#'
#' @export
list.names <- function(mse_data) {
# assert() is a package-level helper (not base R), defined elsewhere.
assert(is.MSEdata(mse_data))
return(base::setdiff(names(mse_data), "count"))
}
#' Set list names
#'
#' Replacement function: renames the MSE list columns (every column except
#' "count") in place, e.g. \code{list.names(x) <- c("a", "b")}.
#'
#' @param mse_data MSE dataframe.
#' @param value list of names; must have one name per list column.
#'
`list.names<-` <- function(mse_data, value) {
assert(is.MSEdata(mse_data))
# One new name per non-count column.
assert(length(value) == ncol(mse_data)-1)
colnames(mse_data)[colnames(mse_data) != "count"] = value
mse_data
}
#' Number of observed cases
#'
#' S3 method (for the stats::nobs generic) returning the total number of
#' observed cases, i.e. the sum of the "count" column.
#'
#' @param mse_data MSE dataframe.
nobs.MSEdata <- function(mse_data) {
assert(is.MSEdata(mse_data))
return(sum(mse_data$count))
}
#' Number of lists
#'
#' Count of MSE list columns (every column except "count").
#' @param mse_data MSE dataframe.
nlists <- function(mse_data) {
assert(is.MSEdata(mse_data))
return(length(list.names(mse_data)))
}
#' Drop lists from an MSE dataframe
#'
#' Removes the given list columns and re-canonicalizes via MSEdata().
#'
#' @param mse_data MSE dataframe.
#' @param lists lists to omit.
omit <- function(mse_data, lists) {
assert(is.MSEdata(mse_data))
cols = setdiff(names(mse_data), lists)
return(MSEdata(mse_data[, cols]))
}
#' Merge lists
#'
#' Each element of \code{...} is a character vector of list names; those
#' lists are replaced by a single combined list (named "a-b-...") marking
#' inclusion on any of them.
#'
#' NOTE(review): this definition shadows base::merge / dplyr joins within
#' the package namespace -- presumably intentional, but worth confirming.
#'
#' @param mse_data MSEdata object from which to merge lists.
#' @param ... names of the lists to be merged.
#'
merge <- function(mse_data, ...) {
assert(is.MSEdata(mse_data))
args = list(...)
data = MSEdata(mse_data)
for (lists in args) {
# Union membership: 1 if the unit appears on any of the merged lists.
data[paste(lists, collapse="-")] <- 1*(rowSums(data[,lists]) > 0)
}
# Drop the original (now merged) list columns and re-canonicalize.
data = data[, setdiff(names(data), unlist(args))]
return(MSEdata(data))
}
|
e5413be8cd53699addda82f50a4bb8ff1c664166
|
cb826d1f9ad59f0c44cbd2c5d884d47c8723ded9
|
/MyPackage/R/impuArima.R
|
c14465fc173f9942d69f5adb979b771b4ab95b2d
|
[] |
no_license
|
sophiaaading/NCSA_Rpackage
|
701fdc90a037d7b46305142111bca4f579343226
|
ff5dc9ca9056c15718b1076b86aa35f120d0b1c4
|
refs/heads/master
| 2021-07-09T06:30:36.442878
| 2020-11-13T17:57:39
| 2020-11-13T17:57:39
| 210,968,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
impuArima.R
|
#' Imputation with ARIMA Model
#'
#' Impute missing glucose values in an individual's time series by fitting
#' an ARIMA model (forecast::auto.arima) and filling gaps from the Kalman
#' smoother states. Also plots the original series with imputed points in red.
#'
#' @param individualDataset individual dataset; column 1 is assumed to be
#'   DisplayTime and column 3 the glucose value (TODO confirm against caller).
#' @return The zoo series after imputation by the ARIMA model.
#' @examples impu_arima(aList(clean(dataset))[[1]])
#' @export
# install.packages("forecast")
# library("forecast")
impu_arima <- function(individualDataset) {
# naInsert() and unfactor() are project helpers defined elsewhere.
impu <- naInsert(individualDataset)
t <- impu[,1]
# Keep an untouched copy (x0) for plotting the original series.
x0 <- x <- zoo(unfactor(impu[,3]), t)
fit <- auto.arima(x)
# Run the Kalman filter/smoother of the fitted state-space model.
kr <- KalmanRun(x, fit$model)
id.na <- which(is.na(x))
for (i in id.na) {
# Reconstruct each missing observation from the smoothed state vector.
x[i] <- fit$model$Z %*% kr$states[i,]
}
# Side effect: plot original series and overlay imputed points in red.
plot(x0, xlab = "DisplayTime", ylab = "GlucoseValue")
points(t[id.na], x[id.na], col = "red", pch = 20)
# print(t[id.na])
# print(x[id.na])
return(x)
}
|
8613ec820b601f2ee895a3b0dd7a14ee77cfcec9
|
1b840a4c27f41d4dfdf719c572908b452aedeffa
|
/R/geolevel_get_empty_geometry.R
|
2744d04dfec52c3532b603a8b5a369e8744d35fe
|
[
"MIT"
] |
permissive
|
josesamos/geodimension
|
7da26e55b64ed11969ce600f49137fe48e509bb9
|
8eda23973a70d96d78140b6469674104754047ce
|
refs/heads/master
| 2023-01-14T01:30:37.651220
| 2020-11-27T13:13:10
| 2020-11-27T13:13:10
| 314,524,426
| 0
| 0
|
NOASSERTION
| 2023-09-11T21:24:02
| 2020-11-20T10:45:47
|
R
|
UTF-8
|
R
| false
| false
| 1,365
|
r
|
geolevel_get_empty_geometry.R
|
# empty geometry ----------------------------------------------------------
#' Get empty geometry instances
#'
#' Get the instances of the data table that do not have associated geometry for
#' the specified geometry type.
#'
#' @param gl A `geolevel` object.
#' @param geometry A string, type of geometry of the layer. If `NULL`, the
#'   first geometry defined for the level is used.
#'
#' @return A `tibble`.
#'
#' @family level definition functions
#' @seealso
#'
#' @examples
#' library(tidyr)
#' library(sf)
#'
#' us_state_point <-
#' coordinates_to_geometry(layer_us_state,
#' lon_lat = c("intptlon", "intptlat"))
#'
#' state <-
#' geolevel(name = "state",
#' layer = layer_us_state,
#' key = c("geoid")) %>%
#' add_geometry(layer = us_state_point)
#'
#' empty_geometry_instances <- state %>%
#' get_empty_geometry_instances(geometry = "point")
#'
#' @export
# S3 generic: dispatches on class(gl); see the "geolevel" method below.
get_empty_geometry_instances <- function(gl,
geometry = NULL) {
UseMethod("get_empty_geometry_instances")
}
#' @rdname get_empty_geometry_instances
#' @export
get_empty_geometry_instances.geolevel <- function(gl,
                                                  geometry = NULL) {
  # Default to the first available geometry *before* validating. The previous
  # order validated first, so a NULL geometry only passed because
  # stopifnot(logical(0)) is vacuously true and the chosen default was never
  # actually checked against the defined geometries.
  if (is.null(geometry)) {
    geometry <- names(gl$geometry)[1]
  }
  stopifnot(geometry %in% names(gl$geometry))
  # Keep data rows whose key (first column) has no match in the first column
  # of the selected geometry layer.
  gl$data[!(gl$data[[1]] %in% gl$geometry[[geometry]][[1]]), ]
}
|
a97549b2e4c8b59892ee27497e5c2c030b87ee64
|
7682ad70789d6c01b608260e6280fd6a696ec397
|
/0509-4-CART.R
|
577f123e0b58e7f3cf90b86f53763fc518576dc2
|
[] |
no_license
|
tkionshao/r-datamining-example
|
e952cefd77ccab4b3b7302bb8a0fb7b11904c607
|
1f95aec93609e214363dc9995306458ae0091446
|
refs/heads/master
| 2020-03-14T05:03:29.711479
| 2018-06-28T14:15:44
| 2018-06-28T14:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
0509-4-CART.R
|
# Fit a CART regression tree (rpart) to the babies data set and report the
# mean absolute percentage error (MAPE) on train and test splits.
# NOTE(review): machine-specific working directory; kept to preserve behavior.
setwd("E:\\MegaSync\\MEGAsync\\R\\tryByself")
data <- read.table("babies.csv", header = TRUE, sep = ",")
# Missing-value imputation: replace NAs in each of the 7 columns by the
# column mean.
for (i in 1:7) {
  n_null <- is.na(data[, i])
  n_mean <- mean(data[, i], na.rm = TRUE)
  data[n_null, i] <- n_mean
}
# Split into train (70%) and test (30%) sets.
n <- 0.3 * nrow(data)
n_test <- sample(seq_len(nrow(data)), n)
train <- data[-n_test, ]
test <- data[n_test, ]
# Modeling.
library(rpart)
baby.tree <- rpart(bwt ~ ., data = train)
baby.tree
plot(baby.tree)
text(baby.tree, cex = .6)
# Prediction for train.
# FIX: predict.rpart takes `newdata=`, not `data=`. The old call
# predict(baby.tree, data = train) silently dropped `data=` into `...`, and
# only returned the training fitted values by accident of the default.
predicted <- predict(baby.tree, newdata = train)
yTest <- train$bwt
train.MAPE <- mean(abs(yTest - predicted) / yTest)
cat("MAPE(train)=", train.MAPE * 100, "%\n")
# Prediction for test.
predicted <- predict(baby.tree, newdata = test)
yTest <- test$bwt
test.MAPE <- mean(abs(yTest - predicted) / yTest)
cat("MAPE(test)=", test.MAPE * 100, "%\n")
|
b672bcfbec0daa3e3271125ea15352121db1b2eb
|
c80d09a871404fe135fa72549c6875806e24ded9
|
/inst/shiny/fire_viewer_db/model.R
|
383521ad77f138c3e05c0322cfcab3c352b82528
|
[] |
no_license
|
raffscallion/goesfire
|
6ecd35dda7221b416ebd0b3a2277024206428a2d
|
b9784e4b92cd3d599ed05194be61b1c5dc672967
|
refs/heads/master
| 2022-07-13T00:27:11.284327
| 2022-07-05T18:33:30
| 2022-07-05T18:33:30
| 134,614,089
| 1
| 2
| null | 2019-12-21T00:56:10
| 2018-05-23T19:05:28
|
R
|
UTF-8
|
R
| false
| false
| 6,348
|
r
|
model.R
|
# Take pixels from map viewport and produce bluesky input.
# Pipeline driver: localizes timestamps, aggregates to hourly FRE/PM2.5,
# derives the diurnal profile and daily fire locations, then writes and
# gzips the output files. Returns the temp directory path (via zip_files).
# file: output archive path; data: pixel data with StartTime/Power/PM25;
# name: fire/event name; size: final burned area; type: fire type; tz: the
# user-selected local time zone.
model_inputs <- function(file, data, name, size, type, tz) {
# Convert all times to local time as specified by user
data <- mutate(data, StartTime = lubridate::with_tz(StartTime, tz))
hourly <- get_hourly_data(data)
profile <- get_diurnal_profile(hourly, name, tz)
daily <- create_bluesky_daily(hourly, name, size, type, tz)
zfile <- zip_files(file, daily, profile, hourly, name)
}
# Get Hourly Profile redux (The S2 version using FRE and per pixel profiles)
get_hourly_data <- function(df) {
# Count valid power values by location - need at least 2 to interpolate, otherwise use
# the minimum value of 75
valids <- df %>%
dplyr::group_by(lon, lat) %>%
dplyr::summarise(ValidCount = sum(is.finite(PM25)))
invalids <- dplyr::filter(valids, ValidCount < 2)
valids <- dplyr::filter(valids, ValidCount >= 2)
hourly <- df %>%
dplyr::inner_join(valids, by = c("lon", "lat")) %>%
dplyr::group_by(lon, lat) %>%
dplyr::mutate(Interpolated = imputeTS::na_interpolation(Power),
InterpolatedPM = imputeTS::na_interpolation(PM25),
Hour = lubridate::round_date(StartTime, unit = "hour")) %>%
dplyr::group_by(lat, lon, Hour) %>%
dplyr::summarise(Power = mean(Interpolated, na.rm = TRUE),
PM25 = sum(InterpolatedPM, na.rm = TRUE),
Count = n()) %>%
dplyr::filter(is.finite(Power)) %>%
dplyr::mutate(FRE = Power * 3600) # MW * s = MJ
hourly_invalids <- df %>%
dplyr::inner_join(invalids, by = c("lon", "lat")) %>%
dplyr::group_by(lon, lat) %>%
dplyr::mutate(Hour = lubridate::round_date(StartTime, unit = "hour")) %>%
dplyr::group_by(lat, lon, Hour) %>%
dplyr::summarise(Power = 75,
PM25 = 5,
Count = n()) %>%
dplyr::filter(is.finite(Power)) %>%
dplyr::mutate(FRE = Power * 3600) # MW * s = MJ
hourly <- dplyr::bind_rows(hourly, hourly_invalids) %>%
dplyr::ungroup() %>%
dplyr::mutate(Day = lubridate::floor_date(Hour, unit = "days"))
}
# Build the diurnal profile: for each local hour, the fraction of that day's
# total FRE. Missing hours are filled with a zero fraction so every day has
# a complete 24-hour profile. Returns LocalDay/LocalHour/FractionOfDay/Fire.
get_diurnal_profile <- function(hourly, name, tz) {
# Create daily fraction of total FRE - currently the profile applies to the entire event
profile <- hourly %>%
dplyr::group_by(Hour) %>%
dplyr::summarise(HourlyFRE = sum(FRE, na.rm = TRUE)) %>%
dplyr::mutate(Day = lubridate::floor_date(Hour, unit = "days"))
daily <- profile %>%
dplyr::group_by(Day) %>%
dplyr::summarise(DailyFRE = sum(HourlyFRE))
profile <- inner_join(profile, daily, by = "Day") %>%
dplyr::mutate(FractionOfDay = HourlyFRE / DailyFRE,
Fire = name,
LocalDay = strftime(Day, format = "%Y-%m-%d"),
LocalHour = strftime(Hour, format = "%Y-%m-%d %H:00", tz = tz)) %>%
dplyr::select(LocalDay, LocalHour, FractionOfDay, Fire)
# Fill in missing hours with zeroes
days <- dplyr::select(profile, LocalDay) %>%
dplyr::distinct() %>%
.$LocalDay
# Build the "YYYY-MM-DD HH:00" strings for all 24 hours of each day.
h <- seq.int(0, 23)
all_hours <- paste0(" ", sprintf("%02d", h), ":00")
complete_set <- tidyr::crossing(days, all_hours) %>%
dplyr::mutate(LocalHour = paste0(days, all_hours),
Fire = name) %>%
dplyr::select(LocalDay = days, LocalHour, Fire)
# Left-join so hours without observations appear with FractionOfDay = 0.
profile <- dplyr::left_join(complete_set, profile,
by = c("LocalDay", "LocalHour", "Fire")) %>%
dplyr::mutate(FractionOfDay = dplyr::if_else(is.na(FractionOfDay), 0, FractionOfDay))
}
# Convert hourly FRE to a BlueSky fire_locations table: each pixel-day gets
# a share of `final_area` proportional to its share of the event's total FRE.
# df: hourly data from get_hourly_data(); fire_name: event id prefix;
# final_area: total burned area to apportion; type: fire type string;
# tz: local time zone used to format date_time.
create_bluesky_daily <- function(df, fire_name, final_area, type, tz) {
# Convert hourly FRE to per pixel daily area
total_FRE <- sum(df$FRE, na.rm = TRUE)
daily <- df %>%
dplyr::mutate(Day = lubridate::floor_date(Hour, "days")) %>%
dplyr::group_by(lon, lat, Day) %>%
dplyr::summarise(FRE_Daily = sum(FRE, na.rm = TRUE)) %>%
dplyr::mutate(Fraction = FRE_Daily / total_FRE,
area = Fraction * final_area)
# Create an id for each location
locs <- daily %>%
dplyr::ungroup() %>%
dplyr::select(lon, lat) %>%
dplyr::distinct() %>%
dplyr::mutate(id = dplyr::row_number())
# Convert to bluesky fire_locations format
daily %>%
dplyr::inner_join(locs, by = c("lon", "lat")) %>%
dplyr::mutate(id = paste(fire_name, id, sep = "_"),
event_id = fire_name,
fire_type = type,
date_time = strftime(Day, format = "%Y%m%d0000%z", tz = tz),
# Insert a colon into the UTC offset, e.g. "-0800" -> "-08:00".
date_time = paste0(stringr::str_sub(date_time, 1, 15), ":00")) %>%
dplyr::select(id, Day, event_id, fire_type, date_time, latitude = lat,
longitude = lon, area)
}
## Create three csv files for each day, and zip them up
zip_files <- function(file, points, profile, hourly, name) {
dir.create(t_dir <- tempfile())
days <- points %>%
dplyr::ungroup() %>%
dplyr::select(Day) %>%
dplyr::distinct() %>%
.$Day
bluesky_files <- function(day, points, profile, hourly, name, temp_dir) {
# filenames based on fire name and timestamp
points_name <- paste0(temp_dir, "/", "fire_locations_",
strftime(day, format = "%Y%m%d_"), name, ".csv")
profile_name <- paste0(temp_dir, "/", name, "_diurnal_profile_localtime_",
strftime(day, format = "%Y%m%d"), ".csv")
hourly_name <- paste0(temp_dir, "/", name, "_hourly_localtime_",
strftime(day, format = "%Y%m%d"), ".csv")
# subset data by day
points <- dplyr::filter(points, Day == day)
profile <- dplyr::filter(profile, LocalDay == as.character(day))
hourly <- dplyr::filter(hourly, Day == day)
readr::write_csv(points, points_name)
readr::write_csv(profile, profile_name)
readr::write_csv(hourly, hourly_name)
}
# Add a summary of acres and pm2.5 (in tons) per day
areas <- points %>%
group_by(Day) %>%
summarise(Area_acres = sum(area))
by_day <- hourly %>%
group_by(Day) %>%
summarise(PM25_tons = sum(PM25) / 907.185) %>%
inner_join(areas, by = "Day") %>%
mutate(TonsPerAcre = PM25_tons / Area_acres)
by_day_name <- paste0(t_dir, "/", "daily_totals_", name, ".csv")
readr::write_csv(by_day, by_day_name)
purrr::walk(days, bluesky_files, points, profile, hourly, name, t_dir)
tar(file, t_dir, compression = "gzip")
return(t_dir)
}
|
18b1d1b0d1dea36a25f31264bd798a621cf3bd9e
|
87842e166d17bb1c11d957bf0eeeb67d5422c186
|
/coursera programs/profile_most_probable_kmer.R
|
8fc84a49ce759d42ff2bfa5094bbe6d3d56d2a51
|
[] |
no_license
|
girija2204/Bioinformatics-I-Finding-Hidden-Messages-in-DNA
|
1c7d9d9401ded89c151ff57874eea85d089fc1c6
|
0b24e4c66b1ec2884a8638e4136708edacea5938
|
refs/heads/master
| 2021-06-03T21:49:33.314182
| 2021-05-14T10:19:51
| 2021-05-14T10:19:51
| 88,222,712
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
profile_most_probable_kmer.R
|
source("/home/giri/Downloads/MTech Thesis/coursera programs/find_probability_and_best_probabilty.R")
# Return the k-mer of `text` that is most probable under the given profile
# matrix (scored by find_probability). If every candidate scores 0, fall
# back to the first k-mer of `text`.
profile_most_probable_kmer = function(text, k, profile_matrix){
  best_prob <- 0
  best_kmer <- 0
  for (start in 1:(nchar(text) - k + 1)) {
    candidate <- substr(text, start, start + k - 1)
    candidate_prob <- find_probability(candidate, profile_matrix)
    if (candidate_prob > best_prob) {
      best_prob <- candidate_prob
      best_kmer <- candidate
    }
  }
  if (best_kmer == 0) best_kmer <- substr(text, 1, k)
  return(best_kmer)
}
# Driver: build a profile matrix from the motif set via Scope() (defined in
# the sourced file above), then find the profile-most-probable 6-mer.
dna_set = c("TCGGGGGTTTTT","CCGGTGACTTAC","ACGGGGATTTTC","TTGGGGACTTTT","AAGGGGACTTCC","TTGGGGACTTCC","TCGGGGATTCAT","TCGGGGATTCCT", "TAGGGGAACTAC","TCGGGTATAACC")
# 0 selects the plain (non-Laplace) profile variant -- see Scope().
Greedy_or_Laplace = 0
result = Scope(dna_set, Greedy_or_Laplace)
text = "AAGAATCAGTCA"
k = 6
# profile_matrix = rbind(c(0,0,0),c(0,0,1),c(1,1,0),c(0,0,0))
# result[[3]] is assumed to hold the profile matrix -- TODO confirm.
pmp_kmer = profile_most_probable_kmer(text, k, result[[3]])
print(pmp_kmer)
|
e514551676d5b67ee69f17cfb4c0a6c741d7729b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/likelihoodExplore/examples/likcauchy.Rd.R
|
808657ff15bc4afe751701fa49b9aa25d4cefd6d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
likcauchy.Rd.R
|
# Auto-extracted example: evaluate the Cauchy log-likelihood on two random
# draws from the standard Cauchy distribution.
library(likelihoodExplore)
### Name: likcauchy
### Title: Cauchy Log Likelihood Function
### Aliases: likcauchy
### ** Examples
likcauchy(x = rcauchy(n = 2))
|
3feba0c53e7d9f0f2cbd553cebc73f2ffc98a2a3
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m10-u5-v0.pddl_planlen=106/dungeon_i10-m10-u5-v0.pddl_planlen=106.R
|
3d12bd02efddbc613daaf5affc1913c0c734698d
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
dungeon_i10-m10-u5-v0.pddl_planlen=106.R
|
5d39dce903943a535f7a9ae1bebe45ef dungeon_i10-m10-u5-v0.pddl_planlen=106.qdimacs 18263 56860
|
17dc5da7dcf70b5487ad1110d5d4c17f9d42e505
|
68af5b4db04bde1c466c9e9ada5605f940f565b0
|
/R-code/RNA-Seq/Radigoraphic_groups/Progression_vs_No_Progression/Progression.R
|
85cad6e3b6513608fa6a021567b528f96152ad31
|
[] |
no_license
|
shrumin/Rheumatoid_Arthritis---Analysis-and-visualisation-of-autoantibody-profiles-of-rheumatoid-arthritis-
|
2e887992eade4d6dc30fae0a2b9f3037d3434462
|
f15e2984a2f5e3f1a3d7346824f2436603763efa
|
refs/heads/main
| 2023-07-14T17:31:25.169110
| 2021-08-25T08:12:46
| 2021-08-25T08:12:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,189
|
r
|
Progression.R
|
# Differential-expression analysis (DESeq2) comparing radiographic
# Progression vs No Progression groups, after removing ribosomal-protein
# and rRNA genes annotated via biomaRt.
setwd("~/Desktop/RA")
dat <- read.csv(file="Progression_data.csv", row.names=1)
# Filter and name genes via Ensembl biomaRt annotations.
library(biomaRt)
ensembl = useMart("ensembl",dataset="hsapiens_gene_ensembl")
nonprotein=biomaRt::getBM(attributes = c("ensembl_transcript_id", "transcript_version", "ensembl_gene_id", "external_gene_name", "entrezgene_id", "description", "gene_biotype"), filters='biotype', values=c("rRNA"), mart = ensembl)
#All protein coding genes
t2g <- biomaRt::getBM(attributes = c("ensembl_transcript_id", "transcript_version", "ensembl_gene_id", "external_gene_name", "entrezgene_id", "description", "gene_biotype"), filters='biotype', values="protein_coding", mart = ensembl)
#Ribosomal proteins
ribo.proteins <- unique(t2g$ensembl_gene_id[grep("ribosomal protein", t2g$description)])
# NOTE(review): the column returned by getBM is "gene_biotype";
# `nonprotein$biotype` does not partial-match that name and likely yields
# NULL, leaving rRNA.proteins empty -- verify the intended column.
rRNA.proteins <- unique(nonprotein$ensembl_gene_id[grep("rRNA", nonprotein$biotype)])
# Strip the trailing version suffix from Ensembl gene identifiers.
rownames(dat) <- sub("\\.\\d+", "", rownames(dat))
#remove any ribosomal proteins from the RNA-Seq from the counts
dat <- dat[!rownames(dat) %in% ribo.proteins,]
dat <- dat[!rownames(dat) %in% rRNA.proteins,]
# Experiment design table (sample -> group), excluding acetylated samples.
colTable <- read.csv("Progression_exp_info.csv",row.names=1)
# DESeq2 analysis: likelihood-ratio test of ~group against intercept-only.
library(DESeq2)
dds <- DESeqDataSetFromMatrix(countData=dat,colData=colTable,design= ~ group)
# Keep genes with counts >= 10 in at least 10 samples.
keep <- rowSums(counts(dds) >=10) >= 10
dds <- dds[keep,]
dds <- DESeq(dds,test="LRT",reduced = ~1)
resultsNames(dds)
# Results: note the group labels are spelled "Progession" -- presumably they
# match the (misspelled) labels in the experiment-info file; do not "fix"
# without checking that file.
res_Progression<- results(dds,contrast=c("group","Progession","No_Progession"))
res_Progression_sort=res_Progression[order(res_Progression$padj),]
res_Progression_sig<- subset(res_Progression_sort,padj<0.05)
res_Progression_sig$symbol <- t2g$external_gene_name[match(rownames(res_Progression_sig), t2g$ensembl_gene_id)]
# NOTE(review): `t2g$entrezgene` relies on $ partial matching against the
# "entrezgene_id" column -- fragile; confirm it resolves as intended.
res_Progression_sig$entrezid <- t2g$entrezgene[match(rownames(res_Progression_sig), t2g$ensembl_gene_id)]
res_Progression_sig=na.omit(res_Progression_sig)
write.csv(res_Progression_sig, "sig_Progression_vs_No_Progression.csv", row.names=TRUE)
# Normalized counts.
normalized_counts <- counts(dds, normalized=TRUE)
# NOTE(review): write.csv() ignores a user-supplied col.names argument
# (it warns "attempt to set 'col.names' ignored") -- write.table() may have
# been intended here.
write.csv(normalized_counts, file="normalized_counts_Progression.csv", quote=F, col.names=NA)
|
19724017e042d28382f4b8c45890e8b74a800657
|
53755dcb7c54fa6059cc060e4638a743d3ef9f1b
|
/plot2.R
|
1f9f1769be0257780c82b40782fd433fec3246fd
|
[] |
no_license
|
sabank/ExData_Plotting1
|
7b561add8c0d75fe92358c2fda13ec2b833d22c3
|
6e2362e489fe205e4a681b6c312a9059bfc94ac9
|
refs/heads/master
| 2021-01-15T09:24:04.136437
| 2015-04-11T07:36:11
| 2015-04-11T07:36:11
| 33,553,181
| 0
| 0
| null | 2015-04-07T15:56:59
| 2015-04-07T15:56:59
| null |
UTF-8
|
R
| false
| false
| 2,021
|
r
|
plot2.R
|
### Plots household 'Global active power' (or another chosen column) over a
### date range as a line chart and exports it to plot2.png.
### Arguments: x = start date, y = end date, z = column number to plot.
plot2 <- function(x = "2007-02-01", y = "2007-02-02", z = 3) {
  ## Define path to the txt file and read it, replacing "?" by NA.
  path <- file.path(getwd(), "03_data/household_power_consumption.txt")
  ## Renamed from `file` to avoid shadowing base::file().
  raw <- read.table(path, header = TRUE, sep = ";", na.strings = "?")
  ## Coerce chr to Date in variable 'Date' (i.e. column 1).
  raw$Date <- as.Date(raw$Date, "%e/%m/%Y")
  ## Subset rows between dates 'x' and 'y' (inclusive).
  data <- raw[raw$Date >= x & raw$Date <= y, ]
  ## Convert chr to date-time in variable 'Time' (i.e. column 2).
  data$Time <- as.POSIXct(paste(data$Date, as.character(data$Time)))
  ## Prepare header names for the plot's labels:
  # replace "_" by " " in column names ...
  colnames(data) <- gsub("_", " ", colnames(data))
  # ... then split each name on " "; result is a list of word vectors.
  headername <- strsplit(colnames(data), " ")
  # Upper-case the first letter of each word of each name.
  # FIX: seq_along() instead of 1:length() (safe for an empty list).
  for (i in seq_along(headername)) {
    # First letter of each word, upper-cased.
    l1 <- toupper(substring(headername[[i]], 1, 1))
    # Remaining letters of each word.
    l2 <- substring(headername[[i]], 2)
    # Re-join the words of this name into a single string.
    headername[[i]] <- paste0(l1, l2, collapse = " ")
  }
  # Update column names.
  colnames(data) <- headername
  ## Plot column z against time as a line chart.
  plot(data$Time, data[, z], "l", xlab = "",
       ylab = paste(colnames(data[z]), "(kilowatts)"), font.axis = 1)
  ## Export plot to a png file in the working directory.
  dev.copy(png, "plot2.png", width = 480, height = 480)
  dev.off()
}
|
a7c4205970f31deadf817a696b4c04c59f0526f2
|
099552f15fc47a5b988684d22126ef799d578e03
|
/R/get_node_attr_from_selection.R
|
c2a6beff6928952f98d08aeb1a3ac45eba6b45bd
|
[] |
no_license
|
statwonk/DiagrammeR
|
261ff3f9e74035172ad5029b0e13b16813db16a5
|
75253f412daeb87c1c04f69659e4a6e05b681df8
|
refs/heads/master
| 2021-01-23T18:46:54.192760
| 2015-11-08T16:42:44
| 2015-11-08T16:42:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,406
|
r
|
get_node_attr_from_selection.R
|
#' Get node attributes based on a selection of nodes
#' @description From a graph object of class \code{dgr_graph}, get node
#' attribute properties for the nodes available in the active selection.
#' @param graph a graph object of class \code{dgr_graph} that is created
#' using \code{create_graph}.
#' @examples
#' \dontrun{
#' library(magrittr)
#'
#' # Create a simple graph
#' nodes <-
#'   create_nodes(nodes = c("a", "b", "c", "d"),
#'                type = "letter",
#'                label = TRUE,
#'                value = c(3.5, 2.6, 9.4, 2.7))
#'
#' edges <-
#'   create_edges(from = c("a", "b", "c"),
#'                to = c("d", "c", "a"),
#'                rel = "leading_to",
#'                color = c("pink", "blue", "red"))
#'
#' graph <-
#'   create_graph(nodes_df = nodes,
#'                edges_df = edges)
#'
#' # Select nodes "a" and "c" in the graph and get the node
#' # attributes for that selection
#' graph %>% select_nodes(nodes = c("a", "c")) %>%
#'   get_node_attr_from_selection()
#' #> nodes type label value
#' #> 1 a letter a 3.5
#' #> 3 c letter c 9.4
#' }
#' @return a node data frame.
#' @export get_node_attr_from_selection
get_node_attr_from_selection <- function(graph){
  # The active node selection lives on the graph object itself.
  selected_nodes <- graph$selection$nodes
  # Fail loudly when no selection has been made.
  if (is.null(selected_nodes)) {
    stop("There is no selection of nodes available.")
  }
  # Delegate the attribute extraction to get_node_attr().
  get_node_attr(graph, selected_nodes)
}
|
f478ef27da949e77375110cca8a8ac93e5a99be1
|
df443ce148759af76cbd558cd7401fbbc72506a6
|
/1 linear regression plots Wk2.R
|
9780e783b8ed7d3c5dcb38fcf3b7786f7ce344f0
|
[] |
no_license
|
wrona-42067898/Linear-Regression
|
9e6bbf976c13ef952fd4fc5d14f8c0877d6813e4
|
1a69cdb6eca370dac354728ac974b51380507a4b
|
refs/heads/master
| 2020-04-15T01:21:56.400491
| 2019-01-06T04:11:16
| 2019-01-06T04:11:16
| 164,273,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,418
|
r
|
1 linear regression plots Wk2.R
|
### Linear Regression: exploring predictors of runs in the mlb11 data ###
library(statsr)
library(dplyr)
library(ggplot2)
data(mlb11)
# Ask whether variable x predicts variable y (y is the response variable).
# Scatter plot with linear model overlayed.
# FIX: restored the corrupted aesthetic name "?=new_onbase" to y = new_onbase.
ggplot(data = mlb11, aes(x = runs, y = new_onbase)) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE)
# Save the linear model; its details can be accessed with summary().
my_lm <- lm(runs ~ new_onbase, data = mlb11)
# Check 1) Plot of the residuals: are they uniformly distributed around 0?
ggplot(data = my_lm, aes(x = .fitted, y = .resid)) +
  geom_point() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  xlab("Fitted values") +
  ylab("Residuals")
# Check 2) Is the histogram of residuals approximately normal?
# FIX: restored the corrupted "ae?(" to aes(.
ggplot(data = my_lm, aes(x = .resid)) +
  geom_histogram(binwidth = 25) +
  xlab("Residuals")
# Check 3) Does the QQ plot suggest a linear relationship?
ggplot(data = my_lm, aes(sample = .resid)) +
  stat_qq()
########################################################
summary(my_lm)
# new_obs R2=0.9349
# new_slug R2=0.8969
# new_onbase R2=0.8491
# Example of comparing 4 different variables to see the best predictor of runs.
summary(lm(runs ~ at_bats, data = mlb11))$r.squared
summary(lm(runs ~ hits, data = mlb11))$r.squared
# FIX: restored the corrupted "runs?~ wins" to runs ~ wins.
summary(lm(runs ~ wins, data = mlb11))$r.squared
summary(lm(runs ~ bat_avg, data = mlb11))$r.squared
|
843505b4e2cbef767f2c7a77768f3f5a013c3f03
|
8ee250e304ccdc04181a66ee99e694ee96036822
|
/app.R
|
bc024093188c284c7e6bc4c047a285d3de90b5aa
|
[] |
no_license
|
ajay-aggarwal01/capStoneProject
|
f6c9e7130311b88832f81e6e53735c78bd412d39
|
3b404760ed38b14e06144a1dffeaccb458f28cf6
|
refs/heads/master
| 2020-06-23T03:20:54.595736
| 2019-08-01T01:46:44
| 2019-08-01T01:46:44
| 198,492,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,158
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# UI definition: a fluidPage with three tabs -- the interactive prediction
# page, a static data-summary page, and an exploratory-analysis page that
# shows pre-rendered n-gram plots (served by the matching outputs in server).
ui <- fluidPage(
  titlePanel("Predict Next Word"),
  tabsetPanel( type='tab',
    # Tab 1: the prediction app itself.
    tabPanel("App Main Page",
      sidebarLayout(
        sidebarPanel(
          helpText("Application Instruction:",br(),
                   "Type some text into the text box under the \"Text Input your phrase here\" heading"),
          # Free-text input consumed by server's dataInput reactive.
          textInput('userInput',label="Input your phrase here:",value=""),
          #actionButton('goButton',"Guess!"),
          br(),
          helpText("Note:",br(),
                   "The following predicted word will show up automatically as you input.")),
        mainPanel(
          h4("Here are the top 10 predictions:"),
          ##verbatimTextOutput('guess')
          # Filled by output$guess in the server function.
          verbatimTextOutput('guess')
          ## cat(paste(text1, text2, text3, sep="\n"))
          ##textOutput('guess')
        )
      )
    ),
    # Tab 2: project write-up plus a data-summary image.
    tabPanel( "Data Summary",
      h4("Introduction"),
      p("Application: Text Prediction Application."),
      p("The purpose of this project is to build a predictive text models. When someone types: 'I went to the' "),
      p("the keyboard presents then options for what the next word might be. "),
      h4("Text Prediction Model"),
      p("Prediction model will be based on backoff model in NLP. I have used 4-grams to calculate the probability of a word in text. Model will go back to a n-1 gram level to calculate the probabilities of finding a word with prob=0."),
      h3("Data Details"),
      imageOutput("datasummary")
    ),
    # Tab 3: exploratory n-gram analysis; each imageOutput id maps to a
    # pre-rendered PNG served by the server function.
    tabPanel("Exploratory Analysis",
      p("Exploratory Analysis of the data involves understanding the distribution of words and relationship between the words in the corpora.
        - Calculated the frequencies of words and word pairs - build figures and tables to understand variation in the frequencies of words and word pairs in the data"),
      h3("Unigram bar data analysis"),
      p("An n-gram consisting of a single item from a sequence of words in a text file"),
      p("Following displays the bar and word cloud of frequency distribution of a single words"),
      imageOutput("barngram1"),
      h3("Unigram wordcloud data analysis"),
      imageOutput("wcngram1"),
      h3("Bigram bar data analysis"),
      p("An n-gram consisting of a two item from a sequence of words in a text file"),
      p("Following displays the bar and word cloud of frequency distribution of a two words"),
      imageOutput("barngram2"),
      h3("Bigram wordcloud data analysis"),
      imageOutput("wcngram2"),
      # NOTE(review): "Triigram" typo is in the displayed string; left as-is.
      h3("Triigram bar data analysis"),
      p("An n-gram consisting of a three item from a sequence of words in a text file"),
      p("Following displays the bar and word cloud of frequency distribution of a three words"),
      imageOutput("barngram3"),
      h3("Trigram wordcloud data analysis"),
      imageOutput("wcngram3"),
      h3("Quadgram bar data analysis"),
      p("An n-gram consisting of a four item from a sequence of words in a text file"),
      p("Following displays the bar and word cloud of frequency distribution of a four words"),
      imageOutput("barngram4"),
      h3("Quadgram wordcloud data analysis"),
      imageOutput("wcngram4"),
      h3("Pentagram bar data analysis"),
      p("An n-gram consisting of a five item from a sequence of words in a text file"),
      p("Following displays the bar and word cloud of frequency distribution of a five words"),
      imageOutput("barngram5"),
      # NOTE(review): heading says "Bigram" but this shows the 5-gram cloud.
      h3("Bigram wordcloud data analysis"),
      imageOutput("wcngram5"),
      h5("For more information of exploratory analysis of dateset, please refer to my milestone document."),
      p(" "),
      p(""),
      a(p("LINK"), href="http://rpubs.com/ajay_jalan/508765")
    ),
    hr(),
    h4("Author: Ajay Aggarwal :-)",
      p(""),
      p(""),
      a(p("Github Repo."), href="https://github.com/ajay-aggarwal01/capStoneProject")
    )
  )
)
# Decide whether a piece of user input is worth predicting on.
# Valid input is exactly one string that is non-empty and not made up
# entirely of non-word characters or entirely of digits.
isValid <- function(input) {
  # Anything other than exactly one string can never be valid.
  if (length(input) != 1) {
    return(FALSE)
  }
  # Reject strings that are all punctuation/whitespace ...
  if (grepl("^\\W+$", input, perl = TRUE)) {
    return(FALSE)
  }
  # ... or all digits.
  if (grepl("^\\d+$", input, perl = TRUE)) {
    return(FALSE)
  }
  # Finally, the single string must be non-empty.
  input[1] != ""
}
library(shiny)
##setwd("D:/Work/mystuff/Education/DataScience/DataScienceCapstone")
# Loads the prediction engine; expected to define nextWord(), used below.
source("W3_Assignment.R")
# Define the server logic for the word-prediction app.
# Server: wires the prediction engine (nextWord, from W3_Assignment.R) to
# the UI and serves the pre-rendered analysis images.
server <- function(input, output) {
  print("Request received!")

  # Helper that builds a renderImage() for a static PNG shipped with the
  # app. Extracted because the original repeated the same block eleven
  # times with only the file name and dimensions varying.
  render_png <- function(src, width = 200, height = 200) {
    # Force the arguments now: renderImage()'s expression is evaluated
    # lazily, so without this a loop variable captured in `src` could
    # change before the first render.
    force(src); force(width); force(height)
    renderImage({
      list(
        src = src,
        contentType = "image/png",
        width = width,
        height = height,
        alt = "Face"
      )
    }, deleteFile = FALSE)
  }

  # NOTE(review): this reactive reads input$inputText, but the UI names the
  # text box 'userInput'; it also only feeds the overwritten output$otext
  # below -- confirm whether it is still needed.
  reactiveInputHandler1 <- reactive({
    if (isValid(input$inputText)) return(as.character(input$inputText))
    else return("<Please use a valid input>")
  })

  # Data-summary image on the "Data Summary" tab (larger than the rest).
  output$datasummary <- render_png("dataSummary.png", width = 600, height = 400)

  # bar_ngramN.png / wc_ngramN.png plots for the 1..5-gram analysis tabs.
  for (n in seq_len(5)) {
    output[[paste0("barngram", n)]] <- render_png(paste0("bar_ngram", n, ".png"))
    output[[paste0("wcngram", n)]] <- render_png(paste0("wc_ngram", n, ".png"))
  }

  # Display user's input.
  # NOTE(review): output$otext is assigned twice; the second assignment
  # wins (behavior kept identical to the original).
  output$otext <- renderText(reactiveInputHandler1())
  output$otext <- renderPrint({ as.character(input$input_text) })

  # Top-10 next-word candidates from the prediction engine.
  dataInput <- reactive({
    as.character(nextWord(input$userInput)$nextword)[1:10]
  })
  output$guess <- renderPrint({ dataInput() })
}
# Run the application (launches the Shiny app with the UI/server above).
shinyApp(ui = ui, server = server)
# Application title
##titlePanel("Old Faithful Geyser Data"),
# Sidebar with a slider input for number of bins
##sidebarLayout(
##sidebarPanel(
##sliderInput("bins",
## "Number of bins:",
## min = 1,
## max = 50,
## value = 30)
## ),
# Show a plot of the generated distribution
## mainPanel(
##plotOutput("distPlot")
##)
##)
##output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
##x <- faithful[, 2]
##bins <- seq(min(x), max(x), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
##hist(x, breaks = bins, col = 'darkgray', border = 'white')
##})
|
f0c75b12d713b4bcd988f672f4a36979b0551e2a
|
da2f2493a2611fb980f59234100305f4e38d9068
|
/R/hello.R
|
dcf5088e5dcf1c5a71a8fae76345ec7bd6be66ce
|
[] |
no_license
|
davesteps/insertPipe
|
dc7030cb804f8e2941e2aa120bb9af10b70f4d45
|
6fffabe69d48512b83e2e305876cf5eda6aea4f6
|
refs/heads/master
| 2020-12-24T06:41:46.859142
| 2016-06-13T16:52:39
| 2016-06-13T16:52:39
| 61,054,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
hello.R
|
# RStudio addin helpers. Both require the rstudioapi package and only work
# inside an interactive RStudio session.

# Insert a magrittr pipe (" %>% ") followed by a newline at the cursor.
insertPipe <- function() {
  rstudioapi::insertText(" %>% \n")
}
# Insert a skeleton function definition at the cursor.
insertFun <- function() {
  rstudioapi::insertText("<- function(){\n}")
}
|
e25e859f8facf94966f3a04c57f4109b24e875f7
|
8e0989c127fa440b1356606c5b1616703d76c06d
|
/man/resetOptions.Device.Rd
|
ad2d210921b6920af1d8bb80564e8c584f466311
|
[] |
no_license
|
HenrikBengtsson/R.graphics
|
8da678165bd6cfad546faf71b78d768a44b3165e
|
c92a1761f82806ecf1b761d58a59ab55532aa518
|
refs/heads/master
| 2021-01-20T11:44:10.433776
| 2014-06-19T04:04:58
| 2014-06-19T04:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,226
|
rd
|
resetOptions.Device.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% Device.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{resetOptions.Device}
\alias{resetOptions.Device}
\alias{Device.resetOptions}
\alias{resetOptions.Device}
\alias{resetOptions,Device-method}
\title{Reset the current default options for a given device type}
\description{
Reset the current default options for a given device type.
Resets the options for a given device type,
e.g. bitmap, pictex and postscript.
}
\synopsis{resetOptions.Device(static, deviceType=c("bitmap", "pictex", "postscript"), ...)}
\usage{Device$resetOptions(deviceType=c("bitmap", "pictex", "postscript"), ...)}
\arguments{
\item{deviceType}{A \code{\link[base]{character}} string.}
\item{...}{Not used.}
}
\value{Returns nothing.}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
\code{\link[R.graphics:getOptions.Device]{*getOptions}()} and \code{\link[R.graphics:setOptions.Device]{*setOptions}()}.
}
\keyword{dplot}
\keyword{internal}
\keyword{methods}
|
85581a11825c12ead96069d1008cdc4efce8c7d8
|
c8e71af48d925c34d1cb9f4dad262c970e8968d5
|
/man/Leukemia.Rd
|
75e08ca5f3e1924cce3057701a36fc21dbd3cc74
|
[
"MIT"
] |
permissive
|
tessington/qsci381
|
43c7cd323ab64cf28ba738be35779157c93e62cf
|
b981f0bd345b250d42ff5f1c0609e5e61f5911f7
|
refs/heads/master
| 2022-12-24T20:56:56.045374
| 2020-09-24T20:50:29
| 2020-09-24T20:50:29
| 284,817,926
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,226
|
rd
|
Leukemia.Rd
|
\name{Leukemia}
\alias{Leukemia}
\docType{data}
\title{Responses to Treatment for Leukemia}
\description{
Treatment results for leukemia patients
}
\format{
A data frame with 51 observations on the following 9 variables.
\tabular{rl}{
\code{Age} \tab {Age at diagnosis (in years)}\cr
\code{Smear} \tab {Differential percentage of blasts}\cr
\code{Infil} \tab {Percentage of absolute marrow leukemia infiltrate}\cr
\code{Index} \tab {Percentage labeling index of the bone marrow leukemia cells}\cr
\code{Blasts} \tab {Absolute number of blasts, in thousands}\cr
\code{Temp} \tab {Highest temperature of the patient prior to treatment, in degrees Fahrenheit}\cr
\code{Resp} \tab {\code{1}=responded to treatment or \code{0}=failed to respond}\cr
\code{Time} \tab {Survival time from diagnosis (in months)}\cr
\code{Status} \tab {\code{0}=dead or \code{1}=alive}\cr
}
}
\details{
A study involved 51 untreated adult patients with acute myeloblastic
leukemia who were given a course of treatment, after which they were assessed as to their
response.
}
\source{
Data come from Statistical Analysis Using S-Plus (Brian S. Everitt; first edition 1994, Chapman & Hall).
}
\keyword{datasets}
|
21a09b6466b75db4c13156301a291c213d3f9730
|
3d3a99fc6f571e0d977d81401dcec0d93b9b8a50
|
/man/getBackbone.Rd
|
fdbe38745590e4dd4cdf6a2674c2f6b4235e88b9
|
[] |
no_license
|
cran/genBaRcode
|
cda9cde11003dbccc60bcef762a8320663bcf551
|
e579b731f96c6628fb114e82bdedea0f79c2a156
|
refs/heads/master
| 2023-04-01T02:34:08.761473
| 2023-03-15T22:50:05
| 2023-03-15T22:50:05
| 104,376,933
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 428
|
rd
|
getBackbone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BCdata-class-methods.R
\name{getBackbone}
\alias{getBackbone}
\title{Accessing the Barcode Backbone slot of a BCdat object.}
\usage{
getBackbone(object)
}
\arguments{
\item{object}{a BCdat object.}
}
\value{
A character string.
}
\description{
Accessing the Barcode Backbone slot of a BCdat object.
}
\examples{
data(BC_dat)
getBackbone(BC_dat)
}
|
5341edceea621ab244b65ec7fc13cebaecebb236
|
ce1b08611df0fff10fbf5c79f8d79fd170cdb08f
|
/windows/server.R
|
89a7ba246b4d9ec3587ea9796622d43ac2d9843f
|
[
"MIT"
] |
permissive
|
xiaodaigh/shinydistro
|
b0824d8f6618b0492e4ba064ab6fe6e5603d7c96
|
cd795bff774873f6dc3f68f818a9979a6f4eeb35
|
refs/heads/master
| 2021-01-10T20:22:01.340395
| 2014-03-26T16:38:01
| 2014-03-26T16:38:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
server.R
|
# It is recommended that you put the code below inside your shinyServer()
# function so that when PortableChrome closes it will also close the
# underlying R session.
# NOTE(review): `session` must be in scope (the server function's session
# argument); q("no") quits R without saving the workspace.
session$onSessionEnded(function() { q("no") })
|
96d649672e5704c4a031bf82ddb84cb673765f87
|
c262ebf6ac6dbd85ce1e786158ffca258744a73f
|
/ExploratoryDataAnalysis/Week4_ProgrammingAssignment/Plot5.R
|
e1615cdea7341d330b32cd3c6c0289c97795abd4
|
[] |
no_license
|
imreyes/datasciencecoursera
|
47bc09b12f090b42c077db55b326d5f4377f443d
|
7b35668a3174645f62c9ad575516b4b505c52253
|
refs/heads/master
| 2021-01-12T12:21:17.517449
| 2016-12-09T22:50:12
| 2016-12-09T22:50:12
| 72,453,655
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,330
|
r
|
Plot5.R
|
# DSS Coursera.org
# Exploratory Data Analysis
# Week 4
# Programming Assignment
# Data source: EPA air pollution data - fine particle pollution.
# Unwrapping and loading data.
library(ggplot2)
unzip('exdata-data-NEI_data.zip')
# The below variables are used per instructed.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Exploration of the raw data are not coded here.
# Instead please take a look at the CodeBook.md.
# Question to address:
# How have emissions from motor vehicles changed in Baltimore (1999 ~ 2008)?
# Step 1: Subset the data for motor-vehicle sources in Baltimore (fips 24510).
# Browse thru data, and target the column 'Short.Name' in SCC.
MV <- grep('[mM]otor', SCC$Short.Name) # Find words of motor.
MV <- SCC$SCC[MV] # Get source code.
dataMV <- subset(NEI, SCC %in% MV & fips == '24510')
# Step 2: Create a png file to write-in.
png('Plot5.png', width = 960, height = 960)
# Step 3: Plot the data:
# First look at the distribution, without summing up.
g1 <- ggplot(dataMV, aes(year, Emissions))
g1 <- g1 + geom_point(size = 4) +
  labs(x = 'Year', y = 'Emissions', title = 'PM2.5 by Motor Vehicles in Baltimore (1999 ~ 2008)')
# Note there are 2 distinct 'outliers' - way too high compared to others.
# While we can't remove them, we want to look at the features of the rest points.
g2 <- ggplot(dataMV[dataMV$Emissions<5, ], aes(year, Emissions)) # Note 5 is arbitrary - any lines between 0.2~10 works!
g2 <- g2 + geom_point(size = 4) + labs(x = 'Year', y = 'Emissions',
                                       title = "Removing 'Outliers'")
# Now, let's look at the sum plots with and without the big numbers.
aggr <- aggregate(Emissions ~ year, dataMV, sum)
g1s <- ggplot(aggr, aes(year, Emissions))
g1s <- g1s + geom_line(col = 'blue', lwd = 2) +
  labs(x = 'Year', y = 'Total Emissions', title = 'Summary of Annual Sums')
aggr2 <- aggregate(Emissions ~ year, dataMV[dataMV$Emissions < 5, ], sum)
g2s <- ggplot(aggr2, aes(year, Emissions))
g2s <- g2s + geom_line(col = 'blue', lwd = 2) +
  labs(x = 'Year', y = 'Total Emissions', title = "Summary of Annual Sums without 'Outliers'")
# Co-plot in a 2x2 grid (uses cowplot::plot_grid, not gridExtra).
cowplot::plot_grid(g1, g1s, g2, g2s, ncol = 2, nrow = 2)
# Step 4: Close the device.
dev.off()
|
07d06bbc78de7e0b6c7441775751a39c7c851496
|
5e3011b1de8bbb6e2a0e092eb01b0b1ce678a4d6
|
/man/bootstrap_fs_perc_change.Rd
|
2a46da6a476c8fdc58a146b6eb9fd56e705b225a
|
[
"MIT"
] |
permissive
|
evanjflack/cfo.behavioral
|
a95a81bd89903f4b876522331860b607fa08b83b
|
b10e451026910c48a08c3bdda011bde039250959
|
refs/heads/master
| 2023-02-23T09:08:59.557128
| 2020-10-06T19:13:57
| 2020-10-06T19:13:57
| 227,146,208
| 0
| 0
|
NOASSERTION
| 2020-08-03T16:20:28
| 2019-12-10T14:50:50
|
R
|
UTF-8
|
R
| false
| true
| 863
|
rd
|
bootstrap_fs_perc_change.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_first_stage_elasticity.R
\name{bootstrap_fs_perc_change}
\alias{bootstrap_fs_perc_change}
\title{Bootstrap SE Percentage Change Models}
\usage{
bootstrap_fs_perc_change(DT, form, y, x_main, x_int, B, quiet)
}
\arguments{
\item{DT}{a data.table}
\item{form}{fomula, first stage formula}
\item{y}{character vector, name of response variable(s) without months suffix}
\item{x_main}{character (default = "first_mo"), name of instrument}
\item{x_int}{character vector, names of variables to interact instrument with}
\item{B}{integer, number of bootstrap samples to use when calculating
standard errors}
\item{quiet}{logical (default = FALSE), if TRUE then does not print progress}
}
\value{
data.table with standard errors
}
\description{
Bootstrap SE Percentage Change Models
}
|
40ae5723b7ba7d4b2b13cfae04e25d6f6a725c9f
|
781d9f53df5cbc291d8e7a10435106d1f0c69e5b
|
/2-Parallel-Job/parallel-job.R
|
13c2d521d2a285c290efbce0967ebb3b655d51d7
|
[] |
no_license
|
syanthonyadam/Mercury-Tutorial
|
f922a779a8cda4b7787db07e68367f480ca9ead5
|
dc62fab0f8a111819be7bd4a207cac10ed530737
|
refs/heads/master
| 2023-07-29T00:26:24.766522
| 2021-09-19T22:42:58
| 2021-09-19T22:42:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
parallel-job.R
|
# parallel-job.R
# Runs a parallel job in R
library(parallel)

# Get number of cores from the SLURM_JOB_CPUS_PER_NODE environmental variable.
# FIX: fall back to 1 core when the variable is unset or not a plain integer
# (as.integer("") yields NA with a warning, which would make mclapply() fail).
num_cores <- suppressWarnings(as.integer(Sys.getenv("SLURM_JOB_CPUS_PER_NODE")))
if (is.na(num_cores) || num_cores < 1L) {
  num_cores <- 1L
}
print(paste0("I have ", num_cores, " cores ready in R."))

# Run a parallel job: two 30-second sleeps whose printed timestamps reveal
# whether they ran concurrently.
print("Using the two cores in parallel.")
print("Each core will sleep for 30 seconds and then print the time.")
print("If the two printed times are the same then we know the jobs were run in parallel.")
print("If the two printed times are 30 seconds apart then we know the jobs were run in serial or sequentially.")
out_list <- mclapply(1:2, function(x) {
  Sys.sleep(30)
  paste0("Printed Time ", x, ": ", Sys.time())
}, mc.cores = num_cores)
print(out_list)
|
b36e6994a55185cb8a27cc9d9eb6fec5c0b93c11
|
2477434cc1b95634c5b15f558669e39ec2e963a2
|
/man/adjustOne.Rd
|
74a451c830533625a41931d11d4098d893b1da00
|
[] |
no_license
|
pariswu1988/proteomics
|
4e4b273d04490a9f3279553dd889d870e504b62f
|
5e50c3e344068130a079c9e6c145ffdbf5651ca2
|
refs/heads/master
| 2021-01-13T03:55:36.847983
| 1977-08-08T00:00:00
| 1977-08-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
rd
|
adjustOne.Rd
|
\name{adjustOne}
\alias{adjustOne}
\title{Adjust for confounding -- In one single experiment only}
\usage{
adjustOne(dwide)
}
\arguments{
\item{dwide}{iTRAQ data in wide format.}
}
\description{
Simple code when only one iTRAQ-experiment has been
performed. (Code not used anymore.)
}
|
fc010d80e65ee41d3f98b81125fea8dfeffcc3c5
|
4c8dd8169fae247b71b26ff6b64db46f4174e73b
|
/5_Convert_LatLong_to_Postcode.R
|
fa8bf6a39177c8d106c40a064d33edc8ec6c2a44
|
[
"MIT"
] |
permissive
|
git-2-it/diss_g
|
c03b76d63937a665e9b505fd04671558c4fccd0a
|
ab4a4c2a2968afee5d4b4a5aa5ac3690ce80be34
|
refs/heads/main
| 2023-02-08T23:23:33.615634
| 2021-01-02T17:08:52
| 2021-01-02T17:08:52
| 325,520,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,552
|
r
|
5_Convert_LatLong_to_Postcode.R
|
# ca4_ni
# Converts atlas practice latitude/longitude points to postcodes, then links
# them to SOA/SA geographies (see the sections below).
# Init required libraries
library("readr")
library("tidyr")
library("dplyr")
library(PostcodesioR)
library(sf)
# BUG FIX: sp was never attached, yet coordinates<-() and over() used later
# in this script come from sp (sf does not attach it).
library(sp)
# Paths to the atlas input/output files.
# FIX: removed the dead 'datapath <- "../../../data"' assignment that was
# immediately overwritten by the line below.
datapath <- "~/courses/dissertation/data"
atlas_path <- paste(datapath, "atlas", sep = "/")
atlas_path
atlas_data <- paste(atlas_path, "atlas_data.censored.csv", sep = "/")
#atlas_data <- paste(atlas_path, "atlas_data_lat_long.csv", sep = "/")
outdir <- atlas_path
output_data <- paste(outdir, "atd_lat_long.csv", sep = "/")
atlas_data
data_in <- read.csv(atlas_data)
str(data_in)
# Check for NA values
colSums(is.na(data_in))
# Convert lat/long to UK post code.
# Deal with non UK possibilities?
# Reverse-geocode one row of the atlas data to its nearest UK postcode.
# x: a named character vector (one row passed in by apply()); element "X"
#    appears to hold the longitude and "Geog" the latitude -- TODO confirm
#    against the input CSV's column names.
# Returns the first postcode of the nearest API match.
lat_long_chk <- function(x)
{
  long_in <- x["X"]
  lat_in <- x["Geog"]
  # postcodes.io reverse lookup: limit 1 result, 10 m radius, wide-area
  # fallback enabled (argument order per PostcodesioR::reverse_geocoding).
  rev_geo <- reverse_geocoding(long_in, lat_in, 1, 10, TRUE)
  # FIX: dropped the str() debug prints that cluttered the console.
  rev_geo[[1]]$postcode[1]
}
str(data_in)
# Keep only rows flagged for inclusion (G_INCL == "1").
data_sub <- data_in[data_in$G_INCL == "1", ]
# Reverse-geocode every retained row -- one API call per row, so slow.
data_sub$postcode <- apply(data_sub, 1, lat_long_chk )
str(data_sub$postcode)
# write.csv(file= output_data, x=data_sub, quote=TRUE, row.names = TRUE)
############################# postcode for atlas data to link to SOA

# Look up a (space-stripped) UK postcode via the postcodes.io API.
# x: a named character vector (one row passed in by apply()) whose
#    "Postcode" element is the code to resolve.
# Returns a one-row data frame with longitude, latitude, postcode, eastings
# and northings on success, or the string "NOTFOUND" when the lookup errors
# or warns.
postcode_api_lookup <- function(x)
{
  find_code <- x["Postcode"]
  # The codes should already be cleaned up (no embedded spaces).
  # FIX: return tryCatch()'s value directly; the original assigned ret_code
  # inside the handlers, which only modified the handlers' own local
  # environments (it worked only because ret_code had been pre-initialised
  # to "NOTFOUND").
  tryCatch(
    {
      foundcode <- postcode_lookup(find_code)
      foundcode[c("longitude", "latitude", "postcode", "eastings", "northings")]
    },
    error = function(cond) "NOTFOUND",
    warning = function(cond) "NOTFOUND"
  )
}
# Get the specific atlas data.
# --- Link practices to SOA (Super Output Area) polygons --------------------
main_path <- "~/courses/dissertation/data"
datapath <- "atlas"
datapath <- paste(main_path, datapath, sep = "/")
datapath
atlas_data <- paste(datapath, "atd_lat_long.csv", sep = "/")
clusters_in <- read.csv(atlas_data)
# Standardise the postcode column name and strip embedded spaces so it can
# be matched against the API results below.
names(clusters_in)[names(clusters_in) == 'postcode'] <- 'Postcode'
head(clusters_in)
clusters_in$Postcode <- as.character(clusters_in$Postcode)
# lat_long_data <- do.call(rbind.data.frame, lat_long_data)
clusters_in$Postcode <- gsub(' ', '',clusters_in$Postcode)
# One API lookup per row; each result is coords/eastings/northings or
# the string "NOTFOUND".
lat_long_data <- apply(clusters_in, 1, postcode_api_lookup)
lat_long_data
lat_long_data <- do.call(rbind.data.frame, lat_long_data)
lat_long_data$postcode <- gsub(' ', '',lat_long_data$postcode)
lat_long_data
colnames(lat_long_data) <- c("longitude", "latitude", "Postcode", "eastings", "northings")
# Drop duplicate postcodes before merging back onto the practice table.
lat_long_data <- subset(lat_long_data, !duplicated(lat_long_data$Postcode))
lat_long_data
# docs_in <- cbind(docs_in, lat_long_data)
# docs_in
clusters_in$Postcode <- gsub(' ', '', clusters_in$Postcode)
clusters_in <- merge(x = clusters_in, y = lat_long_data, by.x = "Postcode", by.y = "Postcode")
# Import the SOA polygon shape file.
main_path <- "~/courses/dissertation/data"
datapath <- "portal_data"
datapath <- paste(main_path, datapath, sep = "/")
shape_file <- "SOA2011_Esri_Shapefile_0/SOA2011.shp"
shape_file <- paste(datapath, shape_file, sep = "/")
shape_file
aoi_boundary_HARV <- st_read(shape_file)
class(aoi_boundary_HARV)
st_geometry_type(aoi_boundary_HARV)
crs_in <- st_crs(aoi_boundary_HARV)
crs_in
class(aoi_boundary_HARV)
st_bbox(aoi_boundary_HARV)
aoi_boundary_HARV
# Build a point layer from the practice eastings/northings.
pts <- clusters_in[c("eastings", "northings", "PID")]
colnames(pts) <- c("x", "y", "PID")
pts
coordinates(pts) <- ~ x + y  # sp: promote to a spatial points object
class(pts)
pts <- st_as_sf(pts)
plot(pts)
st_crs(pts)
# Tag the points with the shapefile's CRS -- assumes they share it; confirm.
pts <- st_set_crs(pts, crs_in)
spdf <- as_Spatial(aoi_boundary_HARV)
# convert points()
pts_sp <- as_Spatial(pts)
class(pts_sp)
# Point-in-polygon overlay: which SOA polygon contains each practice point.
over_results <- over(pts_sp, spdf)
over_results
clusters_in <- cbind(clusters_in, over_results)
clusters_in <- droplevels(clusters_in)
# Check all practices have an SOA assigned.
colSums(is.na(clusters_in))
datapath <- "atlas"
datapath <- paste(main_path, datapath, sep = "/")
datapath
clusters_soa_file <- "clusters_SOA.csv"
clusters_soa_file <- paste(datapath, clusters_soa_file, sep = "/")
# write.csv(file=clusters_soa_file, x=clusters_in, quote=TRUE, row.names = FALSE)
# Assign cluster groups: NI_II / NI_III structures form group "1", rest "2".
clusters_in$SOA_CODE <- as.character(clusters_in$SOA_CODE)
clusters_in$Cluster_Grp <- "2"
clusters_in$Cluster_Grp[clusters_in$Structure == "NI_III"] <- "1"
clusters_in$Cluster_Grp[clusters_in$Structure == "NI_II"] <- "1"
datapath <- "processed_data"
datapath <- paste(main_path, datapath, sep = "/")
datapath
grouped_clusters_file <- "grouped_clusters.csv"
grouped_clusters_file <- paste(datapath, grouped_clusters_file, sep = "/")
grouped_clusters_file
# write.csv(file=grouped_clusters_file, x=clusters_in, quote=TRUE, row.names = FALSE)
##################
# SA data
# --- Repeat the overlay against SA (Small Area) polygons -------------------
main_path <- "~/courses/dissertation/data"
datapath <- "portal_data"
datapath <- paste(main_path, datapath, sep = "/")
shape_file <- "SA2011_Esri_Shapefile_0/SA2011.shp"
shape_file <- paste(datapath, shape_file, sep = "/")
shape_file
aoi_sa_data <- st_read(shape_file)
class(aoi_sa_data)
st_geometry_type(aoi_sa_data)
crs_in <- st_crs(aoi_sa_data)
crs_in
st_bbox(aoi_sa_data)
aoi_sa_data
# Need the cluster points data (output of the SOA section above).
datapath <- "atlas"
datapath <- paste(main_path, datapath, sep = "/")
datapath
clusters_soa_file <- "clusters_SOA.csv"
clusters_soa_file <- paste(datapath, clusters_soa_file, sep = "/")
clusters_in <- read.csv(clusters_soa_file)
# Build a point layer from the practice eastings/northings.
pts <- clusters_in[c("eastings", "northings", "PID")]
colnames(pts) <- c("x", "y", "PID")
pts
coordinates(pts) <- ~ x + y  # sp: promote to a spatial points object
class(pts)
pts <- st_as_sf(pts)
plot(pts)
st_crs(pts)
# Tag the points with the shapefile's CRS -- assumes they share it; confirm.
pts <- st_set_crs(pts, crs_in)
spdf <- as_Spatial(aoi_sa_data)
# convert points()
pts_sp <- as_Spatial(pts)
class(pts_sp)
# Point-in-polygon overlay: which SA polygon contains each practice point.
over_results <- over(pts_sp, spdf)
over_results
clusters_in <- cbind(clusters_in, over_results)
clusters_in <- droplevels(clusters_in)
# Check all practices have an SOA assigned.
colSums(is.na(clusters_in))
# Write out clusters with SA.
datapath <- "atlas"
datapath <- paste(main_path, datapath, sep = "/")
datapath
clusters_sa_file <- "clusters_SA.csv"
clusters_sa_file <- paste(datapath, clusters_sa_file, sep = "/")
# write.csv(file=clusters_sa_file, x=clusters_in, quote=TRUE, row.names = FALSE)
################################### EEE
|
6e6839323028259b384694cb246695ff7621ee79
|
ff59d2a5ae2fa5790e82d74244f4601ba89d7026
|
/scripts/defineCohorts.R
|
1b7cfcb886c59b033a3ca4836268eea1d252a37a
|
[] |
no_license
|
laderast/OHDSIqueries
|
13e854df3f97ced01044050c1b2c4cbfa8a62924
|
3b150e836a83c8548d6bab71d93797de64edf22c
|
refs/heads/master
| 2021-01-22T18:43:50.890491
| 2017-05-16T17:02:53
| 2017-05-16T17:02:53
| 85,112,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,060
|
r
|
defineCohorts.R
|
# Exploratory OMOP-CDM queries: pull all persons, the predefined cohorts,
# an outcome cohort (ASCVD), and candidate predictor cohorts.
# Assumes `mydb` is an open DBI connection created elsewhere in this file.
dbListTables(mydb)
?dbFetch  # NOTE(review): interactive help call left in the script
#step 1: pull every person
sqlStatementall <- "
select
pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
pr.ethnicity_source_value
from person pr
"
rs <- dbSendQuery(mydb,sqlStatementall)
# NOTE(review): fetched rows are printed, not stored, throughout this script.
fetch(rs, n=-1)
dbClearResult(rs)
#step 1.b : query cohorts - groups of patients predefined
sqlStatementcoh <- "
select
cd.*
from cohort_definition cd
"
rs <- dbSendQuery(mydb,sqlStatementcoh)
fetch(rs, n=-1)
dbClearResult(rs)
#step 2 : select outcomes (persons in the ASCVD cohort, with demographics)
sqlStatementout <- "
select
cd.*
, ca.*
, cs.*
, co.*
, pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
pr.ethnicity_source_value
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
left join person pr on pr.person_id = co.person_id
where cd.cohort_definition_name='ASCVD'
"
rs <- dbSendQuery(mydb,sqlStatementout)
fetch(rs, n=-1)
dbClearResult(rs)
#step 2 : select various potential predictors (same joins, predictor cohorts)
sqlStatementpreds <- "
select
cd.*
, ca.*
, cs.*
, co.*
, pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
pr.ethnicity_source_value
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
left join person pr on pr.person_id = co.person_id
where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM')
"
rs <- dbSendQuery(mydb,sqlStatementpreds)
fetch(rs, n=-1)
dbClearResult(rs)
#step 3 : create unique dataset with dummy variables and case statements + retain original codes
sqlStatementdata <- "
select
pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
pr.ethnicity_source_value,
case when o.person_ID ne . then 1 else 0 END as outcome, o.OUTCOME_NAME, o.ASCVD_code,
case when
from person pr
LEFT JOIN (select
co.person_id, cd.cohort_definition_name as OUTCOME_NAME, cs.concept_code as ASCVD_code
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
where cd.cohort_definition_name='ASCVD' and pr.person_ID ne .) out o on o.person_ID=pr.person_ID
LEFT JOIN (select
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM'))
"
|
980e29b80415db2c7a34be0dd728bf0d7b2cba6a
|
344529cba1ea472905140fc8b4dcaf9ad8929fa8
|
/Logistic_Regression.R
|
7aee41d1b8c02976ed9a3673b34f95d6de24344c
|
[] |
no_license
|
fall2018-saltz/cs_project1
|
01be370863a7b7c83b6aea38234d250110557597
|
bb552d4c1b19db3a82743419a494000a3a9b2f1e
|
refs/heads/master
| 2020-03-30T01:59:53.651688
| 2018-12-08T03:34:04
| 2018-12-08T03:34:04
| 150,606,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,965
|
r
|
Logistic_Regression.R
|
# Logistic regression of airline passenger satisfaction.
# NOTE(review): assumes a data frame `data` (with a continuous
# `Satisfaction` column) was loaded by an earlier script -- confirm.
library(jtools)
#install.packages('pscl')
library(pscl)
#install.packages("broom")
library(broom)
#install.packages("ggstance")
library(ggstance)
#install.packages("effects")
#library(effects)
#install.packages("lattice")
#install.packages("caret")
library(caret)
#install.packages("e1071")
library(e1071)
str(data)
#Creating a single variable that can show the absolute value of the delay, instead of having two variables.
data$EffectiveDelay<-abs(data$Departure.Delay.in.Minutes-data$Arrival.Delay.in.Minutes)
#Removing the two delay variables now folded into EffectiveDelay
data$Departure.Delay.in.Minutes<-NULL
data$Arrival.Delay.in.Minutes<-NULL
#Converting the dependent variable into a categorical variable:
#Sat = 1 when Satisfaction > 3, else 0.
data$Sat<-replicate(length(data$Satisfaction),0)
data$Sat[data$Satisfaction>3]<-1
data$Sat<-as.factor(data$Sat)
str(data)
#Removing the continuous version of the dependent variable
# NOTE(review): drops column 1 by position, assumed to be `Satisfaction`
# -- confirm the column order of the upstream data.
data<-data[,-c(1)]
#Removing a few more variables that wouldn't play a significant role in determining the dependent variable
# NOTE(review): "Orgin.City" presumably matches a misspelled column name
# in the source data -- verify before "fixing" the spelling.
data$Airline.Code<-NULL
data$Airline.Name<-NULL
data$Orgin.City<-NULL
data$Origin.State<-NULL
data$Destination.City<-NULL
data$Destination.State<-NULL
data$Day.of.Month<-NULL
data$Flight.date<-NULL
data$Flight.time.in.minutes<-NULL
data$Flight.Distance<-NULL
data$EffectiveDelay<-NULL
data$Eating.and.Drinking.at.Airport<-NULL
data$X..of.Flight.with.other.Airlines<-NULL
data$Shopping.Amount.at.Airport <-NULL
#Splitting the data into training (2/3) and test (1/3) sets.
# NOTE(review): no set.seed(), so the split is not reproducible.
rand<-sample(1:dim(data)[1])
cutpoint2_3<-floor(2*dim(data)[1]/3)
cutpoint2_3
traindata<-data[rand[1:cutpoint2_3],]
testdata<-data[rand[(cutpoint2_3+1):dim(data)[1]],]
#Training the logistic regression model on all remaining predictors
model<-glm(Sat~., family=binomial(link="logit"),data=traindata)
summary(model)
#K-Fold Cross Validation (10-fold, repeated) via caret
ctrl <- trainControl(method = "repeatedcv", number = 10, savePredictions = TRUE)
mod_fit <- train(Sat ~., data=traindata, method="glm", family="binomial",
trControl = ctrl, tuneLength = 5)
#Predicting the dependent variable on the held-out test set
pred = predict(mod_fit, newdata=testdata)
#Confusion Matrix
confusionMatrix(data=pred, testdata$Sat)
"Unlike linear regression with ordinary least squares estimation, there is no R2 statistic which explains
the proportion of variance in the dependent variable that is explained by the predictors.
However, there are a number of pseudo R2 metrics that could be of value.
Most notable is McFadden’s R2, which is defined as 1−[ln(LM)/ln(L0)] where ln(LM) is the log likelihood value
for the fitted model and ln(L0) is the log likelihood for the null model with only an intercept as a predictor.
The measure ranges from 0 to just under 1, with values closer to zero indicating that the model has no predictive power."
# pscl's pseudo-R2 measures (incl. McFadden) for the glm fit
pR2(model)
#Much more elaborate summary of the model with t-statistic value and other stats
summ(model, confint = TRUE, digits = 6)
#Plotting the estimate of the coefficients. (Directionality)
ploty<-plot_summs(model, scale=TRUE)
|
1b3179bd401226a355e7e6f9aa2137a45ec81330
|
9fe4998982a9b52a66a6746d8f138fd9a3a895fb
|
/2d5.R
|
09d156a050cb131158ccdf4d107cb0a31a2c9aaf
|
[] |
no_license
|
avegac1996/Estadistica-en-R
|
effef846090888aa0e31892b18a2dbb79aca2e87
|
29d989ec541f5c4e547dce9eb9dd81ce3ad0c772
|
refs/heads/main
| 2023-06-01T22:05:07.795016
| 2021-06-21T01:15:07
| 2021-06-21T01:15:07
| 371,099,714
| 0
| 0
| null | 2021-06-14T02:07:36
| 2021-05-26T16:24:27
|
R
|
UTF-8
|
R
| false
| false
| 545
|
r
|
2d5.R
|
# Quadratic regression exercise: fit y on x linearly (part a), add a
# quadratic term (part b), and predict y at x = 20 (part c).

# ----------------- a --------------------
x <- c(9, 32, 18, 15, 26)
y <- c(10, 20, 21, 16, 22)
reg <- lm(y ~ x) # estimated regression relating y to x
summary(reg)
# The slope's p-value (0.11719) exceeds 0.05, so x is not significant at
# the 5% level in the simple linear model. (Previously this comparison was
# left as a bare expression, which just evaluated to FALSE.)

# ------------------ b ----------------
x1 <- x^2
reg2 <- lm(y ~ x + x1) # quadratic model y = b0 + b1*x + b2*x^2
summary(reg2)
b00 <- reg2$coefficients[1] # intercept b0
b11 <- reg2$coefficients[2] # coefficient b1
b22 <- reg2$coefficients[3] # coefficient b2
yest1 <- b00 + b11 * x + b22 * x1 # fitted values at the observed x

# --------------------- c --------------
# Predict at x = 20 using the fitted coefficients directly. The original
# re-typed rounded coefficients by hand, losing precision.
yest1 <- b00 + b11 * 20 + b22 * 20^2
print(yest1)
|
cc27c7cdb0974eead33cd67e83ab8971bce3e563
|
a9c540d94681b5e4ffb2300fd320d6c16eab3040
|
/man/Cascade_confidence.Rd
|
b6c69a9e435fce0fa31616a769b7c7eeef949691
|
[] |
no_license
|
fbertran/SelectBoost
|
fd5716b73cb07d05cc3c67bbc75b8915672c1769
|
5610332658b95a71dacdbccea721d00a88a9742f
|
refs/heads/master
| 2022-12-01T07:52:52.171876
| 2022-11-29T22:18:18
| 2022-11-29T22:18:18
| 136,206,211
| 6
| 2
| null | 2021-03-21T16:43:02
| 2018-06-05T16:35:45
|
R
|
UTF-8
|
R
| false
| true
| 884
|
rd
|
Cascade_confidence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Cascade_confidence}
\alias{Cascade_confidence}
\alias{net_confidence}
\alias{net_confidence_.5}
\alias{net_confidence_thr}
\title{Confidence indices}
\format{
A \code{network.confidence} object with five slots:
\describe{
\item{network.confidence}{The confidence matrix}
\item{name}{Names of the variables (genes)}
\item{F}{F array, see Cascade for more details}
\item{time_pt}{Repeated measurements}
\item{cv.subjects}{Logical. Was crossvalidation carried out subjectwise?}
}
An object of class \code{network.confidence} of length 1.
An object of class \code{network.confidence} of length 1.
}
\usage{
net_confidence
net_confidence_.5
net_confidence_thr
}
\description{
Result for confidence indices derivation using the Cascade package
}
\keyword{datasets}
|
b4b5737d44647cc6a34a9f2af24c7d723f03620f
|
d03924f56c9f09371d9e381421a2c3ce002eb92c
|
/man/internalGenerics.Rd
|
863e9136597726467ccee1c5ef36b0529bcef725
|
[] |
no_license
|
cran/distr
|
0b0396bbd5661eb117ca54026afc801afaf25251
|
c6565f7fef060f0e7e7a46320a8fef415d35910f
|
refs/heads/master
| 2023-05-25T00:55:19.097550
| 2023-05-08T07:10:06
| 2023-05-08T07:10:06
| 17,695,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 828
|
rd
|
internalGenerics.Rd
|
\name{internalGenerics}
\alias{internalGenerics}
\alias{distribution}
\alias{samplesize}
\alias{samplesize<-}
\title{Internal: Common Generics 'distribution' and 'samplesize', 'samplesize<-'}
\description{
In order to be able to use packages \pkg{distrSim} and \pkg{distrMod}
resp. \pkg{RobAStBase} independently,
it is necessary to import the respective generic from a prior package, i.e.,
\pkg{distr}.
}
\usage{
distribution(object)
samplesize(object, ...)
samplesize(object) <- value
}
\arguments{
\item{object}{ the first argument to dispatch on in the actual methods. }
\item{value}{ the value to be assigned. }
\item{\dots}{ additional arguments for function \code{samplesize}. }
}
\author{
Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}}
\keyword{internal}
|
99660094e6e8c4e2b789c0294705731fa7a8fd6b
|
7f129731f177fa696af7574aee5e16304648759a
|
/unused-code/negbinomial_mixture.r
|
c769bf8dfb71895ce6f0b4a66adbff1bff4405b7
|
[] |
no_license
|
rohanmaddamsetti/STLE-analysis
|
82e906ccad71425132d9b2533c3a5edba225ffd7
|
85cadef141135d04244b61f64b88afe8b6787943
|
refs/heads/master
| 2021-01-17T17:02:07.320741
| 2017-12-15T23:22:58
| 2017-12-15T23:22:58
| 63,113,993
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,165
|
r
|
negbinomial_mixture.r
|
# Stan model: two-component negative-binomial mixture. Component 1's mean
# and dispersion are supplied as data (inferred elsewhere); component 2's
# parameters and the mixing weight `pi` are estimated.
#
# Fixes: R has no triple-quoted strings -- the original `"""..."""` was a
# syntax error (two juxtaposed string constants), so a single pair of
# quotes delimits the model here. Stan distribution names are lowercase,
# so `Beta(...)` is now `beta(...)`.
# NOTE(review): the parameter names `beta` and `pi` shadow Stan's built-in
# beta distribution / pi() constant; Stan versions differ in how they treat
# this -- consider renaming them if the model fails to compile.
full_ngb_mixed_model <- "
data {
  int<lower=0> N;       // samples
  real<lower=0> mean1;  // mean counts inferred
  real<lower=0> tau1;   // dispersion of component 1
  int<lower=0> y[N];
}
parameters {
  real<lower=0> mean2;
  real<lower=0> tau2;
  real<lower=0> alpha;
  real<lower=0> beta;
  real<lower=0,upper=1> pi;
}
model {
  pi ~ beta(alpha, beta);
  for (i in 1:N) {
    target += log_mix(pi,
                      neg_binomial_2_lpmf(y[i] | mean1, tau1),
                      neg_binomial_2_lpmf(y[i] | mean2, tau2));
  }
}
"
# Compile the mixture model, sample with HMC/NUTS, and plot the posterior
# of the mixing weight pi.
# NOTE(review): `N`, `mean`, `dispersion` and `mixed` must be defined
# upstream (here `mean` shadows base::mean -- presumably a numeric scalar;
# confirm against the calling script).
library("rstan") # observe startup messages
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
data = list(N=N,mean1=mean,tau1=dispersion,y=mixed)
#compiles the model
stan_model <- stan_model(model_code =full_ngb_mixed_model)
#fit via HMC
fit <- sampling(stan_model, data=data, par=c("pi","mean2","tau2"), iter = 1000, chains = 3, thin=1)
summary(fit)
traceplot(fit)
library("bayesplot")
library("ggplot2")
# NOTE(review): pi is a scalar parameter, so `posterior` has one column,
# yet the colnames vector below has length N -- this errors unless N == 1;
# confirm the intended shape.
posterior <-as.matrix(extract(fit)$pi)
colnames(posterior)<-paste("pi",1:N)
plot_title <- ggtitle("Posterior distributions with medians and 80% intervals")
mcmc_areas(posterior, point_est="mean",prob = 0.8) + plot_title
|
c21aef46ee05fe0bf3c8a8370e36d61012b8307f
|
2d44449f9a0021ad81b927c2bf64fe580ab4c5de
|
/man/Spherical.Rd
|
6284493d6c72179a2d516678529864d8371ec8ac
|
[] |
no_license
|
jbrowell/Dynamic-Covariance
|
58c4cabb5a0e3c48e72c62d0839433c6045a5535
|
c2af77f96f0a73b2b61c13f539f39656b8c9115d
|
refs/heads/main
| 2023-04-15T09:24:23.839923
| 2021-09-30T14:45:47
| 2021-09-30T14:45:47
| 358,193,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 834
|
rd
|
Spherical.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CovarianceFunctions.R
\name{Spherical}
\alias{Spherical}
\title{Spherical Covariance Function}
\usage{
Spherical(r, params = list(sigma = 1, theta = 1))
}
\arguments{
\item{r}{Vector or matrix of separation distances}
\item{params}{A list of parameters with default \code{list(sigma=1,theta=1)}.
Parameters may be supplied as vectors of length equal to \code{length(r)}.}
}
\value{
A vector or matrix of the same size as \code{r} containing corresponding values of
the Spherical covariance function.
}
\description{
Functional form of the Spherical covariance function.
}
\details{
Function that returns the value of the Spherical covariance function.
}
\author{
Jethro Browell, \email{jethro.browell@glasgow.ac.uk}
}
\keyword{Covariance}
\keyword{Function}
|
53140b6317498768dff289eff951a9b1891d7a57
|
1523184d172fca9c0562a358e30a66b3ce07c02c
|
/IC/Dashboard/Funcoes/Graficos/Profissional/GraficoTrabalhaAreaEvasao.r
|
6d246ac2777b635226b010ea107f16002dd4edbf
|
[] |
no_license
|
guidinhani/IC2019
|
7d523dd37cf1018ecd25f430bc8d77d4b8a636f6
|
f60dee874522f6d920d9d2ab06f6eeefccd6d3e1
|
refs/heads/master
| 2020-08-21T05:35:07.561411
| 2019-10-24T22:35:49
| 2019-10-24T22:35:49
| 216,102,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,856
|
r
|
GraficoTrabalhaAreaEvasao.r
|
# Builds a stacked plotly bar chart of how many dropouts do vs. do not
# work in the field of the programme they dropped out of.
#
# Args:
#   trabalhaAreaEvasaodf: data frame with a `Trabalha` (works?) column,
#     one row per dropout. Requires dplyr and plotly to be attached.
# Returns: a plotly htmlwidget with two stacked bar traces ("NAO", "SIM").
GraficoTrabalhaAreaEvasao <- function(trabalhaAreaEvasaodf) {
# ==============================================================================
# QUERY - NUMBER OF DROPOUTS WHO WORK IN THE FIELD THEY DROPPED OUT OF
# ==============================================================================
# Count answers per level of `Trabalha`.
quantidadeTrabalhaNaArea <- trabalhaAreaEvasaodf %>%
select(Trabalha) %>%
group_by(Trabalha) %>%
summarise(Quantidade = n())
X <- c("TRABALHA NA ÁREA DO CURSO DE EVADIDO?")
# NOTE(review): positional indexing assumes row 1 = "no" and row 2 = "yes"
# after group_by's ordering, and that both levels occur in the data --
# confirm against the input; a missing level silently shifts these rows.
SIM <- quantidadeTrabalhaNaArea[, 2][2, ]
NAO <- quantidadeTrabalhaNaArea[, 2][1, ]
quantidadeTrabalhaNaArea <- data.frame(X, SIM, NAO)
names(quantidadeTrabalhaNaArea) <- c("X", "SIM", "NAO")
# ==============================================================================
# STACKED BAR CHART
# ==============================================================================
plot_ly(quantidadeTrabalhaNaArea,
x = ~X, source = "sourceTrabalhaNaArea"
) %>%
# "NAO" trace is added first so it sits at the bottom of the stack.
add_trace(
type = "bar",
y = ~NAO, name = "NAO", hoverinfo = "text", text = ~paste(NAO, "evadido(s)"),
textfont = list(color = '#FFFFFF', size = 14),
textposition = "inside",
marker = list(
color = c("rgb(163, 21, 16)"),
line = list(color = c("rgb(99, 9, 9)"), width = 2)
),
width = .5
) %>%
add_trace(
type = "bar",
y = ~SIM, name = "SIM", hoverinfo = "text", text = ~paste(SIM, "evadido(s)"),
textfont = list(color = '#FFFFFF', size = 14),
textposition = "inside",
marker = list(
color = c("rgb(35, 101, 131)"), line = list(color = c("rgb(13, 60, 81)"), width = 2)
),
width = .5
) %>%
layout(yaxis = list(title = "Quantidade de evadidos"), xaxis = list(title = ""), showlegend = T, barmode = "stack")
}
|
470a58b6e57ffbd6a7affc6bfa489a4c0dc5295c
|
c529e1776d0073d1c122ee88def416f6f69d6c87
|
/man/stat_qq.Rd
|
e04b6b799eb5ad37f930fad5217d4d5c62ead386
|
[] |
no_license
|
lixinyao/ggplot2
|
db919577d8c53bc0522b7c47d9d56cd10ff28452
|
7be4c8944bca845c9b9e189ec8c44231f6b4dc2b
|
refs/heads/master
| 2021-01-18T18:22:59.926529
| 2016-01-31T22:22:40
| 2016-01-31T22:22:40
| 50,810,838
| 1
| 0
| null | 2016-02-01T03:18:43
| 2016-02-01T03:18:43
| null |
UTF-8
|
R
| false
| true
| 2,727
|
rd
|
stat_qq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-qq.r
\name{stat_qq}
\alias{geom_qq}
\alias{stat_qq}
\title{Calculation for quantile-quantile plot.}
\usage{
stat_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
geom_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA, inherit.aes = TRUE)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set
at the layer level if you are overriding the plot defaults.}
\item{data}{A layer specific dataset - only needed if you want to override
the plot defaults.}
\item{geom}{The geometric object to use display the data}
\item{position}{The position adjustment to use for overlapping points
on this layer}
\item{...}{other arguments passed on to \code{\link{layer}}. This can
include aesthetics whose values you want to set, not map. See
\code{\link{layer}} for more details.}
\item{distribution}{Distribution function to use, if x not specified}
\item{dparams}{Additional parameters passed on to \code{distribution}
function.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
}
\description{
Calculation for quantile-quantile plot.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "qq")}
}
\section{Computed variables}{
\describe{
\item{sample}{sample quantiles}
\item{theoretical}{theoretical quantiles}
}
}
\examples{
\donttest{
df <- data.frame(y = rt(200, df = 5))
p <- ggplot(df, aes(sample = y))
p + stat_qq()
p + geom_point(stat = "qq")
# Use fitdistr from MASS to estimate distribution params
params <- as.list(MASS::fitdistr(df$y, "t")$estimate)
ggplot(df, aes(sample = y)) +
stat_qq(distribution = qt, dparams = params["df"])
# Using to explore the distribution of a variable
ggplot(mtcars) +
stat_qq(aes(sample = mpg))
ggplot(mtcars) +
stat_qq(aes(sample = mpg, colour = factor(cyl)))
}
}
|
4e0e2c94d72873dc53c4c4767e57ab13ccb57960
|
d1129b1d416e283a9c5bc6e9f57d407b3afa06c1
|
/R/as_numeric_dot_default.R
|
6e846a136aa0c06ce3b92eb8d620448597b15f70
|
[] |
no_license
|
yaboody/my_package
|
bcc8794217e08e8e9b5f2b05aefc3f05c014e1fd
|
f2b82a86e58be4fbf2962805ce7171989d955fba
|
refs/heads/master
| 2022-11-18T11:25:33.787707
| 2020-07-09T19:02:12
| 2020-07-09T19:02:12
| 278,447,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
as_numeric_dot_default.R
|
#' Default Numeric Object Coercer
#'
#' @description By default, \code{as.numeric} works fine for coercing
#'   objects into numeric objects, so the default method simply delegates
#'   to it.
#'
#' @param x an object
#'
#' @details Just a wrapper for \code{as.numeric}. The previous
#'   implementation called \code{print()} on the result, which wrote to the
#'   console and returned the value only invisibly; an S3 method should
#'   return its result.
#'
#' @return \code{x} as a numeric object
#' @export
#'
#' @examples
#' as_numeric("4")
#' as_numeric(4)
#' as_numeric(TRUE)
as_numeric.default <- function(x) {
  as.numeric(x)
}
|
35f5ca01bf01e8380dcbf0d9bab0d9e0c493f66a
|
a4b67ea46787badabc054665407cb8b90f7e2819
|
/tests/testthat/test-load-data.R
|
8056fdc77a921f660abf58d1aa011fda694fe11b
|
[] |
permissive
|
vikwato/datim-validation
|
2745486588b70b23ee257385d1c8230ebfb8985d
|
f206c43ea7710917936c1627fa0da02ba5771832
|
refs/heads/master
| 2020-03-31T17:53:56.344120
| 2019-10-16T14:19:02
| 2019-10-16T14:19:02
| 152,438,101
| 0
| 0
|
BSD-3-Clause
| 2019-10-16T14:19:09
| 2018-10-10T14:33:42
|
R
|
UTF-8
|
R
| false
| false
| 10,589
|
r
|
test-load-data.R
|
# Unit tests for d2Parser (datim-validation): parsing CSV/JSON/XML payloads
# and validating data elements, org units, mechanisms and periods.
# Every test runs inside with_mock_api() so HTTP calls are served from
# recorded fixtures, and each test reloads the config and clears the
# "maxCacheAge" option so tests stay independent of cached metadata.
context("Parse CSV data")
with_mock_api({
test_that("We can read a CSV file coded with IDs", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-data.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
expect_type(d,"list")
expect_is(d,"data.frame")
d_names<-c("dataElement","period","orgUnit","categoryOptionCombo","attributeOptionCombo","value")
expect_identical(names(d),d_names)
})})
with_mock_api({
test_that("We can read a headerless CSV file coded with IDs", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-data-no-header.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE,
csv_header = FALSE)
expect_type(d,"list")
expect_is(d,"data.frame")
d_names<-c("dataElement","period","orgUnit","categoryOptionCombo","attributeOptionCombo","value")
expect_identical(names(d),d_names)
expect_equal(NROW(d),5)
})})
with_mock_api({
test_that("We can error when mechanisms are not coded properly", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
# Only the warning is asserted; the parsed result is intentionally discarded.
expect_warning(d2Parser(filename=test_config("test-data-bad-mechs.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE,
csv_header = FALSE))
})})
context("Parse JSON data")
with_mock_api({
test_that("We can read a JSON file coded with IDs", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-json.json"),
type="json",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
expect_type(d,"list")
expect_is(d,"data.frame")
d_names<-c("dataElement","period","orgUnit","categoryOptionCombo","attributeOptionCombo","value")
expect_identical(names(d),d_names)
})})
with_mock_api({
test_that("We can error when the JSON attributes are not correct", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_error(d2Parser(filename=test_config("test-json-bad-attributes.json"),
type="json",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE),"JSON attributes must be one of the following")
})})
context("Parse XML data")
with_mock_api({
test_that("We can read an XML file coded with IDs", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-xml.xml"),
type="xml",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
expect_type(d,"list")
expect_is(d,"data.frame")
d_names<-c("dataElement","period","orgUnit","categoryOptionCombo","attributeOptionCombo","value")
expect_identical(names(d),d_names)
})})
with_mock_api({
test_that("We can error when the XML attributes are not correct", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_error(d2Parser(filename=test_config("test-xml-bad-attributes.xml"),
type="xml",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE),"XML attributes must be one of the following")
})})
context("Can error on a wrong file type")
with_mock_api({
test_that("We can create an error on a bad file type", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_error(d2Parser(filename=test_config("test-xml.xml"),
type="foo",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE))
})})
context("Can error on a wrong period identifier")
with_mock_api({
test_that("We can create an error on a file with a bad period", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-data-bad-periods.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
expect_error(checkPeriodIdentifiers(d))
})})
context("Can return bad mechanism/period association")
with_mock_api({
test_that("We can create an warning for an invalid/mechanism period association", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-data-bad-periods-mechanisms.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
# With return_violations=TRUE the offending rows come back as a data frame.
expect_warning(bad_mechs<-checkMechanismValidity(d,organisationUnit="KKFzPM8LoXs", return_violations=TRUE),"Invalid mechanisms found!")
expect_type(bad_mechs,"list")
expect_is(bad_mechs,"data.frame")
bad_mechs_names<-c("attributeOptionCombo","period","startDate","endDate","periodType","code","startDate_mech","endDate_mech","is_valid")
expect_setequal(names(bad_mechs),bad_mechs_names)
})})
context("Can warn on bad mechanism/period associations")
with_mock_api({
test_that("We can create an warning for an invalid/mechanism period association", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
d<-d2Parser(filename=test_config("test-data-bad-periods-mechanisms.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE)
# With return_violations=FALSE only the warning is raised; result is NULL.
expect_warning(bad_mechs<-checkMechanismValidity(d,organisationUnit="KKFzPM8LoXs", return_violations=FALSE),"Invalid mechanisms found!")
expect_null(bad_mechs)
})})
context("Can error on an invalid orgunit UID")
with_mock_api({
test_that("We can create an error for an invalid organisation unit identifier", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_warning(foo<-d2Parser(filename=test_config("test-data-bad-ou-uid.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE), "The following org unit identifiers could not be found:SiuNE0ywCW4")
expect_false(foo$is_valid)
expect_type(foo,"list")
})})
context("Can error on an invalid data element UID")
with_mock_api({
test_that("We can create an error for an invalid data element identifier", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_warning(foo<-d2Parser(filename=test_config("test-data-bad-de-uid.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE), "The following data element identifiers could not be found:SiuNE0ywCW4")
expect_false(foo$is_valid)
expect_type(foo,"list")
})})
context("Can error on an invalid attribute option combo UID")
with_mock_api({
test_that("We can create an error for an invalid attribute option combo identifier", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_warning(foo<-d2Parser(filename=test_config("test-data-bad-acoc-uid.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE), "The following attribute option combo identifiers could not be found:SiuNE0ywCW4")
expect_false(foo$is_valid)
expect_type(foo,"list")
})})
context("Can warn on a missing values")
with_mock_api({
test_that("Can warn on a missing data value", {
config <- LoadConfigFile(test_config("test-config.json"))
options("maxCacheAge"=NULL)
expect_type(config,"list")
expect_warning(foo<-d2Parser(filename=test_config("test-data-missing-value.csv"),
type="csv",
organisationUnit = "KKFzPM8LoXs",
dataElementIdScheme = "id",
orgUnitIdScheme = "id",
idScheme = "id",
invalidData = FALSE), "1 rows are incomplete. Please check your file to ensure its correct.")
})})
|
5cece9d1c8c4e9a7ac63ca6a58a79c6693ee5ba1
|
1492ba730dd6c25d527b4c66a6a9c46357cdedb8
|
/man/track_circos.Rd
|
df7132be6ae18b49b61b5a621c0d545f9d7d2000
|
[] |
no_license
|
tankbuild/postpsassR
|
11417d7bddce3bc27f2386a97251dea82d7dfea7
|
c2d135ab4bc0313e37f9c44dd7c674161479a5e9
|
refs/heads/master
| 2020-05-21T10:46:38.483966
| 2019-05-10T16:56:25
| 2019-05-10T16:56:25
| 186,015,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 810
|
rd
|
track_circos.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw_circos_plot.R
\name{track_circos}
\alias{track_circos}
\title{Drawing circos track}
\usage{
track_circos(data, track_label, bg.col = "white", ylim = c(0, 1.025 *
max(data[[3]]) + 0.01), top.track = FALSE, point.size = 0.1,
color.point = "black", sector.names = NULL,
sector.titles.expand = 1.3, sectors = NULL)
}
\arguments{
\item{data}{Generated by load_data_files function}
\item{track_label}{Label for track}
\item{bg.col}{background color(default: "white")}
\item{top.track}{to check whether is the top track (defaul: FALSE)}
\item{point.size}{point size (default: 0.1)}
\item{color.point}{point color (default: "black")}
\item{sector.names}{sector names (default: NULL)}
}
\description{
Drawing circos track
}
|
0316ee40e8ef23a52d58e6b9e32bf4571fdb46b4
|
c5b441921f14d4ed8faa4c8770a069d6c800ee7c
|
/Flight_duration.R
|
afcab51223a48cbbf09325f06f76f27b68354b14
|
[] |
no_license
|
judyh97/Timezone_conversion
|
7e5dd5ea3ad5680d4336a2a4a33c54e93373aa6f
|
11d7de2fd32b9d62295dfc544851930b612f0dbb
|
refs/heads/master
| 2020-12-02T22:08:23.421084
| 2017-07-03T07:21:25
| 2017-07-03T07:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
Flight_duration.R
|
#' Calculate flight duration.
#'
#' @param arrival_time Arrival time at the destination.
#' @param arrival_tz Timezone of the destination.
#' @param departure_time Departure time at the origin (defaults to now).
#' @param departure_tz Timezone of the origin (defaults to the local timezone).
#' @return A \code{difftime}: the time elapsed between the departure time
#'   and the arrival time.
#' @examples
#' flight_duration("2017-08-20 10:30", "Asia/Hong_Kong", "2017-08-19 09:00", "Africa/Johannesburg")
#' flight_duration("2017-08-20 10:30", "Asia/Hong_Kong")
#' @export
flight_duration <- function(arrival_time, arrival_tz, departure_time = Sys.time(), departure_tz = Sys.timezone()) {
  # Parse each timestamp in its own timezone; POSIXct stores an absolute
  # instant, so subtracting gives the true elapsed time directly.
  #
  # Fixes vs. the original: (1) a top-level `options(warn = -1)` globally
  # disabled warnings for the caller's whole session (and sat between the
  # roxygen block and the function, so roxygen documented `options()`
  # instead of this function) -- removed; (2) both instants were
  # round-tripped through formatted strings and re-parsed in the *local*
  # timezone, which could mis-handle DST boundaries -- now computed from
  # the POSIXct values themselves.
  arrival <- as.POSIXct(arrival_time, tz = arrival_tz)
  departure <- as.POSIXct(departure_time, tz = departure_tz)
  arrival - departure
}
|
1aee876d0fa2a7af3934b95953081bfee633e4bb
|
5d7d36ca81276b1a858d423584e43a1f2c58f12a
|
/archive/comp.plot.gene.R
|
22bcb947daea25617e1d97514bd76fa353e90d0d
|
[
"BSD-2-Clause"
] |
permissive
|
orionzhou/rmaize
|
592778ee81f3d0d6a8dc00dee46cd27d1d610d05
|
8970eecbf8ebe9deab4321c73503d41b334683ea
|
refs/heads/master
| 2022-01-20T07:11:57.528459
| 2022-01-10T20:38:21
| 2022-01-10T20:38:21
| 159,260,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,178
|
r
|
comp.plot.gene.R
|
# Plot genes within a genomic interval read from an Excel loci list,
# using ortholog/score/cluster tables keyed off environment variables
# ($misc3, $genome).
# NOTE(review): `require()` is used for hard dependencies -- it returns
# FALSE instead of erroring if a package is missing.
require(rtracklayer)
require(xlsx)
source("comp.fun.R")
source("comp.plot.fun.R")
dirw = file.path(Sys.getenv("misc3"), 'comp.plot.gene')
# Loci of interest: one row per region (chr/beg/end) to plot.
fl = file.path(dirw, 'loci.xlsx')
tl = read.xlsx(fl, sheetIndex = 1, header = T, stringsAsFactors = F)
fid = file.path(Sys.getenv("misc3"), "comp.ortho.hm", "01.ids.tbl")
tid = read.table(fid, header = T, sep = "\t", as.is = T)
fds = file.path(Sys.getenv("misc3"), "comp.ortho.hm", "12.score.tbl")
tds = read.table(fds, header = T, sep = "\t", as.is = T)
# Gene annotation table for the HM101 reference genome.
fg = file.path(Sys.getenv("genome"), "HM101", "51.gtb")
tg = read.table(fg, header = T, sep = "\t", as.is = T)
fr = file.path(Sys.getenv("misc3"), "comp.og/05.clu/32.tbl")
tr = read.table(fr, header = T, sep = "\t", as.is = T)
# Cluster ids have the form "<org>-<gid>"; split into separate columns.
orgs = sapply(strsplit(tr$id, split="-"), "[", 1)
gids = sapply(strsplit(tr$id, split="-"), "[", 2)
tr = cbind(tr, org = orgs, gid = gids)
#####
source("comp.plot.fun.R")
# Plot the first locus only (i = 1); genes fully contained in the interval.
i = 1
chr = tl$chr[i]; beg = tl$beg[i]; end = tl$end[i]
gidxs = which(tg$chr == chr & tg$beg >= beg & tg$end <= end)
fn = sprintf("%s/fig%03d.pdf", dirw, i)
# NOTE(review): `res` (res$ht, res$grobs) is never defined in this file --
# presumably a call to a plotting function from comp.plot.fun.R was
# dropped; this will fail as-is. CairoPDF also needs library(Cairo),
# which is not loaded here -- confirm both against the full pipeline.
CairoPDF(file = fn, width = 7, height = res$ht/72, bg = 'transparent')
grid.newpage()
grid.draw(res$grobs)
dev.off()
|
01d94e9d41e6ac25835ecbdb5da7514d3aba155b
|
80e29403e7e9b3dcb11a076a31a930ce0a89b132
|
/hate_crime_EDA.R
|
ba3cbb289d0378282e8addcdf408f2a3b6ea82d3
|
[] |
no_license
|
tvanichachiva/Hate_Crime_EDA
|
2882f0cd7b427f8c032d30633c7f601232d8eb63
|
822fc1b7adf1d00f1a7d5efc944bcd09d2572dd7
|
refs/heads/master
| 2020-03-23T19:02:29.417321
| 2018-07-23T02:27:49
| 2018-07-23T02:27:49
| 141,950,225
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,751
|
r
|
hate_crime_EDA.R
|
# Hate Crime EDA ----
# Choropleth of average hate crimes per 100k (FBI, via fivethirtyeight)
# for the 12 Midwest states.
library(tidyverse)
library(urbnmapr)
library(fivethirtyeight)

hc <- fivethirtyeight::hate_crimes

# Join fivethirtyeight data with mapping data.
state <- urbnmapr::states
names(state)[names(state) == "state_name"] <- "state" # match hc's key column
hc_state <- left_join(state, hc, by = "state")

# Midwest states.
midwest <- c("Illinois", "Indiana", "Iowa", "Kansas", "Michigan",
             "Minnesota", "Missouri", "Nebraska", "North Dakota",
             "Ohio", "South Dakota", "Wisconsin")

hc_viz <- hc_state %>%
  # BUG FIX: the original used `state == midwest`, which recycles the
  # 12-element vector against the column and keeps a near-arbitrary subset of
  # rows. `%in%` performs the intended set-membership filter.
  filter(state %in% midwest) %>%
  ggplot(aes(long, lat,
             group = group,
             fill = avg_hatecrimes_per_100k_fbi)) +
  scale_fill_continuous(high = "#ff0000", low = "#ffdab9") +
  geom_polygon(color = "#ffffff") +
  coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
  # Strip axes/grid so only the map and legend remain.
  theme(axis.title.x = element_blank(),
        axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        legend.position = "bottom",
        legend.background = element_rect(size = 0.5, linetype = "solid", colour = "black"),
        plot.title = element_text(face = "bold", hjust = .5),
        plot.caption = element_text(face = "italic", hjust = 1.45)) +
  labs(fill = "Average Hate Crimes per 100,000 people",
       caption = "Based on data from the FBI aggregated by fivethirtyeight") +
  ggtitle("Average Hate Crimes in the Midwest 2010-2015")
hc_viz
|
443c2ed5c962a418d76c6fc9c9fa5278ee06ee32
|
6e6202e97b13bead3f40ab7a141c2bc4fe8e9345
|
/sr-ch12.R
|
1892114e1fb6e5c28f11368679b98a33c7832355
|
[] |
no_license
|
loudermilk/bayesian-stats
|
24b074d9b3775a2e193acb509c66b8ba3550417b
|
ca9840314183423e15ee80666b97fac43ee0b4d1
|
refs/heads/master
| 2021-01-09T20:13:57.391662
| 2016-09-19T12:46:39
| 2016-09-19T12:46:39
| 62,754,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,998
|
r
|
sr-ch12.R
|
## sr-ch12.R
## Chapter 12 - Multilevel Models
## Remember features of each cluster in the data as they learn about all
## the clusters.
## (1) Improved estimates for repeat sampling - when more than one observation
## arises from the same indiv, loc, or time, then traditional single-level
## models either maximally underfit or underfit the data.
## (2) Improved estimates for imbalance in sampling - when some indiv, loc, or
## time are sampled more than others, multilevel models cope with differing
## uncertainty across these clusters. This prevents over-sampled clusters from
## unfairly dominating inference.
## (3) Estimates of variation. If RQ include variation among indiv or other grps
## in data , then multilevel models help bc the model variation explicitly.
## (4) Avoid averaging, retain variation - pre-averaging data to construct
## variables can be dangerous bc it removes variation.
## NOTE(review): this script depends on the `rethinking` package (and Stan);
## `map` below is rethinking::map (MAP estimation), not purrr::map.
## 12.1 Multilevel tadpoles
library(rethinking)
data(reedfrogs)
d <- reedfrogs
str(d)
head(d)
## VARYING INTERCEPTS MODEL - multilevel model in which we simultaneously estimate both an intercept for
## each tank and the variation among tanks
## learn the prior that is common to all the modeled intercepts
## make the tank cluster variable
d$tank <- 1:nrow(d)
## fit: fixed-prior model, one independent intercept per tank
m12.1 <- map(
alist(
surv ~ dbinom(density, p),
logit(p) <- a_tank[tank],
a_tank[tank] ~ dnorm(0,5)
), data = d
)
precis(m12.1, depth = 2)
## HYPERPARAMETERS - parameters for parameters
## multilevel version: tank intercepts share an adaptive prior dnorm(a, sigma)
m12.2 <- map2stan(
alist(
surv ~ dbinom(density, p),
logit(p) <- a_tank[tank],
a_tank[tank] ~ dnorm(a, sigma),
a ~ dnorm(0,1),
sigma ~ dcauchy(0,1)
), data = d, iter = 4000, chains = 4
)
compare(m12.1, m12.2)
## POOLING - each tank provides information that can be used to improve the estimates
## for all of the other tanks
post <- extract.samples(m12.2)
## plot 100 draws of the population distribution of tank log-odds survival
plot(NULL, xlim=c(-3,4), ylim=c(0,0.35),
xlab="log-odds survive", ylab="density")
for (i in 1:100) {
curve(dnorm(x, post$a[i], post$sigma[i]), add = T, col=col.alpha("black", 0.2))
}
# sample imaginary tank from post dist
sim_tanks <- rnorm(8000, post$a, post$sigma)
dens(logistic(sim_tanks), xlab = "probability survive")
## 12.2 Varying effects and the underfitting/overfitting trade-off
## Varying intercepts are just regularized estimates but adaptively regularized by
## estimating how diverse the clusters are while estimating the features of each
## cluster
## (1) complete pooling - assume that the population of the ponds is invariant,
## the same as estimating a common intercept for all ponds.
## (2) no pooling - assume each pond tells us nothing about any other pond
## (3) partial pooling - using an adaptive regularizing prior
## 12.2.1 The model
## multilevel binomial model with ponds instead of tanks
## 12.2.2 Assign values to the parameters (true values used for simulation)
a <- 1.4
sigma <- 1.5
nponds <- 60
## 15 ponds at each of 4 sample sizes (5, 10, 25, 35 tadpoles)
ni <- as.integer(rep(c(5,10,25,35),each=15))
a_pond <- rnorm(nponds, mean=a, sd=sigma)
dsim <- data.frame(pond=1:nponds, ni=ni, true_a=a_pond)
head(dsim)
tail(dsim)
## simulate survival process
dsim$si <- rbinom(nponds,prob=logistic(dsim$true_a), size = dsim$ni)
## 12.2.4 Compute the no-pooling estimates (raw survival proportions)
dsim$p_nopool <- dsim$si/dsim$ni
## 12.2.5 Compute the partial-pooling estimates
m12.3 <- map2stan(
alist(
si ~ dbinom(ni, p),
logit(p) <- a_pond[pond],
a_pond[pond] ~ dnorm(a, sigma),
a ~ dnorm(0,1),
sigma ~ dcauchy(0,1)
), data = dsim, iter=1e4, warmup = 1000
)
precis(m12.3, depth = 2)
## 60 estimated intercept parameters
estimated.a_pond <- as.numeric(coef(m12.3)[1:60])
dsim$p_partpool <- logistic(estimated.a_pond)
dsim$p_true <- logistic(dsim$true_a)
## absolute error of each estimator vs the simulated truth
nopool_error <- abs(dsim$p_nopool - dsim$p_true)
partpool_error <- abs(dsim$p_partpool - dsim$p_true)
plot(1:60, nopool_error, xlab="pond", ylab="abs error", col=rangi2, pch=16)
points(1:60, partpool_error)
## 12.3 More than one type of cluster
library(rethinking)
data(chimpanzees)
d <- chimpanzees
str(d)
head(d)
d$recipient <- NULL # get rid of NAs
# Varying-intercept model with a single cluster type (actor).
m12.4 <- map2stan(
  alist(
    pulled_left ~ dbinom(1,p),
    logit(p) <- a + a_actor[actor] + (bp + bpC*condition)*prosoc_left,
    a_actor[actor] ~ dnorm(0,sigma_actor),
    a ~ dnorm(0,10),
    bp ~ dnorm(0,10),
    bpC ~ dnorm(0,10),
    sigma_actor ~ dcauchy(0,1)
  ), data = d, chains = 2, cores = 3, iter = 5000, warmup = 1000
)
plot(m12.4)
precis(m12.4)
post <- extract.samples(m12.4)
# Total intercept per actor = grand mean + actor offset.
total_a_actor <- sapply(1:7, function(actor) post$a + post$a_actor[,actor])
round(apply(total_a_actor,2,mean),2)
## 12.3.2 Two types of cluster (add block)
## fit model that uses both actor and block
# BUG FIX: the original read `d$block_id < d$block` — a `<` comparison whose
# result was discarded — so `block_id` was never created and m12.5 failed.
d$block_id <- d$block # name `block` is reserved by stan
m12.5 <- map2stan(
  alist(
    pulled_left ~ dbinom(1, p),
    logit(p) <- a + a_actor[actor] + a_block[block_id] + (bp + bpc*condition)*prosoc_left,
    a_actor[actor] ~ dnorm(0, sigma_actor),
    a_block[block_id] ~ dnorm(0, sigma_block),
    c(a,bp, bpc) ~ dnorm(0,10),
    sigma_actor ~ dcauchy(0,1),
    sigma_block ~ dcauchy(0,1)
  ), data = d, warmup = 1000, iter = 6000, chains = 1, cores = 1
)
## 12.4 Multilevel posterior predictions
## MODEL CHECKING - compare the sample to the posterior predictions of the fit model
## producing implied predictions from a fit model is helpful for understanding
## what the model means. INFORMATION CRITERIA (like DIC & WAIC) provide simple estimates
## of out-of-sample model accuracy, like the KL divergence. IC provide rough measure of
## a model's flexibility and therefore overfitting risk.
## in chimpanzees there are 7 unique actors - these are clusters.
## NOTE(review): this section reuses m12.4 and d from section 12.3 above.
precis(m12.4, depth=2)
## the whole pt of partial pooling is to shrink estimates towards the grand mean
chimp <- 2
## prediction grid: the 4 treatment combinations for actor 2
d.pred <- list(
prosoc_left = c(0,1,0,1),
condition = c(0,0,1,1),
actor = rep(chimp, 4)
)
d.pred
link.m12.4 <- link(m12.4, data = d.pred)
pred.p <- apply(link.m12.4, 2, mean)
pred.p.PI <- apply(link.m12.4, 2, PI)
par(mfrow=c(1,1))
plot(0,0,type='n', xlab="prosoc_left/condition", ylab="proportion pulled left", ylim=c(0,1),xaxt="n", xlim=c(1,4))
axis(1,at=1:4, labels = c("0/0","1/0", "0/1","1/1"))
## observed proportions per treatment x actor, overplotted in blue
p <- by(d$pulled_left, list(d$prosoc_left, d$condition, d$actor), mean)
for (chimp in 1:7) {
lines(1:4, as.vector(p[,,chimp]), col=rangi2, lwd = 1.5)
}
lines(1:4, pred.p)
shade(pred.p.PI, 1:4)
post <- extract.samples(m12.4)
str(post)
dens(post$a_actor[,5])
## hand-rolled link function: p for a given treatment and actor
p.link <- function(prosoc_left, condition, actor) {
logodds <- with(post,
a + a_actor[,actor] + (bp +bpC * condition)*prosoc_left
)
return(logistic(logodds))
}
## compute predictions
prosoc_left <- c(0,1,0,1)
condition <- c(0,0,1,1)
pred.raw <- sapply(1:4, function(i) p.link(prosoc_left[i], condition[i], 2))
pred.p <- apply(pred.raw, 2, mean)
pred.p.PI <- apply(pred.raw, 2, PI)
## 12.4.2 Posterior prediction for new clusters
## often the particular clusters in the sample are not of any enduring interest - in the
## chimpanzee data, for example, we'd like to make inferences about the population, so
## the actor intercepts are not of interest.
## imagine leaving out one of the clusters when you fit the data. Use the a and sigma_actor
## parameters because they describe the population of actors.
## how to construct posterior predictions for a now, previously unobserved average actor.
## by average, I mean a chimp with an intercept exactly at the mean a.
library(rethinking)
data(chimpanzees)
d <- chimpanzees
str(d)
head(d)
d$recipient <- NULL #get rid of NAs
## refit m12.4 (same model as section 12.3)
m12.4 <- map2stan(
alist(
pulled_left ~ dbinom(1,p),
logit(p) <- a + a_actor[actor] + (bp + bpC*condition)*prosoc_left,
a_actor[actor] ~ dnorm(0,sigma_actor),
a ~ dnorm(0,10),
bp ~ dnorm(0,10),
bpC ~ dnorm(0,10),
sigma_actor ~ dcauchy(0,1)
), data = d, chains = 2, cores = 3, iter = 5000, warmup = 1000
)
plot(m12.4)
precis(m12.4)
d.pred <- list(
prosoc_left = c(0,1,0,1),
condition = c(0,0,1,1),
actor = rep(2,4)
)
## replace varying intercept samples w zeros (the "average actor")
## 1000 samples by 7 actors
a_actor_zeros <- matrix(0,1000,7)
## fire up link
link.m12.4 <- link(m12.4, n = 1000, data=d.pred, replace = list(a_actor=a_actor_zeros))
## summarize & plot
pred.p.mean <- apply(link.m12.4, 2, mean)
pred.p.PI <- apply(link.m12.4, 2, PI, prob=0.8)
par(mfrow=c(1,1))
plot(0,0,type="n",xlab="prosoc_left/condition",
ylab="proportion pulled left", ylim=c(0,1),xaxt="n",xlim=c(1,4))
axis(1, at = 1:4, labels=c("0/0","1/0", "0/1", "1/1"))
lines(1:4, pred.p.mean)
shade(pred.p.PI, 1:4)
## to show variation among actors use sigma_actor in the calculation
## replace varying intercept samples with simulations
post <- extract.samples(m12.4)
## 7000 draws reshaped into 1000 samples x 7 actors
a_actor_sims <- rnorm(7000, 0, post$sigma_actor)
a_actor_sims <- matrix(a_actor_sims, 1000, 7)
## pass simulated intercepts into link
link.m12.4 <- link(m12.4, n = 1000, data = d.pred, replace = list(a_actor=a_actor_sims))
## summarize & plot
pred.p.mean <- apply(link.m12.4, 2, mean)
pred.p.PI <- apply(link.m12.4, 2, PI, prob=0.8)
par(mfrow=c(1,1))
plot(0,0,type="n",xlab="prosoc_left/condition",
ylab="proportion pulled left", ylim=c(0,1),xaxt="n",xlim=c(1,4))
axis(1, at = 1:4, labels=c("0/0","1/0", "0/1", "1/1"))
lines(1:4, pred.p.mean)
shade(pred.p.PI, 1:4)
## simulate a new actor from the estimated population of actors and then
## computes probabilities of pulling the left lever for each of the 4 treatments
post <- extract.samples(m12.4)
sim.actor <- function(i) {
sim_a_actor <- rnorm(1,0,post$sigma_actor[i])
P <- c(0,1,0,1)
C <- c(0,0,1,1)
p <- logistic(
post$a[i] +
sim_a_actor +
(post$bp[i] + post$bpC[i]*C)*P
)
return(p)
}
plot(0,0,type="n",xlab="prosoc_left/condition",
ylab="proportion pulled left", ylim=c(0,1),xaxt="n",xlim=c(1,4))
axis(1, at = 1:4, labels=c("0/0","1/0", "0/1", "1/1"))
# plot 50 simulated actors
for (i in 1:50) lines(1:4,sim.actor(i), col=col.alpha("black", 0.5))
## 12.4.3 Focus and multilevel prediction
## multilevel models contain parameters with different FOCUS - i.e. which
## level of the model the parameter makes direct predictions for.
## (1) when retrodicting the sample, the parameters that describe the population
## of clusters do not influence prediction directly. These population parameters
## are called HYPERPARAMETERS, as they are parameters for parameters and they have
## their effects during estimation by shrinking the varying effect parameters
## towards a common mean.
## (2) the same is true when forecasting a new observation for a cluster that was
## present in the sample.
## (3) when we wish to forecast for some new (unseen) cluster, we need the
## hyperparameters as they tell us how to forecast a new cluster by generating
## a distribution of new per-cluster intercepts.
## over-dispersed Poisson model
library(rethinking)
data(Kline)
d <- Kline
str(d)
d$logpop <- log(d$population)
d$society <- 1:10
m12.6 <- map2stan(
alist(
total_tools ~ dpois(mu),
log(mu) <- a + a_society[society] + bp*logpop,
a ~ dnorm(0,10),
bp ~ dnorm(0,1),
a_society[society] ~ dnorm(0,sigma_society),
sigma_society ~ dcauchy(0,1)
), data = d, iter=4000, chains=3
)
## to see the general trend that the model expects we need to simulate
## counterfactual societies using hyperparameters alpha and sigma_society
post <- extract.samples(m12.6)
d.pred <- list(
logpop = seq(from=6, to=14, length.out=30),
society = rep(1,30)
)
## 20000 draws reshaped into 2000 samples x 10 societies
a_society_sims <- rnorm(20000,0,post$sigma_society)
a_society_sims <- matrix(a_society_sims, 2000, 10)
link.m12.6 <- link(m12.6, n=2000, data = d.pred, replace = list(a_society=a_society_sims))
plot(d$logpop, d$total_tools, col=rangi2, pch=16, xlab="log population", ylab="total tools")
mu.median <- apply(link.m12.6, 2, median)
lines(d.pred$logpop, mu.median)
## nested prediction intervals at 97%, 89%, 67%
mu.PI <- apply(link.m12.6, 2, PI, prob=.97)
shade(mu.PI, d.pred$logpop)
mu.PI <- apply(link.m12.6, 2, PI, prob=.89)
shade(mu.PI, d.pred$logpop)
mu.PI <- apply(link.m12.6, 2, PI, prob=.67)
shade(mu.PI, d.pred$logpop)
|
41f7403e3657e7ff8dad1545f1de69d9452b204b
|
03fb214812a36c4408fd59107b333f144f4de1f8
|
/man/SharpeRatio.deflated.Rd
|
0f85f2ce2d2a970d1fa43de1cab2c5a8cadc19b7
|
[] |
no_license
|
braverock/quantstrat
|
e8a911fac4fd73d1dc6623706a3bcbec72c81d69
|
3e660300b322bb63dcb7659a26304fe4e8d4a693
|
refs/heads/master
| 2023-02-07T16:28:50.251525
| 2023-02-04T20:29:30
| 2023-02-04T20:29:30
| 58,736,659
| 282
| 132
| null | 2022-06-25T02:20:08
| 2016-05-13T12:07:31
|
R
|
UTF-8
|
R
| false
| true
| 2,401
|
rd
|
SharpeRatio.deflated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deflated.Sharpe.R
\name{deflatedSharpe}
\alias{deflatedSharpe}
\alias{SharpeRatio.deflated}
\alias{.deflatedSharpe}
\title{Calculate a Deflated Sharpe Ratio using number of trials and portfolio moments}
\usage{
deflatedSharpe(
portfolios,
...,
strategy = NULL,
trials = NULL,
audit = NULL,
env = .GlobalEnv
)
.deflatedSharpe(
sharpe,
nTrials,
varTrials,
skew,
kurt,
numPeriods,
periodsInYear = 252
)
}
\arguments{
\item{portfolios}{string name of portfolio, or optionally a vector of portfolios, see DETAILS}
\item{...}{any other passthrough parameters}
\item{strategy}{optional strategy specification that would contain more information on the process, default NULL}
\item{trials}{optional number of trials, default NULL}
\item{audit}{optional audit environment containing the results of parameter optimization or walk forward, default NULL}
\item{env}{optional environment to find market data in, if required.}
\item{sharpe}{candidate (annualized) Sharpe Ratio}
\item{nTrials}{numeric number or trials}
\item{varTrials}{variance of Sharpe ratios of the trials}
\item{skew}{skewness of the candidate}
\item{kurt}{non-excess kurtosis}
\item{numPeriods}{total periods in the backtest}
\item{periodsInYear}{number of periods in a year, default 252 (daily)}
}
\value{
a \code{data.frame} containing:
\itemize{
\item{original observed Sharpe ratio}
\item{deflated Sharpe ratio}
\item{p-value of the deflated Sharpe ratio}
\item{number of trials used for adjustment}
}
this object may change in the future, and may be classed so that we can include more information
}
\description{
Per Bailey and Lopez de Prado (2014), construct a Deflated Sharpe Ratio and
associated p-value based on an observed Sharpe ratio and information drawn
from a series of trials (e.g. parameter optimization or other strategies tried
before the candidate strategy)
}
\references{
Bailey, David H, and Marcos Lopez de Prado. 2014. "The Deflated Sharpe Ratio:
Correcting for Selection Bias, Backtest Overfitting and Non-Normality."
Journal of Portfolio Management 40 (5): 94-107.
http://www.davidhbailey.com/dhbpapers/deflated-sharpe.pdf
https://quantstrattrader.wordpress.com/2015/09/24/
}
\seealso{
\code{\link{SharpeRatio.haircut}}
}
\author{
Brian G. Peterson
Ilya Kipnis, Brian G. Peterson
}
|
0f067ff3cbd80f806e6e8e9fb6764aa8aabb8ab2
|
fe77966452f50926681c2790aad028666e41e0a8
|
/R/make_plot_gwas_catalog.R
|
46ffeeb966d3773a3c64cc52e2101760800bf012
|
[
"MIT"
] |
permissive
|
tbaghfalaki/CheckSumStats
|
282ddd7e24fccf06ffac7e0d9617819cd96a68b8
|
6ab4f518341536a5ab322cfa53c26c8395f730e6
|
refs/heads/main
| 2023-08-13T01:54:49.378641
| 2021-09-21T16:41:28
| 2021-09-21T16:41:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,755
|
r
|
make_plot_gwas_catalog.R
|
#' Plot comparing the test study to the GWAS catalog
#'
#' Make a plot comparing signed Z scores, or effect allele frequency, between the test dataset and the GWAS catalog, in order to identify effect allele meta data errors
#'
#' @param dat the test dataset of interest
#' @param beta name of the column containing the SNP effect size
#' @param se name of the column containing the standard error for the SNP effect size.
#' @param plot_type compare Z scores or effect allele frequency? For comparison of Z scores set plot_type to "plot_zscores". For comparison of effect allele frequency set to "plot_eaf". Default is set to "plot_zscores"
#' @param trait the trait of interest
#' @param efo_id ID for trait of interest in the experimental factor ontology
#' @param efo trait of interest in the experimental factor ontology
#' @param gwas_catalog_ancestral_group restrict the comparison to these ancestral groups in the GWAS catalog. Default is set to (c("European","East Asian")
#' @param force_all_trait_study_hits force the plot to include GWAS hits from the outcome study if they are not in the GWAS catalog? This should be set to TRUE only if dat is restricted to GWAS hits for the trait of interest. This is useful for visualising whether the outcome/trait study has an unusually large number of GWAS hits, which could, in turn, indicate that the summary statistics have not been adequately cleaned.
#' @param exclude_palindromic_snps should the function exclude palindromic SNPs? default set to TRUE. If set to FALSE, then conflicts with the GWAS catalog could reflect comparison of different reference strands.
#' @param distance_threshold distance threshold for deciding if the GWAS hit in the test dataset is present in the GWAS catalog. For example, a distance_threshold of 25000 means that the GWAS hit in the test dataset must be within 25000 base pairs of a GWAS catalog association, otherwise it is reported as missing from the GWAS catalog.
#' @param legend include legend in plot. Default TRUE
#' @param Title plot title
#' @param Title_size_subplot size of title
#' @param Ylab label for Y axis
#' @param Xlab label for X axis
#' @param Title_xaxis_size size of x axis title
#' @param return_dat if TRUE, the dataset used to generate the plot is returned to the user and no plot is made.
#'
#' @return plot
#' @export
make_plot_gwas_catalog <- function(dat = NULL, plot_type = "plot_zscores", efo_id = NULL, efo = NULL,
                                   trait = NULL, gwas_catalog_ancestral_group = c("European", "East Asian"),
                                   legend = TRUE,
                                   Title = "Comparison of Z scores between test dataset & GWAS catalog",
                                   Title_size_subplot = 10, Ylab = "Z score in test dataset",
                                   Xlab = "Z score in GWAS catalog", Title_xaxis_size = 10,
                                   force_all_trait_study_hits = FALSE, exclude_palindromic_snps = TRUE,
                                   beta = "lnor", se = "lnor_se", distance_threshold = 25000,
                                   return_dat = TRUE) {
  # Harmonise the test dataset against the GWAS catalog.
  # BUG FIX: `gwas_catalog_ancestral_group` was accepted and documented but
  # never forwarded, so the caller's choice was silently ignored and the
  # downstream default was always used.
  # (Also removed leftover debug statements that computed and discarded
  # subsets of Dat.m.)
  Dat.m <- compare_effect_to_gwascatalog(
    dat = dat, beta = beta, se = se, efo_id = efo_id, efo = efo, trait = trait,
    gwas_catalog_ancestral_group = gwas_catalog_ancestral_group,
    force_all_trait_study_hits = force_all_trait_study_hits,
    exclude_palindromic_snps = exclude_palindromic_snps,
    distance_threshold = distance_threshold
  )

  # Recode Z-score conflict levels as plot colours (default comparison mode).
  Dat.m$Z_scores[Dat.m$Z_scores == "high conflict"] <- "red"
  Dat.m$Z_scores[Dat.m$Z_scores == "moderate conflict"] <- "blue"
  Dat.m$Z_scores[Dat.m$Z_scores == "no conflict"] <- "black"
  labels_colour <- unique(Dat.m$Z_scores)
  values_colour <- unique(Dat.m$Z_scores)
  Dat.m$plot_x <- Dat.m$z.x
  Dat.m$plot_y <- Dat.m$z.y
  Dat.m$colour <- Dat.m$Z_scores
  Name <- "Effect size conflict"

  if (plot_type == "plot_eaf") {
    # Effect-allele-frequency comparison instead; rows without a catalog EAF
    # cannot be compared and are dropped.
    Dat.m <- Dat.m[!is.na(Dat.m$eaf.x), ]
    Dat.m$EAF[Dat.m$EAF == "high conflict"] <- "red"
    Dat.m$EAF[Dat.m$EAF == "moderate conflict"] <- "blue"
    Dat.m$EAF[Dat.m$EAF == "no conflict"] <- "black"
    labels_colour <- unique(Dat.m$EAF)
    values_colour <- unique(Dat.m$EAF)
    Dat.m$plot_x <- Dat.m$eaf.x
    Dat.m$plot_y <- Dat.m$eaf.y
    Dat.m$colour <- Dat.m$EAF
    Name <- "EAF conflict"
    Ylab <- "EAF in outcome study"
    Xlab <- "EAF in GWAS catalog"
    Title <- "Comparison of EAF between test dataset and GWAS catalog"
  }

  # NOTE(review): return_dat defaults to TRUE, so by default the harmonised
  # data frame is returned and no plot is drawn — confirm this is intended.
  if (return_dat) return(Dat.m)

  # Human-readable colour legend labels.
  labels_colour[labels_colour == "red"] <- "high"
  if (force_all_trait_study_hits && any(Dat.m$z.x == 0)) {
    # z.x == 0 marks forced-in test-study hits absent from the GWAS catalog.
    labels_colour[labels_colour == "high"] <- "high or not\npresent in GWAS catalog"
  }
  labels_colour[labels_colour == "blue"] <- "moderate"
  labels_colour[labels_colour == "black"] <- "none"
  Pos <- order(values_colour)
  values_colour <- values_colour[Pos]
  labels_colour <- labels_colour[Pos]

  # Point shape encodes GWAS catalog ancestral group.
  ancestry1 <- Dat.m$ancestral_group
  labels_shape <- unique(ancestry1)[order(unique(ancestry1))]
  values_shape <- labels_shape
  values_shape[values_shape == "European"] <- 15
  values_shape[values_shape == "East Asian"] <- 16
  # NOTE(review): ancestral groups other than European/East Asian become NA
  # shapes here — confirm whether more groups should be mapped.
  values_shape <- as.numeric(values_shape)

  if (is.null(Title)) {
    Title <- paste0(unique(dat$study), " | ", unique(dat$ID), " | EFO: ", efo)
  }
  Subtitle <- paste0(Dat.m$outcome, " | ", Dat.m$population)

  # Build the scatter plot once; `legend = FALSE` only hides the legend
  # (the original duplicated the whole ggplot chain for each case).
  Plot <- ggplot2::ggplot(Dat.m) +
    ggplot2::geom_point(ggplot2::aes(x = plot_x, y = plot_y, colour = colour, shape = ancestry1)) +
    ggplot2::ggtitle(Title) +
    ggplot2::labs(y = Ylab, x = Xlab, subtitle = Subtitle) +
    ggplot2::theme(plot.title = ggplot2::element_text(size = Title_size_subplot, face = "plain")) +
    ggplot2::theme(axis.title = ggplot2::element_text(size = Title_xaxis_size),
                   plot.subtitle = ggplot2::element_text(size = 8)) +
    ggplot2::scale_shape_manual(name = "GWAS catalog ancestry",
                                labels = labels_shape,
                                values = values_shape) +
    ggplot2::scale_colour_manual(name = Name,
                                 labels = labels_colour,
                                 values = values_colour) +
    ggplot2::theme(legend.title = ggplot2::element_text(size = 8),
                   legend.text = ggplot2::element_text(size = 8))
  if (!legend) {
    Plot <- Plot + ggplot2::theme(legend.position = "none")
  }
  return(Plot)
}
#' Compare the genetic effect sizes in the test dataset to the GWAS catalog
#'
#' Compare the direction of effects and effect allele frequency between the test dataset and the GWAS catalog, in order to identify effect allele meta data errors
#'
#' @param dat the test dataset of interest
#' @param beta name of the column containing the SNP effect size
#' @param se name of the column containing the standard error for the SNP effect size.
#' @param trait the trait of interest
#' @param efo_id ID for trait of interest in the experimental factor ontology
#' @param efo trait of interest in the experimental factor ontology
#' @param gwas_catalog_ancestral_group restrict the comparison to these ancestral groups in the GWAS catalog. Default is set to (c("European","East Asian")
#' @param force_all_trait_study_hits force the comparison to include GWAS hits from the test dataset if they are not in the GWAS catalog? This should be set to TRUE only if dat is restricted to GWAS hits for the trait of interest. This is useful for visualising whether the test trait study has an unusually large number of GWAS hits, which could, in turn, indicate analytical issues with the summary statistics
#' @param exclude_palindromic_snps should the function exclude palindromic SNPs? default set to TRUE. If set to FALSE, then conflicts with the GWAS catalog could reflect comparison of different reference strands.
#' @param distance_threshold distance threshold for deciding if the GWAS hit in the test dataset is present in the GWAS catalog. For example, a distance_threshold of 25000 means that the GWAS hit in the test dataset must be within 25000 base pairs of a GWAS catalog association, otherwise it is reported as missing from the GWAS catalog.
#'
#' @return dataframe
#' @export
compare_effect_to_gwascatalog<-function(dat=NULL,efo=NULL,efo_id=NULL,trait=NULL,beta=NULL,se=NULL,gwas_catalog_ancestral_group=c("European","East Asian"),exclude_palindromic_snps=TRUE,force_all_trait_study_hits=FALSE,distance_threshold=distance_threshold)
{
# exclude the MAF 1k ref set. Causes problems if you force inclusion of SNPs missing from the GWAS catalog
utils::data("refdat_1000G_superpops",envir =environment())
snps_exclude<-unique(refdat_1000G_superpops$SNP)
dat<-dat[!dat$rsid %in% snps_exclude,]
if(beta=="lnor")
{
if(!"lnor" %in% names(dat)) stop("name of beta column set to lnor but there is no column with that name")
}
if(!beta %in% names(dat)) stop(paste0("beta column not found. Check you correctly specified the name of the beta column"))
if(!se %in% names(dat)) stop(paste0("se column not found. Check you correctly specified the name of the se column"))
if(is.null(efo) & is.null(efo_id) & is.null(trait)) stop("you must specify either efo, efo_id or trait")
gwas_catalog<-gwas_catalog_hits2(efo=efo,efo_id=efo_id,trait=trait)
message_trait<-paste(c(efo,efo_id,trait),collapse="/")
Dat.m<-merge(gwas_catalog,dat,by="rsid")
if(all(is.na(Dat.m$effect_allele.x))) stop(paste0("associations for ",message_trait," were found but all effect alleles are missing in the GWAS catalog. Therefore no comparison of effect size direction can be made"))
Dat.m<-Dat.m[!is.na(Dat.m$effect_allele.x),]
Dat.m<-Dat.m[nchar(Dat.m$effect_allele.y)==1,]
Dat.m<-Dat.m[nchar(Dat.m$other_allele)==1,]
Alleles<-paste0(Dat.m$effect_allele.y,Dat.m$other_allele)
if(exclude_palindromic_snps)
{
Dat.m<-Dat.m[!Alleles %in% c("AT","TA","GC","CG"),]
}
if(!is.null(gwas_catalog_ancestral_group))
{
# c("European","East Asian")
Dat.m<-Dat.m[Dat.m$ancestral_group %in% gwas_catalog_ancestral_group,]
}
# Dat.m1<-Dat.m
# Dat.m<-Dat.m1
Dat.m<-harmonise_effect_allele(dat=Dat.m,beta=beta)
Pos<-Dat.m$effect_allele.x!=Dat.m$effect_allele.y
if(any(Pos))
{
Dat.m1<-Dat.m[Pos,]
Dat.m2<-Dat.m[!Pos,]
Dat.m1<-flip_strand(dat=Dat.m1,allele1_col="effect_allele.x")
# Dat.m1$effect_allele.x
# Dat.m1$effect_allele.y
# Dat.m1[,c("effect_allele.x","effect_allele.y","other_allele","rsid")]
Dat.m<-rbind(Dat.m1,Dat.m2)
}
Pos<-Dat.m$effect_allele.x!=Dat.m$effect_allele.y
if(any(Pos))
{
Dat.m<-harmonise_effect_allele(dat=Dat.m,beta=beta)
}
Pos<-Dat.m$effect_allele.x!=Dat.m$effect_allele.y
if(any(Pos))
{
stop("effect alleles not fully harmonised")
# Dat.m[Pos,c("rsid","Effect.Allele.x","Effect.Allele.y","Other.Allele")]
}
Dat.m$z.y<-Dat.m[,beta]/Dat.m[,se]
# Dat.m$z.x<-Dat.m$beta_gc/Dat.m$se_gc
# Dat.m$z.y<-Dat.m$lnor.y/Dat.m$se.y
# Dat.m$z.x<-Dat.m$lnor.x/Dat.m$se.x
# head(Dat.m[,c("p.x","z.x","p.y","z.y")])
# max(Dat.m$p.x)
# dim(Dat.m)
# Ylab<-""
# Xlab<-""
if("pmid" %in% names(dat))
{
gwas_studies<-gwasrapidd::get_studies(study_id=unique(Dat.m$study_id ))
Publications<-gwas_studies@publications
Publications<-Publications[!duplicated(Publications$study_id),]
Dat.m<-merge(Dat.m,Publications,by="study_id")
}
#identifty eaf conflicts
# ancestry2<-Dat.m$ancestral_group
Dat.m$EAF<-"no conflict"
Dat.m$EAF[is.na(Dat.m$eaf.x)]<-NA
# EAF<-rep("black",nrow(Dat.m))
Pos1<-which(Dat.m$eaf.x<0.5 & Dat.m$eaf.y>0.5 | Dat.m$eaf.x>0.5 & Dat.m$eaf.y<0.5)
Dat.m$EAF[Pos1]<-"moderate conflict"
Pos2<-which(Dat.m$eaf.x<0.40 & Dat.m$eaf.y>0.60 | Dat.m$eaf.x>0.60 & Dat.m$eaf.y<0.40)
Dat.m$EAF[Pos2]<-"high conflict"
Pos3<-which(Dat.m$pmid==Dat.m$pubmed_id)
Pos4<-Pos1[Pos1 %in% Pos3]
Dat.m$EAF[Pos4]<-"high conflict" #if there is a moderate eaf conflict (eaf close to 0.5) but both datasets are from the same study, then the conflict is upgraded to high
# if(plot_type=="plot_zscores"){
if(force_all_trait_study_hits)
{
gc_list<-find_hits_in_gwas_catalog(gwas_hits=dat$rsid,trait=trait,efo=efo,efo_id=efo_id,distance_threshold=distance_threshold)
if(length(gc_list$not_in_gc)>0)
{
# if(any(!dat$rsid %in% gwas_catalog$rsid)){
# dat$rsid[!dat$rsid %in% gwas_catalog$rsid]
dat2<-dat[dat$rsid %in% gc_list$not_in_gc,] #the snps not in the GWAS catalog. Genomic coordinates for SNPs associated with trait/efo in the GWAS catalog did not overlap with these SNPs (including +/- 250 kb)
Dat.m2<-merge(gwas_catalog,dat2,by="rsid",all.y=TRUE)
Dat.m2$z.y<-Dat.m2[,beta]/Dat.m2[,se]
Dat.m2$z.x<-0
# Dat.m$plot_x
Dat.m2$ancestral_group<-unique(dat$population)
Names<-names(Dat.m)[!names(Dat.m) %in% names(Dat.m2)]
for(i in 1:length(Names)){
Dat.m2[,Names[i]]<-NA
}
# Dat.m3<-Dat.m
Dat.m<-rbind(Dat.m,Dat.m2)
}
}
Dat.m$Z_scores<-"no conflict"
# Z_scores<-rep("black",nrow(Dat.m))
Dat.m$Z_scores[which(sign(Dat.m$z.y) != sign(as.numeric(Dat.m$z.x)))]<-"moderate conflict"
Dat.m$Z_scores[which(sign(Dat.m$z.y) != sign(as.numeric(Dat.m$z.x)) & abs(Dat.m$z.y) >= 3.890592 & abs(Dat.m$z.x) >= 3.890592 )]<-"high conflict" # Z score of 3.890592 = 2 sided p value of 0.0001
Dat.m$Z_scores[which(Dat.m$pmid==Dat.m$pubmed_id & sign(Dat.m$z.y) != sign(as.numeric(Dat.m$z.x)))]<-"high conflict" #if the signs are different but Z.x and Z.y come from the same study, then there is a clear incompatability
if(force_all_trait_study_hits){
Dat.m$Z_scores[Dat.m$z.x==0]<-"high conflict" #these SNPs are not in the GWAS catalog
}
# Z_scores[which(sign(Dat.m$z.y) != sign(as.numeric(Dat.m$z.x)) & abs(Dat.m$z.y) >= 4.891638 & abs(Dat.m$z.x) >= 4.891638 )]<-"red"
return(Dat.m)
}
#' Harmonise effect alleles between the GWAS catalog and the test dataset
#'
#' For rows where the GWAS catalog effect allele (effect_allele.x) differs
#' from the test dataset effect allele (effect_allele.y), flip the test
#' dataset so both rows refer to the same allele: negate the effect size,
#' swap effect/other alleles, and replace the effect allele frequency with
#' 1 - eaf.
#'
#' @param dat data.frame with columns effect_allele.x, effect_allele.y,
#'   other_allele, eaf.y and an effect-size column named by \code{beta}.
#' @param beta name of the effect-size column. Default "lnor" (the default
#'   used by flag_gc_conflicts). NOTE(review): the original default was the
#'   self-referential \code{beta = beta}, which errors whenever the argument
#'   is omitted; callers that pass \code{beta} explicitly are unaffected.
#' @return dat with the mismatched rows flipped to the GWAS catalog allele.
harmonise_effect_allele<-function(dat=NULL,beta="lnor"){
# which() drops NA comparisons, so rows with a missing allele are left alone.
Pos<-which(dat$effect_allele.x!=dat$effect_allele.y)
if(length(Pos)>0)
{
# Point the effect size at the other allele.
dat[[beta]][Pos]<- -dat[[beta]][Pos]
# Swap the reported effect and other alleles.
ea<-dat$effect_allele.y[Pos]
dat$effect_allele.y[Pos]<-dat$other_allele[Pos]
dat$other_allele[Pos]<-ea
# The allele frequency now refers to the swapped-in allele.
dat$eaf.y[Pos]<-1-dat$eaf.y[Pos]
}
return(dat)
}
#' Are hits in the GWAS catalog?
#'
#' Identify GWAS hits in the test dataset and see if they overlap with GWAS hits in the GWAS catalog.
#'
#' @param gwas_hits the "GWAS hits" in the test dataset (e.g. SNP-trait associations with P<5e-8)
#' @param trait the trait of interest
#' @param efo_id ID for trait of interest in the experimental factor ontology
#' @param efo trait of interest in the experimental factor ontology
#' @param distance_threshold distance threshold for deciding if the GWAS hit in the test dataset is present in the GWAS catalog. For example, a distance_threshold of 25000 means that the GWAS hit in the test dataset must be within 25000 base pairs of a GWAS catalog association, otherwise it is reported as missing from the GWAS catalog.
#'
#' @return list
#' @export
find_hits_in_gwas_catalog<-function(gwas_hits=NULL,trait=NULL,efo=NULL,efo_id=NULL,distance_threshold=25000){
# Drop SNPs belonging to the bundled 1000G super-population reference set;
# these are ancestry-inference markers, not trait hits.
utils::data("refdat_1000G_superpops",envir =environment())
snps_exclude<-unique(refdat_1000G_superpops$SNP)
gwas_hits<-gwas_hits[!gwas_hits %in% snps_exclude]
# GRCh38 coordinates for the test-dataset hits (Ensembl biomart lookup).
ensembl<-get_positions_biomart(gwas_hits=gwas_hits)
# Multiple traits / EFO terms can be passed as one ";"-separated string.
if(!is.null(efo)) efo<-trimws(unlist(strsplit(efo,split=";")))
if(!is.null(efo_id)) efo_id<-trimws(unlist(strsplit(efo_id,split=";")))
if(!is.null(trait)) trait<-trimws(unlist(strsplit(trait,split=";")))
gwas_variants<-get_gwas_associations(reported_trait=trait,efo_trait=efo,efo_id=efo_id)
# gwas_variants<-gwasrapidd::get_variants(efo_trait = efo,efo_id=efo_id,reported_trait=trait)
# NOTE(review): this guard only fires when the result unlists to a bare
# character object; presumably that is how an empty result comes back from
# get_gwas_associations() - confirm against that helper.
if(class(unlist(gwas_variants)) == "character")
{
if(nrow(gwas_variants)==0)
{
warning(paste("search returned 0 variants from the GWAS catalog"))
}
}
# Branch 1: no trait/EFO filter supplied. Query the catalog by genomic
# region around each test hit and flag hits with no catalog association
# within +/- distance_threshold bp on the same chromosome.
if(is.null(trait) & is.null(efo) & is.null(efo_id))
{
genomic_range<-list(chromosome=as.character(ensembl$chr_name),start=ensembl$chrom_start - distance_threshold,end=ensembl$chrom_start + distance_threshold)
gwas_variants<-gwasrapidd::get_variants(genomic_range=genomic_range)
gwas_variants<-data.frame(gwas_variants@variants)
# merge() by chromosome yields every test-hit x catalog-variant pair on the
# same chromosome; a hit is "in" the catalog when any pair is close enough.
ens.m<-merge(ensembl,gwas_variants,by.x="chr_name",by.y="chromosome_name",all.x=TRUE)
Pos<-abs(ens.m$chrom_start.x-ens.m$chrom_start.y)<distance_threshold
# Pos<-which(ens.m$chromosome_position>ens.m$bp_minus & ens.m$chromosome_position<ens.m$bp_plus)
gwashit_in_gc<-unique(ens.m$refsnp_id[Pos])
gwashit_notin_gc<-unique(ens.m$refsnp_id[!ens.m$refsnp_id %in% gwashit_in_gc])
return(list("not_in_gc"=gwashit_notin_gc,"in_gc"=gwashit_in_gc))
}
# Branch 2: trait/EFO filter supplied. Compare test-hit positions against
# the positions of the catalog's risk alleles for that trait.
if(!(is.null(trait) & is.null(efo) & is.null(efo_id))){
# for now use ensembl/biomart to determine positions for GWAS catalog and test variants. Both are in GRCh38 so could also use GWAS catalog positions for GWAS catalog variats (maybe this would be faster too) but there is the risk that the reference build could diverge over time between biomart/ensembl and GWAS catalog. might update this so that chromosome positions could be based on GWAS catalog instead
# if(positions_biomart)
# {
# gwas_variants<-data.frame(gwas_variants@variants)
# gwas_hits %in% gwas_variants@variants$variant_id
# ensembl2<-get_positions_biomart(gwas_hits=unique(gwas_variants$variant_id))
ensembl2<-get_positions_biomart(gwas_hits=gwas_variants@risk_alleles$variant_id)
# }
gwashit_in_gc<-NA
if(any(ensembl$chr_name %in% ensembl2$chr_name))
{
# Hits on chromosomes with no catalog association can never match.
gwashit_notin_gc<-ensembl$refsnp_id[!ensembl$chr_name %in% ensembl2$chr_name]
ens.m<-merge(ensembl,ensembl2,by="chr_name")
# ens.m[which(ens.m$refsnp_id.x =="rs12239737"),c("chrom_start.x","chrom_start.y")]
Test<-any(abs(ens.m$chrom_start.x-ens.m$chrom_start.y)<distance_threshold)
if(Test)
{
Pos<-abs(ens.m$chrom_start.x-ens.m$chrom_start.y)<distance_threshold
# Pos<-ens.m$chrom_start.x>ens.m$bp_minus.y & ens.m$chrom_start.x<ens.m$bp_plus.y
# Matched hits are "in the catalog"; the remainder (no close pair at all)
# are reported as missing.
gwashit_in_gc<-unique(ens.m$refsnp_id.x[Pos])
ens.m<-ens.m[!ens.m$refsnp_id.x %in% gwashit_in_gc,]
Pos<-abs(ens.m$chrom_start.x-ens.m$chrom_start.y)<distance_threshold
# Pos<-ens.m$chrom_start.x>ens.m$bp_minus.y & ens.m$chrom_start.x<ens.m$bp_plus.y
gwashit_notin_gc<-c(gwashit_notin_gc,unique(ens.m$refsnp_id.x[!Pos]))
}
if(!Test)
{
# No pair anywhere within the threshold: every shared-chromosome hit is
# still missing from the catalog.
gwashit_notin_gc<-c(gwashit_notin_gc,unique(ens.m$refsnp_id.x))
}
}else{
# No chromosome overlap at all between test hits and catalog risk alleles.
gwashit_notin_gc<-unique(ensembl$refsnp_id)
gwashit_in_gc<-NA
}
return(list("not_in_gc"=gwashit_notin_gc,"in_gc"=gwashit_in_gc))
}
}
#' Look up GRCh38 genomic coordinates for a set of rsIDs via Ensembl biomart
#'
#' @param gwas_hits character vector of rsIDs.
#' @return data.frame with columns refsnp_id, chr_name (numeric) and
#'   chrom_start, ordered by rsID. Rows on patch/scaffold contigs (names of
#'   3+ characters) are dropped; non-numeric chromosome names that survive
#'   the length filter ("X", "Y", "MT") become NA, and NA never matches in
#'   downstream chr_name comparisons.
get_positions_biomart<-function(gwas_hits=NULL){
# Positions come from the current Ensembl release (GRCh38); report the
# release version so results are reproducible.
Mart <- biomaRt::useMart(host="www.ensembl.org", biomart="ENSEMBL_MART_SNP",dataset="hsapiens_snp")
Version<-biomaRt::listDatasets(Mart)[ biomaRt::listDatasets(Mart)$dataset=="hsapiens_snp","version"]
message(paste0("Using ",Version," of human genome from ensembl for genomic coordinates"))
ensembl<-biomaRt::getBM(attributes=c("refsnp_id","chr_name","chrom_start"),filters="snp_filter",values=gwas_hits,mart=Mart)
ensembl<-ensembl[order(ensembl$refsnp_id),]
# Keep only short chromosome names; long names are alt/patch contigs.
ensembl<-ensembl[nchar(ensembl$chr_name)<3,]
# suppressWarnings: "X"/"Y"/"MT" deliberately coerce to NA without the noisy
# "NAs introduced by coercion" warning on every call (same result as before).
ensembl$chr_name<-suppressWarnings(as.numeric(ensembl$chr_name))
return(ensembl)
}
#' Flag conflicts with the GWAS catalog
#'
#' Flag conflicts with the GWAS catalog through comparison of reported effect alleles and reported effect allele frequency.
#'
#' @param dat the test dataset of interest
#' @param beta name of the column containing the SNP effect size
#' @param se name of the column containing the standard error for the SNP effect size.
#' @param trait the trait of interest
#' @param efo_id ID for trait of interest in the experimental factor ontology
#' @param efo trait of interest in the experimental factor ontology
#' @param gwas_catalog_ancestral_group restrict the comparison to these ancestral groups in the GWAS catalog. Default is set to c("European", "East Asian").
#' @param exclude_palindromic_snps should the function exclude palindromic SNPs? default set to TRUE. If set to FALSE, then conflicts with the GWAS catalog could reflect comparison of different reference strands.
#'
#' @return list
#' @export
flag_gc_conflicts<-function(dat=NULL,beta="lnor",se="lnor_se",efo=NULL,trait=NULL,efo_id=NULL,gwas_catalog_ancestral_group=c("European","East Asian"),exclude_palindromic_snps=TRUE){
# Merge the test dataset with the GWAS catalog; the result carries one
# conflict flag per SNP for effect-size direction (Z_scores) and one for
# effect allele frequency (EAF).
gc_dat<-compare_effect_to_gwascatalog(dat=dat,efo=efo,trait=trait,efo_id=efo_id,beta=beta,se=se,gwas_catalog_ancestral_group=gwas_catalog_ancestral_group,exclude_palindromic_snps=exclude_palindromic_snps)
gc_conflicts<-c("high conflict","moderate conflict","no conflict")
# Tabulate one conflict-flag vector into a named list: one count per
# conflict level plus the number of SNPs with a non-missing flag.
# (Replaces the original's duplicated 1:length() lapply blocks.)
tabulate_flags<-function(flags){
counts<-lapply(gc_conflicts,function(level) sum(flags==level,na.rm=TRUE))
counts<-c(counts,sum(!is.na(flags)))
names(counts)<-c(gc_conflicts,"n_snps")
counts
}
all_conflicts_list<-list("effect_size_conflicts"=tabulate_flags(gc_dat$Z_scores),"eaf_conflicts"=tabulate_flags(gc_dat$EAF))
return(all_conflicts_list)
}
|
4761670f574723b06eaaf8ce1706fe6ad8bbe9f4
|
30431c11955b3028736d8ac66e3b5a80ed05eeb1
|
/man/simple.imputer.Rd
|
589c188f20d403ffeeadca5ec0a4cde97aa1418a
|
[
"MIT"
] |
permissive
|
johncollins/fastimputer
|
da22c03fdeaeb4ae9b8b6083472022f7ed88c20c
|
5de99c83c359868af6660cb5e6f57fbfc6158b50
|
refs/heads/master
| 2020-05-17T17:25:02.192779
| 2014-07-16T03:43:18
| 2014-07-16T03:43:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
rd
|
simple.imputer.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{simple.imputer}
\alias{simple.imputer}
\title{simple.imputer}
\usage{
simple.imputer(df)
}
\arguments{
\item{df}{Data frame for all data including missing and non-missing values}
}
\description{
I use really simple models to predict missing values.
For continuous variables I use linear regression and
for categorical variables I use logistic regression.
}
\examples{
df <- data.frame(A=c(1,2,3,1,2), B=as.factor(c(1,2,1,3,NA)), C=c(1.1, 3.5, NA, 3, NA))
df.imputed <- simple.imputer(df)
}
\keyword{imputation,}
\keyword{impute,}
\keyword{imputer}
|
100f5a6e88b24c1b2f5dd19f42bc5a5e790e4e07
|
3cb52d718c7f563b7a420810c5ef0a16c524e362
|
/Project/MLProject_Evaluate.R
|
aa9c7f66020edc04ec616c0c8c6922522cf69e77
|
[] |
no_license
|
leoyuchuan/MachineLearning
|
1e6b2d6f10def6b8c9c890cd0123b452ab03cc7e
|
afd994a04b4052301caa94a114a41828f46195a8
|
refs/heads/master
| 2021-01-19T08:05:34.295155
| 2017-04-08T01:47:37
| 2017-04-08T01:47:37
| 87,599,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,145
|
r
|
MLProject_Evaluate.R
|
library(h2o)
library(data.table)
library(Metrics)
# Globally silence warnings for the remainder of the run.
options(warn = -1)
###### Read Data Path ######
# First command-line argument points at the preprocessed csv files.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  stop("Please provide path of training data & testing data")
}
# Directory of the supplied file, with a trailing slash.
path <- paste0(dirname(file.path(args[1])), "/")
###### Model Evaluation ######
# The original script repeated the same ~65-line evaluation block four
# times, once per model. The logic is factored into two helpers below and
# driven once per model; the computed metrics and the lines appended to
# output.log are identical to the original.

# Compute MAP@12 for one data split and print/append the metrics.
#
# data_file  - csv with display_id, ad_id and clicked columns
# pred       - data.frame of h2o predictions (column "predict"),
#              row-aligned with data_file
# model_name - label used in the log messages (e.g. "Random Forest")
# mse        - mean squared error reported by h2o for this split
# split_name - "Training" or "Testing" (log label only)
# path       - directory containing the csv files and output.log
report_split_metrics <- function(data_file, pred, model_name, mse, split_name, path) {
  dt <- fread(data_file)
  dt <- cbind(dt, pred)
  # Sort by predicted score ascending; rev() below then lists each
  # display's ads from highest to lowest score.
  setkeyv(dt, "predict")
  ranked <- dt[, .(ad_id = paste(rev(ad_id), collapse = " ")), by = display_id]
  setkeyv(ranked, "display_id")
  clicked <- dt[which(dt$clicked == 1), ]
  clicked <- clicked[, .(ad_id = paste(rev(ad_id), collapse = " ")), by = display_id]
  setkeyv(clicked, "display_id")
  # Join onto the ranked table so actual/predicted lists align row-wise.
  clicked <- clicked[ranked, on = "display_id"]
  clicked[, c("i.ad_id") := NULL]
  actual <- lapply(strsplit(clicked$ad_id, " "), as.integer)
  predicted <- lapply(strsplit(ranked$ad_id, " "), as.integer)
  MAP12 <- mapk(12, actual, predicted)
  str <- sprintf("%s: Mean Squared Error For %s Data is %.4f\n", model_name, split_name, mse)
  str <- paste(str, sprintf("%s: Mean Average Precision @12 For %s Data is %.4f\n", model_name, split_name, MAP12), sep = "")
  cat(str)
  write(str, file = paste(path, "output.log", sep = ''), append = TRUE)
  invisible(NULL)
}

# Fit one model and report metrics for both splits. fit_fn receives the h2o
# training frame and must return a fitted h2o model. The h2o cluster is
# started and shut down per model, exactly as the original script did.
evaluate_model <- function(model_name, fit_fn, path) {
  invisible(h2o.init(nthreads = -1, max_mem_size = '4g'))
  trainData <- h2o.importFile(paste(path, 'pp_train.csv', sep = ''))
  testData <- h2o.importFile(paste(path, 'pp_test.csv', sep = ''))
  model <- fit_fn(trainData)
  pred.train <- as.data.frame(h2o.predict(model, trainData))
  pred.test <- as.data.frame(h2o.predict(model, testData))
  mse.train <- h2o.performance(model)@metrics$MSE
  mse.test <- h2o.performance(model, testData)@metrics$MSE
  invisible(h2o.shutdown(prompt = FALSE))
  report_split_metrics(paste(path, 'pp_train.csv', sep = ''), pred.train, model_name, mse.train, "Training", path)
  report_split_metrics(paste(path, 'pp_test.csv', sep = ''), pred.test, model_name, mse.test, "Testing", path)
  invisible(NULL)
}

###### Random Forest ######
evaluate_model("Random Forest", function(trainData)
  h2o.randomForest(y = 7, x = 1:6, training_frame = trainData, ntrees = 100, mtries = -1, max_depth = 3, nfolds = 10), path)

###### Regression ######
evaluate_model("Regression", function(trainData)
  h2o.glm(y = 7, x = 1:6, training_frame = trainData, family = "gaussian", nfolds = 10), path)

###### Gradient Boosting ######
evaluate_model("Gradient Boosting", function(trainData)
  h2o.gbm(y = 7, x = 1:6, training_frame = trainData, ntrees = 100, max_depth = 4, sample_rate = 0.8, nfolds = 10), path)

###### Deep Learning ######
evaluate_model("Deep Learning", function(trainData)
  h2o.deeplearning(y = 7, x = 1:6, training_frame = trainData, hidden = c(20, 20), epochs = 20, activation = 'Rectifier', nfolds = 10), path)
|
58b3e0bdcef3306e47441610fd13171aca2eb0a0
|
d423cea9d1263ba4afe2509c95efa1e8ea18d6e8
|
/R/MaxPrecip.R
|
14d384075abfae1aa6d14c237e5e6a7da62949a4
|
[] |
no_license
|
nburola/climateimpacts
|
50eceb87d6bdd0af7401e47582a41cc191b10efd
|
38773f77d5a1b84a288bd4a3b1247076b5be5ef2
|
refs/heads/master
| 2021-03-14T05:50:16.466561
| 2020-03-11T02:40:41
| 2020-03-11T02:40:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
MaxPrecip.R
|
#' Find the greatest recorded precipitation value in a daily rainfall
#' dataset for a particular station during the given period
#'
#' @param precip data frame of daily observations with at least `date`,
#'   `station` and `rainfall` columns
#' @param station name of the precipitation gauge or station to keep
#' @return list with the station name (`Station`) and its maximum recorded
#'   rainfall (`Max_Precip`)
MaxPrecip <- function(precip, station = "Cachuma") {
  # Capture the argument under a different name so it is not shadowed by the
  # `station` COLUMN inside filter().
  target_station <- station
  precip_subset <- precip %>%
    dplyr::mutate(year_month_day = lubridate::parse_date_time(date, "ymd"),
                  # NOTE(review): year/month/day are derived from the raw
                  # `date` column, not from year_month_day - confirm intended.
                  year = lubridate::year(date),
                  month = lubridate::month(date),
                  day = lubridate::day(date),
                  rainfall = as.numeric(rainfall)) %>%
    # BUG FIX: the original compared the column against the literal string
    # "station", which never matches a real gauge name.
    dplyr::filter(station == target_station) %>%
    # BUG FIX: the original selected `rain`, but the column is `rainfall`.
    dplyr::select(rainfall)
  precip_subset_max <- apply(precip_subset, MARGIN = 2, FUN = max)
  return(list(Station = station, Max_Precip = precip_subset_max))
}
|
57d7cfe6836dd2e55941295fb51b1867c15541be
|
8c82a703ee4661feb4db1f456f94ee468b5f6459
|
/man/diagt.o.Rd
|
6cdb826f81779834868e6403cb8a7801602a6b3a
|
[] |
no_license
|
gvdovandzung/thongke
|
b8d93d6aa3fa08c91a1360ccec3c683dd6e8cc2c
|
26936ff9587953e8601c59179e25860fa75894a7
|
refs/heads/master
| 2022-08-29T08:19:57.995573
| 2020-05-22T16:01:28
| 2020-05-22T16:01:28
| 266,149,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 916
|
rd
|
diagt.o.Rd
|
\name{diagt.o}
\alias{diagt.o}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Gia tri cua xet nghiem }
\description{
Gia tri cua xet nghiem khi biet ket qua xet nghiem (nhi gia) va benh (nhi gia)
Su dung khi muon ket hop gia tri cua nhieu xet nghiem thanh mot bang
}
\usage{
diagt.o(D, btest, digits = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{btest}{Ket qua xet nghiem co ket qua nhi gia}
\item{D}{ Tinh trang benh theo xet nghiem tieu chuan vang}
  \item{digits}{ So chu so thap phan khi trinh bay ket qua (mac dinh NULL) }
}
\author{Do Van Dung <dovandzung@gmail.com>}
\examples{
data(lact)
estat(lact$vmn,lact$lact.dnt,cutoff=0.5)
diagt(lact$vmn,lact$lact.dnt>3.14)
diagt.o(lact$vmn,lact$lact.dnt>3.14)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d0264df5d1290b16e13cd18328f382690617c57d
|
6792f8a34ceeb2a3dddaec74271c9a998f2b200e
|
/DRHMCcodes/lingauss_statespace/kalman_filters.R
|
b7fe0ce2441e4cec7b8faf67efebdb9131cb169c
|
[] |
no_license
|
torekleppe/DRHMCcodes
|
0ae2c7851197fcc748393e29091375ed7aad11f4
|
32c79ed707d13b24be8825a0c07501d36fafe6d3
|
refs/heads/master
| 2020-03-19T03:15:17.966655
| 2018-06-01T11:48:22
| 2018-06-01T11:48:22
| 135,710,089
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,027
|
r
|
kalman_filters.R
|
require(CIPlib)
require(stats)
posterior_kernel <- function(lam_x,lam_y,omega,y){
# Log-likelihood of a linear-Gaussian AR(1) state-space model, computed by
# the Kalman filter (prediction-error decomposition), up to an additive
# constant. Parameterisation: state sd sigmav = exp(-lam_x/2), observation
# sd sigmay = exp(-lam_y/2); omega is mapped to the AR coefficient phi via
# CIP_AR1_psi (CIPlib) and tanh, so |phi| < 1.
T <- length(y);
phi <- tanh(CIP_AR1_psi(omega,T)$psi)
sigmav <- exp(-0.5*lam_x);
sigmay <- exp(-0.5*lam_y);
# Stationary marginal sd of the AR(1) state; initialises the filter.
margstd <- sigmav/sqrt(1.0-phi^2);
# kalman filter
aa <- 0.0;  # predicted state mean a_t (stationary prior mean 0)
PP <- margstd^2;  # predicted state variance P_t
ll <- 0.0
# NOTE(review): 1:T assumes length(y) >= 1; an empty y would iterate 1:0.
for (t in 1:T){
et <- y[t] - aa;  # one-step prediction error
Dt <- PP + sigmay^2  # prediction-error variance
ll <- ll - 0.5*log(Dt) - 0.5*et^2/Dt;
Kt <- (phi*PP)/Dt;  # Kalman gain for the next predicted state
aa <- phi*aa + Kt*et;
Lt <- phi - Kt;
Jt <- sigmav; # - Kt*sigmay;
PP <- phi*PP*Lt + sigmav*Jt;  # P_{t+1} = phi*P_t*L_t + sigmav^2
}
return(ll)
}
post_kern_lx <- function(from,to,ng,lam_y,omega,y){
# Evaluate the log posterior kernel of lam_x on a regular grid of ng points
# and return cbind(grid, normalised weights, log kernel).
grid <- seq(from = from, to = to, length.out = ng)
# Kept to mirror the original (posterior_kernel recomputes phi internally).
T <- length(y)
phi <- tanh(CIP_AR1_psi(omega, T)$psi)
# Log kernel at each grid point.
lkern <- vapply(grid, function(g) posterior_kernel(g, lam_y, omega, y), numeric(1))
# Shift by the maximum before exponentiating for numerical stability,
# then normalise to importance weights.
wts <- exp(lkern - max(lkern))
wts <- wts / sum(wts)
# Returned invisibly, as in the original (last expression was an assignment).
invisible(cbind(grid = grid, wts = wts, lkern = lkern))
}
post_kern_ly <- function(from,to,ng,lam_x,omega,y,pri_mean){
# Evaluate the log posterior kernel of lam_y (likelihood kernel plus a
# Gaussian prior with mean pri_mean and variance 9) on a regular grid.
grid <- seq(from = from, to = to, length.out = ng)
# Kept to mirror the original (posterior_kernel recomputes phi internally).
T <- length(y)
phi <- tanh(CIP_AR1_psi(omega, T)$psi)
lkern <- vapply(
  grid,
  function(g) posterior_kernel(lam_x, g, omega, y) - 0.5 * (g - pri_mean)^2 / 9.0,
  numeric(1)
)
# Stabilised, normalised importance weights.
wts <- exp(lkern - max(lkern))
wts <- wts / sum(wts)
# Returned invisibly, as in the original (last expression was an assignment).
invisible(cbind(grid = grid, wts = wts, lkern = lkern))
}
|
89b941212176ee9f923f26eee3bb1780000ca976
|
8f7bb94c9b21e944a82c5cffc4ec11d476211f89
|
/man/oshka-package.Rd
|
2810054386ffb3551a96d01f4df182b00efea1ff
|
[] |
no_license
|
brodieG/oshka
|
5f988be714628a8921f52964d467cbd99ae682d3
|
b7f73552aba09252df74e8780ac3b4d6948a5d13
|
refs/heads/master
| 2021-01-23T21:55:29.615977
| 2017-10-17T00:19:21
| 2017-10-17T00:19:21
| 102,914,120
| 14
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
oshka-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oshka-package.R
\docType{package}
\name{oshka-package}
\alias{oshka-package}
\title{Recursive Quoted Language Expansion}
\description{
Expands quoted language by recursively replacing any symbol that points to
quoted language with the language it points to. The recursive process
continues until only symbols that point to non-language objects remain. The
resulting quoted language can then be evaluated normally. This differs from
the traditional 'quote'/'eval' pattern because it resolves intermediate
language objects that would interfere with evaluation.
}
|
1cdd58fa6b76068f5dfce5e4b4a349d654790c83
|
326a197c0f0a6852e129bce9f642e5fe71b67760
|
/server.R
|
68094a56be8ac5578abbb67df6e89b0e4f5c0a97
|
[] |
no_license
|
blueshadowz12/DevelopingDataProduct
|
415d6cdc9149e8f0fad547f79c311dfeb53524a5
|
d37a214b7967ff37a5a4a2f2a2ba055d8d2b5cd0
|
refs/heads/master
| 2020-04-20T15:51:03.661799
| 2019-02-03T12:44:23
| 2019-02-03T12:44:23
| 168,943,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,831
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# Server logic: decompose an entered peso amount into bill and coin counts
# (greedy change-making over 1000/500/200/100 bills and 50/20/10/5/1 coins).
# All counts recompute only when the "calc" button (input$calc) fires.
shinyServer(function(input, output) {
  # Echo the submitted amount; isolate() keeps input$den from triggering
  # updates while the user is still typing.
  output$currentDenom <- renderText({
    input$calc
    paste("Current Denomination is : ", isolate(input$den))
  })
  # Reactive container for the per-denomination counts.
  calcdenom <- reactiveValues()
  # Greedy remainder chain. Replaces the original's nine hand-expanded
  # nested %%/%/% expressions with an equivalent sequential computation:
  # each count is floor(remaining / denom) and the remainder carries on.
  observe({
    input$calc
    isolate({
      remaining <- input$den
      calcdenom$calculate_onet <- remaining %/% 1000
      remaining <- remaining %% 1000
      calcdenom$calculate_fiveh <- remaining %/% 500
      remaining <- remaining %% 500
      calcdenom$calculate_twoh <- remaining %/% 200
      remaining <- remaining %% 200
      calcdenom$calculate_oneh <- remaining %/% 100
      remaining <- remaining %% 100
      calcdenom$calculate_fif <- remaining %/% 50
      remaining <- remaining %% 50
      calcdenom$calculate_twen <- remaining %/% 20
      remaining <- remaining %% 20
      calcdenom$calculate_ten <- remaining %/% 10
      remaining <- remaining %% 10
      calcdenom$calculate_five <- remaining %/% 5
      remaining <- remaining %% 5
      calcdenom$calculate_one <- remaining %/% 1
    })
  })
  # One output line per denomination (labels preserved from the original).
  output$onet <- renderText({
    paste("1000 PHP bill: ", calcdenom$calculate_onet)
  })
  output$fiveh <- renderText({
    paste("500 PHP bill: ", calcdenom$calculate_fiveh)
  })
  output$twoh <- renderText({
    paste("200 PHP bill: ", calcdenom$calculate_twoh)
  })
  output$oneh <- renderText({
    paste("100 PHP bill: ", calcdenom$calculate_oneh)
  })
  output$fif <- renderText({
    paste("50 PHP bill: ", calcdenom$calculate_fif)
  })
  output$twen <- renderText({
    paste("20 PHP bill: ", calcdenom$calculate_twen)
  })
  output$ten <- renderText({
    paste("10 coin/s: ", calcdenom$calculate_ten)
  })
  output$five <- renderText({
    paste("5 coin/s: ", calcdenom$calculate_five)
  })
  output$one <- renderText({
    paste("1 coin/s: ", calcdenom$calculate_one)
  })
})
|
94e3439c2dce4bd78bd76374eefedf7ec7dec9c5
|
4411d901964c67610469962d0c106d218153b4db
|
/cachematrix.R
|
c7f377accf922542da58fb8656265db48552b4cd
|
[] |
no_license
|
mpancotti/ProgrammingAssignment2
|
4ae75f725bc3e2667d5ca372923ec04abf0f446b
|
cb56dc23537bf4c34ecfe907e6d5a04c12a18456
|
refs/heads/master
| 2021-01-24T21:42:24.797671
| 2014-11-22T15:27:49
| 2014-11-22T15:27:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,353
|
r
|
cachematrix.R
|
## The functions do more or less the same actions that the example does, only using the
## solve function instead of the mean function
## this function return a list of methods (get, setinverse and getinverse) that will be used
## by the cacheSolve function.
##
## The getInverse function return the cachedInvertedMatrix if it has been already calculated.
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of accessor closures sharing the enclosing environment:
##   get        - return the stored matrix
##   setinverse - store a computed inverse in the cache
##   getinverse - return the cached inverse (NULL until computed)
##   set        - replace the stored matrix AND invalidate the cache
##
## `set` is appended last so existing positional users of the list
## keep working; previously there was no way to swap the matrix
## without risking a stale cached inverse.
makeCacheMatrix <- function(x = matrix()) {
        cachedInvertedMatrix <- NULL
        get <- function() {x}
        setinverse <- function(invertedMatrix) cachedInvertedMatrix <<- invertedMatrix
        getinverse <- function() cachedInvertedMatrix
        ## Replacing the matrix must drop any cached inverse, otherwise
        ## cacheSolve() would silently return the inverse of the old matrix.
        set <- function(y) {
                x <<- y
                cachedInvertedMatrix <<- NULL
        }
        list(get = get,
             setinverse = setinverse,
             getinverse = getinverse,
             set = set)
}
## This function uses the methods created in the makeCacheMatrix function in order to use or set the
## cachedInvertedMatrix instead of calculate all the time. In ths way the solve function will be applied only the
## first time you calculate the inverted value of a specific matrix. The subsequent times you need the same inverted
## matrix the cached value will be used
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## On the first call the inverse is computed with solve() and stored in
## the object's cache; subsequent calls reuse the cached value (and say so
## via message()), so the potentially expensive solve() runs only once
## per matrix.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                ## Cache miss: compute, then remember for next time.
                mat <- x$get()
                cached <- solve(mat, ...)
                x$setinverse(cached)
        } else {
                message("getting cached data")
        }
        cached
}
|
9d78f5ab308c50e2128c75c085804ee48876e44e
|
9f85709553f4e38fdb5725b9b3d5d6d661b905bd
|
/NumberTheory/man/is.spsp.Rd
|
1431405a2892122d613847ad8c87d9616ae04192
|
[] |
no_license
|
michallbujak/NumberTheory
|
b78075254270827d69a04b34a382023bcaf0343a
|
efc483ccbb130e9512649d98cd854fcdd029499f
|
refs/heads/master
| 2022-12-20T01:15:17.322787
| 2020-09-30T12:36:45
| 2020-09-30T12:36:45
| 297,791,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
is.spsp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{is.spsp}
\alias{is.spsp}
\title{Strongly pseudo-prime}
\usage{
is.spsp(a, n)
}
\arguments{
\item{a}{number to be checked}
\item{n}{modulus}
}
\value{
boolean value
}
\description{
Check whether the number a is strongly pseudo-prime with respect to n
}
|
acb6ad8388902e5616adf2a2a67307c307fde5f7
|
ddfe7b5c2b8a2f95e010fbe347ebb035fb7bb48d
|
/statistics.R
|
81adc3edd00e5ba473e4b19d7bad8eec820fa555
|
[] |
no_license
|
sandy149/analytics_01
|
5da9ebd65be27f4c5031e7a845ac6ce42d6cd310
|
9fadad4161e674553c000a1899e1938abef09e08
|
refs/heads/master
| 2020-03-26T20:06:33.788159
| 2018-08-21T17:44:26
| 2018-08-21T17:44:26
| 145,305,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
statistics.R
|
# Exploratory walkthrough of basic descriptive statistics in R, using a
# simulated sample: 10,000 draws from N(mean = 60, sd = 20), rounded up
# to integers with ceiling(). No seed is set, so results vary per run.
#Basic stats
x <- ceiling(rnorm(10000, mean = 60, sd = 20)) # create data for normal distribution
# Measures of central tendency
mean(x)
median(x)
# Base R has no built-in mode function; inspect the frequency table instead
table(x)
sort(table(x), decreasing = T )
# mlv() from the 'modeest' package estimates the mode ("shorth" method:
# midpoint of the shortest interval containing half the sample)
library(modeest)
mlv(x, method = 'shorth')
# Quartiles by default; custom probability grids give deciles/percentiles
quantile(x)
quantile(x, seq(.1,1,by = .1)) # deciles
quantile(x, seq(0.01, 1, by = 0.01)) # percentiles
library(e1071)
# Shape of the distribution: kernel density plot, skewness and kurtosis
plot(density(x))
e1071::skewness(x)
kurtosis(x)
# Spread: standard deviation and variance
sd(x); var(x)
# Covariance is symmetric, so both argument orders return the same value
# (uses the built-in 'women' data set: height vs. weight)
cov(women$height, women$weight)
cov(women$weight, women$height)
# Frequency distribution table via the 'fdth' package
library(fdth)
ftable1 = fdt(x)
ftable1
# Stem-and-leaf display of the raw sample
stem(x)
|
719b6a2961c2bf88e191a3865e92850fba6824a7
|
371e4e296cd48efed22de583bb90fdb1030d523f
|
/R/fitArCo.R
|
6d50885af307dd57f6876451199cbe26f8fb38aa
|
[
"MIT"
] |
permissive
|
rmasini/ARCO
|
e461c61c9e01214e2594bed1299f0e136b372938
|
116e8d00d1516ce714be061d1434a51f568fb6ce
|
refs/heads/master
| 2021-01-22T21:27:22.687936
| 2017-03-16T14:32:20
| 2017-03-16T14:32:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,966
|
r
|
fitArCo.R
|
#' Estimates the ArCo using the model selected by the user
#'
#' Estimates the Artificial Counterfactual unsing any model supplied by the user, calculates the most relevant statistics and allows for the counterfactual confidence intervals to be estimated by block bootstrap.
#'
#' @details This description may be useful to clarify the notation and understand how the arguments must be supplied to the functions.
#' \itemize{
#' \item{units: }{Each unity is indexed by a number between 1,...,n. They are for exemple: countries, states, municipalities, firms, etc.}
#' \item{Variables: }{For each unity and for every time period t=1,...,T we observe q_i >= 1 variables. They are for example: GDP, inflation, sales, etc.}
#' \item{Intervention: }{The intervention took place only in the treated unity at time t0=L0*T, where L0 is in (0,1).}
#' }
#'
#' @param data A list of matrixes or dataframes of length q. Each matrix is T X n and it contains observations of a single variable for all units and all periods of time. Even in the case of a single variable (q=1), the matrix must be inside a list.
#' @param fn The function used to estimate the first stage model. This function must receive only two arguments in the following order: X (independent variables), y (dependent variable). If the model requires additional arguments they must be supplied inside the function fn.
#' @param p.fn The function used to estimate the predict using the first stage model. This function also must receive only two arguments in the following order: model (model estimated in the first stage), newdata (out of sample data to estimate the second stage). If the prediction requires additional arguments they must be supplied inside the function p.fn.
#' @param treated.unity Single number indicating the unity where the intervention took place.
#' @param t0 Single number indicating the intervention period.
#' @param lag Number of lags in the first stage model. Default is 0, i.e. only contemporaneous variables are used.
#' @param Xreg Exogenous controls.
#' @param alpha Significance level for the delta.
#' @param boot.cf Should bootstrap confidence intervals for the counterfactual be calculated (default=FALSE).
#' @param R Number of bootstrap replications in case boot.cf=TRUE.
#' @param l Block length for the block bootstrap.
#' @param VCOV.type Type of covariance matrix for the delta. "iid" for standard covariance matrix, "var" or "varhac" to use prewhitened covariance matrix using VAR models, "varhac" selects the order of the VAR automaticaly and "nw" for Newey West. In the last case the user may select the kernel type and combine the kernel with the VAR prewhitening. For more details see Andrews and Monahan (1992).
#' @param VCOV.lag Lag used on the robust covariance matrix if VCOV.type is different from "iid".
#' @param bandwidth.kernel Kernel bandwidth. If NULL the bandwidth is automatically calculated.
#' @param kernel.type Kernel to be used for VCOV.type="nw".
#' @param VHAC.max.lag Maximum lag of the VAR in case VCOV.type="varhac".
#' @param prewhitening.kernel If TRUE and VCOV.type="nw", the covariance matrix is calculated with prewhitening (default=FALSE).
#' @return An object with S3 class fitArCo.
#' \item{cf}{estimated counterfactual}
#' \item{fitted}{In sample fitted values for the pre-treatment period.}
#' \item{model}{A list with q estimated models, one for each variable. Each element in the list is the output of the fn function.}
#' \item{delta}{The delta statistics and its confidence interval.}
#' \item{data}{The data used.}
#' \item{t0}{The intervention period used.}
#' \item{treated.unity}{The treated unity used.}
#' \item{boot.cf}{A list with the bootstrap result (boot.cf=TRUE) or logical FALSE (boot.cf=FALSE). In the first case, each element in the list refeers to one bootstrap replication of the counterfactual, i. e. the list length is R.}
#' \item{call}{The matched call.}
#' @keywords ArCo
#' @export
#' @import Matrix glmnet
#' @importFrom stats cov embed qnorm
#' @examples
#' #############################
#' ## === Example for q=1 === ##
#' #############################
#' data(data.q1)
#' # = First unity was treated on t=51 by adding a constant equal 3
#'
#' data=list(data.q1) # = Even if q=1 the data must be in a list
#'
#' ## == Fitting the ArCo using linear regression == ##
#' # = creating fn and p.fn function = #
#' fn=function(X,y){
#' return(lm(y~X))
#' }
#' p.fn=function(model,newdata){
#' b=coef(model)
#' return(cbind(1,newdata) %*% b)
#' }
#'
#' ArCo=fitArCo(data = data,fn = fn, p.fn = p.fn, treated.unity = 1 , t0 = 51)
#'
#' #############################
#' ## === Example for q=2 === ##
#' #############################
#'
#' # = First unity was treated on t=51 by adding constants 15 and -10
#' # for the first and second variables
#'
#' data(data.q2) # data is already a list
#'
#' ## == Fitting the ArCo using the package glmnet == ##
#' ## == Quadratic Spectral kernel weights for two lags == ##
#'
#' ## == Fitting the ArCo using the package glmnet == ##
#' ## == Bartlett kernel weights for two lags == ##
#' require(glmnet)
#' set.seed(123)
#' ArCo2=fitArCo(data = data.q2,fn = cv.glmnet, p.fn = predict,treated.unity = 1 , t0 = 51,
#' VCOV.type = "nw",kernel.type = "QuadraticSpectral",VCOV.lag = 2)
#'
#' @references Carvalho, C., Masini, R., Medeiros, M. (2016) "ArCo: An Artificial Counterfactual Approach For High-Dimensional Panel Time-Series Data.".
#'
#' Andrews, D. W., & Monahan, J. C. (1992). An improved heteroskedasticity and autocorrelation consistent covariance matrix estimator. Econometrica: Journal of the Econometric Society, 953-966.
fitArCo <- function(data, fn, p.fn, treated.unity, t0, lag = 0, Xreg = NULL,
                    alpha = 0.05, boot.cf = FALSE, R = 100, l = 3,
                    VCOV.type = c("iid", "var", "nw", "varhac"), VCOV.lag = 1,
                    bandwidth.kernel = NULL,
                    kernel.type = c("QuadraticSpectral", "Truncated", "Bartlett",
                                    "Parzen", "TukeyHanning"),
                    VHAC.max.lag = 5, prewhitening.kernel = FALSE)
{
    VCOV.type <- match.arg(VCOV.type)
    kernel.type <- match.arg(kernel.type)
    if (boot.cf == TRUE) {
        if (R < 10) {
            stop("Minimum number of bootstrap samples is 10.")
        }
    }
    ## Ensure every variable and every unity has a usable (unique) name.
    if (is.null(names(data))) {
        names(data) <- paste("Variable", seq_along(data), sep = "")
    }
    for (i in seq_along(data)) {
        if (is.null(colnames(data[[i]]))) {
            colnames(data[[i]]) <- paste("Unity", seq_len(ncol(data[[i]])),
                sep = "")
        }
    }
    for (i in seq_along(data)) {
        ## Duplicated unity names would break column bookkeeping below.
        if (length(unique(colnames(data[[i]]))) < ncol(data[[i]])) {
            colnames(data[[i]]) <- paste("Unity", 1:ncol(data[[i]]),
                sep = "")
        }
    }
    ## Y: the treated unity's series (one column per variable);
    ## X: all untreated unities' series, used as donors.
    if (length(data) == 1) {
        Y <- matrix(data[[1]][, treated.unity], ncol = 1)
        X <- as.matrix(data[[1]][, -treated.unity])
        colnames(X) <- paste(names(data), colnames(data[[1]])[-treated.unity],
            sep = ".")
    } else {
        Y <- Reduce("cbind", lapply(data, function(m) m[, treated.unity]))
        X <- Reduce("cbind", lapply(data, function(m) m[, -treated.unity]))
        aux <- lapply(seq_along(data), function(i) {
            paste(names(data)[i], colnames(data[[i]])[-treated.unity],
                sep = ".")
        })
        colnames(X) <- unlist(aux)
    }
    Y.raw <- Y
    ## Optionally augment X with lagged copies of itself (embed drops the
    ## first `lag` rows, so Y is trimmed to match).
    if (lag != 0) {
        lagIdx <- sort(rep(0:lag, ncol(X)))
        lagNames <- paste(rep(colnames(X), lag + 1), "lag", lagIdx, sep = ".")
        X <- embed(X, lag + 1)
        colnames(X) <- lagNames
        Y <- tail(Y, nrow(X))
    }
    if (length(Xreg) != 0) {
        X <- cbind(X, tail(Xreg, nrow(X)))
    }
    if (is.vector(Y)) {
        Y <- matrix(Y, length(Y), 1)
    }
    ## nT: effective sample size (renamed from `T`, which shadowed TRUE).
    nT <- nrow(X)
    ## Split around the intervention: fit on pre-treatment, predict the
    ## counterfactual on the post-treatment window.
    y.fit <- matrix(Y[1:(t0 - 1 - lag), ], ncol = length(data))
    y.pred <- matrix(Y[-c(1:(t0 - 1 - lag)), ], ncol = length(data))
    x.fit <- X[1:(t0 - 1 - lag), ]
    x.pred <- X[-c(1:(t0 - 1 - lag)), ]
    save.cf <- matrix(NA, nrow(y.pred), length(data))
    save.fitted <- matrix(NA, nrow(Y), length(data))
    model.list <- list()
    ## One first-stage model per variable of the treated unity.
    for (i in seq_along(data)) {
        model <- fn(x.fit, y.fit[, i])
        model.list[[i]] <- model
        save.cf[, i] <- p.fn(model, x.pred)
        save.fitted[, i] <- p.fn(model, X)
    }
    boot.list <- FALSE
    if (boot.cf == TRUE) {
        serie <- cbind(y.fit, x.fit)
        q <- length(data)
        ## Re-estimate the first stage on each bootstrap resample and
        ## return the implied counterfactual as a flat vector.
        bootfunc <- function(serie) {
            y.fit <- serie[, 1:q]
            x.fit <- serie[, -c(1:q)]
            if (is.vector(y.fit)) {
                y.fit <- matrix(y.fit, ncol = 1)
            }
            save.cf.boot <- matrix(NA, nrow(x.pred), q)
            for (i in seq_len(q)) {
                model.boot <- fn(x.fit, y.fit[, i])
                save.cf.boot[, i] <- p.fn(model.boot, x.pred)
            }
            return(as.vector(save.cf.boot))
        }
        ## BUG FIX: the block length was hardcoded as l = 3, silently
        ## ignoring the user-supplied `l` argument.
        boot.res <- boot::tsboot(serie, bootfunc, R = R, l = l,
            sim = "fixed")
        boot.stat <- boot.res$t
        boot.list <- list()
        for (i in seq_len(nrow(boot.stat))) {
            boot.list[[i]] <- matrix(boot.stat[i, ], ncol = q)
        }
    }
    ## delta: average treatment effect = mean gap between observed and
    ## counterfactual in the post-treatment window.
    delta.aux <- tail(Y.raw, nrow(save.cf)) - save.cf
    delta <- colMeans(delta.aux)
    ## Residuals with the estimated effect added back on the post window.
    aux <- matrix(0, nT, length(data))
    aux[(t0 - lag):nrow(aux), ] <- 1
    vhat <- Y - (save.fitted + t(t(aux) * delta))
    v1 <- matrix(vhat[1:(t0 - lag - 1), ], ncol = length(data))
    v2 <- matrix(vhat[(t0 - lag):nrow(vhat), ], ncol = length(data))
    t0lag <- t0 - lag
    ## Long-run covariance of delta; helpers VAR/neweywest/VARHAC are
    ## defined elsewhere in this package.
    ## BUG FIX: `bandwidth.kernel` was documented but NULL was hardcoded
    ## in the neweywest() calls; pass the user's value through.
    sigmahat <- nT * switch(VCOV.type,
        iid = cov(v1)/(t0lag - 1) + cov(v2)/(nT - t0lag),
        var = VAR(v1, VCOV.lag)$LR/(t0lag - 1) + VAR(v2, VCOV.lag)$LR/(nT - t0lag),
        nw = neweywest(v1, bandwidth.kernel, kernel.type, prewhitening.kernel, VCOV.lag)/(t0lag - 1) +
             neweywest(v2, bandwidth.kernel, kernel.type, prewhitening.kernel, VCOV.lag)/(nT - t0lag),
        varhac = VARHAC(v1, VHAC.max.lag)/(t0lag - 1) + VARHAC(v2, VHAC.max.lag)/(nT - t0lag)
    )
    ## Wald statistic for H0: delta = 0, and (1 - alpha) CI for delta.
    w <- sqrt(diag(sigmahat))
    W <- nT * t(delta) %*% solve(sigmahat) %*% delta
    p.value <- 1 - stats::pchisq(W, length(delta))
    uI <- delta + (w * qnorm(1 - alpha/2))/sqrt(nT)
    lI <- delta - (w * qnorm(1 - alpha/2))/sqrt(nT)
    delta.stat <- cbind(LB = lI, delta = delta, UB = uI)
    ## Label results by variable / time period.
    names(model.list) <- names(data)
    colnames(save.cf) <- names(data)
    rownames(save.cf) <- tail(rownames(Y.raw), nrow(save.cf))
    colnames(save.fitted) <- names(data)
    rownames(save.fitted) <- head(rownames(Y), nrow(save.fitted))
    rownames(delta.stat) <- names(data)
    save.fitted <- head(save.fitted, nrow(save.fitted) - nrow(save.cf))
    if (typeof(boot.list) == "list") {
        NAboot <- Reduce(sum, boot.list)
        if (is.na(NAboot)) {
            warning("Some of the bootstrap counterfactuals may have returned NA values. \n \n A possible cause is the number of observations being close the number of variables if the lm function was used.")
        }
    }
    result <- list(cf = save.cf, fitted = save.fitted, model = model.list,
        delta = delta.stat, p.value = p.value, data = data, t0 = t0,
        treated.unity = treated.unity, boot.cf = boot.list, call = match.call())
    class(result) <- "fitArCo"
    return(result)
}
|
859493187027392a2e326c759346229c7633d7eb
|
f9e73c8e325d98a1c5933142ce2f8c041f514208
|
/R/plotting.R
|
7ce397b6724b482d1ed9420648cc7617552d7340
|
[] |
no_license
|
shaoyoucheng/monaLisa
|
836dff6fa63e807ed9b0f45b68ad1c92d236b77a
|
e4d2d6c070f7be958f4d044d87c76c58ca86430a
|
refs/heads/master
| 2023-08-19T06:05:17.036710
| 2021-09-10T15:51:20
| 2021-09-10T15:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,288
|
r
|
plotting.R
|
#' @importFrom grDevices colorRampPalette
#' @importFrom graphics axis hist lines par plot rect rug segments barplot matplot abline legend text
#' @importFrom stats density dist hclust
#' @importFrom S4Vectors isEmpty
NULL
#' @title Get colors by bin.
#'
#' @description Get colors for elements according to their bin.
#' Colors are assigned to bins forming a gradient from \code{col1}
#' to \code{col2} in the order of \code{levels{b}}. \code{col0} is assigned
#' to the neutral bin (attribute \code{""}) if available.
#'
#' @param b A factor that groups elements into bins (typically the output of
#' \code{\link{bin}}).
#' @param col1 First color.
#' @param col2 Second color.
#' @param col0 Neutral color.
#'
#' @seealso \code{\link{bin}}.
#'
#' @return A character vector with colors for the elements in \code{b}.
#'
#' @examples
#' set.seed(1)
#' x <- rnorm(100)
#' b <- bin(x, "equalN", nElements = 10)
#' cols <- getColsByBin(b)
#'
#' @export
getColsByBin <- function(b,
                         col1 = c("#003C30", "#01665E", "#35978F", "#80CDC1", "#C7EAE5"),
                         col2 = c("#F6E8C3", "#DFC27D", "#BF812D", "#8C510A", "#543005"),
                         col0 = "#F5F5F5") {
    ## Coerce non-factor input to a factor (levels in order of first
    ## appearance) with no designated zero bin.
    if (!is.factor(b)) {
        b <- factor(b, levels = unique(b))
        b <- setZeroBin(b, NA)
    }
    zeroBin <- getZeroBin(b)
    if (!is.null(zeroBin) && !is.na(zeroBin)) {
        ## Gradient over col1 up to the zero bin, a translucent grey for
        ## the zero bin itself, then a gradient over col2 for the rest.
        binCols <- c(colorRampPalette(col1)(zeroBin - 1L),
                     "#AAAAAA33",
                     colorRampPalette(col2)(nlevels(b) - zeroBin))
    } else {
        ## No zero bin: split the levels roughly in half between the
        ## two gradients.
        nLower <- round(nlevels(b) / 2)
        binCols <- c(colorRampPalette(col1)(nLower),
                     colorRampPalette(col2)(nlevels(b) - nLower))
    }
    ## One color per element; the per-bin palette travels along as the
    ## "cols" attribute (named by bin level).
    perElement <- binCols[b]
    names(binCols) <- levels(b)
    attr(perElement, "cols") <- binCols
    perElement
}
#' @title Histogram of binned elements.
#'
#' @description Plot a histogram of binned elements with binning information.
#'
#' @param x A numerical vector with the values used for binning.
#' @param b A factor that groups elements of \code{x} into bins (typically the output of
#' \code{\link{bin}}).
#' @param breaks Controls the histogram breaks (passed to \code{hist(...)}).
#' @param xlab Label for x-axis.
#' @param ylab Label for y-axis.
#' @param main Main title.
#' @param legend If not \code{NULL}, draw a legend with binning information (will
#' be passed to \code{legend(x=legend)} to control legend position).
#' @param legend.cex A scalar that controls the text size in the legend relative
#' to the current \code{par("cex")} (see \code{\link{legend}}).
#' @param ... Further arguments passed to \code{\link{getColsByBin}}.
#'
#' @seealso \code{\link{getColsByBin}}, \code{\link[graphics]{hist}}
#'
#' @return Invisibly the return value of \code{hist(...)} that generated the plot.
#'
#' @examples
#' set.seed(1)
#' x <- rnorm(100)
#' b <- bin(x, "equalN", nElements = 10)
#' plotBinHist(x, b)
#'
#' @export
plotBinHist <- function(x, b, breaks = 10 * nlevels(b),
                        xlab = deparse(substitute(x, env = as.environment(-1))),
                        ylab = "Frequency",
                        main = "", legend = "topright", legend.cex = 1.0, ...) {
    ## b must be a factor with one entry per element of x and must carry
    ## the "breaks" attribute attached by bin().
    .assertVector(x = b, type = "factor", len = length(x))
    stopifnot("breaks" %in% names(attributes(b)))
    .assertScalar(x = legend.cex, type = "numeric", rngExcl = c(0, Inf))
    ## Per-element colors; the per-bin palette rides along in attr "cols".
    elementCols <- getColsByBin(b, ...)
    cutpoints <- attr(b, "breaks")
    palette <- attr(elementCols, "cols")
    ## Dry-run histogram to learn the bar midpoints before coloring them.
    preview <- hist(x, breaks = breaks, plot = FALSE)
    par(mar = c(5, 4, 4 - if (main == "") 3 else 0, 2) + 0.1, cex = 1.25)
    barCols <- palette[findInterval(preview$mids, cutpoints, all.inside = TRUE)]
    out <- hist(x, breaks = breaks, col = barCols,
                xlab = xlab, ylab = ylab, main = main)
    ## Draw only the left and bottom plot-box edges, then mark the bin
    ## boundaries along the x-axis.
    usr <- par('usr')
    segments(x0 = usr[c(1, 1)], y0 = usr[c(4, 3)],
             x1 = usr[c(1, 2)], y1 = usr[c(3, 3)])
    rug(cutpoints, col = "black")
    if (!is.null(legend) && legend[1] != FALSE) {
        legend(x = legend, legend = sprintf("%s : %d", levels(b), table(b)),
               fill = palette, bty = "n", cex = legend.cex)
    }
    invisible(out)
}
#' @title Density plot of binned elements.
#'
#' @description Plot the density of binned elements with binning information.
#'
#' @param x A numerical vector with the values used for binning.
#' @param b A factor that groups elements of \code{x} into bins (typically the output of
#' \code{\link{bin}}).
#' @param xlab Label for x-axis.
#' @param ylab Label for y-axis.
#' @param main Main title.
#' @param legend If not \code{NULL}, draw a legend with binning information (will
#' be passed to \code{legend(x=legend)} to control legend position).
#' @param legend.cex A scalar that controls the text size in the legend relative
#' to the current \code{par("cex")} (see \code{\link{legend}}).
#' @param ... Further arguments passed to \code{\link{getColsByBin}}.
#'
#' @seealso \code{\link{getColsByBin}}
#'
#' @return Invisibly the return value of \code{density(x)} that generated the plot.
#'
#' @examples
#' set.seed(1)
#' x <- rnorm(100)
#' b <- bin(x, "equalN", nElements = 10)
#' plotBinDensity(x, b)
#'
#' @export
plotBinDensity <- function(x, b,
                           xlab = deparse(substitute(x, env = as.environment(-1))),
                           ylab = "Density",
                           main = "", legend = "topright", legend.cex = 1.0, ...) {
    ## Same input contract as plotBinHist: b is a factor over x with the
    ## "breaks" attribute set by bin().
    .assertVector(x = b, type = "factor", len = length(x))
    stopifnot("breaks" %in% names(attributes(b)))
    .assertScalar(x = legend.cex, type = "numeric", rngExcl = c(0, Inf))
    elementCols <- getColsByBin(b, ...)
    cutpoints <- attr(b, "breaks")
    palette <- attr(elementCols, "cols")
    par(mar = c(5, 4, 4 - if (main == "") 3 else 0, 2) + 0.1, cex = 1.25)
    dens <- density(x)
    ## Draw the curve, then the axes and the partial plot box.
    plot(dens$x, dens$y, type = "l", col = "black", xlab = xlab, ylab = ylab,
         main = main, axes = FALSE)
    axis(1)
    axis(2)
    usr <- par('usr')
    segments(x0 = usr[c(1, 1)], y0 = usr[c(4, 3)],
             x1 = usr[c(1, 2)], y1 = usr[c(3, 3)])
    rug(cutpoints, col = "black")
    ## Fill under the curve with one thin rectangle per density grid
    ## point, colored by the bin that point falls into, then redraw the
    ## curve on top.
    halfStep <- diff(dens$x[seq_len(2)]) / 2
    rect(xleft = dens$x - halfStep, ybottom = 0,
         xright = dens$x + halfStep, ytop = dens$y,
         col = palette[findInterval(dens$x, cutpoints, all.inside = TRUE)],
         border = NA)
    lines(dens$x, dens$y)
    if (!is.null(legend) && legend[1] != FALSE) {
        legend(x = legend, legend = sprintf("%s : %d", levels(b), table(b)),
               fill = palette, bty = "n", cex = legend.cex)
    }
    invisible(dens)
}
#' @title Scatter plot (xy-plot) of binned elements.
#'
#' @description Plot a scatter (xy-plot) of binned elements with binning information.
#'
#' @param x A numerical vector with x values.
#' @param y A numerical vector with y values (the values used for binning).
#' @param b A factor that groups elements of \code{x,y} into bins (typically the output
#' of \code{\link{bin}(y)}).
#' @param cols A color vector (will be computed based on \code{b} by default using
#' \code{\link{getColsByBin}(b)}).
#' @param xlab Label for x-axis.
#' @param ylab Label for y-axis.
#' @param main Main title.
#' @param legend If not \code{NULL}, draw a legend with binning information (will
#' be passed to \code{legend(x=legend)} to control legend position).
#' @param legend.cex A scalar that controls the text size in the legend relative
#' to the current \code{par("cex")} (see \code{\link{legend}}).
#' @param ... Further arguments passed to \code{plot(x, y, ...)}.
#'
#' @seealso \code{\link{bin}}, \code{\link{getColsByBin}}
#'
#' @return Invisibly the return value of \code{plot(x, y, ...)} that generated the plot.
#'
#' @examples
#' set.seed(1)
#' x <- rnorm(100)
#' y <- rnorm(100)
#' b <- bin(y, "equalN", nElements = 10)
#' plotBinScatter(x, y, b)
#'
#' @export
plotBinScatter <- function(x, y, b,
                           cols = getColsByBin(b),
                           xlab = deparse(substitute(x, env = as.environment(-1))),
                           ylab = deparse(substitute(y, env = as.environment(-1))),
                           main = "", legend = "topright", legend.cex = 1.0, ...) {
    ## x, y and b must all have the same length.
    .assertVector(x = y, len = length(x))
    .assertVector(x = b, len = length(x))
    .assertScalar(x = legend.cex, type = "numeric", rngExcl = c(0, Inf))
    ## A single color is recycled over all points.
    if (length(cols) == 1L) {
        cols <- rep(cols, length(x))
    }
    stopifnot(length(x) == length(cols))
    par(mar = c(5, 4, 4 - if (main == "") 3 else 0, 2) + 0.1, cex = 1.25)
    out <- plot(x, y, pch = 16, cex = 0.6, col = cols,
                xlab = xlab, ylab = ylab, main = main, axes = FALSE, ...)
    axis(1)
    axis(2)
    ## Left and bottom plot-box edges only.
    usr <- par('usr')
    segments(x0 = usr[c(1, 1)], y0 = usr[c(4, 3)],
             x1 = usr[c(1, 2)], y1 = usr[c(3, 3)])
    if (!is.null(legend) && legend[1] != FALSE) {
        ## The legend requires the per-bin palette attached by
        ## getColsByBin() as the "cols" attribute.
        stopifnot("cols" %in% names(attributes(cols)))
        binPalette <- attr(cols, "cols")
        legend(x = legend, legend = sprintf("%s : %d", levels(b), table(b)),
               fill = binPalette, bty = "n", cex = legend.cex)
    }
    invisible(out)
}
#' @title Heatmap of motif enrichments.
#'
#' @description Plot motif enrichments (e.g. significance or magnitude) as a heatmap.
#'
#' @param x A \code{\link[SummarizedExperiment]{SummarizedExperiment}} with numerical matrices
#' (motifs-by-bins) in its \code{assays()}, typically the return value
#' of \code{\link{calcBinnedMotifEnrR}} or \code{\link{calcBinnedMotifEnrHomer}}.
#' @param which.plots Selects which heatmaps to plot (one or several from \code{"negLog10P"},
#' \code{"negLog10Padj"}, \code{"pearsonResid"} and \code{"log2enr"}).
#' @param width The width (in inches) of each individual heatmap, without legend.
#' @param col.enr Colors used for enrichment heatmap ("pearsonResid" and "log2enr").
#' @param col.sig Colors used for significance hetmaps ("negLog10P" and "negLog10Padj").
#' @param col.gc Colors used for motif GC content (for \code{show_motif_GC = TRUE}).
#' @param maxEnr Cap color mapping at enrichment = \code{maxEnr} (default: 99.5th percentile).
#' @param maxSig Cap color mapping at -log10 P value or -log10 FDR = \code{maxSig}
#' (default: 99.5th percentile).
#' @param highlight A logical vector indicating motifs to be highlighted.
#' @param cluster If \code{TRUE}, the order of transcription factors will be determined by
#' hierarchical clustering of the \code{"pearsonResid"} component. Alternatively, an
#' \code{hclust}-object can be supplied which will determine the motif ordering.
#' No reordering is done for \code{cluster = FALSE}.
#' @param show_dendrogram If \code{cluster != FALSE}, controls whether to show
#' a row dendrogram for the clustering of motifs. Ignored for \code{cluster = FALSE}.
#' @param show_motif_GC If \code{TRUE}, show a column with the percent G+C of the motif
#' as part of the heatmap.
#' @param show_seqlogo If \code{TRUE}, show a sequence logo next to each motif label.
#' This will likely only make sense for a heatmap with a low number of motifs.
#' @param width.seqlogo The width (in inches) for the longest sequence logo (shorter
#' logos are drawn to scale).
#' @param use_raster \code{TRUE} or \code{FALSE} (default). Passed to \code{use_raster}
#' of \code{\link[ComplexHeatmap]{Heatmap}}.
#' @param na_col "white" (default). Passed to \code{na_col} of
#' \code{\link[ComplexHeatmap]{Heatmap}}.
#' @param ... Further arguments passed to \code{\link[ComplexHeatmap]{Heatmap}}
#' when creating the main heatmaps selected by \code{which.plots}.
#'
#' @details The heatmaps are created using the \pkg{ComplexHeatmap} package
#' and plotted side-by-side.
#'
#' Each heatmap will be \code{width} inches wide, so the total plot needs a
#' graphics device with a width of at least \code{length(which.plots) * width}
#' plus the space used for motif names and legend. The height will be auto-adjusted to
#' the graphics device.
#'
#' @seealso \code{\link{bin}}, \code{\link[ComplexHeatmap]{Heatmap}}
#'
#' @references Gu, Z. Complex heatmaps reveal patterns and correlations in multidimensional
#' genomic data. Bioinformatics 2016.
#'
#' @return A list of \code{ComplexHeatmap::Heatmap} objects.
#'
#' @examples
#' se <- readRDS(system.file("extdata", "results.binned_motif_enrichment_LMRs.rds", package = "monaLisa"))
#' i <- which(SummarizedExperiment::assay(se, "negLog10Padj")[, 8] > 4)
#' plotMotifHeatmaps(se[i, ], which.plots = "pearsonResid",
#' width = 2, show_seqlogo = TRUE)
#'
#' @importFrom methods is
#' @importFrom stats hclust dist quantile
#' @importFrom TFBSTools Matrix
#' @importFrom grDevices colorRampPalette
#' @importFrom S4Vectors metadata
#' @importFrom SummarizedExperiment assayNames assay rowData
#' @importFrom ComplexHeatmap HeatmapAnnotation Heatmap add_heatmap
#' @importFrom grid unit
#' @importFrom circlize colorRamp2
#'
#' @export
plotMotifHeatmaps <- function(x,
                              which.plots = c("negLog10P", "pearsonResid", "negLog10Padj", "log2enr"),
                              width = 4,
                              col.enr = c("#053061","#2166AC","#4393C3","#92C5DE",
                                          "#D1E5F0","#F7F7F7","#FDDBC7","#F4A582",
                                          "#D6604D","#B2182B","#67001F"),
                              col.sig = c("#F0F0F0","#D9D9D9","#BDBDBD","#969696",
                                          "#737373","#525252","#252525","#000000"),
                              col.gc = c("#F7FCF5","#E5F5E0","#C7E9C0","#A1D99B",
                                         "#74C476","#41AB5D","#238B45","#006D2C",
                                         "#00441B"),
                              maxEnr = NULL,
                              maxSig = NULL,
                              highlight = NULL,
                              cluster = FALSE,
                              show_dendrogram = FALSE,
                              show_motif_GC = FALSE,
                              show_seqlogo = FALSE,
                              width.seqlogo = 1.5,
                              use_raster = FALSE,
                              na_col = "white",
                              ...) {
    # Structural checks: x must be a SummarizedExperiment carrying the
    # requested assays, the "bins" factor in its metadata and, if GC is to
    # be shown, a "motif.percentGC" column in rowData.
    stopifnot(exprs = {
        is(x, "SummarizedExperiment")
        all(which.plots %in% assayNames(x))
        "bins" %in% names(metadata(x))
        (!show_motif_GC || "motif.percentGC" %in% colnames(rowData(x)))
    })
    b <- metadata(x)$bins
    # Scalar-argument validation (helpers defined elsewhere in the package).
    .assertScalar(x = width, type = "numeric", rngExcl = c(0, Inf))
    .assertScalar(x = show_dendrogram, type = "logical")
    .assertScalar(x = show_motif_GC, type = "logical")
    .assertScalar(x = show_seqlogo, type = "logical")
    .assertScalar(x = width.seqlogo, type = "numeric", rngExcl = c(0, Inf))
    .assertScalar(x = use_raster, type = "logical")
    .assertScalar(x = na_col, type = "character")
    # Consistency checks: one column per bin; highlight (if given) is a
    # logical mask over the motifs (rows).
    stopifnot(exprs = {
        ncol(x) == nlevels(b)
        all(which.plots %in% c("negLog10P", "negLog10Padj", "pearsonResid", "log2enr"))
        is.null(highlight) || (is.logical(highlight) && length(highlight) == nrow(x))
    })
    # Per-bin colors used for the column (bin) annotation bar.
    bincols <- attr(getColsByBin(b), "cols")
    # Row ordering: cluster = TRUE clusters motifs on the "pearsonResid"
    # assay (dropping all-NA rows first); cluster may also be a
    # precomputed hclust object, or FALSE for no reordering.
    if (identical(cluster, TRUE)) {
        clAssayName <- "pearsonResid"
        clAssay <- assay(x, clAssayName)
        allNA <- rowSums(is.na(clAssay)) == ncol(clAssay)
        if (any(allNA)) {
            warning("removing motifs without finite values in '",
                    clAssayName, "': ",
                    paste(rownames(clAssay)[allNA], collapse = ", "))
            x <- x[!allNA, ]
            clAssay <- clAssay[!allNA, ]
        }
        clres <- hclust(dist(clAssay))
    } else if (identical(cluster, FALSE)) {
        clres <- FALSE
    } else if (is(cluster, "hclust")) {
        clres <- cluster
    } else {
        stop("'cluster' must be either TRUE, FALSE or an hclust-object.")
    }
    # Column annotation: a thin colored bar mapping each column to its bin.
    hmBin <- HeatmapAnnotation(df = data.frame(bin = colnames(x)), name = "bin",
                               col = list(bin = bincols),
                               show_annotation_name = FALSE,
                               which = "column", width = unit(width,"inch"),
                               annotation_height = unit(width / 16, "inch"),
                               show_legend = FALSE)
    # One-column matrix carrying motif names as rownames; cell values are
    # "TRUE"/"FALSE" from `highlight` (or NA if no highlighting).
    tmp <- matrix(if (!is.null(highlight)) as.character(highlight) else rep(NA, nrow(x)),
                  ncol = 1, dimnames = list(unname(rowData(x)$motif.name), NULL))
    # Optional row annotation with a sequence logo per motif, all scaled
    # relative to the widest motif.
    hmSeqlogo <- NULL
    if (show_seqlogo) {
        pfms <- rowData(x)$motif.pfm
        maxwidth <- max(vapply(TFBSTools::Matrix(pfms), ncol, 0L))
        grobL <- lapply(pfms, seqLogoGrob, xmax = maxwidth, xjust = "center")
        hmSeqlogo <- HeatmapAnnotation(
            logo = anno_seqlogo(grobL = grobL, which = "row",
                                space = unit(0.5, "mm"),
                                width = unit(width.seqlogo, "inch")),
            show_legend = FALSE, show_annotation_name = FALSE, which = "row")
    }
    # Leftmost heatmap: motif labels (row names), highlight marks and the
    # optional dendrogram/seqlogo; also carries the row order (clres) that
    # all subsequent heatmaps inherit when combined.
    hmMotifs <- Heatmap(matrix = tmp, name = "names",
                        width = unit(if (!is.null(highlight)) .2 else 0, "inch"),
                        na_col = NA, col = c("TRUE" = "green3", "FALSE" = "white"),
                        cluster_rows = clres, show_row_dend = show_dendrogram,
                        cluster_columns = FALSE, show_row_names = TRUE,
                        row_names_side = "left", show_column_names = FALSE,
                        show_heatmap_legend = FALSE, left_annotation = hmSeqlogo)
    # Legend names and column titles for each supported assay.
    assayNameMap1 <- c(negLog10P = "P value",
                       negLog10Padj = "adj. P value",
                       pearsonResid = "Pearson residual",
                       log2enr = "log2 enrichment")
    assayNameMap2 <- c(negLog10P = "P value (-log10)",
                       negLog10Padj = "adj. P value (-log10)",
                       pearsonResid = "Pearson residual (o-e)/sqrt(e)",
                       log2enr = "enrichment (log2)")
    L <- list(labels = hmMotifs)
    # Optional single-column heatmap showing each motif's percent G+C.
    if (show_motif_GC) {
        tmp <- as.matrix(rowData(x)[, "motif.percentGC", drop = FALSE])
        hmPercentGC <- Heatmap(matrix = tmp, name = "Percent G+C",
                               width = unit(0.2, "inch"), na_col = NA,
                               col = colorRamp2(breaks = c(0, seq(20, 80, length.out = 254), 100),
                                                colors = colorRampPalette(col.gc)(256)),
                               cluster_rows = FALSE, cluster_columns = FALSE,
                               show_row_names = FALSE, show_column_names = FALSE,
                               show_heatmap_legend = TRUE,
                               heatmap_legend_param = list(color_bar = "continuous"),
                               use_raster = use_raster)
        L <- c(L, list("percentGC" = hmPercentGC))
    }
    # One heatmap per requested assay. Enrichment-type assays get a
    # symmetric color range (capped at maxEnr or the 99.5th percentile of
    # |values|); significance-type assays get a [0, cap] range.
    ret <- c(L, lapply(which.plots, function(w) {
        dat <- assay(x, w)
        if ((w == "pearsonResid") | (w == "log2enr")) {
            rng <- c(-1, 1) * if (is.null(maxEnr)) quantile(abs(dat), .995, na.rm = TRUE) else maxEnr
            cols <- col.enr
        } else {
            rng <- c(0, if (is.null(maxSig)) quantile(dat, .995, na.rm = TRUE) else maxSig)
            cols <- col.sig
        }
        Heatmap(matrix = dat,
                name = assayNameMap1[w],
                width = unit(width,"inch"),
                column_title = assayNameMap2[w],
                col = colorRamp2(breaks = seq(rng[1], rng[2], length.out = 256),
                                 colors = colorRampPalette(cols)(256)),
                cluster_rows = FALSE, cluster_columns = FALSE,
                show_row_names = FALSE, show_column_names = FALSE,
                ##column_names_side = "bottom", column_names_max_height = unit(1.5,"inch"),
                top_annotation = hmBin, show_heatmap_legend = TRUE,
                heatmap_legend_param = list(color_bar = "continuous"),
                use_raster = use_raster,
                na_col = na_col,
                ...)
    }))
    # Name the assay heatmaps after their assays, draw all heatmaps
    # combined side by side, and return the list invisibly.
    names(ret)[seq(length(ret) - length(which.plots) + 1L, length(ret))] <- which.plots
    show(Reduce(ComplexHeatmap::add_heatmap, ret))
    invisible(ret)
}
#' @title Plot Stability Paths
#'
#' @description Visualize the outcome of stability selection by drawing, for
#' every predictor, its selection probability as a function of the
#' regularization step.
#'
#' @param se the \code{SummarizedExperiment} object resulting from stability
#' selection, by running \code{\link[monaLisa]{randLassoStabSel}}.
#' @param selProbMin A numerical scalar in [0,1]. Predictors with a selection
#' probability greater than \code{selProbMin} are shown as colored lines. The
#' color is defined by the \code{col} argument.
#' @param col color of the selected predictors.
#' @param lwd line width (default = 1).
#' @param lty line type (default = 1).
#' @param ylim limits for y-axis (default = c(0,1.1)).
#' @param ... additional parameters to pass on to \code{matplot}.
#'
#' @return plot of stability paths.
#'
#' @seealso \code{\link[stabs]{stabsel}} and \code{\link[graphics]{matplot}}
#'
#' @importFrom SummarizedExperiment assay rowData colData
#' @importFrom graphics matplot
#'
#' @export
plotStabilityPaths <- function(se,
                               selProbMin = metadata(se)$stabsel.params.cutoff,
                               col = "cadetblue",
                               lwd = 1, lty = 1, ylim = c(0, 1.1), ...) {
    # input validation
    if (!is(se, "SummarizedExperiment")) {
        stop("'se' must be a SummarizedExperiment")
    }

    # per-step selection probabilities live in colData columns named
    # "regStep*"; transpose so regularization steps run along the rows
    cdat <- as.matrix(colData(se))
    pathMat <- t(cdat[, grep(pattern = "^regStep", x = colnames(cdat))])

    # predictors whose final selection probability exceeds the cutoff are
    # highlighted in 'col'; all others are drawn in black
    lineCols <- rep("black", ncol(pathMat))
    lineCols[se$selProb > selProbMin] <- col

    # one line per predictor, plus the cutoff line and a legend
    graphics::matplot(pathMat, col = lineCols, type = "l", lty = lty,
                      ylab = "Selection Probability",
                      xlab = "Regularization Step",
                      ylim = ylim, lwd = lwd, ...)
    abline(h = selProbMin, lty = 5, col = "red", lwd = lwd)
    legend("topleft", legend = c("not selected", "selected", "selProbMin"),
           col = c("black", col, "red"), lty = c(1, 1, 5), bty = "n",
           lwd = lwd)

    invisible(TRUE)
}
#' @title Plot selection probabilities of predictors
#'
#' @description This function plots the selection probabilities of predictors
#' (for example the selected motifs), optionally multiplied with either +1 or
#' -1 to give a sense of both the strength and the directionality of the
#' associated effects. The directionality is estimated from the sign of the
#' correlation coefficient between each predictor and the response vector.
#'
#' @param se The \code{SummarizedExperiment} object with the results from
#' stability selection (typically returned by \code{\link{randLassoStabSel}}).
#' @param directional A logical scalar. If \code{TRUE}, selection probabilities
#' are plotted with the sign of the marginal correlation between a predictor
#' and the response.
#' @param selProbMin A numerical scalar in [0,1]. Predictors with a selection
#' probability greater than \code{selProbMin} are shown as colored bars. The
#' color is defined by \code{col[1]}. By default, \code{selProbMin} is
#' extracted from the parameters stored in \code{se}.
#' @param selProbMinPlot A numerical scalar in [0,1] less than \code{selProbMin}.
#' Predictors with a selection probability greater than \code{selProbMinPlot}
#' but less than \code{selProbMin} are shown as bars with color \code{col[2]}.
#' \code{selProbMinPlot} is useful to include additional predictors in the plot
#' that were not selected according to \code{selProbMin} but may be close to
#' that cutoff. Setting \code{selProbMinPlot = 0} will create a plot including
#' all predictors.
#' @param showSelProbMin A logical scalar. If \code{TRUE}, the value of
#' \code{selProbMin} is shown by a horizontal dashed line of color \code{col[3]}.
#' @param col A color vector giving the three colors used for predictors with
#' selection probability greater than \code{selProbMin}, additional predictors
#' with selection probability greater than \code{selProbMinPlot}, and the
#' selection probability cutoff line.
#' @param method A character scalar with the correlation method to use in the
#' calculation of predictor-response marginal correlations. One of "pearson",
#' "kendall" or "spearman" (see \code{\link[stats]{cor}}).
#' @param ylimext A numeric scalar defining how much the y axis limits should be
#' expanded beyond the plotted probabilities to allow for space for the
#' bar labels.
#' @param ... additional parameters passed to \code{\link[graphics]{barplot}}.
#'
#' @return \code{TRUE} (invisible). The function is called to create a barplot
#' indicating the selection probability and optionally directionality of the
#' predictors (motifs).
#'
#' @importFrom SummarizedExperiment rowData assay
#' @importFrom S4Vectors metadata
#' @importFrom stats cor
#' @importFrom graphics barplot abline legend text axis par
#'
#' @export
plotSelectionProb <- function(se,
                              directional = TRUE,
                              selProbMin = metadata(se)$stabsel.params.cutoff,
                              selProbMinPlot = 0.4,
                              showSelProbMin = TRUE,
                              col = c("cadetblue", "grey", "red"),
                              method = c("pearson", "kendall", "spearman"),
                              ylimext = 0.25,
                              ...) {
    # checks
    .assertScalar(x = directional, type = "logical")
    .assertScalar(x = selProbMin, type = "numeric", rngIncl = c(0, 1))
    .assertScalar(x = selProbMinPlot, type = "numeric", rngIncl = c(0, 1))
    .assertScalar(x = showSelProbMin, type = "logical")
    stopifnot(exprs = {
        is(se, "SummarizedExperiment")
        selProbMin > selProbMinPlot
    })
    .assertVector(x = col, len = 3L)
    method <- match.arg(method)
    .assertScalar(x = ylimext, type = "numeric", rngIncl = c(0, Inf))

    # selection probabilities, colored by whether they pass the selection
    # cutoff; optionally signed by the predictor-response correlation
    probs <- se$selProb
    cols <- ifelse(probs > selProbMin, col[1], col[2])
    if (directional) {
        corcoef <- as.vector(cor(x = SummarizedExperiment::rowData(se)$y,
                                 y = SummarizedExperiment::assay(se, "x"),
                                 method = method))
        probs <- probs * sign(corcoef)
    }

    # keep predictors above the plotting cutoff, ordered by decreasing
    # (signed) selection probability
    keep <- which(abs(probs) >= selProbMinPlot)
    keep <- keep[order(probs[keep], decreasing = TRUE)]
    cols <- cols[keep]
    predNames <- colnames(se)[keep]
    probs <- probs[keep]
    up <- probs > 0

    # plot (silently skipped when no predictor passes selProbMinPlot);
    # was `if (any(keep))`, which relied on integer->logical coercion
    if (length(keep) > 0) {
        bar <- graphics::barplot(probs, col = cols, border = NA,
                                 ylab = ifelse(directional,
                                               "Directional selection probability",
                                               "Selection probability"),
                                 names.arg = NA, axes = FALSE,
                                 ylim = c(min(probs) - ylimext,
                                          max(probs) + ylimext),
                                 ...)
        ys <- pretty(x = c(0, probs))
        graphics::axis(side = 2, at = ys)
        if (showSelProbMin) {
            # for directional plots the cutoff applies on both sides of zero
            hval <- if (directional) c(-1, 1) * selProbMin else selProbMin
            graphics::abline(h = hval, lty = 5, col = col[3])
        }
        graphics::legend("topright", bty = "n", fill = col[seq_len(2)], border = NA,
                         legend = c("selected", "not selected"))
        # bar labels: above positive bars, below negative bars, offset by a
        # third of the character height in user coordinates
        if (any(up)) {
            graphics::text(x = bar[up], y = probs[up] + graphics::par("cxy")[2] / 3,
                           labels = predNames[up], col = cols[up],
                           xpd = TRUE, srt = 90, adj = c(0, 0.5))
        }
        if (any(!up)) {
            graphics::text(x = bar[!up], y = probs[!up] - graphics::par("cxy")[2] / 3,
                           labels = predNames[!up], col = cols[!up],
                           xpd = TRUE, srt = 90, adj = c(1, 0.5))
        }
    }
    invisible(TRUE)
}
|
1dcf00b94b11029ecf9dda6a55c3b2337a4c183a
|
32105d2c12935dbba0e0af2e84077e69bb08b627
|
/man/lalgp_graph.Rd
|
47b86428bbc9153593a3728396d2116ff16c643a
|
[] |
no_license
|
atusy/LLfreq
|
f40be9fe9cdc92c9db006dec9b1acb21ae3d16cc
|
6f944ec2e8d5cf183ede4761020359eb1dc2f49b
|
refs/heads/master
| 2020-05-28T08:10:28.919975
| 2019-05-25T10:39:51
| 2019-05-25T10:39:51
| 188,932,685
| 0
| 0
| null | 2019-05-28T01:36:45
| 2019-05-28T01:36:45
| null |
UTF-8
|
R
| false
| true
| 3,288
|
rd
|
lalgp_graph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lalgp_graph.R
\name{lalgp_graph}
\alias{lalgp_graph}
\title{Frequency graph}
\usage{
lalgp_graph(data, x_min = NA, x_max = NA, y_max = NA,
uncertainty = TRUE, uncertainty2 = TRUE, curve = TRUE,
rug_plot = 1, graphic_theme = 1, grid = 1, xlab = "Age (Ma)",
ylab = "frequency", grid_color = "grey30", area_color = "grey80",
line_color = "black", rug_plot_color = rgb(0, 0, 0, alpha = 0.6),
hist = FALSE, hist_bin = as.integer(50), hist_height = 0.2,
hist_color = rgb(1, 0.62, 0, alpha = 0.6), analyze = FALSE)
}
\arguments{
\item{data}{list data of the result of functions; \code{\link{lalgp}} or \code{\link{manual_lalgp}}.}
\item{x_min}{numeric; the smallest end point of x axis.}
\item{x_max}{numeric; the largest end point of x axis.}
\item{y_max}{numeric; the largest end point of y axis.}
\item{uncertainty}{logical; if \code{TRUE} (default), drawing the 95 percent
confidence interval of the estimate at a certain age.}
\item{uncertainty2}{logical; if \code{TRUE} (default), drawing the 90 percent
confidence interval of the estimate line.}
\item{rug_plot}{apply data points' shape of rug plot.
\itemize{
\item{0: no plot}
\item{1: thin line plot (default)}
\item{2: thin square plot. This plot should be used for discrete data,
such as integer data.}
\item{3: thick square plot. Also used for discrete data}
}}
\item{graphic_theme}{one of either:
\itemize{
\item{1: default theme, with y
axis label, graph frame, and grid.}
\item{2: simple theme, only with x axis
in plot area.}
}}
\item{grid}{one of either (only used if \code{graphic_theme = 1}):
\itemize{
\item{0: no grid}
\item{1: dotted grid (default)}
}}
\item{xlab}{a text style title for the x axis. Defaults to "Age (Ma)".}
\item{ylab}{a text style title for the y axis, only used if
\code{graphic_theme = 1}.}
\item{grid_color}{the color to be used for grid, only used if
\code{graphic_theme = 1}. Defaults to "grey30".}
\item{area_color}{the color to be used for the 95 percent confidence interval
area, only used if \code{uncertainty = TRUE}. Defaults to "grey80".}
\item{line_color}{the color to be used for estimate line. Defaults to
"black".}
\item{rug_plot_color}{the color to be used for rug plot. Defaults to rgb(0,
0, 0, alpha = .6).}
\item{hist}{logical; if \code{TRUE}, drawing histogram in plot area. Defaults
to \code{FALSE}.}
\item{hist_bin}{integer, larger than 0. Number of histogram bins. Only used
if \code{hist = TRUE}.}
\item{hist_height}{numeric, between 0 and 1. If \code{hist_height = 1}, then
the height of largest bin is that of plot area. Generally, the height is
\code{hist_height} times. Only used if \code{hist = TRUE}.}
\item{hist_color}{the color to be used for histograms. Defaults to rgb(1,
0.62, 0, alpha = .6). Only used if \code{hist = TRUE}.}
\item{analyze}{logical; if \code{TRUE}, drawing the peak x-coordinate,
largest and smallest data, and number of data. Defaults to \code{FALSE}.}
}
\value{
data.frame of the x-coordinates and heights of peaks. Only
returned if \code{analyze = TRUE}.
}
\description{
Drawing a frequency graph (density plot) and analyzing the output of
\code{\link{lalgp}} or \code{\link{manual_lalgp}}.
}
\examples{
d <- Osayama
e <- lalgp(d)
lalgp_graph(e)
}
|
a56ba84d0d7a4ab2b7ce29a36f874e85c411c79e
|
b530cb3f49e020f0e6a0d6969aa48c4e284a3bb4
|
/Projects/DMP/heritability_analysis_v2/DMP_heritability_v10_mockdata_plotresults.R
|
2947776f59f851a8813fba437ed4bd4a88f831c7
|
[] |
no_license
|
GRONINGEN-MICROBIOME-CENTRE/Groningen-Microbiome
|
614916d08d2b2748af2fe228ebc73d961960a90c
|
5b13765ee003dc7b65a18981b2171540d28d1a68
|
refs/heads/master
| 2023-03-09T23:21:48.188107
| 2023-02-23T16:38:10
| 2023-02-23T16:38:10
| 200,232,505
| 45
| 44
| null | 2021-06-09T07:41:43
| 2019-08-02T12:40:18
|
HTML
|
UTF-8
|
R
| false
| false
| 10,988
|
r
|
DMP_heritability_v10_mockdata_plotresults.R
|
# ========================================================================
# By: R.Gacesa, Weersma Group, UMCG (2020)
#
# Script plots results of heritability analysis
# (Panels A & B in Figure 2 in main DMP manuscript )
# NOTE: These codes are implemented for mock data heritability models
# constructed using
# DMP_heritability_v10_mockdata.taxa.R
# DMP_heritability_v10_mockdata.pwys.R
# and processed using
# DMP_heritability_v10_mockdata_collect.R
#
# =========================================================================
# Parse the confidence-interval strings of a heritability results table.
#   inDFs: data.frame holding the results
#   varN:  name of the column with CI strings of the form "low-high"
#          (e.g. "0.12-0.45")
#   toget: which quantity to return per row: "range" (|high - low|),
#          "high" or "low"
# Rows with a missing (NA) or infinite ("Inf") interval yield 0, as do all
# rows of a 0-row input.
# NOTE(review): splitting on "-" assumes both bounds are non-negative; a
# negative lower bound would be mis-parsed (same limitation as before).
parseCIrange <- function(inDFs = inDFs, varN, toget = "range") {
  toget <- match.arg(toget, c("range", "high", "low"))
  vals <- inDFs[[varN]]
  # usable rows: non-missing and not containing "Inf"
  ok <- !is.na(vals) & !grepl("Inf", vals)
  ret <- numeric(length(vals))  # default 0 for unusable rows
  if (any(ok)) {
    bounds <- strsplit(vals[ok], "-")
    lo <- vapply(bounds, function(b) as.numeric(b[1]), numeric(1))
    hi <- vapply(bounds, function(b) as.numeric(b[2]), numeric(1))
    ret[ok] <- switch(toget,
                      range = abs(hi - lo),
                      high = hi,
                      low = lo)
  }
  ret
}
library(ggplot2)
library(tidyr)
# ======================
# Set working directory: NOTE this has to be set to the appropriate path!
# example:
#setwd('D:/Vbox/shared/dag/git_14_05/DMP/heritability_analysis_v2/')
setwd('.')
# initialize the folder used to store the generated plots
if (!dir.exists('Plots')) {
  dir.create('Plots')
}
# input: heritability results for taxa, as produced by
# DMP_heritability_v10_mockdata_collect.R (see file header)
inDFm <- read.table('results_mockdata_withFDRs_and_CIs_taxa.csv',sep=',',header=T,quote = '"',fill = T,stringsAsFactors = F)
# annotate each feature with its taxonomic level, derived from the prefix of
# the shortened trait name (s_ = species, g_ = genus, f_ = family,
# c_ = class, p_ = phylum, k_ = kingdom, o_ = order)
inDFm$FTYPE <- NA
inDFm$FTYPE[grep('^s_',inDFm$Trait.short)] <-"Taxon.S"
inDFm$FTYPE[grep('^g_',inDFm$Trait.short)] <-"Taxon.G"
inDFm$FTYPE[grep('^f_',inDFm$Trait.short)] <-"Taxon.F"
inDFm$FTYPE[grep('^c_',inDFm$Trait.short)] <-"Taxon.C"
inDFm$FTYPE[grep('^p_',inDFm$Trait.short)] <-"Taxon.P"
inDFm$FTYPE[grep('^k_',inDFm$Trait.short)] <-"Taxon.K"
inDFm$FTYPE[grep('^o_',inDFm$Trait.short)] <-"Taxon.O"
# debug output: features whose taxonomic level could not be determined
#print(inDFm$FEATURE[is.na(inDFm$FTYPE)])
# select which taxonomic levels to plot (kingdom, Taxon.K, is excluded)
fTypes <- c("Taxon.S","Taxon.G","Taxon.F","Taxon.C","Taxon.O","Taxon.P")
# color-blind friendly palette for the four variance components
cbPalette <- c("#E69F00","#999999", "#009E73","#56B4E9")#, "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# iterate over taxonomy levels, producing one stacked barplot per level
for (oneType in fTypes)
{
  # debug: run the loop body interactively for one level
  #oneType <- "Taxon.S"
  inDFs <- inDFm[inDFm$FTYPE==oneType,]
  # order features by the additive-genetics variance component (VE_ID)
  inDFs$Taxon <- factor(as.character(inDFs$Trait.short),levels=inDFs$Trait.short[order(inDFs$VE_ID)])
  inDFs <- inDFs[order(inDFs$VE_ID,decreasing = T),]
  # significance labels: * = nominal p <= 0.05, ** = FDR <= 0.1
  inDFs$LBL = ""
  inDFs$LBL[inDFs$SW_PV_ID <= 0.05] <- "*"
  inDFs$LBL[inDFs$SW_FDR_ID <= 0.1] <- "**"
  # rename the variance components for plotting
  inDFs$H2 <- inDFs$VE_ID
  inDFs$VE_Cohousing <- inDFs$VE_COHOUSING.ID_DMP
  inDFs$VE_Family <- inDFs$VE_famID
  inDFs$VE_Environment <- inDFs$VE_Residual
  inDFs$VE_ID[is.na(inDFs$VE_ID)] <- 0.0
  if (grepl('Taxon',oneType)) {
    inDFs$Taxon_Shortname <- as.character(inDFs$Taxon)
    inDFs$Taxon_Shortname <- factor(as.character(inDFs$Taxon_Shortname),levels=inDFs$Taxon_Shortname[order(inDFs$VE_ID,decreasing = F)])
  }
  # parse confidence intervals and clamp them to [0, 1]
  inDFs$CI_ID_low <- parseCIrange(inDFs,"CI_ID","low")
  inDFs$CI_ID_low[inDFs$CI_ID_low < 0] <- 0
  inDFs$CI_ID_high <- parseCIrange(inDFs,"CI_ID","high")
  inDFs$CI_ID_high[inDFs$CI_ID_high > 1] <- 1
  inDFtoPlot <- inDFs[,c("Taxon_Shortname","LBL","H2","VE_Cohousing","VE_Family","VE_Environment","CI_ID_low","CI_ID_high")]
  #inDFtoPlot$VE_Environment <- 1-inDFtoPlot$H2-inDFtoPlot$VE_Cohousing-inDFtoPlot$VE_Family
  #inDFtoPlot$SEH2 <- inDFs$SEH2
  inDFtoPlot$LBL <- inDFs$LBL
  # reshape to long format: one row per (feature, variance component)
  inDFtoPlotL <- gather(inDFtoPlot,"Var.Exp","Var.Exp.NR", H2:VE_Environment,factor_key = T)
  inDFtoPlotL$Var.Exp <- as.character(inDFtoPlotL$Var.Exp)
  inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "H2"] <- "Additive genetics"
  inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Environment"] <- "Environment"
  inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Cohousing"] <- "Cohousing"
  inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Family"] <- "Family"
  # NOTE(review): the first releveling below is immediately overwritten by
  # the second; only the second ordering takes effect
  inDFtoPlotL$Var.Exp <- factor(as.character(inDFtoPlotL$Var.Exp),level = c("Environment","Cohousing","Family","Additive genetics"))
  inDFtoPlotL$Var.Exp <- factor(as.character(inDFtoPlotL$Var.Exp),level = c("Cohousing","Family","Environment","Additive genetics"))
  # stacked horizontal barplot with error bars and significance labels
  g <- ggplot(inDFtoPlotL,aes(x=Taxon_Shortname,y=Var.Exp.NR,fill=Var.Exp)) +
    scale_fill_manual(values = cbPalette) +
    geom_col(col="black", width=1,size=0.75) +
    theme(axis.text.x = element_text(angle = 0,face="bold")) + ylim(-0.01,1.01) +
    theme(axis.text.y = element_text(face="bold")) +
    geom_errorbar(ymin=inDFtoPlotL$CI_ID_low,ymax=inDFtoPlotL$CI_ID_high,width=0.25, linetype='solid') +
    geom_text(data = inDFtoPlotL,
              aes(x = Taxon_Shortname, y=CI_ID_high,
                  label = format(LBL, nsmall = 0, digits=1, scientific = FALSE)),
              color="black", vjust=+0.75, angle = 0, hjust=-1,size=6) + ylim(-0.01,1.01) +
    ylab('Microbiome variance explained') + xlab('') +
    theme(legend.position="bottom") +
    theme(text = element_text(size = 14)) +
    coord_flip()
  print(g)
  # plot height grows with the number of features
  ggsave(paste0('Plots/mockdata_heritability_',oneType,'.png'),height = 1.25+8/50*nrow(inDFs),width = 9,limitsize = F)
  # smaller plot: if there are many features, also save a top-20 version
  if (nrow(inDFs) > 20) {
    topN <- 20
    topFeatures <- inDFs[order(inDFs$VE_ID,decreasing = T),]$Trait.short[1:topN]
    inDFtoPlotLs <- inDFtoPlotL[inDFtoPlotL$Taxon_Shortname %in% topFeatures,]
    g <- ggplot(inDFtoPlotLs,aes(x=Taxon_Shortname,y=Var.Exp.NR,fill=Var.Exp)) +
      scale_fill_manual(values = cbPalette) +
      geom_col(col="black", width=1,size=0.75) +
      theme(axis.text.x = element_text(angle = 0,face="bold")) + ylim(-0.01,1.01) +
      theme(axis.text.y = element_text(face="bold")) +
      geom_errorbar(ymin=inDFtoPlotLs$CI_ID_low,ymax=inDFtoPlotLs$CI_ID_high,width=0.25, linetype='solid') +
      geom_text(data = inDFtoPlotLs,
                aes(x = Taxon_Shortname, y=CI_ID_high,
                    label = format(LBL, nsmall = 0, digits=1, scientific = FALSE)),
                color="black", vjust=+0.75, angle = 0, hjust=-1,size=6) + ylim(-0.01,1.01) +
      ylab('Microbiome variance explained') + xlab('') +
      theme(legend.position="bottom") +
      theme(text = element_text(size = 14)) +
      coord_flip()
    print(g)
    ggsave(paste0('Plots/mockdata_heritability_',oneType,'_topsignals.png'),height = 1.25+8/50*topN,width = 9,limitsize = F)
  }
}
# ================================
# PATHWAY PLOTS
# ================================
# input: heritability results for pathways
inDFm <- read.table('results_mockdata_withFDRs_and_CIs_pwys.csv',sep=',',header=T,quote = '"',fill = T,stringsAsFactors = F)
# make plots (pwys): all pathway features share the single "PWYS" type
inDFm$FTYPE <- "PWYS"
# color-blind friendly palette for the four variance components
cbPalette <- c("#E69F00","#999999", "#009E73","#56B4E9")#, "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
oneType <- "PWYS"
inDFs <- inDFm[inDFm$FTYPE==oneType,]
inDFs$VE_ID[is.na(inDFs$VE_ID)] <- 0.0
# order features by the additive-genetics variance component (VE_ID)
inDFs$Taxon <- factor(as.character(inDFs$Trait),levels=inDFs$Trait[order(inDFs$VE_ID)])
inDFs <- inDFs[order(inDFs$VE_ID),]
# significance labels: * = nominal p <= 0.05, ** = FDR <= 0.1
inDFs$LBL = ""
inDFs$LBL[inDFs$SW_PV_ID <= 0.05] <- "*"
inDFs$LBL[inDFs$SW_FDR_ID <= 0.1] <- "**"
# rename the variance components for plotting
inDFs$H2 <- inDFs$VE_ID
inDFs$VE_Cohousing <- inDFs$VE_COHOUSING.ID_DMP
inDFs$VE_Family <- inDFs$VE_famID
inDFs$VE_Environment <- inDFs$VE_Residual
inDFs$Taxon_Shortname <- as.character(inDFs$Taxon)
inDFs$Taxon_Shortname <- factor(as.character(inDFs$Taxon_Shortname),levels=inDFs$Taxon_Shortname[order(inDFs$VE_ID)])
# parse confidence intervals and clamp them to [0, 1]
inDFs$CI_ID_low <- parseCIrange(inDFs,varN = "CI_ID","low")
inDFs$CI_ID_low[inDFs$CI_ID_low < 0] <- 0
inDFs$CI_ID_high <- parseCIrange(inDFs,"CI_ID","high")
inDFs$CI_ID_high[inDFs$CI_ID_high > 1] <- 1
inDFtoPlot <- inDFs[,c("Taxon_Shortname","LBL","H2","VE_Cohousing","VE_Family","VE_Environment","CI_ID_low","CI_ID_high")]
inDFtoPlot$LBL <- inDFs$LBL
# reshape to long format: one row per (feature, variance component)
inDFtoPlotL <- gather(inDFtoPlot,"Var.Exp","Var.Exp.NR", H2:VE_Environment,factor_key = T)
inDFtoPlotL$Var.Exp <- as.character(inDFtoPlotL$Var.Exp)
inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "H2"] <- "Additive genetics"
inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Environment"] <- "Environment"
inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Cohousing"] <- "Cohousing"
inDFtoPlotL$Var.Exp[inDFtoPlotL$Var.Exp == "VE_Family"] <- "Family"
# NOTE(review): the first releveling below is immediately overwritten by the
# second; only the second ordering takes effect
inDFtoPlotL$Var.Exp <- factor(as.character(inDFtoPlotL$Var.Exp),level = c("Environment","Cohousing","Family","Additive genetics"))
inDFtoPlotL$Var.Exp <- factor(as.character(inDFtoPlotL$Var.Exp),level = c("Cohousing","Family","Environment","Additive genetics"))
# stacked horizontal barplot with error bars and significance labels
g <- ggplot(inDFtoPlotL,aes(x=Taxon_Shortname,y=Var.Exp.NR,fill=Var.Exp)) +
  scale_fill_manual(values = cbPalette) +
  geom_col(col="black", width=1,size=0.75) +
  theme(axis.text.x = element_text(angle = 0,face="bold")) + ylim(-0.01,1.01) +
  theme(axis.text.y = element_text(face="bold")) +
  geom_errorbar(ymin=inDFtoPlotL$CI_ID_low,ymax=inDFtoPlotL$CI_ID_high,width=0.25, linetype='solid') +
  geom_text(data = inDFtoPlotL,
            aes(x = Taxon_Shortname, y=CI_ID_high,
                label = format(LBL, nsmall = 0, digits=1, scientific = FALSE)),
            color="black", vjust=+0.75, angle = 0, hjust=-1,size=6) + ylim(-0.01,1.01) +
  ylab('Microbiome variance explained') + xlab('') +
  theme(legend.position="bottom") +
  theme(text = element_text(size = 14)) +
  coord_flip()
print(g)
ggsave(paste0('Plots/mockdata_heritability_PWYS.png'),height = 1.25+8/50*nrow(inDFs),width = 20,limitsize = F)
# smaller plot (top 20 pathways by additive-genetics variance)
topN <- 20
topFeatures <- inDFm[order(inDFm$VE_ID,decreasing = T),]$Trait[1:topN]
inDFtoPlotLs <- inDFtoPlotL[inDFtoPlotL$Taxon_Shortname %in% topFeatures,]
g <- ggplot(inDFtoPlotLs,aes(x=Taxon_Shortname,y=Var.Exp.NR,fill=Var.Exp)) +
  scale_fill_manual(values = cbPalette) +
  geom_col(col="black", width=1,size=0.75) +
  theme(axis.text.x = element_text(angle = 0,face="bold")) + ylim(-0.01,1.01) +
  theme(axis.text.y = element_text(face="bold")) +
  geom_errorbar(ymin=inDFtoPlotLs$CI_ID_low,ymax=inDFtoPlotLs$CI_ID_high,width=0.25, linetype='solid') +
  geom_text(data = inDFtoPlotLs,
            aes(x = Taxon_Shortname, y=CI_ID_high,
                label = format(LBL, nsmall = 0, digits=1, scientific = FALSE)),
            color="black", vjust=+0.75, angle = 0, hjust=-1,size=6) + ylim(-0.01,1.01) +
  ylab('Microbiome variance explained') + xlab('') +
  theme(legend.position="bottom") +
  theme(text = element_text(size = 14)) +
  coord_flip()
print(g)
ggsave(paste0('Plots/mockdata_heritability_PWYS_top20.png'),height = 1.25+8/50*topN,width = 20,limitsize = F)
|
0f004aa33d6f491d8b33440b5b7bb03c97c41318
|
0f3aa5b26df6061ccf85a29fd2686749c3dadb2d
|
/cov_vsvb.R
|
9242924e8211a8401c8c9fb81b63265bbda373cc
|
[] |
no_license
|
Sutanoy/Covariate-dependent-Graph-Estimation
|
1f18d7910615b033a03c5020bde80a4ab838a69c
|
c8450783b90e6df69656cda99cc6228147f5e271
|
refs/heads/main
| 2023-04-20T11:56:41.378570
| 2021-05-01T00:37:31
| 2021-05-01T00:37:31
| 360,359,816
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,800
|
r
|
cov_vsvb.R
|
## Core CAVI (coordinate-ascent variational inference) routine for a single
## node-wise spike-and-slab regression in covariate-dependent graph
## estimation. For the j-th variable, `y` plays the role of the response and
## `X` holds the remaining variables; the closed-form variational updates are
## iterated until the inclusion probabilities stabilize, and the final
## variational estimates are returned. Calls ELBO_calculator().
##
## Arguments:
##   y            response vector (the j-th variable)
##   X            data matrix without the j-th variable
##   Z            covariate matrix (NOTE(review): not used in this body)
##   XtX          t(X) %*% X
##   DXtX         diagonal elements of XtX
##   Diff_mat     XtX - diag(DXtX)
##   Xty          t(X) %*% y
##   sigmasq      variance of the response given the parameters
##                (homoscedastic part; actual variance is sigmasq / w_i)
##   sigmabeta_sq prior (slab) variance of the coefficient parameters
##   true_pi      estimate of the spike-and-slab mixture proportion
##
## Returns a list with the variational estimates: var.alpha (inclusion
## probabilities), var.mu (means), var.S_sq (variances), var.elbo (final
## ELBO) and var.elboit (ELBO at each iteration).
##
## NOTE(review): besides its arguments, this function reads several objects
## from the calling/global environment (n, p, alpha, mu, S_sq, mu_mat, D,
## D_long, DXtX_Big_ind, X_vec, X_mat, y_long_vec, ELBO_LBit, logit,
## ELBO_calculator); they must exist before this is called.
cov_vsvb <- function(y, X, Z, XtX, DXtX, Diff_mat, Xty,
                     sigmasq, sigmabeta_sq, true_pi) {
  thres <- 1e-7  # clamp for alpha on the logit scale (numerical stability)
  tol <- 1e-9    # convergence tolerance on the change in alpha
  change_alpha <- rep(0.001, n * p)  # alpha_new - alpha_int; start non-zero
  max_iter <- 100                    # cap on the number of CAVI iterations
  iter <- 1
  Mu_vec <- matrix(rep(mu, n), n * p, 1)
  # iterate the CAVI sweeps until alpha converges or max_iter is reached
  while (sqrt(sum(change_alpha^2)) > tol && iter < max_iter) {
    alpha_int <- alpha  # inclusion probabilities at the start of the sweep
    alpha_mat <- matrix(alpha, n, p, byrow = TRUE)
    alpha_vec <- matrix(alpha, n * p, 1, byrow = TRUE)
    # CAVI update of the variance parameter S_sq, one row per individual
    for (i in seq_len(n)) {
      S_sq[i, ] <- sigmasq * (t(DXtX_Big_ind) %*% D_long[, i] + 1 / sigmabeta_sq)^(-1)
    }
    S_sq_vec <- matrix(t(S_sq), n * p, 1)
    # CAVI update of the mean parameter mu, one row per individual; the
    # weights in D_long[, i] borrow information across all n individuals
    for (i in seq_len(n)) {
      y_XW <- y_long_vec * X_vec * D_long[, i]
      y_XW_mat <- matrix(y_XW, n, p, byrow = TRUE)
      X_mu_alpha <- X_vec * Mu_vec * alpha_vec
      xmualpha_mat <- t(matrix(X_mu_alpha, p, n)) %*% (matrix(1, p, p) - diag(rep(1, p)))
      XW_mat <- matrix(X_vec * D_long[, i], n, p, byrow = TRUE) * xmualpha_mat
      mu_mat[i, ] <- (t(y_XW_mat) %*% matrix(1, n, 1) - (t(XW_mat) %*% matrix(1, n, 1))) * (S_sq[i, ] / sigmasq)
    }
    Mu_vec <- matrix(t(mu_mat), n * p, 1)
    # CAVI update of alpha on the logit scale:
    # prior log-odds + variance-ratio term + squared-mean term
    vec_1 <- log(true_pi / (1 - true_pi))
    vec_2 <- as.matrix(0.5 * log(S_sq_vec / (sigmasq * sigmabeta_sq)))
    vec_3 <- as.matrix(Mu_vec^2 / (2 * S_sq_vec))
    unlogitalpha <- vec_1 + vec_2 + vec_3
    # clamp to [logit(thres), logit(1 - thres)] for numerical stability
    lthres <- logit(thres)
    uthres <- logit(1 - thres)
    indlarge <- which(unlogitalpha > uthres)
    indsmall <- which(unlogitalpha < lthres)
    unlogitalpha[indlarge] <- uthres
    unlogitalpha[indsmall] <- lthres
    # very large logits are thresholded straight to 1 for computational
    # stability; the logit is inverted for the rest
    alpha[which(unlogitalpha > 9)] <- 1
    alpha[which(unlogitalpha <= 9)] <- 1 / (1 + exp(-unlogitalpha[which(unlogitalpha <= 9)]))
    # accumulate the ELBO contribution of each individual's variational
    # parameters for the current (j-th) variable
    e <- 0
    for (i in seq_len(n)) {
      e <- e + ELBO_calculator(y, X_mat, S_sq[i, ], mu_mat[i, ], alpha_mat[i, ],
                               sigmasq, sigmabeta_sq, true_pi, D[, i], n, p)
    }
    ELBO_LB <- e
    alpha_new <- alpha
    change_alpha <- alpha_new - alpha_int
    ELBO_LBit[iter] <- ELBO_LB
    iter <- iter + 1
  }
  # keep only the iterations that actually ran
  ELBO_LBit <- ELBO_LBit[seq_len(iter - 1)]
  list(var.alpha = alpha, var.mu = mu_mat, var.S_sq = S_sq,
       var.elbo = ELBO_LB, var.elboit = ELBO_LBit)
}
|
5bae6966c327262ab2a71d14c5ff44f2ff45fb3d
|
b1c1e9d146157d14c142d24a9e02b95b3a31f584
|
/IPAM 2016/Dados da Spera por municipio.R
|
24c1cb20ae328f3e4e4b00c287538bf3b116a840
|
[] |
no_license
|
Eduardoqm/Science-Repository
|
1ef37904f290cbbea3c060c0a4cf37265f60b699
|
d655a12fb833a9dd128672576c93cc6f9303f6ea
|
refs/heads/master
| 2023-07-17T08:24:52.460738
| 2023-07-05T17:22:07
| 2023-07-05T17:22:07
| 200,397,253
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,942
|
r
|
Dados da Spera por municipio.R
|
## Fix an error when reading polygon data frames:
## switch the locale so accented characters do not break the read
Sys.setlocale(category = "LC_ALL", locale = "C")
# load libraries
# NOTE(review): the script also calls readOGR(), detectCores() and
# foreach()/%dopar% without loading rgdal/parallel/foreach or registering a
# parallel backend - confirm the required packages before running
library(ggmap)
library(raster)
library(maptools)
library(spatial.tools)
library(snow)
# directory holding the municipality shapefile
setwd("/home/eduardo/Documents/public-ipam/data_geo/shapes/municipios_IBGE/municipios_dissolve")
dir()
# municipality shapes (IBGE, year 2001)
mun2001 <-readOGR(".",layer="Mtopiba_2001")
mun2001
# (optional) reproject to planar UTM coordinates:
#P4S <- CRS("+proj=longlat +datum=WGS84")
#utm <-CRS("+proj=utm +zone=22 +south +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0 ")
#tang_plano<-spTransform(tang,utm)
#plot(tang_plano)
# directory with the Spera land-cover data
setwd("D:/IPAM/Dados coletados/Dados Spera/Mato Grosso (Cerrado)")
pasta="D:/IPAM/Dados coletados/Dados Spera/Mato Grosso (Cerrado)/Dados Spera por muncipio"
sp2001=raster("GY2001.tif")
# NOTE(review): tang_plano is only created in the commented-out block above;
# the crop/mask below will fail unless it is defined first
tang2001<-crop(sp2001,tang_plano)
tang2001_mask<-mask(tang2001,tang_plano)
# reclassify to keep just the soybean cover types
# NOTE(review): sp2001_mask is not defined in this script - presumably
# tang2001_mask was intended here
table(values(sp2001_mask)) # check which cover types are present in the raster
tab<-c(0,1,2,4,5,6,11,13,99,NA,1,NA,NA,5,6,NA,NA,NA);tab<-matrix(tab,9,2);tab
sp2001_soja<-reclassify(tang2001_mask,tab)
# another way to reclassify:
#ff=function(x){x[x==0]=NA;x[x==2]=NA;x[x==4]=NA;x[x==11]=NA;x[x==13]=NA;x[x==99]=NA;return(x)}
#soja=calc(sp2001,fun=ff)
x11()
plot(sp2001_soja)
start.time <- Sys.time()
beginCluster( detectCores() ) #use all but one core
tang_plano$SoyArea2001 <- extract(sp2001_soja, tang_plano, fun = sum, na.rm = TRUE,cellnumbers=TRUE)
endCluster()
#end.time <- Sys.time()
#time.taken <- end.time - start.time
#time.taken
# NOTE(review): mumt, shape, XX and RASTER below are not defined anywhere in
# this script - this chunk looks like an unfinished template
head(mumt@data)
arquivo = foreach(i = 1:100 )%dopar%{
  x.temp = subset(shape, XX == i)
  extract(x.temp, RASTER)
}
#########################################
## How many soybean cells exist in each Tanguro field between the years
## 2001 and 2013?
#########################################
# directory with the Spera data (GeoTIFFs only)
setwd("D:/IPAM/Dados coletados/Dados Spera/Mato Grosso (Cerrado)/only_tif")
#pasta="D:/IPAM/Dados coletados/Dados Spera/Mato Grosso (Cerrado)/only_tif"
x1=list.files(,pattern="*.tif");x1
spera = stack(x1)
tang_sp<-crop(spera,tang_plano)
tang_sp_mask<-mask(tang_sp,tang_plano)
# reclassify to keep just the soybean cover types
table(values(tang_sp_mask)) # check which cover types are present in the raster
tab<-c(0,1,2,4,5,6,11,13,99,NA,1,NA,NA,5,6,NA,NA,NA);tab<-matrix(tab,9,2);tab
tang_soja<-reclassify(tang_sp_mask,tab)
# another way to reclassify:
#ff=function(x){x[x==0]=NA;x[x==2]=NA;x[x==4]=NA;x[x==11]=NA;x[x==13]=NA;x[x==99]=NA;return(x)}
#soja=calc(sp2001,fun=ff)
x11()
plot(tang_soja)
start.time <- Sys.time()
beginCluster( detectCores() ) #use all but one core
tang_plano$SoyArea <- extract(tang_soja, tang_plano, fun = sum, na.rm = TRUE,cellnumbers=TRUE)
endCluster()
barplot(tang_plano@data$SoyArea)
pixels<-data.frame(tang_plano@data$SoyArea)
# convert pixel counts to area; factor 53.4 is presumably the per-pixel
# area in the source data's units - TODO confirm
area<-pixels*53.4
barplot(as.matrix(area))
78aff0c995623bf3f375d875554b94485c2cb6d1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BNPdensity/examples/enzyme.Rd.R
|
e737718f830d4951717d0a7f2f6dbd6b60d26e12
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
enzyme.Rd.R
|
# Auto-generated example script for the `enzyme` dataset in BNPdensity.
library(BNPdensity)
### Name: enzyme
### Title: Enzyme Dataset
### Aliases: enzyme
### Keywords: datasets
### ** Examples
# Attach the enzyme dataset shipped with the package and visualise
# its distribution as a histogram.
data(enzyme)
hist(enzyme)
|
c9f45f22067dbc7d5a6065a34403040429b125b5
|
0e45743c43c89c504446112f081fcea949299dec
|
/man/sfilter.Rd
|
9c0131853bb4fa87832ebf496dd04c7f8ee87ea0
|
[] |
no_license
|
bmcclintock/crwHMM
|
99ac74eb7f77bebd2d68800dd1516b4228efc342
|
95d5755ecedae60bdacbab2dc95fb121249a730e
|
refs/heads/master
| 2020-05-19T23:06:50.678045
| 2019-05-06T19:33:24
| 2019-05-06T19:33:24
| 185,260,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,804
|
rd
|
sfilter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfilter.R
\name{sfilter}
\alias{sfilter}
\title{fit the state-space model to \code{prefilter}-ed data}
\usage{
sfilter(x, model = c("rw", "crw"), time.step = 6, parameters = NULL,
fit.to.subset = TRUE, optim = c("nlminb", "optim"),
verbose = FALSE, inner.control = NULL)
}
\arguments{
\item{x}{Argos data passed through prefilter()}
\item{model}{specify which SSM is to be fit: "rw" or "crw"}
\item{time.step}{the regular time interval, in hours, to predict to.
Alternatively, a vector of prediction times, possibly not regular, must be
specified as a data.frame with id and POSIXt dates.}
\item{parameters}{a list of initial values for all model parameters and
unobserved states, default is to let sfilter specify these. Only play with
this if you know what you are doing...}
\item{fit.to.subset}{fit the SSM to the data subset determined by prefilter
(default is TRUE)}
\item{optim}{numerical optimizer to be used ("nlminb" or "optim")}
\item{verbose}{report progress during minimization}
\item{inner.control}{list of control settings for the inner optimization
(see ?TMB::MakeADFun for additional details)}
}
\description{
generates initial values for model parameters and unobserved states;
structures data and initial values for C++ \code{TMB} template;
fits state-space model; minimises the joint log-likelihood via the selected
optimizer (\code{nlminb} or \code{optim}); structures and passes output
object to \code{fit_ssm}
}
\details{
called by \code{fit_ssm}. \code{sfilter} can only fit to an
individual track, use \code{fit_ssm} to fit to multiple tracks (see ?fit_ssm).
}
\examples{
data(ellie)
pf <- prefilter(ellie, vmax=10, ang=c(15,25), min.dt=120)
out <- sfilter(pf, model="rw", time.step=24)
}
|
0ea483995e407b70d82920cd0a7053472b73cf23
|
9b84ccc884052b90bb577372f0896db6855439f1
|
/backup_code.R
|
977bd65512cb741686eabdfff8d45b7b47b291fc
|
[] |
no_license
|
gaojingyuusa/CEM_Tool
|
34ad72c1aaa5bbe137a1fd008fe3bdd9e641631d
|
da2056ca8be74960760ad360ab323f25bfec7d76
|
refs/heads/master
| 2020-07-14T22:26:21.344490
| 2019-09-06T21:21:46
| 2019-09-06T21:21:46
| 205,416,380
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,080
|
r
|
backup_code.R
|
# Test data query from data
# Comparators + Target: one row per (country, comparison group) pair.
# NOTE(review): the input table `normal_dt` must already exist in the
# workspace (columns ISO, Year, Indicator, value, Source -- confirm).
isocode <- c("CHN", "JPN", "USA", "CHL", "BRA", "RUS", "CHL", "IDN", "MYS", "MEX", "COL", "SGP", "KOR")
group <- c("Target", "Stuc_1", "Stuc_2", "Stuc_3", "Aspr_1", "Aspr_2", "Aspr_3", "High income", "High income", "ASEAN", "ASEAN", "OECD", "OECD")
basis <- data.frame(
  isocode = isocode,
  group = group,
  stringsAsFactors = FALSE  # spell out FALSE: T/F are reassignable
)
# Subset user defined data
## Individual comparators: rows 1-7 are the target plus individual peers.
basis_inv <- basis[1:7, ]
# Build one filtered slice per country, then bind them all at once.
# This avoids the O(n^2) "grow a data.frame with rbind inside a loop"
# pattern of the original.
normal_inv_parts <- lapply(seq_along(basis_inv$isocode), function(i) {
  subset(normal_dt, ISO == basis_inv$isocode[i] & Year >= 2005 & Year <= 2010) %>%
    mutate(identifier = paste0(basis_inv$isocode[i], "_", basis_inv$group[i])) %>%
    select(-Source, -ISO)
})
normal_inv <- do.call(rbind, normal_inv_parts)
## Aggregate 3 typologies
# typ_cal: average indicator values across all countries of one group.
#
# Arguments:
#   test  - group label to aggregate (matched against basis$group)
#   start - first year of the window (inclusive)
#   end   - last year of the window (inclusive)
# Returns a data.frame with columns Year, Indicator, value, identifier.
# NOTE(review): reads `basis` and `normal_dt` from the calling
# environment -- confirm both are defined before use.
typ_cal <- function(test, start, end) {
  typo_iso <- basis$isocode[basis$group == test]
  sub_tp <- subset(normal_dt, ISO %in% typo_iso & Year >= start & Year <= end)
  # na.rm = TRUE (not T): T is an ordinary binding and can be reassigned
  sub_tp <- aggregate(x = sub_tp$value, by = list(sub_tp$Year, sub_tp$Indicator), FUN = mean, na.rm = TRUE)
  names(sub_tp) <- c("Year", "Indicator", "value")
  sub_tp$identifier <- test  # tag each row with its group label
  sub_tp
}
# Run typ_cal() once per aggregate group (rows 8+ of `basis` are the
# typology groups) and bind the results in one shot rather than growing
# a data.frame inside the loop.
basis_typ <- unique(basis$group[8:nrow(basis)])
normal_typ <- do.call(rbind, lapply(basis_typ, function(j) typ_cal(j, 2005, 2010)))
## Append all data together and pivot identifiers into columns.
# NOTE(review): tidyr::spread() is superseded by pivot_wider(); kept
# as-is to avoid raising the required tidyr version.
full <- rbind(normal_inv, normal_typ) %>% spread(identifier, value)
# Reorder columns: Indicator and Year first, then each comparator block
# in the fixed presentation order below (grep keeps original behavior:
# every column whose name matches the pattern is included).
ordername <- names(full)
block_order <- c("Target", "Stuc_1", "Stuc_2", "Stuc_3",
                 "Aspr_1", "Aspr_2", "Aspr_3",
                 basis_typ[1], basis_typ[2], basis_typ[3])
full <- full[c("Indicator", "Year",
               unlist(lapply(block_order, function(p) ordername[grep(p, ordername)])))]
|
d1a2bb93ed9c70deb412cbc1415e087adb37eb5a
|
da5ee9a7b322b05d3e99c2a0a51b1bf2078415a8
|
/man/mmf.Rd
|
220942b40819bbe1481efd237f83fd8e2996392b
|
[] |
no_license
|
cran/growthmodels
|
4140464c0d457a8bdbed1ae3cbb845077c384c95
|
dd212a6ad56212c2feb34fdba3d08667906785a6
|
refs/heads/master
| 2023-05-25T02:10:25.242190
| 2023-05-22T18:00:02
| 2023-05-22T18:00:02
| 17,696,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 954
|
rd
|
mmf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mmf.R
\name{mmf}
\alias{mmf}
\alias{mmf.inverse}
\title{Morgan-Mercer-Flodin growth model}
\usage{
mmf(t, alpha, w0, gamma, m)
mmf.inverse(x, alpha, w0, gamma, m)
}
\arguments{
\item{t}{time}
\item{alpha}{upper asymptote}
\item{w0}{the value at t = 0}
\item{gamma}{parameter that controls the point of inflection}
\item{m}{growth rate}
\item{x}{size}
}
\description{
Computes the Morgan-Mercer-Flodin growth model
\deqn{ y(t) = \frac{w_0 \gamma + \alpha t^m}{\gamma + t^m}}{ y(t) = (w_0 * \gamma + \alpha * t^m) / (\gamma + t^m)}
}
\examples{
growth <- mmf(0:10, 10, 0.5, 4, 1)
# Calculate inverse function
time <- mmf.inverse(growth, 10, 0.5, 4, 1)
}
\references{
A. Khamiz, Z. Ismail, and A. T. Muhammad, "Nonlinear growth models for
modeling oil palm yield growth," Journal of Mathematics and Statistics,
vol. 1, no. 3, p. 225, 2005.
}
\author{
Daniel Rodriguez
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.