blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
β | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
β | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
β | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9acefb2f5a9ec0d5cd17033d8798951c4c28b325
|
661335f940804b3e6349bbff7c2599ac6db4fa02
|
/Visualize_Code/gg_viz2.R
|
975387a95dfcc984dcbc32fb58344dc626e0bd26
|
[] |
no_license
|
hwijongkim/BDS_Project
|
b20469db2b8137c7f711806e7720dd3ed902562b
|
78ee62a84aeeca9ecd385ff5ae3e3ab0ccfc5f9e
|
refs/heads/master
| 2021-01-16T00:42:44.146234
| 2017-11-09T06:23:35
| 2017-11-09T06:23:35
| 99,974,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
gg_viz2.R
|
# Packages for the leaflet map (htmltools provides popup escaping)
library(htmltools)
library(dplyr)
library(leaflet)
# 1. Amount and distribution of public assets in Gapyeong-gun
m1 <- read.csv("Project/μκ°ν μ½λ/Gapyeong.csv")
# Price per unit of area; computed before imputation so rows with a missing
# total price get NA here (imputed to 0 further below).
m1$avg_price <- (m1$λμ₯κ°μ‘.μ./m1$μ¬μ°λ©΄μ )
m1$λμ₯κ°μ‘.μ. <- ifelse(is.na(m1$λμ₯κ°μ‘.μ.), 0, m1$λμ₯κ°μ‘.μ.)
# Keep only the columns the map needs, with English names.
m1 <- data.frame(m1$latitude, m1$longitude, m1$곡λΆμ§λͺ©λͺ
                 , m1$μ¬μ°λ©΄μ , m1$λμ₯κ°μ‘.μ., m1$avg_price)
colnames(m1) <- c('lat', 'lon', 'assets_name', 'size', 'tot_price', 'avg_price')
m1$size <- ifelse(is.na(m1$size), 1, m1$size)
m1$avg_price <- ifelse(is.na(m1$avg_price), 0, m1$avg_price)
# FIX(review): a zero-area asset makes avg_price Inf; the original compared
# the numeric column against the string "Inf", relying on implicit character
# coercion. Use is.infinite() instead (also catches -Inf).
m1$avg_price <- ifelse(is.infinite(m1$avg_price), 1, m1$avg_price)
m1$avg_price <- round(m1$avg_price)
m1$assets_name <- as.character(m1$assets_name)
# Popup text shown when a marker is clicked.
m1$popup <- paste("assets_name: ", m1$assets_name, " size: ", m1$size, " tot_price: ", m1$tot_price, " avg_price: ", m1$avg_price)
# Map centred on Gapyeong, with clustered, HTML-escaped markers.
op <- leaflet(m1)
op <- op %>% addTiles() %>% setView(lng = 127.4962945, lat = 37.8372685, zoom = 11)
op %>% addTiles() %>% addMarkers(~lon, ~lat, clusterOptions = markerClusterOptions(), popup = ~htmlEscape(popup))
|
731e127b4cd781e8f9e6c1c0d28ca7d40f7982bc
|
3c883c8e8f1aad9cfbaeff60998ec9b0df2b7ba0
|
/man/esDropPheno.Rd
|
ea74869d51473b2416f5d5c10eb69e47587c783a
|
[] |
no_license
|
genomelab/esFunctions
|
ec14979d03247120d54972f9b2b9213a5cbcc3cc
|
e721a3859ce29abdd50c930b213eebe503e6ad26
|
refs/heads/master
| 2023-05-13T11:23:07.544888
| 2023-05-01T04:51:05
| 2023-05-01T04:51:05
| 17,329,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 691
|
rd
|
esDropPheno.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esDropPheno.R
\name{esDropPheno}
\alias{esDropPheno}
\title{esDropPheno}
\usage{
esDropPheno(es, column_number)
}
\arguments{
\item{es}{expression set}
\item{column_number}{the number of the column to remove from the
phenoData data frame}
}
\description{
Drop a column from the phenoData of an ExpressionSet object.
}
\examples{
# 5 is column number you want to delete
#neweset <- esDropPheno(eset, 5)
#neweset
}
\references{
"An Introduction to Bioconductor's ExpressionSet Class" \cr Seth
Falcon, Martin Morgan, and Robert Gentleman \cr 6 October, 2006; revised 9
February, 2007 \cr
}
\author{
Shahab Asgharzadeh
}
|
b9748cc6a606b4738d17a22f4c8c36d32ba6ea92
|
3a6f80fad6b991699c6fbc4cf86bb5e960d6f688
|
/R/reanalysis.R
|
5af5a16bb02d60a02c18e3d4f5ca3ca8d69c4a72
|
[] |
no_license
|
vandenman/A-Cautionary-Note-on-Estimating-Effect-Size
|
428275ca69c6478a5edd01fea18908ac939c4cb8
|
e6e9bec862875353f658e0d99c6ceadfc7f6cadf
|
refs/heads/master
| 2021-07-01T20:03:21.128350
| 2020-11-16T08:22:40
| 2020-12-14T11:32:16
| 200,222,347
| 2
| 1
| null | 2020-09-08T07:48:20
| 2019-08-02T11:20:49
|
TeX
|
UTF-8
|
R
| false
| false
| 10,202
|
r
|
reanalysis.R
|
# Reanalysis of Heycke et al. (2018): replicates their Bayes factor t-tests
# and re-analyses the data with spike-and-slab model averaging (below).
# NOTE(review): rm(list = ls()) is discouraged in scripts -- it only clears
# the global environment; prefer running in a fresh R session.
rm(list = ls())
library(ggplot2)
library(tibble)
library(BayesFactor)
library(papaja)
# Project helpers -- presumably define updatePar(), postStat(), getCohenD(),
# myTheme() and saveFigure() used below; verify in R/functions.R.
source(file.path("R", "functions.R"))
source(file.path("R", "ggplotTheme.R"))
# Sweep the prior spike probability over `priorPH0_vals` and, for each value,
# collect the updated spike probability plus posterior summaries.
# Returns a tibble with columns ph0, ph0|data, lower, upper and ma.
getRobustnessData <- function(priorPH0_vals, ybar, n, sigmaSlab = 1) {
  col_names <- c("ph0", "ph0|data", "lower", "upper", "ma")
  rows <- matrix(NA, length(priorPH0_vals), 5, dimnames = list(NULL, col_names))
  for (row_idx in seq_along(priorPH0_vals)) {
    spike_prob <- priorPH0_vals[row_idx]
    updated <- updatePar(spike_prob, sigmaSlab, n, ybar[1L])
    rows[row_idx, ] <- c(spike_prob, updated[1], postStat(updated))
  }
  as_tibble(rows)
}
# Build the robustness data and return a ggplot of the model-averaged
# estimate against the prior spike probability.
#
# FIX(review): the original duplicated getRobustnessData's loop verbatim and
# ended on an assignment, so the plot was only returned invisibly. It now
# reuses getRobustnessData() and returns the plot explicitly.
getRobustnessPlot <- function(priorPH0_vals, ybar, n, sigmaSlab = 1) {
  df <- getRobustnessData(priorPH0_vals, ybar, n, sigmaSlab)
  # Reference line at element 2 ("mu1") of the update for the LAST prior
  # value -- this matches the original code, where upMA was whatever the
  # loop's final iteration left behind.
  upMA <- updatePar(priorPH0_vals[length(priorPH0_vals)], sigmaSlab, n, ybar[1L])
  df_hline <- tibble(x = range(priorPH0_vals), y = rep(upMA[2], 2))
  gImpl <- ggplot(data = df, aes(x = ph0, y = ma, ymin = lower, ymax = upper)) +
    geom_line(data = df_hline, aes(x = x, y = y), inherit.aes = FALSE, show.legend = TRUE, linetype = "longdash") +
    geom_line() +
    geom_point(size = 2) +
    geom_ribbon(alpha = .2) +
    scale_x_continuous(name = "p(spike)", breaks = seq(.05, .95, length.out = 7)) +
    labs(y = "Model averaged estimate") +
    geom_rangeframe(sides = "bl") +
    myTheme(legend.position = "right")
  return(gImpl)
}
# Data from the original study, shipped as RDS files in data/.
datExpl <- readRDS(file.path("data", "twoMinds.rds")) # Expl = Explicit
datImpl <- readRDS(file.path("data", "twoMinds_2.rds")) # Impl = Implicit
# replication ----
# functions from Heycke et al., 2018.
# Scientific-notation LaTeX formatter for the printBF* functions below,
# e.g. 123456 -> "\mathit{1.23}\times{10}^{5}".
as.scientific <- function(number){
  num <- format(number, scientific = TRUE, digits = 3)
  # FIX(review): the original patterns ".*e+" / "e+.*" used an unescaped "+"
  # (a quantifier on "e"), so they only worked by accident. Split explicitly
  # on the "e" instead; the exponent sign is preserved by as.integer().
  p <- as.integer(sub(".*e", "", num))  # exponent
  b <- as.numeric(sub("e.*", "", num))  # mantissa
  paste0('\\mathit{', b, '}', '\\times', '{', 10, '}', '^', '{', p, '}')
}
# Format a BayesFactor ANOVA result as a Markdown/LaTeX string.
#
# BF         - a BFBayesFactor object
# Hypothesis - "1"/1 to report BF10, "0"/0 to report BF01
# index      - which numerator model of BF to report
# OutputSize - threshold above which scientific notation is used
# HStyle     - 0 enables the sanity warnings when the requested direction
#              disagrees with the BF's actual direction
printBF <- function(BF, Hypothesis = 1, index = 1, OutputSize = 99999.99, HStyle = 0){
  bf <- as.vector(BF[index])
  # FIX(review): the scalar `if` conditions used elementwise `&`; `&&` is the
  # correct scalar operator (and `&&` errors on length > 1 in R >= 4.3).
  if(Hypothesis == "1" && bf >= 1) return("\\linebreak __BayesFactor larger 1, but Output for H1 selected__ \\linebreak ")
  if(Hypothesis == "0" && bf < 1 && HStyle == 0) return("\\linebreak __BayesFactor smaller 1, but Output for H0 selected__ \\linebreak ")
  if(Hypothesis == "0") return(ifelse(bf > OutputSize,
                                      paste0('$\\mathit{BF}_{01} = ', as.scientific(bf), '$'),
                                      paste0('$\\mathit{BF}_{01} = ', printnum(bf), '$')))
  if(Hypothesis == "1") return(ifelse(1/bf > OutputSize,
                                      paste0('$\\mathit{BF}_{10} = ', as.scientific(1/bf), '$'),
                                      paste0('$\\mathit{BF}_{10} = ', printnum(1/bf), '$')))
}
# Pretty-printer for BayesFactor t-test objects. Refits the test, draws
# posterior samples of the effect size delta, and returns a list of
# (1) the posterior mean of delta and (2) a formatted Markdown/LaTeX string
# with the BF, the posterior median d, and its 95% HDI.
#
# BF         - a BFBayesFactor object from BayesFactor::ttestBF()
# HStyle     - 0: report BF01 when the BF favours H0, otherwise BF10
# index      - which numerator model of BF to report
# OutputSize - threshold above which BFs switch to scientific notation
# postit     - number of posterior iterations for the delta estimate
printBFt <- function(BF, HStyle = 0, index = 1, OutputSize = 99999.99 , postit = 100000){
  # Report the BF in whichever direction exceeds 1 (unless HStyle overrides).
  if(as.vector(BF[index]) < 1 & HStyle == 0){
    b <- 1/as.vector(BF[index])
    num <- "01"
  }else{
    b <- as.vector(BF[index])
    num <- "10"
  }
  # Refit the same test so we have an object to sample the posterior from.
  if(as.character(class(BF@numerator[[names(BF@numerator)[index]]])) == "BFoneSample"){
    rBF <- BayesFactor::ttestBF(BF@data[,1], mu = BF@numerator[[names(BF@numerator)[index]]]@prior$mu, rscale = BF@numerator[[names(BF@numerator)[index]]]@prior$rscale)
  }
  # NOTE(review): this branch inspects names(BF@numerator)[1] rather than
  # [index] as the branch above does -- possibly an oversight; verify
  # behaviour when index != 1.
  if(as.character(class(BF@numerator[[names(BF@numerator)[1]]])) == "BFindepSample"){
    rBF <- BayesFactor::ttestBF(subset(BF@data, BF@data[,2] == "x")[,1] , subset(BF@data, BF@data[,2] == "y")[,1], rscale = BF@numerator[[names(BF@numerator)[index]]]@prior$rscale, paired = FALSE)
  }
  post <- BayesFactor::posterior(rBF, index = index, iterations = postit)
  d <- median(post[, "delta"])
  HDI <- coda::HPDinterval(post[, "delta"])
  # modified (relative to Heycke et al.): also compute and report the
  # posterior mean of delta, and return it alongside the formatted string.
  d_mean <- mean(post[, "delta"])
  txt <- ifelse(b > OutputSize,
                paste0('$\\mathit{BF}_{', num, '} = ', as.scientific(b), '$', ', ', '$d = ', round(d, 4), '$', ', ', '95% HDI [', printnum(HDI[1]), ', ', printnum(HDI[2]), ']', ' d_mean = ', round(d_mean, 4)),
                paste0('$\\mathit{BF}_{', num, '} = ', printnum(b), '$', ', ', '$d = ', round(d, 4), '$', ', ', '95% HDI [', printnum(HDI[1]), ', ', printnum(HDI[2]), ']', ' d_mean = ', round(d_mean, 4))
  )
  return(list(d_mean, txt))
  # original
  # ifelse(b > OutputSize,
  #        paste0('$\\mathit{BF}_{', num, '} = ', as.scientific(b), '$', ', ', '$d = ', printnum(d), '$', ', ', '95% HDI [', printnum(HDI[1]), ', ', printnum(HDI[2]), ']', ' d_mean = ', printnum(d_mean)),
  #        paste0('$\\mathit{BF}_{', num, '} = ', printnum(b), '$', ', ', '$d = ', printnum(d), '$', ', ', '95% HDI [', printnum(HDI[1]), ', ', printnum(HDI[2]), ']', ' d_mean = ', printnum(d_mean))
  # )
}
# --- Replication of the original Bayes factor t-tests ------------------------
# BUG FIX(review): the original script called printBFt(VB1expltBF) at this
# point, before VB1expltBF is defined below, so running the file top to
# bottom stopped with an "object not found" error. The call is made
# correctly after the model is fitted (see "explicit test" below), so the
# premature call has been removed.
# Prior scale r = sqrt(2)/2 used for all Bayes factor t-tests.
ttestpr <- sqrt(2)/2
# replicates results from Heycke et al., 2018. code copied from Manuscript_RR.Rmd
# ValenceBlock 1, explicit evaluation
VB1explt <- t.test(subset(datExpl, ValenceBlock == "1" & Block == "1")$DV,
                   subset(datExpl, ValenceBlock == "1" & Block == "2")$DV,
                   paired = TRUE)
VB1expltBF <- ttestBF(subset(datExpl, ValenceBlock == "1" & Block == "1")$DV,
                      subset(datExpl, ValenceBlock == "1" & Block == "2")$DV,
                      paired = TRUE,
                      rscale = ttestpr)
# ValenceBlock 2, explicit evaluation
VB2explt <- t.test(subset(datExpl, ValenceBlock == "2" & Block == "2")$DV,
                   subset(datExpl, ValenceBlock == "2" & Block == "1")$DV,
                   paired = TRUE)
VB2expltBF <- ttestBF(subset(datExpl, ValenceBlock == "2" & Block == "2")$DV,
                      subset(datExpl, ValenceBlock == "2" & Block == "1")$DV,
                      paired = TRUE,
                      rscale = ttestpr)
# ValenceBlock 1, implicit evaluation
VB1implt <- t.test(subset(datImpl, ValenceBlock == "1" & Block == "2")$DV,
                   subset(datImpl, ValenceBlock == "1" & Block == "1")$DV,
                   paired = TRUE)
VB1impltBF <- ttestBF(subset(datImpl, ValenceBlock == "1" & Block == "2")$DV,
                      subset(datImpl, ValenceBlock == "1" & Block == "1")$DV,
                      paired = TRUE,
                      rscale = ttestpr)
# ValenceBlock 2, implicit evaluation
VB2implt <- t.test(subset(datImpl, ValenceBlock == "2" & Block == "1")$DV,
                   subset(datImpl, ValenceBlock == "2" & Block == "2")$DV,
                   paired = TRUE)
VB2impltBF <- ttestBF(subset(datImpl, ValenceBlock == "2" & Block == "1")$DV,
                      subset(datImpl, ValenceBlock == "2" & Block == "2")$DV,
                      paired = TRUE,
                      rscale = ttestpr)
# explicit test
apa_print(VB1explt)$statistic
Heycke_expl <- printBFt(VB1expltBF)
print(Heycke_expl[[2]])
# implicit test
apa_print(VB1implt)$statistic
Heycke_impl <- printBFt(VB1impltBF)
print(Heycke_impl[[2]])
# reanalysis ----
# Spike-and-slab model averaging with equal prior model odds.
priorPH0 <- 0.5
sigmaSlab <- 1
# explicit
datExplBlock1 <- subset(datExpl, ValenceBlock == "1" & Block == "1")$DV
datExplBlock2 <- subset(datExpl, ValenceBlock == "1" & Block == "2")$DV
# getCohenD() is presumably defined in R/functions.R; element 1 of its
# result is used as the observed effect size below -- verify there.
ybarExpl <- getCohenD(datExplBlock1, datExplBlock2)
nExpl <- length(datExplBlock1)
upMAExpl <- updatePar(priorPH0, sigmaSlab, nExpl, ybarExpl[1L])
ciMAExpl <- postStat(upMAExpl)
tbExplicit <- data.frame(t(c(upMAExpl, ciMAExpl)))
names(tbExplicit) <- c("ph0", "mu1", "sd1", "Lower", "Upper", "modelAveraged")
# implicit
# NOTE(review): the arguments to getCohenD() are swapped (Block2, Block1)
# relative to the explicit analysis -- presumably to match the direction of
# the corresponding t-test above; confirm intent.
datImplBlock1 <- subset(datImpl, ValenceBlock == "1" & Block == "1")$DV
datImplBlock2 <- subset(datImpl, ValenceBlock == "1" & Block == "2")$DV
ybarImpl <- getCohenD(datImplBlock2, datImplBlock1)
nImpl <- length(datImplBlock1)
upMAImpl <- updatePar(priorPH0, sigmaSlab, nImpl, ybarImpl[1L])
ciMAImpl <- postStat(upMAImpl)
tbImplicit <- data.frame(t(c(upMAImpl, ciMAImpl)))
names(tbImplicit) <- c("ph0", "mu1", "sd1", "Lower", "Upper", "modelAveraged")
# Combine both analyses; ph1 is the complementary prior slab probability.
tbBoth <- rbind(tbExplicit, tbImplicit)
tbBoth$ph1 <- 1 - tbBoth$ph0
# tbBoth
# writeTable(tbBoth, file = "reanalysis.csv")
# robustness plot
# Sweep the prior spike probability to see how the model-averaged estimate
# reacts; one data set per analysis, stacked for faceting below.
priorPH0_vals <- seq(.05, .95, .025)
robExpl <- getRobustnessData(priorPH0_vals, ybarExpl[1L], nExpl)
robImpl <- getRobustnessData(priorPH0_vals, ybarImpl[1L], nImpl)
robExpl$analysis <- "Explicit Evaluation"
robImpl$analysis <- "Implicit Evaluation"
robustnessData <- rbind(robExpl, robImpl)
robustnessData$sides <- "bl"
# Horizontal reference lines at "mu1" (second element of the updatePar
# output, per the names assigned above), one per analysis panel.
hlineDf <- tibble(
  x = rep(range(priorPH0_vals), 2),
  y = c(rep(upMAExpl[2], 2), rep(upMAImpl[2], 2)),
  analysis = rep(c("Explicit Evaluation", "Implicit Evaluation"), each = 2)
)
# y-axis break selector for the faceted robustness plot: panels whose limits
# are entirely positive get breaks at 1.5/2.0/2.5; any other panel gets
# 0/-0.4/-0.8. The `n` argument is required by ggplot2 but unused.
breaksSelector <- function(limits, n) {
  if (any(limits <= 0)) {
    seq(0, -.8, -.4)
  } else {
    seq(1.5, 2.5, .5)
  }
}
# y-axis limit selector: clamp all-positive panels to [1.5, 2.5]; leave the
# limits of any other panel untouched.
limitsSelector <- function(limits) {
  if (any(limits <= 0)) {
    return(limits)
  }
  c(1.5, 2.5)
}
# Robustness figure: model-averaged estimate vs prior spike probability,
# one panel per analysis, with a dashed reference line at "mu1" (hlineDf).
graph <- ggplot(data = robustnessData, aes(x = ph0, y = ma, ymin = lower, ymax = upper, sides = sides)) +
  # lemon::geom_pointline(size = 2) +
  geom_line() +
  geom_point(size = 2) +
  geom_line(data = hlineDf, aes(x = x, y = y), col = 3, inherit.aes = FALSE, show.legend = TRUE, linetype = "longdash") +
  geom_ribbon(alpha = .2) +
  scale_x_continuous(name = "p(spike)", breaks = seq(.05, .95, length.out = 7)) +
  scale_y_continuous(name = "Model averaged estimate", breaks = breaksSelector, limits = limitsSelector) +
  labs(y = "Model averaged estimate") +
  lemon::facet_rep_wrap(~analysis, scales = "free") +
  geom_rangeframe(sides = "bl") +
  myTheme(legend.position = "right", base_size = 28)
graph
# Saved with a large base font; saveFigure() presumably comes from the
# sourced project helpers.
saveFigure(graph, "robustnessReanalysis_big_font.pdf", width = 14)
# Leftover single-panel version of the plot, kept commented for reference:
# df_hline <- tibble(x = range(priorPH0_vals), y = rep(upMA[2], 2))
#
# gImpl <- ggplot(data = df, aes(x = ph0, y = ma, ymin = lower, ymax = upper)) +
#   geom_line(data = df_hline, aes(x = x, y = y), inherit.aes = FALSE, show.legend = TRUE, linetype = "longdash") +
#   # geom_line(linetype = "dashed") +
#   geom_line() +
#   geom_point(size = 2) +
#   geom_ribbon(alpha = .2) +
#   scale_x_continuous(name = "p(spike)", breaks = seq(.05, .95, length.out = 7)) +
#   labs(y = "Model averaged estimate") +
#   geom_rangeframe(sides = "bl") +
#   myTheme(legend.position = "right")
#
# saveFigure(g, "robustnessReanalysis.pdf")
#
|
935b88df418ec1f6c03e6d024095225a3789643d
|
953689109af5647828af6d8205ceccaa02c0ba4e
|
/man/plot_selectivities.Rd
|
b9fb3e6ace372d2231f43366ed29e0e49018b422
|
[] |
no_license
|
SPRFMO/jjmr
|
dc27f2810f92e53f0da5db9cac5cafa5da07127f
|
68fc9ed028181cb47873567024d793b78b2d5159
|
refs/heads/master
| 2023-07-19T20:57:59.532264
| 2023-07-16T22:55:24
| 2023-07-16T22:55:24
| 70,265,542
| 2
| 7
| null | 2022-07-20T13:45:21
| 2016-10-07T16:50:37
|
R
|
UTF-8
|
R
| false
| true
| 747
|
rd
|
plot_selectivities.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_selectivities.R
\name{plot_selectivities}
\alias{plot_selectivities}
\title{Plot selectivities by age, year, fleet, and model}
\usage{
plot_selectivities(
sels,
fleet = "fsh",
alpha = 0.4,
scale = 4,
size = 0.5,
years = "all"
)
}
\arguments{
\item{sels}{selectivity data frame generated by \code{get_selectivities}}
\item{fleet}{fleets to plot: "fsh" (fishery), "ind" (survey), or "all" (both)}
}
\value{
a ggplot2 plot object
}
\description{
Plot selectivities by age, year, fleet, and model
}
\examples{
\dontrun{
oldnewMods <- combineModels(mod0.00,mod_prev)
selectivities <- get_selectivities(oldnewMods)
plot_selectivities(selectivities)
}
}
|
51a42cf6a7aff97159c6296c7a50ace98a93d4fc
|
1873bcdf1e78e2370f440d129d5d19f115095640
|
/tests/testthat/test-hillshade.R
|
c29c0e470c080737ecb5409c053f5806a5c899b5
|
[] |
no_license
|
tylermorganwall/rayshader
|
bdc6314d093a7b874aec43576a975909f17a668d
|
81f95cf24973049f84f4a4250daa9b0b4659281d
|
refs/heads/master
| 2023-08-10T21:34:51.566012
| 2023-08-01T12:15:39
| 2023-08-01T12:15:39
| 133,241,343
| 1,939
| 219
| null | 2023-07-31T09:29:09
| 2018-05-13T13:51:00
|
R
|
UTF-8
|
R
| false
| false
| 4,358
|
r
|
test-hillshade.R
|
# Call `func` once per row of `argument_grid` (a data frame of argument
# values, typically produced by expand.grid with list columns), combined
# with the fixed arguments passed via `...`, and expect every call to
# complete without raising any condition.
run_tests_success = function(func, argument_grid, ...) {
  stopifnot(inherits(argument_grid, "data.frame"))
  n_cases = nrow(argument_grid)
  for (case_idx in seq_len(n_cases)) {
    case_args = unlist(argument_grid[case_idx, ], recursive = FALSE)
    case_args = append(case_args, ...)
    expect_no_condition(do.call(func, args = case_args))
  }
}
test_that("sphere_shade", {
  # Every built-in texture palette should render volcano cleanly.
  palette_grid = expand.grid(texture = list("imhof1", "imhof2", "imhof3", "imhof4",
                                            "desert", "bw", "unicorn"),
                             sunangle = list(315))
  run_tests_success("sphere_shade", palette_grid, list(heightmap = volcano))
  # A user-supplied texture is accepted as well.
  user_texture = create_texture("red","green","blue","yellow","purple")
  expect_no_condition(sphere_shade(heightmap = volcano, texture = user_texture))
  # Sun angles outside [0, 360), including negatives, crossed with z scales.
  sunangle_grid = expand.grid(texture = list("imhof1"),
                              sunangle = list(315, -315, 0, 720, -800),
                              zscale = list(1, 10))
  run_tests_success("sphere_shade", sunangle_grid, list(heightmap = volcano))
  # Precomputed normal-vector path.
  precomputed_normals = calculate_normal(volcano)
  normals_grid = expand.grid(texture = list("imhof1"),
                             normalvectors = list(precomputed_normals))
  run_tests_success("sphere_shade", normals_grid, list(heightmap = volcano))
})
test_that("height_shade", {
  # Several color ramps crossed with implicit and explicit height ranges.
  ramp_grid = expand.grid(texture = list(grDevices::colorRampPalette(c("#6AA85B", "#D9CC9A", "#FFFFFF"))(256),
                                         heat.colors(256),
                                         terrain.colors(256)),
                          range = list(NULL, range(montereybay), c(0, max(montereybay))))
  run_tests_success("height_shade", ramp_grid, list(heightmap = montereybay))
})
test_that("lamb_shade", {
  # Sun position (including > 360 degrees), z scale and negative clamping.
  lambert_grid = expand.grid(sunangle = list(-45, 780),
                             sunaltitude = list(0, 45, 90),
                             zscale = list(1, 10),
                             zero_negative = list(TRUE, FALSE))
  run_tests_success("lamb_shade", lambert_grid, list(heightmap = volcano))
})
test_that("texture_shade", {
  # Extremes of each texture_shade tuning parameter.
  texture_grid = expand.grid(detail = list(0, 1),
                             contrast = list(0.5, 10),
                             brightness = list(-10, 10),
                             transform = list(TRUE, FALSE),
                             dx = list(1, 10),
                             dy = list(1, 10),
                             pad = list(50, 200))
  run_tests_success("texture_shade", texture_grid, list(heightmap = volcano))
})
test_that("ray_shade", {
  # Sun position, scaling, search distance and custom angle breaks.
  ray_grid = expand.grid(sunangle = list(-45, 90),
                         sunaltitude = list(10, 90),
                         zscale = list(1, 3),
                         maxsearch = list(NULL, 10),
                         anglebreaks = list(NULL, seq(10, 20, by = 1), seq(10, 50, by = 5)))
  run_tests_success("ray_shade", ray_grid, list(heightmap = volcano))
  # Test with shadow cache
  cached_shadow = ray_shade(volcano)
  recompute_mask = volcano > 150
  run_tests_success("ray_shade", ray_grid, list(heightmap = volcano,
                                                cache_mask = recompute_mask,
                                                shadow_cache = cached_shadow))
})
test_that("ambient_shade", {
  # Sky sampling resolution, scaling, search distance and angle breaks.
  ambient_grid = expand.grid(sunbreaks = list(3, 12, 24),
                             zscale = list(1, 3),
                             maxsearch = list(10, 30),
                             anglebreaks = list(NULL, seq(10, 20, by = 1)))
  run_tests_success("ambient_shade", ambient_grid, list(heightmap = volcano))
  # Test with shadow cache
  cached_shadow = ray_shade(volcano)
  recompute_mask = volcano > 150
  run_tests_success("ambient_shade", ambient_grid, list(heightmap = volcano,
                                                        cache_mask = recompute_mask,
                                                        shadow_cache = cached_shadow))
})
test_that("constant_shade", {
  # Each color crossed with transparent/semi-transparent/opaque alpha.
  constant_grid = expand.grid(color = list("white", "red", "black"),
                              alpha = list(0, 0.5, 1))
  run_tests_success("constant_shade", constant_grid, list(heightmap = volcano))
})
test_that("create_texture", {
  # Five-color texture, with and without explicit corner colors.
  expect_no_condition(create_texture("#fff673","#55967a","#8fb28a","#55967a","#cfe0a9"))
  expect_no_condition(create_texture("#fff673","#55967a","#8fb28a","#55967a","#cfe0a9",
                                     cornercolors = c("red","blue","pink","orange")))
})
|
bcffb51e83db17d82ef5fd5557875d9643eb719e
|
83b9cc7a2695e5526df70db993ccf99ec0129e7d
|
/Session_2/08_group_by_example.R
|
915418404b8285420c562d81cbebca8dd42b3c4b
|
[] |
no_license
|
jmh5ad/R_for_Digital_Humanities
|
3c646d2a39466231330c3ce8af28fb9ab04c9f4f
|
6e338e11064b9ca57dfaf5f8ddbe2e6ee4d3ba00
|
refs/heads/master
| 2023-05-14T18:12:36.563143
| 2021-06-11T03:19:12
| 2021-06-11T03:19:12
| 371,443,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
08_group_by_example.R
|
# Demonstrates dplyr group_by()/summarize() on a tiny word-occurrence table.
library(dplyr)

# One row per occurrence of a word: its chapter, paragraph and sentence.
word_positions <- tibble(
  word = c("dream", "dream", "dream", "life", "life"),
  chapter = c(1, 2, 3, 1, 3),
  paragraph = c(2, 8, 7, 3, 5),
  sentence = c(3, 1, 2, 4, 4)
)

# Mean paragraph number per chapter.
chapter_summary <- word_positions %>%
  group_by(chapter) %>%
  summarize(mean(paragraph))

print(chapter_summary)
|
3016c5144fb72636b469a785d73ab8d3d4e2094a
|
076b0b274efa646ee88f3aea5375959f9e8e8a65
|
/R/data.R
|
cf70524a4a8512e7004c4aed041e5f6f0877db70
|
[] |
no_license
|
djvanderlaan/reclin
|
40df352ba99540146b6ea5fd4187c9aff571a300
|
23d6600ee894d7358260c51d09fa86af3a782c3d
|
refs/heads/master
| 2022-10-03T10:43:29.111462
| 2022-10-01T15:16:58
| 2022-10-01T15:16:58
| 97,716,500
| 56
| 16
| null | 2018-09-14T16:09:32
| 2017-07-19T13:00:30
|
R
|
UTF-8
|
R
| false
| false
| 1,354
|
r
|
data.R
|
#' Tiny example dataset for probabilistic linkage
#'
#' Contains fictional records of 7 persons.
#'
#' \itemize{
#'   \item \code{id} the id of the person; this contains no errors and can
#'   be used to validate the linkage.
#'   \item \code{lastname} the last name of the person; contains errors.
#'   \item \code{firstname} the first name of the person; contains errors.
#'   \item \code{address} the address; contains errors.
#'   \item \code{sex} the sex; contains errors and missing values.
#'   \item \code{postcode} the postcode; contains no errors.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name linkexample1
#' @rdname linkexample
#' @format Two data frames with resp. 6 and 5 records and 6 columns.
NULL
#' @name linkexample2
#' @rdname linkexample
NULL
#' Spelling variations of a set of town names
#'
#' Contains spelling variations, found in various files, of a set of
#' town/village names. Names were selected that contain 'rdam' or 'rdm'.
#' The correct/official names are also given. This data set can be used as
#' an example data set for deduplication.
#'
#' \itemize{
#'   \item \code{name} the name of the town/village as found in the files
#'   \item \code{official_name} the official/correct name
#' }
#'
#' @docType data
#' @keywords datasets
#' @name town_names
#' @format A data frame with 584 records and two columns.
NULL
|
73fdd22d19328088a64412718352bf174a53299a
|
45c9bbfd9290f210b7812b1c596ef8bbf71d766a
|
/R/CLT_normal_movie.R
|
2acd9e3313f86216c9c20fb152fc0a2c14cb0bb5
|
[] |
no_license
|
anhnguyendepocen/stat0002
|
0ecac4af4e85c6b5db05df10d49bd9bee1f21a69
|
66a6372043ef39cec18465d0cf339bb50e02bdf9
|
refs/heads/master
| 2023-01-02T01:12:46.374718
| 2020-10-16T20:27:34
| 2020-10-16T20:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,074
|
r
|
CLT_normal_movie.R
|
# ============================== clt_normal_movie =============================
#' Central Limit Theorem movie: normal data
#'
#' A movie to illustrate the ideas of a sampling distribution of a random
#' variable and the central limit theorem (CLT). In this case (based on
#' random samples from a normal distribution) the CLT provides an exact
#' result.
#'
#' @param n An integer scalar. The size of the samples drawn from a
#' normal distribution.
#' @param mu,sigma Numeric scalars. The respective mean and standard
#' deviation of the normal distribution from which data are to be
#' simulated using \code{\link{rnorm}}.
#' @param xlab A character scalar. A name to use to label the horizontal
#' axis of the plots.
#' @param pos A numeric integer. Used in calls to \code{\link{assign}}
#' to make information available across successive frames of a movie.
#' By default, uses the current environment.
#' @param envir An alternative way (to \code{pos}) of specifying the
#' environment. See \code{\link{environment}}.
#' @details Loosely speaking, a consequence of the
#' \href{https://en.wikipedia.org/wiki/Central_limit_theorem}{Central Limit Theorem (CLT)}
#' is that, in many situations, the mean of a \strong{large number} of
#' independent random variables has \strong{approximately} a normal distribution,
#' even if these original variables are not normally distributed.
#'
#' This movie illustrates this in the very special case where the original
#' variables \emph{are} normally distributed. Samples of size \code{n}
#' are repeatedly simulated from a normal distribution. These samples are
#' summarized using a histogram that appears at the top of the movie screen.
#' For each sample the mean of these \code{n} values is calculated, stored
#' and added to another histogram plotted below the first histogram.
#' The respective probability density functions (p.d.f.s) of the original
#' variables and the means are superimposed on these histograms.
#' The latter is known to be exactly a normal p.d.f. in this special case.
#'
#' The user may choose the sample size \code{n}, that is, the number of
#' values over which a mean is calculated, the mean \code{mu} and/or
#' standard deviation \code{sigma} of the normal distribution from which
#' values are simulated and the label \code{xlab} for the horizontal axis.
#'
#' Once it starts, two aspects of this movie are controlled by the user.
#' Firstly, there are buttons to increase (+) or decrease (-) the sample
#' size, that is, the number of values over which a mean is calculated.
#' Then there is a button labelled "simulate another sample of size n".
#' Each time this button is clicked a new sample is simulated and its sample
#' mean added to the bottom histogram.
#'
#' Another movie (\code{\link{clt_exponential_movie}}) illustrates the CLT
#' in the case where the original variables are exponentially distributed.
#' @return Nothing is returned, only the animation is produced.
#' @seealso \code{\link{stat0002movies}}: general information about the movies.
#' @seealso \code{\link{clt_exponential_movie}}: a similar movie using data
#' simulated from an exponential distribution.
#' @examples
#' clt_normal_movie(44, 7.22, sqrt(1.36), "weight (pounds)")
#' @export
clt_normal_movie <- function(n = 30, mu = 0, sigma = 1, xlab = "x", pos = 1,
                             envir = as.environment(pos)) {
  # Assign variables to an environment so that they can be accessed inside
  # clt_normal_movie_plot().
  # old_n = 0 never equals a valid n, so the first frame starts a fresh
  # vector of sample means (see the n != old_n check in the plot function).
  old_n <- 0
  assign("old_n", old_n, envir = envir)
  assign("mu", mu, envir = envir)
  assign("sigma", sigma, envir = envir)
  assign("xlab", xlab, envir = envir)
  # Create buttons for movie
  clt_norm_panel <- rpanel::rp.control("sample size", n = n, mu = mu,
                                       sigma = sigma, envir = envir)
  # +/- control for the sample size n; each press redraws the frame.
  rpanel::rp.doublebutton(clt_norm_panel, n, 1, range=c(1, 1000),
                          repeatinterval = 20, initval = n,
                          title = "sample size, n",
                          action = clt_normal_movie_plot)
  # Button that simulates one more sample and redraws.
  rpanel::rp.button(clt_norm_panel, repeatinterval = 20,
                    title = "simulate another sample of size n",
                    action = clt_normal_movie_plot)
  # Draw the first frame immediately.
  rpanel::rp.do(clt_norm_panel, clt_normal_movie_plot)
  return(invisible())
}
# Function to be called by clt_normal_movie(): draws one movie frame -- a new
# sample of size n (top histogram) and the running collection of sample
# means (bottom histogram), each with its exact normal p.d.f. overlaid.
# FIX(review): bare dnorm()/axis() calls are now namespaced (stats::/
# graphics::) for consistency with every other call in this function.
clt_normal_movie_plot <- function(panel) {
  with(panel, {
    # Restore the graphics state when the frame is done.
    old_par <- graphics::par(no.readonly = TRUE)
    on.exit(graphics::par(old_par))
    graphics::par(mfrow = c(2, 1), oma = c(0, 0, 0, 0),
                  mar = c(4, 4, 2, 2) + 0.1)
    # Re-export the movie parameters so later frames can read them.
    assign("mu", mu, envir = envir)
    assign("sigma", sigma, envir = envir)
    assign("xlab", xlab, envir = envir)
    # Simulate a fresh sample and record its mean.
    y <- stats::rnorm(n, mean = mu, sd = sigma)
    mean_y <- mean(y)
    # Changing n restarts the collection of sample means.
    if (n != old_n) {
      sample_means <- mean_y
    } else {
      sample_means <- c(sample_means, mean_y)
    }
    assign("sample_means", sample_means, envir = envir)
    # Plot window: mu +/- 3 sigma; values outside are dropped before plotting.
    h_low <- mu - 3 * sigma
    h_up <- mu + 3 * sigma
    ytop <- stats::dnorm(0, sd = sigma) * 1.5
    y <- y[y > h_low & y < h_up]
    # Histogram with rug (the raw sample).
    graphics::hist(y, col = 8, probability = TRUE, axes = FALSE,
                   xlab = xlab, ylab = "density", main = "",
                   ylim = c(0, ytop), xlim = c(h_low, h_up))
    graphics::axis(2)
    graphics::axis(1, line = 0.5)
    graphics::rug(y, line = 0.5, ticksize = 0.05)
    graphics::title(paste("sample size, n = ", n))
    # Overlay the exact population p.d.f.
    graphics::curve(stats::dnorm(x, mean = mu, sd = sigma), from = h_low,
                    to = h_up, n = 500, bty = "l", ylab = "density",
                    las = 1, xpd = TRUE, lwd = 2, add = TRUE, lty = 2)
    my_mean <- round(mu, 2)
    my_var <- round(sigma ^ 2, 2)
    my_leg <- paste("N(", my_mean, ",", my_var, ")")
    graphics::legend("topright", legend = my_leg, lty = 2, lwd = 2)
    # Mark the new sample mean on the top plot ...
    graphics::segments(mean_y, 0, mean_y, -10, col = "red", xpd = TRUE, lwd = 2)
    graphics::points(mean_y, 0, pch = 16, col = "red", cex = 1.5)
    ytop <- stats::dnorm(0, sd = sigma / sqrt(n)) * 1.5
    y <- sample_means
    y <- y[y > h_low & y < h_up]
    my_xlab <- paste("sample mean of", xlab)
    # Histogram with rug (the accumulated sample means).
    graphics::hist(y, col = 8, probability = TRUE, las = 1, axes = FALSE,
                   xlab = my_xlab, ylab = "density", main = "",
                   ylim = c(0, ytop), xpd = TRUE, xlim = c(h_low, h_up))
    graphics::axis(2)
    graphics::axis(1, line = 0.5)
    graphics::rug(y, line = 0.5, ticksize = 0.05, col = "red")
    # Exact sampling distribution of the mean: N(mu, sigma^2 / n).
    graphics::curve(stats::dnorm(x, mean = mu, sd = sigma / sqrt(n)),
                    from = h_low, to = h_up, n = 500, bty = "l",
                    ylab = "density", las = 1, xpd = TRUE, lwd = 2,
                    add = TRUE, lty = 2)
    my_leg_2 <- paste("N(", my_mean, ",", my_var, "/ n)")
    graphics::legend("topright", legend = my_leg_2, lty = 2, lwd = 2)
    # ... and drop an arrow onto the bottom plot at the same x position.
    graphics::arrows(mean_y, 2 * ytop, mean_y, 0, col = "red", lwd = 2, xpd = TRUE)
    old_n <- n
    assign("old_n", old_n, envir = envir)
  })
  return(invisible(panel))
}
|
ef9a9b64b21a11f042a472347601125ef398e2d1
|
6cbb51fe996e65a51a8d9f2f35e3159721933f25
|
/inst/shiny/ui_09_3_celdaWorkflow.R
|
90d877ed587aed4479820649cea06ed0819b503f
|
[
"MIT"
] |
permissive
|
compbiomed/singleCellTK
|
927fb97e257ba89cddee9a90f9cb7cb375a5c6fb
|
990e89e7ccfbf663f23c793454f72fb8c6878a32
|
refs/heads/master
| 2023-08-11T09:17:41.232437
| 2023-07-26T20:43:47
| 2023-07-26T20:43:47
| 68,756,293
| 144
| 89
|
NOASSERTION
| 2023-09-06T18:22:08
| 2016-09-20T21:50:24
|
R
|
UTF-8
|
R
| false
| false
| 13,900
|
r
|
ui_09_3_celdaWorkflow.R
|
# User Interface for Celda Workflow ---
shinyPanelCelda <- fluidPage(
h1("Celda"),
h5(tags$a(href = paste0(docs.artPath, "ui_celda_curated_workflow.html"),
"(help)", target = "_blank")),
inlineCSS(list(".panel-danger>.panel-heading" = "background-color:#dcdcdc; color:#000000", ".panel-primary>.panel-heading" = "background-color:#f5f5f5; color:#000000; border-color:#dddddd", ".panel-primary" = "border-color:#dddddd;", ".panel-primary>.panel-heading+.panel-collapse>.panel-body" = "border-color:#dddddd;")),
bsCollapse(
id = "CeldaUI",
open = "Data Input",
bsCollapsePanel(
"Identify Number of Feature Modules",
fluidRow(
column(
4,
panel(
selectInput("celdaassayselect", "Choose an Assay:",
choices = c()),
selectInput("celdafeatureselect", "Choose Feature Selection Method:",
choices = c("None", "SeuratFindHVG", "Scran_modelGeneVar")),
conditionalPanel("input.celdafeatureselect == 'SeuratFindHVG'",
selectInput("celdaseurathvgmethod", "Select HVG method:",
choices = c("vst", "dispersion", "mean.var.plot"))),
conditionalPanel("input.celdafeatureselect != 'None'",
numericInput("celdafeaturenum",
"Select number of variable features:", min = 1, max = 5000, value = 2500)),
numericInput("celdarowcountsmin",
"Keep features with this many counts:", value = 3),
numericInput("celdacolcountsmin",
"In at least this many cells:", value = 3),
numericInput("celdaLinit", "Select Number of Initial Feature Modules:", min = 1, max = 25, value = 10),
numericInput("celdaLmax", "Select Number of Maximum Feature Modules:", min = 15, max = 200, value = 100),
actionButton("celdamodsplit", "Recursive Module Split"),
hidden(
numericInput("celdaLselect", "Select Number of Feature Modules:", min = 1, max = 100, value = 25),
actionButton("celdaLbtn", "Select Number of Modules")
)
)
),
column(
8,
fluidRow(
column(
12,
hidden(
tags$div(class = "celda_modsplit_plots", tabsetPanel(id = "celdaModsplitTabset", type = "tabs")
)
)
)
)
)
),
style = "primary"
),
bsCollapsePanel(
"Identify Number of Cell Clusters",
fluidRow(
column(
4,
panel(
numericInput("celdaKinit", "Select Number of Initial Cell Modules:", min = 1, max = 10, value = 5),
numericInput("celdaKmax", "Select Number of Maximum Cell Modules:", min = 15, max = 40, value = 25),
actionButton("celdacellsplit", "Recursive Cell Split"),
hidden(
numericInput("celdaKselect", "Select Number of Cell Clusters:", min = 2, max = 20, value = 10),
actionButton("celdaKbtn", "Select Number of Clusters")
)
)
),
column(
8,
fluidRow(
column(
12,
hidden(
tags$div(
class = "celda_cellsplit_plots",
fluidRow(
tabsetPanel(
tabPanel("Rate of perplexity change",
panel(
plotlyOutput(outputId = "plot_cellsplit_perpdiff", height = "auto")
)),
tabPanel("Perplexity Plot",
panel(
plotlyOutput(outputId = "plot_cellsplit_perp", height = "auto")
)),
tabPanel("Preliminary UMAP Plots",
uiOutput("celdaKplots"))
)
)
)
)
)
)
)
),
style = "primary"
),
bsCollapsePanel(
"Visualization",
tabsetPanel(
tabPanel(
"Embeddings",
fluidRow(
column(
4,
panel(
radioButtons("celdaPlot", "Plot Type:", c("UMAP", "TSNE")),
conditionalPanel(
condition = "input.celdaPlot == 'UMAP'",
numericInput("celdaUMAPmaxCells",
label =
"Max.cells: Maximum number of cells to plot",
value = 25000,
min = 1,
step = 1),
numericInput("celdaUMAPminClusterSize",
label =
"Min.cluster.size: Do not subsample cell
clusters below this threshold",
value = 100,
min = 1,
step = 1),
numericInput("celdaUMAPSeed",
label =
"Seed: ",
value = 12345),
numericInput("celdaUMAPmindist",
label =
"Min.dist: Effective minimum distance
between embedded points",
value = 0.75),
numericInput("celdaUMAPspread",
label =
"Spread: ",
value = 1),
numericInput("celdaUMAPnn",
label =
"nNeighbors: ",
value = 30),
actionButton("CeldaUmap", "Run UMAP")
),
conditionalPanel(
condition = "input.celdaPlot == 'TSNE'",
numericInput("celdatSNEmaxCells",
label =
"Max.cells: Maximum number of cells to
plot",
value = 25000,
min = 1,
step = 1),
numericInput("celdatSNEminClusterSize",
label =
"Min.cluster.size: Do not subsample cell
clusters below this threshold",
value = 100,
min = 1,
step = 1),
numericInput("celdatSNEPerplexity",
label =
"Perplexity: ",
value = 20),
numericInput("celdatSNEmaxIter",
label =
"Max.iter: Maximum number of iterations in
tSNE generation",
value = 2500),
numericInput("celdatSNESeed",
label =
"Seed: ",
value = 12345),
actionButton("CeldaTsne", "Run tSNE")
)
)
),
column(
8,
panel(
conditionalPanel(
condition = "input.celdaPlot == 'UMAP'",
plotlyOutput("celdaumapplot", height = "auto")
),
conditionalPanel(
condition = "input.celdaPlot == 'TSNE'",
plotlyOutput("celdatsneplot", height = "auto")
)
)
)
)
),
#tabPanel("Heatmap",
# fluidRow(
# panel(
# plotOutput("celdaheatmapplt")
# )
# )
#),
tabPanel(
"Module Analysis",
fluidRow(
column(
4,
panel(
numericInput(inputId = "celdamodheatmapnum",
label = "Select module to display on heatmap:", value = 10, step = 1),
numericInput(inputId = "celdamodheatmaptopcells",
label = "Select number of cells to display on heatmap:", value = 100),
actionButton("celdamodheatmapbtn", "Plot Module Heatmap")
)
),
column(
8,
panel(
plotOutput(outputId = "celdamodheatmapplt") %>% withSpinner(size = 3, color = "#0dc5c1", type = 8),
plotOutput(outputId = "celdamodprobplt") %>% withSpinner(size = 3, color = "#0dc5c1", type = 8)
)
)
)
),
tabPanel("Probablity Map",
fluidRow(
panel(
plotOutput("celdaprobmapplt")
)
)
)
),
style = "primary"
)
),
nonLinearWorkflowUI(id = "nlw-celda")
)
|
0a6d39a442ecacf18c6126a88f7b943baeae78e6
|
f928d982e782e04d403fd90a758c879e24bdc79e
|
/man/theme_zi.Rd
|
db7f1f22412da5c7a733b8407f904dc796d8639d
|
[
"MIT"
] |
permissive
|
kvdatascience/zicolors
|
161ba57d78d7f0190ae916563b6142156d8e0699
|
bfd2cfa45dc5fc73cbd1a92821c2001618660e62
|
refs/heads/master
| 2023-01-31T14:29:36.579838
| 2020-12-10T14:50:18
| 2020-12-10T14:50:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 665
|
rd
|
theme_zi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{theme_zi}
\alias{theme_zi}
\title{Zi Theme based on theme_grey}
\usage{
theme_zi(fontsize = 14, font = "Calibri")
}
\arguments{
\item{fontsize}{Base font size in points; defaults to 14.}

\item{font}{Font family used for all text elements; defaults to "Calibri".}
}
\description{
Zi Theme based on theme_grey
}
\examples{
library(extrafont)
library(tidyverse)
font_import()
loadfonts(device="win")
ggplot(as.data.frame(Titanic) \%>\% group_by(Class) \%>\% summarise(n=sum(Freq)), aes(x=Class, y=n)) + geom_bar(stat="identity" , fill=zi_cols("ziblue")) + theme_zi()
}
\keyword{theme}
|
b43ea0fb8c6a60e2e250889000588b0d02ebd130
|
cbf1300ffdf9c70c8d551b387f58bf0782c4693e
|
/man/get_Lu.Rd
|
09559b5ac52188b3ded77d425ef02f6ec59678e9
|
[] |
no_license
|
monogenea/gflasso
|
1250fd0065a2413aa5d10adba7fb4f11734bc8fb
|
93c6ded279a90786b0bff96c1100f0f14e79e20c
|
refs/heads/master
| 2021-05-07T02:19:46.855777
| 2020-02-11T17:52:48
| 2020-02-11T17:52:48
| 110,537,586
| 3
| 0
| null | 2017-11-13T11:05:06
| 2017-11-13T11:05:06
| null |
UTF-8
|
R
| false
| true
| 625
|
rd
|
get_Lu.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input.R
\name{get_Lu}
\alias{get_Lu}
\title{Get automatic step-sizes}
\usage{
get_Lu(X, C, lambda, gamma, mu)
}
\arguments{
\item{X}{The data matrix.}
\item{C}{The matrix C defined in the reference.}
\item{lambda}{The l1 regularization parameter.}
\item{gamma}{The graph regularization parameter.}
\item{mu}{The smoothing parameter.}
}
\value{
Lu The automatically chosen step sizes defined in the reference.
}
\description{
Get automatic step-sizes
}
\references{
Smoothing Proximal Gradient Method for General Structured Sparse Regression
}
|
f0ecddbfbedb6c573e5e7b7d5675c452e0280af1
|
0e670f1aa94b59f88ee69d8767c0cb4960d75f07
|
/r_scripts/cherry_rnaseq_1019.R
|
fb498e77c85ca1a4b3c71a9ed680aaadeabda3d1
|
[] |
no_license
|
atimms/ratchet_scripts
|
ce24bc222775364828459bcfe943182276328cd7
|
b1a722c0d65a64075de8363fba82b31861bec05c
|
refs/heads/master
| 2023-01-03T20:57:58.415901
| 2020-11-02T17:27:12
| 2020-11-02T17:27:12
| 262,200,513
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,432
|
r
|
cherry_rnaseq_1019.R
|
## cherry_rnaseq_1019.R
## DESeq2 differential-expression analysis of retina vs RPE RNA-seq samples:
## loads featureCounts tables, normalizes, makes QC plots (sample-distance
## heatmap, PCA, top-variable-gene clustering), runs the retina-vs-rpe
## contrast, and plots counts for a few marker genes.
library("DESeq2")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("genefilter")
workingDir = "/data/atimms/cherry_rnaseq_1019";
setwd(workingDir);
## read in count matrix and sample metadata (rows of colData match columns of countData)
countData1 <- read.table('cherry_rnaseq_1019_ret_rpe.star_fc.counts.txt', header=T, row.names=1)
colData1 <- read.table('cherry_rnaseq_1019_ret_rpe.star_fc.metadata.txt', header=T, row.names=1)
head(countData1)
head(colData1)
## build the DESeq2 dataset; the design adjusts for sex and ethnicity and
## tests tissue (the last term) by default
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~ sex + ethnicity + tissue)
dds
## remove rows of the DESeqDataSet that have no counts, or only one or two
## counts summed across all samples
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 2, ]
nrow(dds)
## write size-factor-normalized counts to CSV
dds_norm <- estimateSizeFactors(dds)
count_data <- counts(dds_norm, normalized=TRUE)
write.csv(count_data, file="cherry_rnaseq_1019.ret_rpe.norm_counts.csv")
## variance-stabilizing transform (blind=FALSE keeps the design in mind)
rld <- vst(dds, blind=FALSE)
## check transformed vs raw values
head(assay(rld), 3)
head(assay(dds),3)
## optionally write the VST matrix to csv file
#write.csv(assay(rld), file="cherry_rnaseq_1019.ret_rpe.deseq.vst_counts.csv")
## calculate Euclidean sample-to-sample distances from the VST values
sampleDists <- dist( t( assay(rld) ) )
sampleDists
## and plot the distance matrix as a heat map
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste( rld$name, rld$tissue , sep="-" )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
    clustering_distance_rows=sampleDists,
    clustering_distance_cols=sampleDists,
    col=colors)
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.sample_heatmap.pdf', width = 7, height = 5)
## principal components analysis, colored by sample name and by tissue
plotPCA(rld, intgroup = c("name"))
ggsave('cherry_rnaseq_1019.ret_rpe.name_pca.pdf')
plotPCA(rld, intgroup = c("tissue"))
ggsave('cherry_rnaseq_1019.ret_rpe.tissue_pca.pdf')
## gene clustering:
## take the 25 most variable genes (row variance of the VST matrix)
topVarGenes <- head(order(rowVars(assay(rld)),decreasing=TRUE),25)
mat <- assay(rld)[ topVarGenes, ]
mat <- mat - rowMeans(mat)  # center each gene on its mean across samples
newdf <- as.data.frame(colData(rld)[,c("tissue","name","sex")])
pheatmap(mat, annotation_col=newdf, fontsize_row=8, fontsize_col=8)
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.25_var_gene_clustering.pdf', width = 7, height = 5)
## differential expression:
## run the full DESeq2 pipeline (size factors, dispersions, Wald test)
dds <- DESeq(dds)
## extract the specific retina-vs-rpe contrast
res2 <- results(dds, contrast=c("tissue", "retina", "rpe"))
## get summary
summary(res2) #lots of significant genes
## sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
## save results as a data frame and write to csv
## NOTE(review): the original comment said "top 20k results" but the code
## keeps the first 27,910 rows -- presumably all tested genes; confirm.
resOrdered2DF <- as.data.frame(resOrdered2)[1:27910,]
write.csv(resOrdered2DF, file="cherry_rnaseq_1019.ret_rpe.retina_vs_rpe.csv")
## plot normalized counts for individual marker genes, by tissue
plotCounts(dds, gene="PHGDH", intgroup=c("tissue"))
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.PHGDH.pdf')
plotCounts(dds, gene="RPE65", intgroup=c("tissue"))
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.RPE65.pdf')
plotCounts(dds, gene="RHO", intgroup=c("tissue"))
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.RHO.pdf')
plotCounts(dds, gene="NR2E3", intgroup=c("tissue"))
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.NR2E3.pdf')
plotCounts(dds, gene="ABCA4", intgroup=c("tissue"))
dev.copy2pdf(file='cherry_rnaseq_1019.ret_rpe.ABCA4.pdf')
|
f2794960824344de2a50124f182c3eca5091285a
|
c6f8b268cdd35377a81c3e95953a8ea6aed2b3ae
|
/R/input_validation.R
|
4873d2a29df0955e4bfdbf887c08ef212939d506
|
[] |
no_license
|
cran/skpr
|
66afcf4fb51a07d95a72e5ecf28db60c9de53937
|
e34638d13253b7f82de9fcfee87f2c17f2619c37
|
refs/heads/master
| 2023-06-26T22:02:24.856196
| 2023-06-16T15:10:02
| 2023-06-16T15:10:02
| 100,655,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
input_validation.R
|
#'@title Check Model Formula Validity
#'
#'@keywords internal
check_model_validity = function(model) {
  # A valid model formula here is one-sided (~ x + ...), i.e. length 2.
  # A two-sided formula (y ~ x) has length 3, and its LHS is model[[2]].
  if(length(model) != 2) {
    # BUG FIX: as.character(model) returns a length-3 character vector for a
    # two-sided formula, so sprintf() recycled its arguments and stop()
    # emitted three concatenated messages. deparse() yields single strings.
    stop(sprintf("Model (`%s`) should not have a left-hand side (here, given as `%s`). Only include the right-hand side of the formula.",
                 deparse(model), deparse(model[[2]])))
  }
}
|
ef9e799d272f282a82d7ae70eb5a92963bd169a7
|
70412a43e78946f75c14a05d79e23a08636ba625
|
/Classes/Day_06/slotResource/final/fitSlots(1).R
|
92738213f7319331ec264853e633cc7ca3a080fc
|
[] |
no_license
|
danieljwilson/CMMC-2018
|
94de25ec725b331477d6d38349de3db540d51354
|
8450f092c81f25a056e0de607f05cd79421271e8
|
refs/heads/master
| 2020-03-22T19:45:07.769122
| 2018-08-04T16:57:08
| 2018-08-04T16:57:08
| 140,547,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 470
|
r
|
fitSlots(1).R
|
## Fit the slot model of change detection by maximum likelihood.
source("objective.R")  # provides slotObjective(), the likelihood function
## The data: "change" responses on change trials (hits) and on same trials
## (false alarms), out of N trials each, for two set sizes.
changeK = c(35,25); changeN = c(40,40)
sameK = c(5,15); sameN = c(40,40)
setSize = c(4,8)
## Starting values for the slot model's two parameters.
## Presumably c(capacity K, guessing/attention parameter) given the box
## constraints lower=c(0,0), upper=c(8,1) below -- confirm in objective.R.
startpars = c(3.5, 0.5)
## Maximize the likelihood (fnscale = -1 turns optim's minimization into a
## maximization) under box constraints via L-BFGS-B.
out = optim(par = startpars, fn = slotObjective, changeN = changeN, sameN = sameN, changeK = changeK, sameK = sameK, setSize = setSize, lower = c(0,0), upper = c(8,1), method = c('L-BFGS-B'), control = list(fnscale = -1))
slotBestPars = out$par        # best-fitting parameter values
slotMaxLikelihood = out$value # maximized objective value
|
1156a22f584c5a6c04d280adf21a607af872bc2f
|
7a28b73b0e29e125f86fd0ee3c7884642272be88
|
/code/institute-materials/day1_monday/subset-h5-file-R.R
|
011ce36768948400736f401429ab7d4437eb0b74
|
[] |
no_license
|
lwasser/neon-data-institute-2016
|
27716137cd5076d403f7af391cbdf8aa375a0629
|
20be06de2d61e13d33b86919b4c8874dda97516c
|
refs/heads/gh-pages
| 2020-12-30T19:57:48.250648
| 2016-10-11T15:45:32
| 2016-10-11T15:45:32
| 64,349,558
| 2
| 0
| null | 2016-07-27T23:55:20
| 2016-07-27T23:55:19
| null |
UTF-8
|
R
| false
| false
| 4,575
|
r
|
subset-h5-file-R.R
|
## subset-h5-file-R.R
## Purled from an Rmd lesson: opens a NEON hyperspectral HDF5 file, clips
## single bands and an RGB stack to a vector plot boundary, and computes the
## mean spectral signature over the clipped region.
## ----call-libraries, results="hide"--------------------------------------
# load packages
library(rhdf5)
library(raster)
library(plyr)
library(rgeos)
library(rgdal)
library(ggplot2)
# be sure to set your working directory
# setwd("~/Documents/data/NEONDI-2016") # Mac
# setwd("~/data/NEONDI-2016") # Windows
## ----import-h5-functions-------------------------------------------------
# install devtools (only if you have not previously intalled it)
# install.packages("devtools")
# call devtools library
#library(devtools)
## install from github
# install_github("lwasser/neon-aop-package/neonAOP")
## call library
library(neonAOP)
# your file will be in your working directory! This one happens to be in a diff dir
# than our data
# source("/Users/lwasser/Documents/GitHub/neon-aop-package/neonAOP/R/aop-data.R")
## ----open-H5-file, results='hide'----------------------------------------
# Define the file name to be opened
f <- "NEONdata/D17-California/TEAK/2013/spectrometer/reflectance/Subset3NIS1_20130614_100459_atmcor.h5"
# Look at the HDF5 file structure
h5ls(f, all=T)
# define the CRS in EPGS format for the file (UTM zone 11N, WGS84)
epsg <- 32611
## ----read-band-wavelengths-----------------------------------------------
# read in the wavelength information from the HDF5 file
wavelengths<- h5read(f,"wavelength")
# convert wavelength to nanometers (nm)
# NOTE: this is optional!
wavelengths <- wavelengths*1000
## ----extract-subset------------------------------------------------------
# get of H5 file map tie point (spatial extent of the flight line)
h5.ext <- create_extent(f)
# turn the H5 extent into a polygon to check overlap
h5.ext.poly <- as(extent(h5.ext), "SpatialPolygons")
# open file clipping extent (vector boundary of the field plot)
clip.extent <- readOGR("NEONdata/D17-California/TEAK/vector_data", "TEAK_plot")
# assign crs to h5 extent
# paste0("+init=epsg:", epsg) -- it is better to use the proj string here
crs(h5.ext.poly) <- CRS("+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0")
# ensure the two extents overlap (gIntersects returns TRUE/FALSE)
gIntersects(h5.ext.poly, clip.extent)
# if they overlap, then calculate the extent
# this doesn't currently account for pixel size at all
# and these values need to be ROUNDED
yscale <- 1
xscale <- 1
# define index extent
# xmin.index, xmax.index, ymin.index,ymax.index
# all units will be rounded which means the pixel must occupy a majority (.5 or greater)
# within the clipping extent
index.bounds <- calculate_index_extent(extent(clip.extent),
    h5.ext)
# open a band that is subsetted using the clipping extent
b58_clipped <- neonAOP::open_band(fileName=f,
    bandNum=58,
    epsg=32611,
    subsetData = TRUE,
    dims=index.bounds)
# plot clipped bands
plot(b58_clipped,
    main="Band 58 Clipped")
## ----open-many-bands-----------------------------------------------------
# recompute the pixel-index bounds of the clip region
# within the clipping extent
index.bounds <- calculate_index_extent(extent(clip.extent),
    h5.ext)
# create alist of the bands (presumably blue/green/red-ish for an RGB
# composite -- confirm against the wavelength table)
bands <- list(19,34,58)
# within the clipping extent
index.bounds <- calculate_index_extent(extent(clip.extent),
    h5.ext)
# clip out raster stack of the three bands
rgbRast.clip <- neonAOP::create_stack(file=f,
    bands=bands,
    epsg=epsg,
    subset=TRUE,
    dims=index.bounds)
plotRGB(rgbRast.clip,
    stretch="lin")
## ----plot-RGB-stack------------------------------------------------------
# full (unclipped) RGB stack with the clip boundary overlaid
rgbRast <- create_stack(file=f,
    bands=bands,
    epsg=epsg,
    subset=FALSE)
plotRGB(rgbRast,
    stretch="lin")
plot(clip.extent,
    add=T,
    border="yellow",
    lwd=3)
## ----subset-h5-file------------------------------------------------------
# read the raw reflectance cube for the clip region only
# (index = list(cols, rows, all 426 bands))
H5close()
subset.h5 <- h5read(f, "Reflectance",
    index=list(index.bounds[1]:index.bounds[2],
        index.bounds[3]:index.bounds[4],
        1:426)) # the column, row
# mean reflectance per band (averaging over the two spatial dimensions)
final.spectra <- data.frame(apply(subset.h5,
    MARGIN = c(3), # take the mean value for each z value
    mean)) # grab the mean value in the z dimension
final.spectra$wavelength <- wavelengths
# scale the data (reflectance stored as integer x 10000)
names(final.spectra) <- c("reflectance","wavelength")
final.spectra$reflectance <- final.spectra$reflectance / 10000
# plot the data
qplot(x=final.spectra$wavelength,
    y=final.spectra$reflectance,
    xlab="Wavelength (nm)",
    ylab="Reflectance",
    main="Mean Spectral Signature for Clip Region")
|
99f116b8c003ca1983b76ba6d4538a5c0726d6f2
|
89113d0c7ad8b3ed947e8dc3975a0e3e44eb496a
|
/ShinyApps/CensusExplorer/app.R
|
2a915a168cd9fb49948f59a9aa0c0795cd41246b
|
[] |
no_license
|
jp106/Samples
|
c5bea9b962fd39328cf2cd3c04b674a7f1f4ef54
|
756d4e6ab505a8890e34929d554f1efe7cdc752e
|
refs/heads/master
| 2021-01-20T01:34:11.121230
| 2018-02-01T18:59:39
| 2018-02-01T18:59:39
| 89,298,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,057
|
r
|
app.R
|
## Shiny app: browse Indian census data per state (table + map tabs).
library('shiny')
library('readxl')
library('leaflet')

# TODO - When state is selected in the dropdown zoom to that state in map
# DONE - replace statecode in dropdown with statename
# TODO - use http://www.devinfo.org/indiacensuspopulationtotals2011/libraries/aspx/RegDataQuery.aspx
# rest api to get data

# Lookup table mapping state names (shown in the dropdown) to state codes
# (used to locate the per-state Excel workbooks under www/).
state_codes <- read.csv("www/StateCodes.csv")

ui <- fluidPage(
  titlePanel("Indian Census Explorer by State"),
  navbarPage(title = "Explorer",
    tabPanel(title = "Data Explorer",
      fluidRow(
        #column(3,sliderInput(inputId = "slide1",label = "find your number", value = 50, min = 1,max = 200)),
        #column(3,actionButton(inputId = "btnload",label = "Load")),
        #column(3,selectInput(inputId = "selinput",label = "Select a State",choices = 1:35)),
        column(3, selectInput(inputId = "selinput1", label = "Select a State",
                              choices = state_codes$StateName))
      ),
      hr(),
      dataTableOutput(outputId = "statestable"),
      textOutput(outputId = "path")
    ),
    tabPanel(title = "Map Explorer",
      leafletOutput("map", width = "100%", height = 600))
  )
)

server <- function(input, output) {
  # State code for the state name currently selected in the dropdown.
  getStateCode <- reactive({
    state_codes[state_codes$StateName == input$selinput1, 1]
  })

  # Path to the selected state's Excel workbook.
  # BUG FIX: this reactive previously read input$selinput, an input id that
  # does not exist (the dropdown is 'selinput1'), so it always produced
  # "www/.xlsx"; it now derives the path from the state code and is used by
  # the table output below instead of rebuilding the path inline.
  xlpath <- reactive(paste0("www/", getStateCode(), ".xlsx"))

  #output$path<-reactive(getStateCode())
  output$statestable <- renderDataTable(read_excel(xlpath()),
      options = list(pageLength = 10), escape = c('State Code'))

  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      setView(lng = -93.85, lat = 37.45, zoom = 5)
  })
  #output$statemap<-renderPlot(indiadistrds)
}

shinyApp(ui = ui, server = server)
|
8d99528e184d1b72ff06358e8d0eeca4561e810e
|
88491527cc0d1905c966a6f78e2982ea70ed3618
|
/man/mWaveDDemo.Rd
|
9956ad55e180644dc8f17bd516a9fec9565e6e98
|
[] |
no_license
|
jrwishart/mwaved
|
e47a87dd8222149accb1cf8ddb92f4c210f551ea
|
8d46f0ecd619a239b6d66265987d456adba0943d
|
refs/heads/master
| 2022-02-24T09:45:19.729618
| 2021-10-28T10:59:11
| 2021-10-28T10:59:11
| 21,720,773
| 4
| 2
| null | 2021-05-07T09:24:40
| 2014-07-11T03:46:00
|
C++
|
UTF-8
|
R
| false
| true
| 231
|
rd
|
mWaveDDemo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility-mwaved.R
\name{mWaveDDemo}
\alias{mWaveDDemo}
\title{Interactive Demonstration}
\usage{
mWaveDDemo()
}
\description{
Interactive Demonstration
}
|
d6dcc28527ce8c321590b9c538c981c515516079
|
be047b1e124afbcfa8aba5325e79f76ae842a5e1
|
/assignmenet1.r
|
bc28e650fee1ca4c4f5d97e3d7636d78c3e725ae
|
[] |
no_license
|
sthayani/RepData_PeerAssessment1
|
ba934c919eca3f95005e0078be45a6226fcd9a5e
|
aedc7c40a9ef22fb9d17f24f9d18bd509d964b6c
|
refs/heads/master
| 2020-03-28T20:07:06.912918
| 2018-09-24T07:14:40
| 2018-09-24T07:14:40
| 149,040,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,576
|
r
|
assignmenet1.r
|
## Reproducible Research peer assessment 1: analyze activity-monitor step
## counts (61 days x 288 five-minute intervals), with and without imputing
## missing values, and compare weekday vs weekend activity patterns.
# unzip the raw data once, if needed
if(!file.exists("activity.csv"))
unzip("activity.zip",exdir = ".")
Df <- read.csv("activity.csv")
# total steps per day; histogram, mean, and median
sum <- tapply(Df$steps,Df$date,sum)
hist(sum)
mean(sum,na.rm= T)
median(sum,na.rm = T)
# average number of steps across all days, for each 5-minute interval
avg <- tapply(Df$steps,Df$interval,mean,na.rm=T)
plot(names(avg),avg,type = "l",xlab="Interval",ylab = "Average number of steps", main ="Average steps across all days for 5 min inerval")
maxval<- which(avg == max(avg))
# interval with the maximum average number of steps
names(maxval)
# total number of missing step values
newDf <- Df
sum(is.na(newDf$steps))
# impute: replace each interval's NAs with that interval's across-day mean
impute <- function(x) x <- replace(x, is.na(x), mean(x,na.rm = T))
# dummy is a list keyed by interval; each element holds that interval's 61
# daily values (in date order), NAs filled in
dummy <- tapply(newDf$steps,list(newDf$interval),impute)
# rebuild the data frame; unlist(dummy) is interval-major with dates cycling
# fastest, so date repeats the 61-day sequence 288 times and interval
# repeats each value 61 times to match
ddf <- data.frame(steps=unlist(dummy),date=rep(seq(as.Date("2012-10-01"),as.Date("2012-11-30"),by = "day"),288),interval = rep(as.numeric(names(dummy)),each = 61))
#ddf <- ddf[order(ddf$date),]
# daily totals after imputation
sum <- tapply(ddf$steps,ddf$date,sum)
head(sum)
hist(sum)
mean(sum,na.rm= T)
median(sum,na.rm = T)
#ddf$date = as.date(ddf.date)
# split into weekday vs weekend subsets and compare interval averages
ddf$day <- weekdays(ddf$date)
ddf_wkday <- subset(ddf,!day %in% c("Saturday","Sunday"))
ddf_wkend <- subset(ddf,day %in% c("Saturday","Sunday"))
avgwkday <- tapply(ddf_wkday$steps,ddf_wkday$interval,mean,na.rm=T)
avgwkend <- tapply(ddf_wkend$steps,ddf_wkend$interval,mean,na.rm=T)
# stacked time-series panels: weekday (top) vs weekend (bottom)
par(mfrow=c(2,1))
plot(names(avgwkday),avgwkday,type = "l",xlab="Interval",ylab = "Average number of steps", main ="weekday")
plot(names(avgwkend),avgwkend,type = "l",xlab="Interval",ylab = "Average number of steps", main ="weekend")
|
b28ae1ab5d9b0400631d62849d2459b285282ac7
|
f9963618c8d61c7ea5ab1d7ea6c201975f04fe4e
|
/R/metaGene.R
|
e9e0633631e2663c78a45d9226a99e39b17fa8dc
|
[] |
no_license
|
Kraus-Lab/groHMM
|
7f31d0f0ac026e99dd7c8eaab74bb5b5411973c3
|
514f58a04befc2d3c9c8e10ce63ce30f5f05560e
|
refs/heads/master
| 2022-10-19T16:00:39.726593
| 2022-09-28T20:51:02
| 2022-09-28T20:51:02
| 47,837,375
| 1
| 1
| null | 2015-12-11T16:23:24
| 2015-12-11T16:23:24
| null |
UTF-8
|
R
| false
| false
| 19,086
|
r
|
metaGene.R
|
###########################################################################
##
## Copyright 2013, 2014 Charles Danko and Minho Chae.
##
## This program is part of the groHMM R package
##
## groHMM is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
## for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. If not, see <http://www.gnu.org/licenses/>.
##
##########################################################################
#' Returns a histogram of the number of reads in each section of a moving
#' window centered on a certain feature.
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors, set the option 'mc.cores'.
#'
#' @param features A GRanges object representing a set of genomic coordinates.
#' The meta-plot will be centered on the transcription start site (TSS)
#' @param reads A GRanges object representing a set of mapped reads.
#' Instead of 'reads', 'plusCVG' and 'minusCVG' can be used Default: NULL
#' @param plusCVG An IntegerRangesList object for reads with '+' strand.
#' @param minusCVG An IntegerRangesList object for reads with '-' strand.
#' @param size The size of the moving window.
#' @param up Distance upstream of each features to align and histogram.
#' Default: 10 kb.
#' @param down Distance downstream of each features to align and histogram.
#' If NULL, same as up. Default: NULL.
#' @param ... Extra argument passed to mclapply
#' @return Returns a integer-Rle representing the 'typical' signal
#' centered on a point of interest.
#' @author Charles G. Danko and Minho Chae
#' @examples
#' features <- GRanges("chr7", IRanges(1000, 1000), strand="+")
#' reads <- GRanges("chr7", IRanges(start=c(1000:1004, 1100),
#' width=rep(1, 6)), strand="+")
#' mg <- metaGene(features, reads, size=4, up=10)
metaGene <- function(features, reads=NULL, plusCVG=NULL, minusCVG=NULL,
    size=100L, up=10000L, down=NULL, ...) {
    ## Drop unused seqlevels so split()/mclapply() below only visit
    ## chromosomes that actually carry features.
    seqlevels(features) <- seqlevelsInUse(features)
    ## Check 'reads': either reads themselves or precomputed strand-specific
    ## coverage must be supplied.
    if (is.null(reads)) {
        if (is.null(plusCVG) || is.null(minusCVG))
            stop("Either 'reads' or 'plusCVG' and 'minusCVG' must be used")
    } else {
        ## When reads are given they take precedence: recompute coverage
        ## per strand, overriding any supplied plusCVG/minusCVG.
        seqlevels(reads) <- seqlevelsInUse(reads)
        plusCVG <- coverage(reads[strand(reads)=="+",])
        minusCVG <- coverage(reads[strand(reads)=="-",])
    }
    if (is.null(down)) down <- up
    ## One histogram per chromosome, computed in parallel.
    featureList <- split(features, seqnames(features))
    H <- mclapply(seqlevels(features), metaGene_foreachChrom,
        featureList=featureList, plusCVG=plusCVG, minusCVG=minusCVG,
        size=size, up=up, down=down, ...)
    ## Collect per-chromosome histograms into columns of a matrix and sum
    ## them position-wise into the final meta-profile.
    M <- sapply(seq_len(length(H)), function(x) as.integer(H[[x]]))
    return(Rle(apply(M, 1, sum)))
}
## Per-chromosome worker for metaGene(): returns the moving-window read-count
## profile (integer Rle) summed over all features on 'chrom'.
metaGene_foreachChrom <- function(chrom, featureList, plusCVG, minusCVG,
    size, up, down) {
    f <- featureList[[chrom]]
    pCVG <- plusCVG[[chrom]]
    mCVG <- minusCVG[[chrom]]
    ## Extend the window by half of 'size' on each side so that runsum()
    ## with width 'size' yields one value per position in the up/down range.
    offset <- floor(size/2L)
    pro <- promoters(f, upstream=up+offset, downstream=(down+offset-1L))
    ## Running-sum coverage around each anchor; minus-strand profiles are
    ## reversed so every column reads in the feature's 5' -> 3' direction.
    M <- sapply(1:length(pro), function(x) {
        if (as.character(strand(pro)[x]) == "+")
            as.integer(runsum(pCVG[start(pro)[x]:end(pro)[x]],
                k=size))
        else
            as.integer(rev(runsum(mCVG[start(pro)[x]:end(pro)[x]],
                k=size)))
    })
    ## Sum across all features on this chromosome.
    return(Rle(apply(M, 1, sum)))
}
#' Runs metagene analysis for sense and antisense direction.
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors, set the option 'mc.cores'.
#'
#' @param features GRanges A GRanges object representing a set of genomic
#' coordinates, i.e., set of genes.
#' @param reads GRanges of reads.
#' @param anchorType Either 'TSS' or 'TTS'. Metagene will be centered on the
#' transcription start site(TSS) or transcription termination site(TTS).
#' Default: TSS.
#' @param size Numeric. The size of the moving window. Default: 100L
#' @param normCounts Numeric. Normalization vector such as average reads.
#' Default: 1L
#' @param up Numeric. Distance upstream of each feature to align and histogram.
#' Default: 1 kb
#' @param down Numeric. Distance downstream of each feature to align and
#' histogram. If NULL, down is same as up. Default: NULL
#' @param sampling Logical. If TRUE, subsampling of Metagene is used.
#' Default: FALSE
#' @param nSampling Numeric. Number of subsampling. Default: 1000L
#' @param samplingRatio Numeric. Ratio of sampling for features. Default: 0.1
#' @param ... Extra argument passed to mclapply.
#' @return A list of integer-Rle for sense and antisene.
#' @author Minho Chae
#' @examples
#' features <- GRanges("chr7", IRanges(start=1000:1001, width=rep(1,2)),
#' strand=c("+", "-"))
#' reads <- GRanges("chr7", IRanges(start=c(1000:1003, 1100:1101),
#' width=rep(1, 6)), strand=rep(c("+","-"), 3))
#' ## Not run:
#' # mg <- runMetaGene(features, reads, size=4, up=10)
runMetaGene <- function(features, reads, anchorType="TSS", size=100L,
    normCounts=1L, up=10000L, down=NULL, sampling=FALSE, nSampling=1000L,
    samplingRatio=0.1, ...) {
    ## Check 'anchorType' and anchor every feature on a single base:
    ## its start (TSS) or its end (TTS), strand-aware via resize().
    if (!anchorType %in% c("TSS", "TTS")) {
        stop("'anchorType' must be either 'TSS' or 'TTS'")
    }
    if (anchorType == "TSS") {
        f <- resize(features, width=1L, fix="start")
    } else if (anchorType == "TTS") {
        f <- resize(features, width=1L, fix="end")
    }
    if (is.null(down)) down <- up

    ## Anchors for the antisense profile: same positions, opposite strand.
    ## BUG FIX: this previously used rev(strand(f)), which reverses the
    ## *order* of the strand vector rather than complementing each strand,
    ## and is only correct by accident when the strand vector happens to be
    ## palindromic. chartr() flips "+" <-> "-" per element ("*" unchanged).
    fRev <- f
    strand(fRev) <- chartr("+-", "-+", as.character(strand(f)))

    ## Strand-specific read coverage, computed once and shared by both
    ## the sense and antisense passes.
    plusCVG <- coverage(reads[strand(reads)=="+",])
    minusCVG <- coverage(reads[strand(reads)=="-",])

    message("sense ... ", appendLF=FALSE)
    if (sampling) {
        sense <- samplingMetaGene(features=f, plusCVG=plusCVG,
            minusCVG=minusCVG, size=size, up=up, down=down,
            nSampling=nSampling, samplingRatio=samplingRatio, ...)
    } else {
        ## Average profile: total profile divided by the number of features.
        sense <- metaGene(features=f, plusCVG=plusCVG, minusCVG=minusCVG,
            size=size, up=up, down=down, ...)
        sense <- sense/length(features)
    }
    message("OK")

    message("antisense ... ", appendLF=FALSE)
    if (sampling) {
        antisense <- samplingMetaGene(features=fRev, plusCVG=plusCVG,
            minusCVG=minusCVG, size=size, up=up, down=down,
            nSampling=nSampling, samplingRatio=samplingRatio, ...)
    } else {
        antisense <- metaGene(features=fRev, plusCVG=plusCVG, minusCVG=minusCVG,
            size=size, up=up, down=down, ...)
        antisense <- antisense/length(features)
    }
    message("OK")

    ## Scale both profiles by the user-supplied normalization factor.
    sense <- sense*normCounts
    antisense <- antisense*normCounts
    return(list(sense=sense, antisense=antisense))
}
## Bootstrap version of metaGene(): computes one profile per feature, then
## nSampling times draws round(length(features) * samplingRatio) features
## with replacement, sums the sampled profiles, and returns the per-position
## median across all bootstrap samples, normalized by the sample size.
samplingMetaGene <- function(features, plusCVG, minusCVG, size=100L, up=10000L,
    down=NULL, nSampling=1000L, samplingRatio=0.1, ...) {
    samplingSize <- round(length(features)*samplingRatio)
    ## One metagene profile per individual feature (parallel via mclapply;
    ## extra arguments in ... are forwarded to mclapply, e.g. mc.cores).
    metaList <- mclapply(1:length(features), function(x) {
        metaGene(features=features[x,], plusCVG=plusCVG, minusCVG=minusCVG,
            size=size, up=up, down=down)
    }, ...)
    ## Bootstrap: sum the profiles of each random sample of features.
    allSamples <- mclapply(1:nSampling, function(x) {
        inx <- sample(1:length(features), size=samplingSize, replace=TRUE)
        onesample <- metaList[inx]
        mat <- sapply(onesample, function(x) as.integer(x))
        Rle(apply(mat, 1, sum))
    }, ...)
    ## Median across bootstrap samples at each position, scaled back to a
    ## per-feature average.
    M <- sapply(allSamples, function(x) as.integer(x))
    return(Rle(apply(M, 1, median)) / samplingSize)
}
#' Returns a matrix, with rows representing read counts across a specified
#' gene, or other features of interest.
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors, use the argument 'mc.cores'.
#'
#' @param features A GRanges object representing a set of genomic coordinates.
#' @param reads A GRanges object representing a set of mapped reads.
#' @param size The size of the moving window.
#' @param up Distance upstream of each f to align and histogram Default: 1 kb.
#' @param down Distance downstream of each f to align and histogram
#' Default: same as up.
#' @param debug If set to TRUE, provides additional print options.
#' Default: FALSE
#' @param ... Extra argument passed to mclapply
#' @return Returns a vector representing the 'typical' signal across
#' genes of different length.
#' @author Charles G. Danko and Minho Chae
## Returns a matrix of counts. Rows represent different streches of DNA.
## Columns represent positions relative to a certain feature. Summed together,
## these should be a meta-gene.
##
## Arguments:
## f -> data.frame of: CHR, START, STRAND.
## p -> data.frame of: CHR, START, END, STRAND.
## size -> The size of the moving window.
## up -> Distance upstream of each f to align and histogram.
## down -> Distance downstream of each f to align and histogram (NULL).
##
## Assumptions: Same as MetaGene
## Build the per-feature count matrix chromosome by chromosome (in
## parallel), then stack the per-chromosome matrices into one result.
metaGeneMatrix <- function(features, reads, size= 50, up=1000, down=up,
    debug=FALSE, ...) {
    chromNames <- sort(unique(as.character(seqnames(features))))
    ## One worker invocation per chromosome, run in parallel.
    perChrom <- mclapply(seq_along(chromNames), metaGeneMatrix_foreachChrom,
        C=chromNames, features=features, reads=reads, size=size, up=up,
        down=down, debug=debug, ...)
    ## Keep only chromosomes that carry both features and reads (mirroring
    ## the worker's own guard), then row-bind their matrices. Returns NULL
    ## when no chromosome qualifies, as before.
    featChrom <- as.character(seqnames(features))
    readChrom <- as.character(seqnames(reads))
    keep <- vapply(chromNames, function(ch) {
        any(featChrom == ch) && any(readChrom == ch)
    }, logical(1))
    do.call(rbind, perChrom[keep])
}
## Per-chromosome worker for metaGeneMatrix(): returns a matrix of windowed
## read counts (one row per feature on chromosome C[i]), or integer(0) when
## the chromosome has no features or no reads.
metaGeneMatrix_foreachChrom <- function(i, C, features, reads, size, up, down,
    debug) {
    # Which KG? prb?  (indices of features/reads on this chromosome)
    indxF <- which(as.character(seqnames(features)) == C[i])
    indxPrb <- which(as.character(seqnames(reads)) == C[i])
    if((NROW(indxF) >0) & (NROW(indxPrb) >0)) {
        # Order -- Make sure, b/c this is one of our main assumptions.
        # Otherwise violated for DBTSS.
        ord <- order(start(features[indxF,]))
        # Type coersions to plain vectors for the compiled routine.
        FeatureStart <- start(features[indxF,][ord])
        FeatureStr <- as.character(strand(features[indxF,][ord]))
        PROBEStart <- start(reads[indxPrb,])
        PROBEEnd <- end(reads[indxPrb,])
        PROBEStr <- as.character(strand(reads[indxPrb,]))
        size <- as.integer(size)
        up <- as.integer(up)
        down <- as.integer(down)
        # Set dimensions explicitly -- the C routine called below expects
        # matrix-dimensioned inputs; do not reorder these assignments.
        dim(FeatureStart) <- c(NROW(FeatureStart), NCOL(FeatureStart))
        dim(FeatureStr) <- c(NROW(FeatureStr), NCOL(FeatureStr))
        dim(PROBEStart) <- c(NROW(PROBEStart), NCOL(PROBEStart))
        dim(PROBEEnd) <- c(NROW(PROBEEnd), NCOL(PROBEEnd))
        dim(PROBEStr) <- c(NROW(PROBEStr), NCOL(PROBEStr))
        if(debug) {
            message(C[i],": Counting reads in specified region.")
        }
        # Delegate the window counting to compiled code in the package.
        Hprime <- .Call("MatrixOfReadsByFeature", FeatureStart, FeatureStr,
            PROBEStart, PROBEEnd, PROBEStr,
            size, up, down, PACKAGE = "groHMM")
        return(Hprime)
    }
    # Nothing to count on this chromosome.
    return(integer(0))
}
#' Returns a histogram of the number of reads in each section of a moving
#' window of #' variable size across genes.
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors, use the argument 'mc.cores'.
#'
#' @param features A GRanges object representing a set of genomic coordinates.
#' @param reads A GRanges object representing a set of mapped reads.
#' @param n_windows The number of windows to break genes into.
#' @param debug If set to TRUE, provides additional print options.
#' Default: FALSE
#' @param ... Extra argument passed to mclapply
#' @return Returns a vector representing the 'typical' signal across genes of
#' different length.
#' @author Charles G. Danko and Minho Chae
## Returns a histogram of the number of reads in each section of a
## moving window of variable size across genes.
##
## Arguments:
## f -> data.frame of: CHR, START, END, STRAND.
## p -> data.frame of: CHR, START, END, STRAND.
## n_windows -> The resolution of the MetaGene -- i.e. the number of moving
## windows to break it into..
##
## Assumptions:
## (1) Gene list should be ordered!
## (2) Gene list should be pretty short, as most of the processing and
## looping over genes is currently done in R.
#
metaGene_nL <- function(features, reads, n_windows=1000, debug=FALSE, ...) {
    ## Accumulator: one bin per relative position along the (scaled) gene.
    C <- sort(unique(as.character(seqnames(features))))
    H <- rep(0, n_windows)
    for (i in 1:NROW(C)) {
        if (debug) {
            message(C[i])
        }
        ## Indices of features/reads on the current chromosome.
        indxF <- which(as.character(seqnames(features)) == C[i])
        indxPrb <- which(as.character(seqnames(reads)) == C[i])
        if ((NROW(indxF) > 0) & (NROW(indxPrb) > 0)) {
            ## Order features by start -- one of our main assumptions
            ## (otherwise violated for DBTSS).
            ord <- order(start(features[indxF,]))
            ## Type coercions to plain vectors for the compiled routine.
            FeatureStart <- start(features[indxF,][ord])
            FeatureEnd <- end(features[indxF,][ord])
            FeatureStr <- as.character(strand(features[indxF,][ord]))
            PROBEStart <- start(reads[indxPrb,])
            PROBEEnd <- end(reads[indxPrb,])
            PROBEStr <- as.character(strand(reads[indxPrb,]))
            ## Set dimensions explicitly for the .Call interface.
            dim(FeatureStart) <- c(NROW(FeatureStart), NCOL(FeatureStart))
            dim(FeatureStr) <- c(NROW(FeatureStr), NCOL(FeatureStr))
            dim(PROBEStart) <- c(NROW(PROBEStart), NCOL(PROBEStart))
            dim(PROBEEnd) <- c(NROW(PROBEEnd), NCOL(PROBEEnd))
            dim(PROBEStr) <- c(NROW(PROBEStr), NCOL(PROBEStr))
            ## One scaled profile per feature, computed in parallel.
            mcpg <- mclapply(c(1:NROW(FeatureStart)), function(iFeatures) {
                ## Width of each of the n_windows bins for this gene
                ## (generally not an integer; bin edges are rounded below).
                ws <- (FeatureEnd[iFeatures]-FeatureStart[iFeatures])/n_windows
                if (debug) {
                    message(C[i], ": Counting reads in specified region:",
                        FeatureStart[iFeatures], "-", FeatureEnd[iFeatures])
                    ## FIX: previously printed the region again instead of ws.
                    message(C[i], ": Window size:", ws)
                    message(C[i], ": End-Start:",
                        FeatureEnd[iFeatures]-FeatureStart[iFeatures])
                }
                ## Single-base read counts across the gene body.
                DataByOne <- .Call("WindowAnalysis", PROBEStart, PROBEEnd,
                    PROBEStr, FeatureStr[iFeatures], as.integer(1),
                    as.integer(1), FeatureStart[iFeatures],
                    FeatureEnd[iFeatures],
                    PACKAGE = "groHMM")
                if (debug) {
                    message("DataByOne size:", NROW(DataByOne))
                }
                ## Sum base-level counts into each of the n_windows bins.
                Hprime <- unlist(lapply(1:NROW(H), function(i) {
                    indx <- ceiling(ws*(i-1)+1):ceiling(ws*i)
                    return(sum(DataByOne[indx]))
                }))
                ## Reverse for "-" strand so profiles read 5' -> 3'.
                if (FeatureStr[iFeatures] == "-")
                    Hprime <- rev(Hprime)
                ## Units: number of reads per base.
                return(Hprime/ws)
            }, ...)
            ## Add the per-feature profiles together.
            ## BUG FIX: this loop previously added mcpg[[i]] (the chromosome
            ## index), re-adding one gene's profile once per gene; mcpg is
            ## indexed per feature, so iFeatures is the correct index.
            for (iFeatures in 1:NROW(FeatureStart)) {
                H <- H + mcpg[[iFeatures]]
            }
        }
        if (debug) {
            message(C[i], ": Done!")
        }
    }
    return(H)
}
#' Returns the average profile of tiling array probe intensity values or
#' wiggle-like count data centered on a set of genomic positions
#' (specified by 'Peaks').
#'
#' Supports parallel processing using mclapply in the 'parallel' package.
#' To change the number of processors, use the argument 'mc.cores'.
#'
#' @param ProbeData Data.frame representing chromosome, window center,
#' and a value.
#' @param Peaks Data.frame representing chromosome, and window center.
#' @param size Numeric. The size of the moving window. Default: 50 bp.
#' @param bins The bins of the meta gene -- i.e. the number of moving windows
#' to break it into. Default +/- 1kb from center.
#' @return A vector representing the 'typical' signal centered on the peaks of
#' interest.
#' @author Charles G. Danko and Minho Chae
## Returns the average profile of tiling array probe intensity values or
## wiggle-like count data centered on a set of genomic positions.
##
## Arguments:
## Peaks -> data.frame of: CHR, CENTER, STRAND.
##                  (note that STRAND is currently not supported, and does nothing).
## ProbeData -> data.frame of: CHR, CENTER, VALUE
## bins -> The bins of the meta gene -- i.e. the number of
## moving windows to break it into.
##
## TODO:
##  (1) Implement support for a Peaks$strand
## (2) ...
averagePlot <- function(ProbeData, Peaks, size=50, bins= seq(-1000,1000,size)){
    ## Sentinel distance (999) for probes on chromosomes absent from Peaks;
    ## such probes land in the right-most bins unless the bin grid excludes them.
    ProbeData$minDist <- rep(999)
    for (chrom in unique(Peaks[[1]])) {
        peakRows <- which(Peaks[[1]] == chrom)
        ## Anchor the pattern at end-of-string so that e.g. "chr1" does not
        ## also match "chr10"-"chr19".
        probeRows <- grep(paste(chrom, "$", sep=""), ProbeData[[1]], perl=TRUE)
        ## Signed distance from each probe to its nearest peak center.
        ## TODO: multiply by strand here once Peaks$strand is supported.
        ProbeData$minDist[probeRows] <- vapply(probeRows, function(r) {
            offsets <- ProbeData[r, 2] - Peaks[peakRows, 2]
            offsets[which.min(abs(offsets))]
        }, numeric(1))
    }
    ## Average probe value in each half-open window [bin - size, bin).
    means <- vapply(seq_len(NROW(bins)), function(b) {
        inBin <- ProbeData$minDist >= (bins[b] - size) & ProbeData$minDist < bins[b]
        mean(ProbeData[inBin, 3])
    }, numeric(1))
    data.frame(windowCenter = bins + (size/2), means)
}
|
f4a249bdbde4cbb4c8edb4f8617e1fea8d1ba51c
|
3294922fcd180db899951f8641a04132a45568f2
|
/Part 2 - Regression/Multiple Linear Regression/Multi_Reg.R
|
217d77c3615acc4edde503308b44cacf4239b3de
|
[] |
no_license
|
AchyuthReddy001/Machine-Learning
|
cdf633ed597db621f559a967933bb3c9adafe7eb
|
cc2dafebd8aaec575ec475e5503f1ed276a5ed91
|
refs/heads/master
| 2020-11-25T19:21:01.000893
| 2019-12-18T10:18:04
| 2019-12-18T10:18:04
| 228,809,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
Multi_Reg.R
|
# Multiple linear regression on the "50 Startups" data with manual
# backward elimination of statistically non-significant predictors.

# importing packages (install only when missing instead of on every run)
if (!requireNamespace("caTools", quietly = TRUE)) install.packages("caTools")
library(caTools)
# importing Dataset
data <- read.csv("50_Startups.csv")
# categorical data: encode State as a factor
data$State <- factor(data$State, levels = c("New York", "California", "Florida"),
                     labels = c(1, 2, 3))
# splitting data into train and test sets (80/20)
set.seed(123)
values <- sample.split(data$Profit, SplitRatio = 0.8)
train_set <- subset(data, values == TRUE)
test_set <- subset(data, values == FALSE)
# Backward elimination: refit after dropping the least significant
# predictor at each step.
# NOTE: models are now fit on train_set; the original fit on the full data,
# which defeats the purpose of the train/test split.
reg <- lm(formula = Profit ~ R.D.Spend + Administration +
            Marketing.Spend + State, data = train_set)
summary(reg)
reg <- lm(formula = Profit ~ R.D.Spend + Administration +
            Marketing.Spend, data = train_set)
summary(reg)
reg <- lm(formula = Profit ~ R.D.Spend +
            Marketing.Spend, data = train_set)
summary(reg)
reg <- lm(formula = Profit ~ R.D.Spend, data = train_set)
summary(reg)
# BUGFIX: y_pred was referenced but never computed in the original script.
# Predict profit on the held-out test set with the final model.
y_pred <- predict(reg, newdata = test_set)
y_pred
|
f00a26ee128abc0f57ca3b195a6332a45a536848
|
bb62b85c79ddaede92555a328436e7a29cd508a7
|
/man/net_stat.Rd
|
4e24bb4c7245b2b2410450629f2e13a66d0c5e7f
|
[] |
no_license
|
jihuilee/PCAN
|
bcbbf5efd5f8d8b7b3b3aa3c86ed11ea25f2ea26
|
98efaf1fd727cd746772df67aec23655deb3bef3
|
refs/heads/master
| 2023-06-04T22:50:28.641358
| 2021-06-26T10:05:48
| 2021-06-26T10:05:48
| 379,827,372
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 457
|
rd
|
net_stat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/net_stat.R
\name{net_stat}
\alias{net_stat}
\title{Calculate network statistics}
\usage{
net_stat(net, directed = FALSE, configuration = c("edges", "isolate",
"kstar(2)", "kstar(3)", "kstar(4)", "kstar(5)", "cycle(4)", "cycle(5)",
"cycle(6)"))
}
\arguments{
\item{net}{Input network (adjacency matrix)}
\item{directed}{Logical; is the network directed? Defaults to \code{FALSE}.}
\item{configuration}{Character vector of network configurations (statistics) to calculate.}
}
\description{
Calculate network statistics
}
|
03830a0b01d4babb3ae8e4c2774c1a7e72dadf41
|
6047320375f1cccae54dbe2f8a5287d025f5892e
|
/DOGMA_CITE/03_mitoRNA_qc.R
|
d89631835610e9678ffa610845ca04fcb1dd8820
|
[] |
no_license
|
xzlandy/Benchmark_CITEseq_DOGMAseq
|
8feb626e2a1328caef283690fc038a0c884dcaec
|
5aadc53d5d0ce56e6227bf04f51d57cc9d2fca11
|
refs/heads/main
| 2023-04-17T05:09:39.293175
| 2022-05-30T01:33:47
| 2022-05-30T01:33:47
| 495,191,931
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,058
|
r
|
03_mitoRNA_qc.R
|
## QC of mitochondrial RNA content: compare %UMIs from mtRNA between the
## DIG (DOGMA-seq) and CITE-seq assays.
library(Seurat)
library(BuenColors)
library(dplyr)
library(stringr)
## NOTE(review): machine-specific working directory; breaks portability.
setwd('~/RWorkSpace/CITE-seq/Duerr/DOGMA-seq/DIG_CITE_rerun_1/code/')
# the 10x hdf5 file contains both data types.
## Read a 10x .h5 file and return a data.frame of per-cell mitochondrial
## RNA percentages (genes matching "^MT-"), tagged with the assay type
## (`what`) and condition labels.
import_pctMT_RNAseq <- function(file, condition, what){
  tenx <- Read10X_h5(file)
  seu <- CreateSeuratObject(counts = tenx$`Gene Expression`)
  mito_pct <- PercentageFeatureSet(seu, pattern = "^MT-")[[1]]
  data.frame(what, condition, pct_mt = mito_pct)
}
## Load the per-assay Seurat objects. NOTE(review): each load() restores an
## object named `data`, so the second load() overwrites the first -- the
## copies into dig/cite must happen in between, as done here.
load("../data/DIG_data.RData")
dig <- data
load("../data/CITE_data.RData")
cite <- data
rm(data)
## Tag each object with its assay type.
dig$what <- 'DIG'
cite$what <- 'CITE'
# cite$condition <- cite$orig.ident
## Per-cell mtRNA percentages from both assays, combined.
all_mtRNA_df <- rbind(dig@meta.data[,c('percent.mt', 'what', 'condition')], cite@meta.data[,c('percent.mt', 'what', 'condition')])
all_mtRNA_df$pct_mt <- all_mtRNA_df$percent.mt
## Sample ID is the condition string up to the "_Act" suffix.
all_mtRNA_df$sample <- str_split_fixed(all_mtRNA_df$condition, '_Act', 2)[,1]
## Boxplot of %mtRNA by assay (outliers hidden; y-axis capped at 50%).
pMitoRNA <- ggplot(all_mtRNA_df, aes(x = what, y = (pct_mt), color = what)) +
geom_boxplot(outlier.shape = NA, position = position_dodge(preserve = "single")) +
pretty_plot(fontsize = 7) + L_border() + scale_y_continuous(limits = c(0,50)) +
scale_color_manual(values = c("dodgerblue3", "firebrick", "darkgrey")) +
labs(x = "Condition", y = "%UMIs from mtRNA", color = "") +
theme(legend.position = "none")
pMitoRNA
cowplot::ggsave2(cowplot::plot_grid(
pMitoRNA
), file = "../plots/mitoRNApct_QC.pdf", width = 1, height = 1.5)
## Median %mtRNA per assay (observed values recorded in the comment below).
all_mtRNA_df %>% group_by(what) %>% summarize(median(pct_mt))
# # A tibble: 2 Γ 2
# what `median(pct_mt)`
# <chr> <dbl>
# 1 CITE 6.32
# 2 DIG 7.56
## Same comparison, colored by condition.
pMitoRNA <- ggplot(all_mtRNA_df, aes(x = what, y = (pct_mt), color = condition)) +
geom_boxplot(outlier.shape = NA, position = position_dodge(preserve = "single")) +
pretty_plot(fontsize = 7) + L_border() + scale_y_continuous(limits = c(0,50)) +
labs(x = "Condition", y = "%UMIs from mtRNA", color = "")
pMitoRNA
cowplot::ggsave2(cowplot::plot_grid(
pMitoRNA
), file = "../plots/mitoRNApct_QC_split.pdf", width = 3, height = 1.5)
all_mtRNA_df %>% group_by(condition, what) %>% summarize(median(pct_mt))
## Same comparison, colored by sample.
pMitoRNA <- ggplot(all_mtRNA_df, aes(x = what, y = (pct_mt), color = sample)) +
geom_boxplot(outlier.shape = NA, position = position_dodge(preserve = "single")) +
pretty_plot(fontsize = 7) + L_border() + scale_y_continuous(limits = c(0,50)) +
scale_color_manual(values = c("dodgerblue3", "firebrick", "darkgrey")) +
labs(x = "Condition", y = "%UMIs from mtRNA", color = "") +
theme(legend.position = "bottom", legend.direction = 'horizontal')
pMitoRNA
cowplot::ggsave2(cowplot::plot_grid(
pMitoRNA
), file = "../plots/mitoRNApct_QC_split_sample.pdf", width = 2, height = 2)
all_mtRNA_df %>% group_by(sample, what) %>% summarize(median(pct_mt))
# # A tibble: 4 Γ 3
# # Groups: sample [2]
# sample what `median(pct_mt)`
# <chr> <chr> <dbl>
# 1 SB775372 CITE 5.77
# 2 SB775372 DIG 7.04
# 3 SB775393 CITE 6.90
# 4 SB775393 DIG 8.30
|
3ff145aa80557f60dc5fa42785effd1e463904f3
|
89e9c55cf19f9a341ea99b19e7e29c6192dcf422
|
/bts_reformat_for_maria.R
|
547d73091c062ba8935248664445e8f78279416d
|
[
"MIT"
] |
permissive
|
elahi/marBioDivChange_reformat
|
bcd3b8e2ef6b7ac6707a2fcbffcb5bfca8eb4560
|
ed0628a982b6ce901f252d0a8b6a64ceeb5b63e0
|
refs/heads/master
| 2021-01-10T12:35:06.153768
| 2016-02-24T13:42:12
| 2016-02-24T13:42:12
| 49,683,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,751
|
r
|
bts_reformat_for_maria.R
|
#################################################
# Reformat CB data for Maria Dornelas, and sDiv working group
# Author: Robin Elahi
# Date: 150221
#################################################
# Change Log
# 150223 - reformatted data again
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace and
# is discouraged; prefer running the script in a fresh R session instead.
rm(list=ls(all=TRUE)) # removes all previous material from R's memory
##### LOAD PACKAGES #####
library(dplyr)
library(tidyr)
##### MASTER DATASET #####
## Read Table S4 and attach evenness from a separate file, joined on a
## subsite-by-date observation key.
fullDat <- read.csv("./data/TableS4.csv", header=TRUE, na.strings="NA")
names(fullDat)
unique(fullDat$studyName)
unique(fullDat$studySub)
unique(fullDat$subSiteID)
## Observation key = subSiteID x date, used for the evenness join below.
fullDat$observation <- with(fullDat, paste(subSiteID, dateR, sep = "_"))
unique(fullDat$observation)
# Note that table S4 did not have evenness, pull this from the evenness dataset I used in Mary's lab meeting
evenDat <- read.csv("./data/evenDat3.csv", header=TRUE, na.strings="NA")
names(evenDat)
## NOTE(review): origin "1904-01-01" implies Mac-Excel serial dates in
## date.no -- confirm against the source file.
evenDat$dateR<-as.Date(evenDat$date.no, origin="1904-01-01")
evenDat$observation <- with(evenDat, paste(subSiteID, dateR, sep = "_"))
evenDat2 <- evenDat %>% select(observation, even)
head(evenDat2)
str(evenDat2)
unique(evenDat2$observation)
testDF <- left_join(fullDat, evenDat2)
write.csv(testDF, './output/testDF.csv')
# Select columns and rename
fullDat <- testDF
fullDat <- fullDat %>%
select(site, studySub, studyName, subSiteID, Scale, dateR, rich, div, even,
abund, AbundUnitsOrig)
head(fullDat)
## Composite key used for joining study metadata later on.
fullDat$study_sub_site <- with(fullDat, paste(studyName, studySub, site, sep = "-"))
unique(fullDat$study_sub_site)
##### STUDIES #####
## Study-level metadata (reference, data source, system, location).
studies <- read.csv("./data/studyNameList.csv", header=TRUE, na.strings="NA")
names(studies)
# Select columns and rename
studies <- studies %>%
select(studyName, Reference_dunic, FigsTables, Collector, Sys_dunic, Loc) %>%
rename(Reference = Reference_dunic, Source = FigsTables, Sys = Sys_dunic)
head(studies)
##### SUB-STUDIES #####
## Sub-study metadata (taxa sampled, sampling method and effort).
subStudies <- read.csv("./data/studySubList.csv", header=TRUE, na.strings="NA")
names(subStudies)
unique(subStudies$Descriptor.of.Taxa.Sampled)
# Select columns and rename
names(subStudies)
subStudies <- subStudies %>%
select(studyName:Descriptor.of.Taxa.Sampled, Vis:PltN, SiAreaCalc,
SiLinearExtentUnits, RepeatType)
names(subStudies)
##### SITES #####
## Site metadata, including any disturbance event and its date(s).
sites <- read.csv("./data/siteList.csv", header=TRUE, na.strings="NA")
names(sites)
# Select columns and rename
sites <- sites %>%
select(studyName, site, Event, Driver, Prediction, EventDate1, EventDate2,
Lat, Long, Depth_m)
head(sites)
### Create 'Year of Event' column
# translate dates to R
sites$event1 <- as.Date(sites$EventDate1, origin="1900-01-01")
sites$event2 <- as.Date(sites$EventDate2, origin="1900-01-01")
# check to make sure that 1998 El Nino was in fact in 1998 (use origin = "1904-01-01" if not)
head(sites)
str(sites)
# Get year from date object
sites$event1yr <- as.numeric(format(sites$event1, '%Y'))
sites$event2yr <- as.numeric(format(sites$event2, '%Y'))
# create "Year of event" column
# NOTE(review): is.na(event2yr) == "TRUE" works only via R's logical-to-
# character coercion; plain is.na(event2yr) would be the idiomatic form.
sites$Year_of_Event <- with(sites, ifelse(is.na(event2yr) == "TRUE",
event1yr,
paste(event1yr, ",", event2yr)))
head(sites)
# NOTE(review): sites has no Event_type column, so this returns NULL.
unique(sites$Event_type)
# select new columns
sites2 <- sites %>%
select(studyName, site, Event, Driver, Lat, Long, Year_of_Event, Depth_m)
head(sites2)
##### MERGE STUDIES, SUB-STUDIES, SITES #####
### Merge studies and sub-studies
dat1 <- right_join(studies, subStudies, by = "studyName")
head(dat1)
### Merge studies and sites, keeping only site-level columns.
dat2 <- right_join(studies, sites2, by = "studyName") %>%
select(studyName, site:Depth_m)
dat2$study_site <- with(dat2, paste(studyName, site, sep = "-"))
head(dat2)
### Combine the two merges into a single metadata table.
dat3 <- right_join(dat1, dat2, by = "studyName")
head(dat3)
# GSUB to switch BirkeSC to SC
unique(dat3$site)
site2 <- dat3$site
dat3$Site <- gsub("BirkeSC", "SC", site2)
## BUGFIX: the original filtered on `crap2`, a column that does not exist
## (runtime error). Sanity-check the recode on the new Site column instead.
dat3 %>% filter(Site == "SC")
## Composite key matching fullDat$study_sub_site.
dat3$study_sub_site <- with(dat3, paste(studyName, studySub, Site, sep = "-"))
unique(dat3$study_sub_site)
# remove irrelevant studies not used in CB paper
unique(fullDat$studyName)
unique(dat3$studyName)
dat4 <- dat3 %>%
filter(studyName != "Bebars" & studyName != "Keller" &
studyName != "Greenwood" & studyName != "Sonnewald" &
studyName != "SwedFishTrawl")
unique(dat4$studyName)
dat4 <- droplevels(dat4)
# Compare details with richness data (printed side-by-side for eyeballing)
with(dat4, unique(studyName))
with(fullDat, unique(studyName))
with(dat4, unique(studySub))
with(fullDat, unique(studySub))
with(dat4, unique(site))
with(fullDat, unique(site))
with(dat4, unique(study_sub_site))
with(fullDat, unique(study_sub_site))
# Rename
master_details <- dat4
head(master_details)
##### MERGE DETAILS WITH RICHNESS DATA #####
names(master_details)
names(fullDat)
fullDat$site
# drop redundant columns in one dataset
fullDat2 <- fullDat %>% select(-studyName, -site, -studySub)
master <- left_join(fullDat2, master_details, by = "study_sub_site")
names(master)
master$Organism <- master$Descriptor.of.Taxa.Sampled
##### SUBSET COLUMNS FOR MARIA #####
master2 <- master %>% select(studyName, studySub, subSiteID, Lat, Long, dateR,
rich, div, even, abund, AbundUnitsOrig, Scale,
study_sub_site, Site, Organism,
Driver, Year_of_Event, Depth_m, RepeatType)
master2 <- droplevels(master2)
head(master2)
## Per-sample identifier: sub-study x site x spatial scale.
master2$sample_id <- with(master2,
paste(studySub, "_", Site, "_", Scale, sep = ""))
unique(master2$sample_id)
head(master2)
##### REMOVE GAMMA TIME-SERIES THAT ARE ALREADY REPRESENTED BY ALPHA #####
names(master2)
unique(master2$Scale)
unique(master2$subSiteID)
unique(master2$studySub)
unique(master2$studyName)
master2 %>% filter(Scale == "alpha") %>% summarise(length = n())
master2 %>% filter(Scale == "gamma") %>% summarise(length = n())
## Cross-tabulate study_sub_site by scale to find series present at both.
study_scale_DF <- as.data.frame.matrix(with(master2, table(study_sub_site, Scale)))
head(study_scale_DF)
study_scale_DF$study_sub_site <- rownames(study_scale_DF)
study_scale_DF$bothScales <- study_scale_DF$alpha > 0 & study_scale_DF$gamma > 0
master3 <- left_join(master2, study_scale_DF, by = "study_sub_site")
head(master3)
master3$bothScales == "TRUE" & master3$Scale == "gamma"
## Drop gamma rows that are duplicates of an alpha series.
master3$remove <- ifelse(master3$bothScales == "TRUE" &
master3$Scale == "gamma", 'remove', 'keep')
masterSub <- master3 %>% filter(remove == "keep") %>% droplevels()
unique(masterSub$subSiteID)
### Do the subsetting and renaming
## Reshape masterSub into the sDiv/BioTIME column layout.
names(masterSub)
# Database
masterSub$Database <- "Elahi"
# Split the sampling date into Day / Month / Year columns.
masterSub$dateR <- as.Date(as.character(masterSub$dateR))
masterSub$Day <- as.numeric(format(masterSub$dateR, "%d"))
masterSub$Month <- as.numeric(format(masterSub$dateR, "%m"))
masterSub$Year <- as.numeric(format(masterSub$dateR, "%Y"))
# Genus, Species (not available in these aggregated data)
masterSub$Genus <- NA
masterSub$Species <- NA
# ObsEventID: one sampling event (sub-study x site x coords x date)
names(masterSub)
masterSub$ObsEventID <- with(masterSub, paste(studySub, Site,
Lat, Long, "NA",
Year, Month, Day, sep = "_"))
# ObsID: the time-series identifier (same, without the date)
names(masterSub)
masterSub$ObsID <- with(masterSub, paste(studySub, Site,
Lat, Long, "NA", sep = "_"))
head(masterSub)
# Treatment (none recorded)
masterSub$Treatment <- NA
names(masterSub)
# Rename: CitationID, StudyID, Latitude, Longitude, DepthElevation
masterSub2 <- masterSub %>% rename(CitationID = studyName,
StudyID = studySub,
Latitude = Lat,
Longitude = Long,
DepthElevation = Depth_m)
names(masterSub2)
masterSub3 <- masterSub2 %>% select(Database, CitationID, StudyID,
Site, Latitude, Longitude,
DepthElevation, Day, Month, Year,
Genus, Species, ObsEventID, ObsID,
RepeatType, Treatment,
rich, div, even, abund,
AbundUnitsOrig, Organism)
head(masterSub3)
### Put rich, div, abund in one column (make dataset longer)
# Can't use gather, because richness and div are not evenly distributed
# So, split the datasets apart first
richDat <- droplevels(masterSub3[complete.cases(masterSub3$rich), ])
richDat <- richDat %>% rename(Value = rich) %>% select(-div, -even, -AbundUnitsOrig, -abund)
richDat$ValueType <- "richness"
richDat$ValueUnits <- NA
head(richDat)
divDat <- droplevels(masterSub3[complete.cases(masterSub3$div), ])
divDat <- divDat %>% rename(Value = div) %>% select(-rich, -even, -AbundUnitsOrig, -abund)
divDat$ValueType <- "shannon"
divDat$ValueUnits <- NA
head(divDat)
evenDat <- droplevels(masterSub3[complete.cases(masterSub3$even), ])
evenDat <- evenDat %>% rename(Value = even) %>% select(-rich, -div, -AbundUnitsOrig, -abund)
evenDat$ValueType <- "evenness"
evenDat$ValueUnits <- NA
head(evenDat)
abundDat <- droplevels(masterSub3[complete.cases(masterSub3$abund), ])
abundDat <- abundDat %>% rename(Value = abund, ValueUnits = AbundUnitsOrig) %>%
select(-rich, -div, -even)
unique(abundDat$ValueUnits)
ValueType <- abundDat$ValueUnits
## Return the first n characters of each element of x.
leftText <- function(x, n) {
  substring(x, 1, n)
}
## Map abundance-unit prefixes onto standardized ValueType labels:
## "indiv..." -> abundance, "Perce..." -> cover, "metri.../kgPer..." -> biomass.
vt2 <- leftText(ValueType, 5)
unique(vt2)
vt3 <- gsub("indiv", "abundance", vt2)
vt4 <- gsub("Perce", "cover", vt3)
vt5 <- gsub("metri", "biomass", vt4)
vt6 <- gsub("kgPer", "biomass", vt5)
unique(vt6)
abundDat$ValueType <- vt6
names(richDat)
names(divDat)
names(evenDat)
names(abundDat)
### Now rbind it
## Stack the four long-format value tables into one.
masterL <- rbind(richDat, divDat, evenDat, abundDat)
head(masterL)
unique(masterL$StudyID)
unique(masterL$ObsID)
unique(masterL$ValueType)
unique(masterL$ValueUnits)
summary(masterL)
unique(masterL$ValueType)
# Add BioType column
masterL$BioType <- 'marine'
# Add Taxa column (to match sDiv format)
# create new ecosystem column (with easier names)
## orgList and taxaList are parallel vectors: taxaList[i] is the broad
## taxon label for organism descriptor orgList[i].
orgList <- unique(masterL$Organism)
orgList
taxaList <- c("Fish", "Fish",
"Marine Invertebrates", "Benthos",
"Marine Invertebrates", "Marine Invertebrates",
"Marine Invertebrates", "Marine Invertebrates",
"Benthos", "Foraminifera",
"Benthos", "Marine Invertebrates",
"Marine Plants", "Fish",
"Marine Plants", "Marine Invertebrates",
"Marine Invertebrates", "Marine Invertebrates")
## Use plyr::mapvalues() without attaching plyr: calling library(plyr)
## after dplyr masks dplyr verbs (rename, summarise, mutate, ...) for the
## rest of the session, a well-known source of silent breakage.
Taxa <- plyr::mapvalues(masterL$Organism, from = orgList,
                                          to = taxaList)
masterL$Taxa <- Taxa
## Sanity checks (printed, not assigned).
masterL %>% select(Organism, Taxa)
head(masterL)
write.csv(masterL, "./output/elahi_biotime.csv")
|
73fd4c976dbd45d52e689ac19f69c26e52eb44d2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/AquaEnv/examples/BufferFactors.Rd.R
|
5ede77148ca4c31c325dd8e6658af5e0cbafa0bf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,293
|
r
|
BufferFactors.Rd.R
|
## Auto-extracted example file for AquaEnv::BufferFactors. All example
## calls are inside "Not run" comment guards, so sourcing this file only
## attaches the AquaEnv package.
library(AquaEnv)
### Name: BufferFactors
### Title: BufferFactors
### Aliases: BufferFactors
### Keywords: misc
### ** Examples
## Not run:
##D # Default run
##D BufferFactors()
##D
##D # All carbonate system species
##D BufferFactors(species = c("CO2", "HCO3", "CO3"))
##D
##D # Total concentrations of all species
##D BufferFactors(species = c("SumCO2", "SumNH4", "SumH3PO4", "SumHNO3",
##D "SumHNO2", "SumH2S", "SumSiOH4", "SumBOH3",
##D "SumHF", "SumH2SO4"))
##D
##D # Different carbonate system equilibrium constants
##D BufferFactors(k1k2 = "roy")
##D
##D # Object of class 'aquaenv' as input
##D ae_input <- aquaenv(S=35, t=25, SumCO2 = 0.0020, pH = 8.1,
##D skeleton = TRUE)
##D BufferFactors(ae = ae_input)
##D
##D # Produces some NaNs as certain total concentrations are zero
##D BufferFactors(ae = ae_input,
##D species = c("SumCO2", "SumNH4", "SumH3PO4", "SumHNO3",
##D "SumHNO2", "SumH2S", "SumSiOH4", "SumBOH3",
##D "SumHF", "SumH2SO4"))
##D
##D # Object of class 'aquaenv' as input, but different total alkalinity
##D parameters <- c(Alk = 0.0022)
##D BufferFactors(ae = ae_input, parameters = parameters)
##D
## End(Not run)
|
d59be27ce59dc22eee4bf1720d6059f8934b8c5f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RBMRB/examples/HSQC_13C.Rd.R
|
f811101966f756e545585a02040a07628bb35e4b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
HSQC_13C.Rd.R
|
## Auto-extracted example file for RBMRB::HSQC_13C: simulate H1-C13 HSQC
## spectra for BMRB entries (requires network access to BMRB).
library(RBMRB)
### Name: HSQC_13C
### Title: Simulates H1-C13 HSQC spectra for a given entry or list of
### entries from BMRB
### Aliases: HSQC_13C
### ** Examples
plot_hsqc<-HSQC_13C(c(17074,17076,17077))
#Simulates C13-HSQC spectra form the given list of entries
plot_hsqc<-HSQC_13C(c(17074,17076,17077),'line')
#Simulates C13-HSQC and connects the peaks with same sequence number
plot_hsqc<-HSQC_13C(c(17074,17076,17077),interactive=FALSE)
#Example for non interactive plot
|
9f212825193a45ab9530e84d7ae45d3b07b092e8
|
cc814c2fc94888349abbd58fb8f730ea074ad19e
|
/server.R
|
58eaff27847ad05ba298d2f64b7f1d802334ac8f
|
[] |
no_license
|
EduardoSuja/Data-Products---Shiny
|
baa9034f596175a7127d5c902f437c11643ba0ee
|
51b84fb06b23387ba686f6e4eeee274cc1559c21
|
refs/heads/master
| 2021-01-19T00:50:06.715707
| 2017-04-04T16:42:56
| 2017-04-04T16:42:56
| 87,211,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,290
|
r
|
server.R
|
library(shiny)
## Shiny server: fit a linear model (Ozone ~ Temp) to the points the user
## brushes on the airquality scatterplot, and predict ozone at the
## temperature chosen with a slider.
shinyServer(function(input, output) {
  ## Reactive model fit over the brushed subset; returns NULL when fewer
  ## than two points are selected (lm needs at least two observations).
  my_fit <- reactive({
    brushed_data <- brushedPoints(airquality,
                                  input$brush_area,
                                  xvar = "Temp",
                                  yvar = "Ozone")
    if (nrow(brushed_data) < 2) {
      return(NULL)
    }
    lm(Ozone ~ Temp, data = brushed_data)
  })
  ## Slider temperature, wrapped so downstream reactives re-run on change.
  tempInput <- reactive({input$sliderTemp})
  ## Predicted ozone at the slider temperature, or a message when no
  ## usable model exists yet.
  my_pred <- reactive({
    if (!is.null(my_fit())) {
      predict(my_fit(), newdata = data.frame(Temp = tempInput()))
    } else {
      "Not enough data selected"
    }
  })
  ## Scatterplot with the fitted line and the predicted point overlaid.
  output$ozone_temp <- renderPlot({
    plot(airquality$Temp, airquality$Ozone, xlab="Temperature",
         ylab = "Ozone", main = "Ozone versus temperature")
    if (!is.null(my_fit())) {
      abline(my_fit(), col="red", lwd=2)
      points(tempInput(), my_pred(), col="blue", pch=18, cex= 1.5)
    }
  })
  output$my_pred <- renderText({
    my_pred()
  })
})
|
f6a345c230500b02e71c6a5b77506b2e7e93062a
|
8fe42808e411420aca8c25e999de3f5c086d1433
|
/man/sigfig1.Rd
|
689fc6ca074af1cde8885feccef0bc19f172699a
|
[] |
no_license
|
leonpheng/lhtool
|
51b94dbe33e51780fd284f62a5b0b656bbeddb91
|
27c033ea3f5f2b463e199a7ffadc823705169797
|
refs/heads/master
| 2022-03-21T23:13:42.215774
| 2019-12-11T09:13:49
| 2019-12-11T09:13:49
| 158,558,265
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 300
|
rd
|
sigfig1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collectedfunctions.R
\name{sigfig1}
\alias{sigfig1}
\title{Significant figure}
\usage{
sigfig1(x, y)
}
\description{
Round a numeric value \code{x} to \code{y} significant figures.
}
\examples{
sigfig1(123.456, 2)
}
\keyword{sigfig}
|
5b0c1c2fb3188c3086054014618df8e4c1661f96
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mousetrap/examples/mt_scale_trajectories.Rd.R
|
45c7492adc4a651aad3af3f297589812ca829853
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
mt_scale_trajectories.Rd.R
|
## Auto-extracted example file for mousetrap::mt_scale_trajectories.
library(mousetrap)
### Name: mt_scale_trajectories
### Title: Standardize variables in mouse trajectory array.
### Aliases: mt_scale_trajectories
### ** Examples
# Calculate derivatives
mt_example <- mt_derivatives(mt_example)
# Standardize velocity across trajectories
mt_example <- mt_scale_trajectories(mt_example,var_names = "vel")
|
06aa07eaf49b25c8385bb4c762e8a1fbf32267a8
|
dd59a74bd8137bb96b5f965378caa41044cc246b
|
/url_generator.R
|
c5c3ad3e7392940e3ffeccee31546a2eab60eadd
|
[] |
no_license
|
swattamw/Risk-Analytics-Based-on-Topic-Analysis-of-Financial-Disclosures
|
b5680352781b00f4fbd10bc8b056738ac93f519b
|
8f442b7eeea5f30c51139ee8a6cdfd9cb3998ae7
|
refs/heads/master
| 2020-07-29T04:45:20.398754
| 2019-05-16T19:52:23
| 2019-05-16T19:52:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,978
|
r
|
url_generator.R
|
#URLs generator
## Pull 10-K filing URLs from SEC EDGAR (via edgarWebR) for a ticker.
#Load libraries
library(edgarWebR)
library(knitr)
#Create function
####################
#Find a Submission
# ticker <- "STX"
#
# filings <- company_filings(ticker, type = "10-K", count = 40)
# # Specifying the type provides all forms that start with 10-, so we need to
# # manually filter.
# filings <- filings[filings$type == "10-K", ]
#
# # We're only interested in the last nine reports so...
# filings <- head(filings,9)
# filings <- data.frame(filings$filing_date, filings$href)
# names(filings) <- c("Date", "HTML Link")
# filings$Ticker <- ticker
###########################
## Return a data.frame of Date / HTML Link / Ticker for the ticker's 10-K
## filings filed between 2010-07-01 and 2019-12-31 (network access required).
url_generator <- function(ticker) {
filings <- company_filings(ticker, type = "10-K", count = 40)
# Specifying the type provides all forms that start with 10-, so we need to
# manually filter.
filings <- filings[filings$type == "10-K", ]
#filter by date
filings$filing_date <- as.Date(filings$filing_date, format= "%Y-%m-%d")
#filings<- subset(filings, filing_date> "2010-01-01" & filing_date < "2018-12-31")
filings<- subset(filings, filing_date> "2010-07-01" & filing_date < "2019-12-31")
#rename columns
filings <- data.frame(filings$filing_date, filings$href)
names(filings) <- c("Date", "HTML Link")
filings$Ticker <- ticker
return(filings)
}
## Tickers of the 20 industrial companies to pull 10-K filing URLs for.
## FIX: the original bound this vector to `list` (shadowing base::list) and
## then never used it, spelling out all 20 url_generator() calls by hand.
tickers <- c('BA', 'MMM', 'UNP', 'HON', 'UTX', 'UPS', 'GE', 'LMT', 'CAT',
             'CSX', 'RTN', 'DE', 'GD', 'NSC', 'ITW', 'NOC', 'FDX', 'WM',
             'EMR', 'ROP')
final <- do.call(rbind, lapply(tickers, url_generator))
write.csv(final, file = "url_generator.csv")
|
83c75a6a12208c531b5cd328342929a2804ed1ee
|
17d444fc419cd73696e0358a47ea12695a911351
|
/Projet_FAO_IA_SCHOOL--master/Mise_en_place_data.R
|
f3434db6a90a135a37a983586c69832ca91f10dd
|
[] |
no_license
|
MohamadTURKI94/FAO
|
eec54e3ca8a83520f8e658a9d34e3484e4557c38
|
218feb16939e0b0946059cd55e8cdc9fa2bc0e7a
|
refs/heads/main
| 2023-04-09T09:55:29.687427
| 2021-04-14T22:44:32
| 2021-04-14T22:44:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,365
|
r
|
Mise_en_place_data.R
|
#********************************PROJET FAO****************************************************
## Import des donnΓ©es des bilans alimentaires
animal <- read.csv("FAOSTAT_data_Animals_Products.csv", sep = ",", dec = ".")
vegetal_2014 <- read.csv("FAOSTAT_data_vegetal_products_2014.csv", sep = ",", dec = ".")
vegetal_2015 <- read.csv("FAOSTAT_data_vegetal_products_2015.csv", sep = ",", dec = ".")
vegetal_2016 <- read.csv("FAOSTAT_data_vegetal_products_2016.csv", sep = ",", dec = ".")
vegetal_2017 <- read.csv("FAOSTAT_data_vegetal_products_2017.csv", sep = ",", dec = ".")
vegetal <- rbind(vegetal_2014, vegetal_2015, vegetal_2016, vegetal_2017)
#ajout de la variable origin
animal$origin <- 'animal'
vegetal$origin <- 'vegetal'
# on regroupe animal et vegetal en un unique dataframe, via une union
temp <- merge(animal,vegetal,all=TRUE)
# suppressio des dataframe animal et vegetal
remove(animal, vegetal)
remove(vegetal_2014, vegetal_2015, vegetal_2016, vegetal_2017)
# on renomme les colonnes de temp
names(temp) <- c("xx", "xx2", "country_code", "country", "xx3", "element",
"item_code", "item", "xx4", "year", "unit", "value", "xx5", "xx6",
"origin")
# Transformation de temp en table pivot
result = dcast(temp, country_code + country + item_code + item + year + origin
~ element, value.var="value", fun.aggregate=sum)
remove(temp)
names(result) <- c("code_pays", "pays", "code_produit", "produit", "annΓ©e", "origin",
"domestic_supply_quantity", "export_quantity", "fat_supply_quantity_gcapitaday",
"feed", "food", "food_supply_kcalcapitaday", "food_supply_quantity_kgcapitayr",
"import_quantity", "losses", "other_uses", "processing", "production",
"protein_supply_quantity_gcapita_day","residuals","seed",
"stock_variation", "tourist_consumption")
result <- result %>%
filter(code_pays != 351) %>%
distinct
# Base de donnΓ©es
#**********************************************************************************
#1 Table Population
#**********************************************************************************
population <- read.csv("FAOSTAT_data_population.csv", sep = ",", dec = ".")
names(population) <- c("xx1", "xx2", "country_code", "country", "element_code",
"element", "item_code", "item", "year_code", "year",
"unit", "population", "xx3", "xx4")
Population <- population %>%
select(country_code, country, year, population)
remove(population)
## calculer la taille de la population mondiale (pour chaque annΓ©e)
Population_mondiale <- Population %>%
group_by(year) %>%
summarise(taille = sum(population))
### Exclure le code_pays 351 il correspond Γ l'aggrΓ©gation des pays : 41, 96, 214 et 128
### execute a pattern-matching function on your data to create an index vector
ndx <- grep("China", Population$country, perl=T)
### use this index vector to extract the rows you want from the data frame:
selected_rows = Population[ndx,]
selected_rows
### Exclure le code_pays 351
population <- Population %>%
filter(country_code != 351)
names(population) <- c("code_pays", "pays", "annΓ©e", "population")
remove(Population)
remove(Population_mondiale)
Population_mondiale <- population %>%
group_by(annΓ©e) %>%
summarise(Total = sum(population))
#**********************************************************************************
#2 Table dispo_alim
#**********************************************************************************
# Food-availability table: one row per (country, item, year), keeping the
# tonnage and the per-capita daily kcal / protein / fat supplies.
dispo_alim <- result %>%
  filter(code_pays != 351) %>%
  select(pays, code_pays, annΓ©e, produit, code_produit, origin, food,
         food_supply_kcalcapitaday,
         protein_supply_quantity_gcapita_day,
         fat_supply_quantity_gcapitaday) %>%
  distinct(pays, code_pays, annΓ©e, produit, code_produit, .keep_all = TRUE)
names(dispo_alim) <- c("pays", "code_pays", "annΓ©e", "produit", "code_produit", "origin",
                       "dispo_alim_tonnes", "dispo_alim_kcal_p_j",
                       "dispo_prot_g_p_j", "dispo_mat_gr_g_p_j")
## 2.1 Items considered as cereals
cereals <- read.csv("FAOSTAT_data_cΓ©rΓ©ales.csv", sep = ",", dec = ".")
cereals <- cereals %>%
  select(Item.Code, Item, Year, Value)
names(cereals) <- c("code_produit", "produit", "annΓ©e", "value")
## 2.1.1 Add the is_cereal flag to dispo_alim and to result.
# A single lookup vector plus %in% replaces the two long ifelse() chains of
# repeated `==` comparisons: same result, one source of truth for the list.
# (Difference only for NA item names: %in% yields "NON" where the old `==`
# chain yielded NA — item names in FAO data are expected to be non-NA.)
cereal_items <- c("Barley and products", "Cereals, Other",
                  "Maize and products", "Millet and products", "Oats",
                  "Rice and products", "Rye and products",
                  "Sorghum and products", "Wheat and products")
dispo_alim$is_cereal <- ifelse(dispo_alim$produit %in% cereal_items, "OUI", "NON")
result$is_cereal <- ifelse(result$produit %in% cereal_items, "OUI", "NON")
## Reorder the columns of dispo_alim and result so is_cereal sits next to the
## item identifiers (is_cereal was appended as the last column of each table,
## so the index vectors below are unchanged from the original code).
dispo_alim <- dispo_alim[, c(2, 1, 3, 4, 5, 6, 11, 7, 8, 9, 10)]
result <- result[,c(1,2,3,4,5,6,24,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23)]
## 2.1.2 Share of cereals going to animal feed
### Considering only cereals destined for food (human and animal), what
### proportion by weight goes to animal feed, per year?
cereals_for_feed_food <- result %>%
  filter(is_cereal == "OUI" & feed != 0 & food != 0) %>%
  group_by(annΓ©e) %>%
  summarise(feed_total = sum(feed), food_total = sum(food)) %>%
  mutate(prop_animale = round(100 * feed_total / (feed_total + food_total), 2)) %>%
  select(annΓ©e, prop_animale)
## 2.2 How is food availability measured?
### 2.2.1 Food availability per country and per item
### Compute (per country and item) the food availability in kcal and in kg of protein.
# Join on the shared columns (pays/code_pays/annΓ©e) inferred by left_join.
dispo_alim <- dispo_alim %>% left_join(population)
# NOTE(review): using dispo_alim$... inside mutate() works here because the
# referenced columns exist before the mutate, but bare column names would be
# the idiomatic form.
dispo_alim <- dispo_alim %>%
  mutate(food_supply_kcal = dispo_alim$dispo_alim_kcal_p_j * dispo_alim$population * 365,
         food_supply_kgprotein = dispo_alim$dispo_prot_g_p_j * 0.001 * dispo_alim$population * 365)
### 2.2.2 Energy/weight ratio and 2.2.3 protein percentage of each item.
### From the weight of the food availability (per country and item), compute
### the energy/weight ratio in kcal/kg.
### Hint: the FAO computes kcal/capita/day by multiplying the Food quantity by
### the energy/weight ratio (kcal/kg), then dividing by the country's
### population and by 365.
# Weight of the food availability per country and item, in kg (tonnes * 1e6).
dispo_alim$food_supply_kg <- dispo_alim$dispo_alim_tonnes * 1000000
dispo_alim <- dispo_alim %>%
  mutate(`ratio_kcal/kg` = food_supply_kcal / food_supply_kg,
         `protein_%` = 100 * food_supply_kgprotein / food_supply_kg )
## 2.3 Top 20 foods
### 2.3.1 Q: cite 5 of the 20 most caloric foods (by energy/weight ratio).
### 2.3.2 Q: cite 5 of the 20 foods richest in protein.
# Keep only rows where both ratios are finite. is.finite() is FALSE for NA,
# NaN and +/-Inf, so this single filter also covers the missing-value case.
# FIX: the previous na.omit(dispo_alim, cols = c(...)) silently ignored
# `cols` — na.omit() has no such argument for data frames — and therefore
# dropped rows with NA in ANY column, which is stricter than the stated
# intent of only screening the two ratio columns.
dispo_alim <- dispo_alim %>%
  filter(is.finite(`ratio_kcal/kg`), is.finite(`protein_%`))
# Per-year (2014-2017) mean energy/weight ratio and protein share per item,
# sorted by descending caloric density within each year.
# One grouped pipeline replaces the four copy-pasted per-year blocks; the
# output has the same columns (annΓ©e, produit, ratio, protein) and the same
# row order (years ascending, ratio descending within a year) as the old
# rbind of the four sorted per-year tables.
aliments_plus_caloriques_ou_plus_proteines <- dispo_alim %>%
  filter(`ratio_kcal/kg` != 0.0, `protein_%` != 0.000000,
         annΓ©e %in% c(2014, 2015, 2016, 2017)) %>%
  group_by(annΓ©e, produit) %>%
  summarise(`ratio_kcal/kg` = mean(`ratio_kcal/kg`, na.rm = TRUE),
            `protein_%` = mean(`protein_%`, na.rm = TRUE)) %>%
  ungroup() %>%
  arrange(annΓ©e, desc(`ratio_kcal/kg`))
### 2.4 Global availability
### 2.4.1 Q: vegetal products only
### Compute, for vegetal products only, the global domestic supply in kcal and
### in kg of protein per year, then draw the corresponding plot.
# Preliminaries: attach population and derive per-country supply figures,
# mirroring the computation done for dispo_alim above.
result <- result %>% left_join(population)
result <- result %>%
  mutate(food_supply_kcal = food_supply_kcalcapitaday * population * 365,
         food_supply_kgprotein = protein_supply_quantity_gcapita_day * 0.001 * population * 365)
# Weight of the food availability, in kg (tonnes * 1e6).
result$food_supply_kg <- result$food * 1000000
result <- result %>%
  mutate(`ratio_kcal/kg` = food_supply_kcal / food_supply_kg,
         `protein_%` = 100 * food_supply_kgprotein / food_supply_kg )
# Keep only rows where both ratios are finite (covers NA, NaN and Inf).
# FIX: the former na.omit(result, cols = c(...)) ignored `cols` (not a real
# na.omit argument) and dropped rows with NA in ANY column; the is.finite
# filter alone implements the stated intent.
result <- result %>%
  filter(is.finite(`ratio_kcal/kg`), is.finite(`protein_%`))
# Domestic supply expressed in kcal and in kg of protein (tonnes -> kg via 1e6;
# `protein_%` is a percentage, hence the * .01).
dispo_int_vegetal <- result %>%
  filter(origin == "vegetal") %>%
  mutate(dom_sup_kcal = domestic_supply_quantity * 1000000 * `ratio_kcal/kg`,
         dom_sup_kgprot = domestic_supply_quantity * 1000000 * `protein_%` * .01)
# Aggregate domestic supply by year.
dispo_int_vegetal <- dispo_int_vegetal %>%
  group_by(annΓ©e) %>%
  summarise(dom_sup_Kcal = sum(dom_sup_kcal),
            dom_sup_kgprot = sum(dom_sup_kgprot))
# Visualization: both yearly series (kcal and kg of protein) on one chart.
# NOTE(review): the two series differ by several orders of magnitude, so the
# kgprot line may be flattened on this shared linear y axis — consider a log
# scale or separate panels.
ggplot(
  dispo_int_vegetal,
  aes(x = annΓ©e)
) +
  geom_line(aes(y = dom_sup_Kcal, color = "dom_sup_Kcal")) +
  geom_line(aes(y = dom_sup_kgprot, color = "dom_sup_kgprot")) +
  scale_colour_manual("",
                      breaks = c("dom_sup_Kcal", "dom_sup_kgprot"),
                      values = c("dom_sup_Kcal"="green", "dom_sup_kgprot"="blue"))+
  scale_x_continuous(name="year") +
  scale_y_continuous(name="domestic supply") +
  ggtitle("global domestic availability")
### 2.4.2 Q: everyone vegetarian?
# How many humans could be fed if the entire global domestic supply of vegetal
# products were used as food? Results in calories, then in protein, each also
# expressed as a percentage of the world population.
# Daily requirements below follow common dietary guidance (see inline notes).
# Calories per day per person.
NB_KCAL_PER_CAPITA_PER_DAY = 2500
# Average human body weight: 62 kg (https://en.wikipedia.org/wiki/Human_body_weight)
# Average protein requirement: 0.9 g/kg/day, converted to kg/person/day.
KG_PROT_PER_CAPITA_PER_DAY = 62 * .9 * .001
dispo_int_vegetal <- dispo_int_vegetal %>%
  left_join(Population_mondiale)
dispo_int_vegetal <- dispo_int_vegetal[,c(1,4,2,3)]
names(dispo_int_vegetal) <- c("annΓ©e", "total_pop", "dom_sup_Kcal", "dom_sup_kgprot")
# Results in terms of calories.
# NOTE(review): nb_humains/1000000 is printed as billions ("Millards"), which
# is only correct if the population/supply figures are in thousands — confirm
# the units of the FAO population column against the source file.
nb_humains <- dispo_int_vegetal$dom_sup_Kcal / 365 / NB_KCAL_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© intΓ©rieure en produits issus de vΓ©gΓ©taux (en termes calorifiques) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_int_vegetal$total_pop, 1), " % de la population mondiale"))
# Results in terms of protein.
nb_humains <- dispo_int_vegetal$dom_sup_kgprot / 365 / KG_PROT_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© intΓ©rieure en produits issus de vΓ©gΓ©taux (en termes de protΓ©ines) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_int_vegetal$total_pop, 1), " % de la population mondiale"))
### 2.4.3 Q: nothing is lost, everything is transformed
### How many humans could be fed if all vegetal food availability (Food), the
### vegetal feed for animals (Feed) and the losses of vegetal products (Waste)
### were used as food? Results in calories then protein, each also as a
### percentage of the world population.
# Availability in kcal and kg of protein for food + feed + losses combined
# (tonnes -> kg via 1e6; `protein_%` is a percentage, hence * .01).
dispo_alim_food_feed_waste <- result %>%
  filter(origin == "vegetal") %>%
  mutate(food_feed_losses_supply_kcal = 1000000 * `ratio_kcal/kg` * (food + feed + losses),
         food_feed_losses_supply_kgprot = 1000000 * `protein_%` * (food + feed + losses) * .01)
# Aggregate by year.
dispo_alim_food_feed_waste <- dispo_alim_food_feed_waste %>%
  group_by(annΓ©e) %>%
  summarise(food_feed_losses_supply_kcal = sum(food_feed_losses_supply_kcal),
            food_feed_losses_supply_kgprot = sum(food_feed_losses_supply_kgprot))
dispo_alim_food_feed_waste <- dispo_alim_food_feed_waste %>%
  left_join(Population_mondiale)
dispo_alim_food_feed_waste <- dispo_alim_food_feed_waste[,c(1,4,2,3)]
names(dispo_alim_food_feed_waste) <- c("annΓ©e", "total_pop", "food_feed_losses_supply_kcal",
                                       "food_feed_losses_supply_kgprot")
# Results in terms of calories (constants defined in section 2.4.2).
nb_humains <- dispo_alim_food_feed_waste$food_feed_losses_supply_kcal / 365 / NB_KCAL_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© alimentaire, la nourriture animale et les pertes de produits vΓ©gΓ©taux (en termes calorifiques) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_alim_food_feed_waste$total_pop, 1), " % de la population mondiale"))
# Results in terms of protein.
nb_humains = dispo_alim_food_feed_waste$food_feed_losses_supply_kgprot / 365 / KG_PROT_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© alimentaire, la nourriture animale et les pertes de produits vΓ©gΓ©taux (en termes de protΓ©ines) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_alim_food_feed_waste$total_pop, 1), " % de la population mondiale"))
### 2.4.4 Q: everyone well fed?
# How many humans could be fed with the global food availability? Results in
# calories then protein, each also as a percentage of the world population.
# Aggregate world food availability by year.
dispo_alim_mondiale <- dispo_alim %>%
  group_by(annΓ©e) %>%
  summarise(food_supply_kcal = sum(food_supply_kcal),
            food_supply_kgprotein = sum(food_supply_kgprotein))
dispo_alim_mondiale <- dispo_alim_mondiale %>%
  left_join(Population_mondiale)
dispo_alim_mondiale <- dispo_alim_mondiale[,c(1,4,2,3)]
names(dispo_alim_mondiale) <- c("annΓ©e", "total_pop", "food_supply_kcal",
                                "food_supply_kgprotein")
# Results in terms of calories (constants defined in section 2.4.2).
nb_humains <- dispo_alim_mondiale$food_supply_kcal / 365 / NB_KCAL_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© alimentaire mondiale (en termes calorifiques) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_alim_mondiale$total_pop, 1), " % de la population mondiale"))
# Results in terms of protein.
nb_humains = dispo_alim_mondiale$food_supply_kgprotein / 365 / KG_PROT_PER_CAPITA_PER_DAY
print(paste0("Population potentiellement nourrie par la disponibilitΓ© alimentaire mondiale (en termes de protΓ©ines) : ",
             round(nb_humains/1000000,2), " Millards, soit ", round(100*nb_humains/dispo_alim_mondiale$total_pop, 1), " % de la population mondiale"))
#**********************************************************************************
# 3 Table sous_nutrition
#**********************************************************************************
temp2 <- read.csv("FAOSTAT_data_people_undernourished.csv", sep = ",", dec = ".")
names(temp2) <- c("xx1", "xx2", "country_code", "country", "element_code",
                  "element", "item_code", "item", "year_code", "year",
                  "unit", "nb_persons", "xx3", "xx4", "xx5")
Sous_nutrition <- temp2 %>%
  select(country_code, country, year, nb_persons) %>%
  distinct
remove(temp2)
### 3.1 Q: proportion of the population undernourished?
### What share of the world population is considered undernourished?
# Number of undernourished people per year.
# Replace empty cells with NA.
Sous_nutrition[Sous_nutrition==""] <- NA
# NOTE(review): subset() drops both the "<0.1" rows AND the NA rows (a
# comparison with NA is NA, which subset treats as FALSE). "<0.1" values are
# excluded entirely rather than approximated as 0 or 0.1 — confirm this is
# the intended treatment.
temp <- subset(Sous_nutrition, nb_persons != "<0.1")
temp$nb_persons <- as.numeric(temp$nb_persons)
# nb_persons is in millions in the source file, hence * 1e6 — TODO confirm.
nb_persons_undernourished <- temp %>%
  group_by(year) %>%
  summarise(personnes_en_sous_nutrition = sum(nb_persons) * 1000000)
### 3.2 Countries with undernourishment
### Unique (code, name) pairs for the countries in which the FAO records
### undernourished people.
liste_pays_en_sous_nutrition <- Sous_nutrition %>%
  filter(!is.na(nb_persons)) %>%
  distinct(country_code, country)
names(liste_pays_en_sous_nutrition) <- c("code_pays", "pays")
### 3.3 Q: most exported products
### Identify the 15 products most exported by this group of countries.
result_Exportation <- result %>%
  select(code_pays, pays, produit, export_quantity) %>%
  distinct
# NOTE(review): nb_Export = n() ranks products by the NUMBER of exporting
# country rows, not by exported quantity — confirm this matches the intent
# of "most exported".
Liste_15_produits_plus_exportes <- liste_pays_en_sous_nutrition %>%
  left_join(result_Exportation) %>%
  filter(export_quantity != 0) %>%
  group_by(produit) %>%
  summarise(nb_Export = n()) %>%
  arrange(desc(nb_Export), .by_group = TRUE) %>%
  head(15)
### 3.4 Q: largest imports?
### Among world food-balance data, select the 200 largest imports of these
### products (1 import = one quantity of a given product imported by a given
### country in the chosen year).
# NOTE(review): no head(200) is applied here — the "200 largest" selection
# described above is not implemented in this block.
Importations <- liste_pays_en_sous_nutrition %>%
  left_join(result) %>%
  inner_join(Liste_15_produits_plus_exportes, by = "produit") %>%
  distinct(code_pays, pays, code_produit, produit, annΓ©e, .keep_all = TRUE)
# Drop column 31 — presumably the nb_Export column added by the inner_join;
# TODO confirm the index against the joined table's layout.
Importations <- Importations[,-31]
Importations <- Importations %>%
  group_by(code_pays, pays, code_produit, produit) %>%
  arrange(desc(import_quantity), .by_group = TRUE)
# Important note: arrange() by default sorts the whole table ignoring groups;
# .by_group = TRUE is required to sort within each group.
### 3.5 Q: group imports by product
### Group these imports by product, yielding one row per product (15 rows).
### Then compute for each product:
### - the ratio of "Other uses" to domestic supply;
### - the ratio of animal feed to total (animal + human) food.
Importations <- Importations %>%
  filter(domestic_supply_quantity != 0) %>%
  mutate(other_Uses_vs_dom_Supply = other_uses / domestic_supply_quantity,
         feed_vs_food = feed / (feed + food))
Importations <- na.omit(Importations, cols=c("other_Uses_vs_dom_Supply", "feed_vs_food"))
# NOTE(review): summing per-country ratios (rather than recomputing the ratio
# from summed quantities, or averaging) produces a value that grows with the
# number of countries — confirm sum() is the intended aggregation here.
Importations_par_produit <- Importations %>%
  group_by(produit) %>%
  summarise(other_Uses_vs_dom_Supply = sum(other_Uses_vs_dom_Supply),
            feed_vs_food = sum(feed_vs_food))
### 3.6 Q: top 3 products
# The 3 products with the largest value for each of the 2 ratios
# (6 products in total).
Top_3_ratio_feed_vs_food <- Importations_par_produit %>%
  select(produit, feed_vs_food) %>%
  arrange(desc(feed_vs_food)) %>%
  head(3)
Top_3_ratio_Other_Uses_vs_domestic_supply <- Importations_par_produit %>%
  select(produit, other_Uses_vs_dom_Supply) %>%
  arrange(desc(other_Uses_vs_dom_Supply)) %>%
  head(3)
### 3.7 Q:
# How many tonnes of cereals could be freed if the USA cut its production of
# animal products by 10%? (Not implemented in this block.)
### 3.8 Q:
### In Thailand, what proportion of cassava is exported?
# Cassava exports from Thailand, 2014-2017. One filtered pipeline replaces the
# four copy-pasted per-year blocks; the output has the same columns
# (annΓ©e, produit, export_quantity) and the same year-ascending row order as
# the old rbind of the four per-year tables.
Exportations_Manioc_en_ThaΓ―lande <- result %>%
  distinct %>%
  filter(pays == "Thailand", produit == "Cassava and products",
         annΓ©e %in% c(2014, 2015, 2016, 2017)) %>%
  select(annΓ©e, produit, export_quantity) %>%
  arrange(annΓ©e)
# Total Thai exports (all products) per year — denominator for the share.
Total_Export_en_ThaΓ―lande_par_annee <- result %>%
  distinct %>%
  filter(pays == "Thailand") %>%
  group_by(annΓ©e) %>%
  summarise(Total_Export_Quantity = sum(export_quantity))
# Share of cassava in total Thai exports, in %.
Exportations_Manioc_en_ThaΓ―lande <- Exportations_Manioc_en_ThaΓ―lande %>%
  left_join(Total_Export_en_ThaΓ―lande_par_annee) %>%
  mutate(`Proportion_%` = round(100 * export_quantity / Total_Export_Quantity, 2))
### What is the proportion of undernourished people (in Thailand)?
Sous_nutrition_en_ThaΓ―lande <- Sous_nutrition %>%
  filter(country == "Thailand")
#**********************************************************************************
# 4 Table equilibre_prod
#**********************************************************************************
# Production-balance table: one row per (country, item, year) with the
# domestic supply and its uses (feed, seed, losses, processing, food, other).
equilibre_prod <- result %>%
  select(code_pays, pays, code_produit, produit, annΓ©e,
         domestic_supply_quantity,
         feed,
         seed,
         losses,
         processing,
         food,
         other_uses) %>%
  distinct(code_pays, pays, code_produit, produit, annΓ©e, .keep_all = TRUE)
names(equilibre_prod) = c("code_pays", "pays", "code_produit", "produit",
                          "annΓ©e", "dispo_int", "alim_ani", "semences",
                          "pertes", "transfo", "nourriture", "autres_utilisations")
#**********************************************************************************
#5 Q : Query SQL
#**********************************************************************************
## Connect to the database (disconnected at the end of this section).
# SECURITY FIX: the MySQL password used to be hard-coded in this script.
# It is now read from the MYSQL_PWD environment variable so the credential
# never lives in source control. Set it before running, e.g. in ~/.Renviron:
#   MYSQL_PWD=...
DB <- dbConnect(MySQL(), user="root", host="localhost",
                password=Sys.getenv("MYSQL_PWD"), dbname="projet_fao")
# The database we just created is empty:
dbListTables(DB) # list tables
dbSendQuery(DB, "SET GLOBAL local_infile = true;") # allow LOAD DATA LOCAL INFILE
# Persist the analysis tables.
dbWriteTable(DB, "population", population, row.names = FALSE) # write dataframe to the database
dbWriteTable(DB, "dispo_alim", dispo_alim, row.names = FALSE)
dbWriteTable(DB, "Sous_nutrition", Sous_nutrition, row.names = FALSE)
dbWriteTable(DB, "equilibre_prod", equilibre_prod, row.names = FALSE)
dbWriteTable(DB, "result", result, row.names = FALSE)
dbWriteTable(DB, "Population_mondiale", Population_mondiale, row.names = FALSE)
dbWriteTable(DB, "cereals", cereals, row.names = FALSE)
# Disconnect
dbDisconnect(DB)
|
e9875b9cb1418848397aa2c9470650ad9ffd1b35
|
d2061a237532f631fde4eb2651093bed8593c403
|
/man/Currency.Rd
|
80ea8e98070ec89c79caeb6ada35609a28011750
|
[] |
no_license
|
cran/sur
|
23fb12f03ea816e80e0b2fd0a1491f2ca81218dd
|
612cc5e2391a019ed157f90958d31f40e9466560
|
refs/heads/master
| 2020-12-22T19:13:21.454433
| 2020-08-25T21:30:02
| 2020-08-25T21:30:02
| 236,903,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
Currency.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-Currency.R
\docType{data}
\name{Currency}
\alias{Currency}
\title{Value and Circulation of Currency}
\format{A data frame with 5 rows and 3 variables:
\describe{
\item{BillValue}{denomination}
\item{TotalCirculation}{total currency in circulation in U.S. dollars}
\item{NumberCirculation}{total number of bills in circulation}
}}
\usage{
Currency
}
\description{
This dataset contains, for the smaller bill denominations, the value of the bill and the total value in circulation. The source for these data is \emph{The World Almanac and Book of Facts 2014}.
}
\keyword{datasets}
|
1bbb46f9c79ea73f3aeed042eec1274d1fc34264
|
458881641ec0d872d6a38e7bb9a7b32fae1084b2
|
/trigramdf.R
|
8b5af7c09855a84b5967b4f3f3628f026370126a
|
[] |
no_license
|
wpmcdonald2000/Coursera-JHU-Capstone
|
28346cf95f2c4dfc28dea0ade53791d371f6721f
|
2f190a07ecca3202b481229e1198e934818a55f6
|
refs/heads/master
| 2016-09-06T14:50:21.329263
| 2015-04-26T18:47:51
| 2015-04-26T18:47:51
| 32,734,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,939
|
r
|
trigramdf.R
|
# Data Science Capstone Project
# Trigram dataframe building
# NOTE(review): absolute user-specific paths make this script non-portable;
# consider a configurable data directory.
source("/Users/williammcdonald/Coursera-JHU-Capstone/Capstone_helper.R")
options(mc.cores=1)
# Loads `allText` and the profanity list used downstream — TODO confirm the
# object names stored in these .RData files.
load('/Users/williammcdonald/CourseraCapstoneData/allText.RData')
load('/Users/williammcdonald/CourseraCapstoneData/naughty.RData')
# Set seed for sampling (reproducible samples across the 10 iterations)
set.seed(1056)
# Build and save 10 trigram frequency data frames, each from an independent
# random sample of `text`. Each saved data frame (object name `df3gram` in
# df3gram<i>.Rdata) maps a two-word predictor to a predicted third word and
# its count, sorted by predictor then descending frequency.
trigramdf <- function(text){
  for (i in 1:10){
    name <- paste("/Users/williammcdonald/CourseraCapstoneData/df3gram", i, ".Rdata", sep = "")
    sampleText <- sample(text, size = 50000)
    clean <- cleanText(sampleText)
    rm(sampleText)
    text.corp <- createCorp(clean)
    rm(clean)
    # BUG FIX: the cleaning steps must be chained on `corpus`. Previously the
    # second and third tm_map() calls re-read `text.corp`, so the lower-casing
    # and punctuation removal were discarded and only removeNumbers applied.
    corpus <- tm_map(text.corp, content_transformer(tolower))
    corpus <- tm_map(corpus, content_transformer(removePunctuation))
    corpus <- tm_map(corpus, content_transformer(removeNumbers))
    rm(text.corp)
    tdm3gram <- TermDocumentMatrix(corpus, control = list(tokenize = TrigramTokenizer))
    rm(corpus)
    # Collapse all documents into a single count column.
    df3gram <- rollup(tdm3gram, 2, na.rm=TRUE, FUN = sum)
    df3gram <- as.data.frame(inspect(df3gram))
    df3gram$num <- rowSums(df3gram)
    # Drop trigrams seen only once.
    df3gram <- subset(df3gram, num > 1)
    # Split "w1 w2 w3" into predictor = "w1 w2" and prediction = "w3".
    df3gram[c('predictor', 'prediction')] <- subset(str_match(row.names(df3gram), "(.*) ([^ ]*)"), select=c(2,3))
    df3gram <- subset(df3gram, select=c('predictor', 'prediction', 'num'))
    df3gram <- df3gram[order(df3gram$predictor,-df3gram$num),]
    row.names(df3gram) <- NULL
    # (Removed a dead assign() of df3gram<i> into the function's local
    # environment — it was discarded on return; only the save() matters.)
    save(df3gram, file = name)
  }
}
# Create multiple trigram dataframes (writes df3gram1..10 .Rdata files).
trigramdf(allText)
# Clear environment before reloading the saved pieces.
# NOTE(review): rm(list = ls()) wholesale removal is a script-level
# anti-pattern — it also deletes the helper functions sourced earlier.
WS <- c(ls())
rm(list = WS)
# Load saved files: each file stores an object named `df3gram`, which is
# re-bound to df3gram1..df3gram10 via assign().
for (i in 1:10){
  name <- paste("/Users/williammcdonald/CourseraCapstoneData/df3gram", i, ".Rdata", sep = "")
  load(name)
  assign(paste("df3gram", i, sep = ""), df3gram)
}
# All per-sample trigram data frames.
file <- list(df3gram1, df3gram2, df3gram3, df3gram4, df3gram5, df3gram6,
             df3gram7, df3gram8, df3gram9, df3gram10)
# Full outer join of the 10 count tables on (predictor, prediction).
trigramMaster <- Reduce(function(x,y) merge(x, y, by = c("predictor", "prediction") , all = TRUE), file)
# Total count across samples (columns 3:12 hold the 10 per-sample counts).
trigramMaster$num <- rowSums(trigramMaster[, 3:12], na.rm = TRUE)
trigramMaster <- trigramMaster[, c(1:2, 13)]
# BUG FIX: merge() leaves rows sorted by the join keys, so head(n = 3) used to
# keep the 3 alphabetically-first predictions per predictor instead of the 3
# most frequent. Sort by descending total count first (matching the per-sample
# ordering applied when the data frames were built).
trigramMaster <- trigramMaster[order(trigramMaster$predictor, -trigramMaster$num), ]
# Keep the top 3 predictions per predictor.
trigramMaster <- by(trigramMaster, trigramMaster$predictor, head, n = 3)
trigramMaster <- Reduce(rbind, trigramMaster)
save(trigramMaster, file = "/Users/williammcdonald/CourseraCapstoneData/df3Master.Rdata")
# Hash: build a lookup structure from the master table for fast prediction.
# `Hash` is a project helper from Capstone_helper.R — TODO confirm its output type.
load("/Users/williammcdonald/CourseraCapstoneData/df3Master.Rdata")
predict3hash <- Hash(trigramMaster)
save(predict3hash, file = '/Users/williammcdonald/CourseraCapstoneData/predict3hash.RData')
|
b4b082c55d30abf7b23c8547d9fc721cf41a1d2a
|
3049076334b528b59cd8bb4105518f3c742d4706
|
/composites.R
|
66f8322827d39b0e6924a82bc6b3f9f3e70c5f12
|
[] |
no_license
|
AZASRS/CIO-CAFR-letters
|
7126edae04348635490119199fa9c8fe551c778e
|
7ac5f42313863ea560a0f2c03e569d7e120a9ab5
|
refs/heads/master
| 2021-07-01T16:56:53.536895
| 2020-10-07T22:59:55
| 2020-10-07T22:59:55
| 137,118,149
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,024
|
r
|
composites.R
|
# This is to put composites built so that they
# can be reference by other files
# to add this to your file put in the line
# source('composites.R')
#load data & mapping from SSBT
library(readxl)
source('performance_functions.R')
path.ssbt <- "P:/IMD/2018 Database Project/MYSS Data/Copy of SSBT Data.xlsx"
policy.tree <- read_excel(path.ssbt, sheet = "Policy Tree") %>%
mutate(Inception = ymd(Inception), Defunding = ymd(Defunding))
composite.tree <- read_excel(path.ssbt, sheet = "Composite Table")
eq.top <- c("ASRSA070", "ASRSA055", "ASRSA056")
top.names <- c("Total Equity", "Public Equity", "Private Equity")
eq.second <- c("ASRSA009", "ASRSA010")
eq.public <- c("ASRSA011", "ASRSA013", "ASRSA012",
"ASRSA023", "ASRSA024", "ASRSA028")
factor.portfolios <- c("A1S4", "A1TH", "A1TO")
e11_factor <- "A1S4"
eafe_factor <- "A1TH"
em_factor <- "A1TO"
e.port <- c("A1VE", "A1V4", "A1WV")
e.portfolios <- c("A1VE", "A1V4", "A1WV")
e10_internal <- "A1V4"
e2_internal <- "A1VE"
e6_internal <- "A1WV"
closed.e.portfolios <- c("A1VN", "A1VR", "A1RV", "A1RW")
br.intl <- c( "A1ZY", "A1SU", "A1QA", "A1Q0")
br_eafe_index <- "A1ZY"
br_em_lending <- "A1Q0"
br_em_mkts <- "A1QA"
br_eafe_small <- "A1SU"
eq.opp <- "A1U6"
overlay <- "A1Z1"
transitions <- "ASRSA062"
exFutures <- c("ASRSA072", "ASRSA073", "ASRSA074", "ASRSA075")
activeComposites <- c("ASRSA063", "ASRSA064", "ASRSA065","ASRSA066","ASRSA067", "ASRSA068")
eq.second <- composite.tree %>%
filter(is.na(Sub_Portfolio) == FALSE & Asset_Class == "Equity") %>%
select(SSBT_Composite_ID) %>% unlist
public.ids <- composite.tree %>%
filter(is.na(Category) == FALSE) %>%
select(SSBT_Composite_ID) %>% unlist
all.eqID <- c(eq.top, eq.second, public.ids, e.portfolios, br.intl,
factor.portfolios, eq.opp, closed.e.portfolios)
all.eq <- c(eq.top, eq.second, eq.public, factor.portfolios, e.port, br.intl,
eq.opp, overlay, transitions)
namesEq <- as.vector(get_shortName(all.eq))
names(namesEq) <- all.eq
|
c0f41acd50b2a84a551b02daf6732ce8d465b194
|
44c12bf5db12471edba464b652f9b2133a38e80e
|
/R/decideTestsPerLabel.R
|
c26878725fd5150e4a3d1dbcd346f69c2e742c9a
|
[] |
no_license
|
MarioniLab/scran
|
af4d01246208a12d40fc01b4d7d49df6a5f59b9f
|
f238890d5642dfb8062cf0254e0257fd28c5f28d
|
refs/heads/master
| 2023-08-10T08:58:35.499754
| 2023-08-04T23:19:40
| 2023-08-04T23:30:29
| 100,610,090
| 43
| 31
| null | 2023-04-09T15:14:31
| 2017-08-17T14:06:03
|
R
|
UTF-8
|
R
| false
| false
| 4,433
|
r
|
decideTestsPerLabel.R
|
#' Decide tests for each label
#'
#' Decide which tests (i.e., genes) are significantly differentially
#' expressed between conditions in each label, given the output of
#' \code{\link{pseudoBulkDGE}}.
#' This mimics the \code{\link{decideTests}} functionality from \pkg{limma}.
#'
#' @param results A \linkS4class{List} containing the output of \code{\link{pseudoBulkDGE}}.
#' Each entry should be a DataFrame with the same number and order of rows,
#' containing at least a numeric \code{"PValue"} column (and usually a \code{"logFC"} column).
#'
#' For \code{summarizeTestsPerLabel}, this may also be a matrix produced by \code{decideTestsPerLabel}.
#' @param method String specifying whether the Benjamini-Hochberg correction should be applied across all clustesr
#' or separately within each label.
#' @param threshold Numeric scalar specifying the FDR threshold to consider genes as significant.
#' @param pval.field String containing the name of the column containing the p-value in each entry of \code{results}.
#' Defaults to \code{"PValue"}, \code{"P.Value"} or \code{"p.value"} based on fields in the first entry of \code{results}.
#' @param lfc.field String containing the name of the column containing the log-fold change.
#' Ignored if the column is not available Defaults to \code{"logFC"} if this field is available.
#' @param ... Further arguments to pass to \code{decideTestsPerLabel} if \code{results} is a List.
#'
#' @return
#' For \code{decideTestsPerLabel},
#' an integer matrix indicating whether each gene (row) is significantly DE between conditions for each label (column).
#'
#' For \code{summarizeTestsPerLabel},
#' an integer matrix containing the number of genes of each DE status (column) in each label (row).
#'
#' @details
#' If a log-fold change field is available and specified in \code{lfc.field}, values of \code{1}, \code{-1} and \code{0}
#' indicate that the gene is significantly upregulated, downregulated or not significant, respectively.
#' Note, the interpretation of \dQuote{up} and \dQuote{down} depends on the design and contrast in \code{\link{pseudoBulkDGE}}.
#'
#' Otherwise, if no log-fold change is available or if \code{lfc.field=NULL},
#' values of \code{1} or \code{0} indicate that a gene is significantly DE or not, respectively.
#'
#' \code{NA} values indicate either that the relevant gene was low-abundance for a particular label and filtered out,
#' or that the DE comparison for that label was not possible (e.g., no residual d.f.).
#'
#' @author Aaron Lun
#'
#' @examples
#' example(pseudoBulkDGE)
#' head(decideTestsPerLabel(out))
#' summarizeTestsPerLabel(out)
#'
#' @seealso
#' \code{\link{pseudoBulkDGE}}, which generates the input to this function.
#'
#' \code{\link{decideTests}}, which inspired this function.
#'
#' @export
#' @importFrom stats p.adjust
decideTestsPerLabel <- function(results, method=c("separate", "global"), threshold=0.05,
    pval.field=NULL, lfc.field="logFC")
{
    method <- match.arg(method)
    first <- results[[1]]

    # Pick the p-value column automatically from a few common spellings.
    if (is.null(pval.field)) {
        candidates <- c("PValue", "P.Value", "p.value")
        found <- candidates[candidates %in% colnames(first)]
        if (length(found)==0) {
            stop("could not automatically determine 'pval.field'")
        }
        pval.field <- found[1]
    }

    pvals <- lapply(results, function(df) df[[pval.field]])
    if (method=="separate") {
        # BH correction within each label, then assemble into a matrix.
        adjusted <- do.call(cbind, lapply(pvals, p.adjust, method="BH"))
    } else {
        # Assemble first, then correct across the pooled set of tests.
        adjusted <- do.call(cbind, pvals)
        adjusted[] <- p.adjust(adjusted, method="BH")
    }
    rownames(adjusted) <- rownames(first)

    sig <- adjusted <= threshold

    # Only use the log-fold change if the column actually exists.
    use.lfc <- !is.null(lfc.field) && lfc.field %in% colnames(first)
    if (use.lfc) {
        lfcs <- do.call(cbind, lapply(results, function(df) df[[lfc.field]]))
        sig <- sig * sign(lfcs)
    }

    storage.mode(sig) <- "integer"
    sig
}
#' @export
#' @rdname decideTestsPerLabel
summarizeTestsPerLabel <- function(results, ...) {
    # Accept either a precomputed status matrix or raw pseudoBulkDGE output.
    if (!is.matrix(results)) {
        results <- decideTestsPerLabel(results, ...)
    }

    # Every DE status present anywhere in the matrix, with NA listed last.
    statuses <- sort(unique(as.vector(results)), na.last=TRUE)

    # One count vector (per label) for each status; NA is counted directly.
    counts <- lapply(statuses, function(status) {
        if (is.na(status)) {
            colSums(is.na(results))
        } else {
            colSums(results == status, na.rm=TRUE)
        }
    })
    names(counts) <- as.character(statuses)

    do.call(cbind, counts)
}
|
85794cdf91e711713d5d4a33c8263ca9444cf114
|
b3bf7b8c56b2f3e8d8594cccce6f65981c9514e5
|
/man/plot_gg_infestationXclone.Rd
|
e0a78905754959d63320238907fb2812ae651d79
|
[] |
no_license
|
faustovrz/bugcount
|
055ee388bcf9049e5d01cf3ad19898220f7787a2
|
f3fbb7e9ed5cecae78fdfaa1035e2a87e072be2d
|
refs/heads/master
| 2021-03-27T15:43:12.992541
| 2018-05-04T22:17:49
| 2018-05-04T22:17:49
| 104,142,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
plot_gg_infestationXclone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_gg_infestationXclone.R
\name{plot_gg_infestationXclone}
\alias{plot_gg_infestationXclone}
\title{Plot infestation X clone from GLM}
\usage{
plot_gg_infestationXclone(cld)
}
\arguments{
\item{cld}{\code{multcomp} common letter display \code{cld} object from
posthoc comparison in \code{plot_fit.nb.glm()}}
}
\value{
Nothing
}
\description{
Plot infestation X clone from GLM
}
\examples{
# plot_gg_infestationXclone(nb.glm.fit$posthoc)
}
|
9b7fbc94e63539f28f9292e80f832d45b42518cd
|
f1897fae82edc098385a75d60ee934691a1eddcd
|
/binomial/R/aux_mode.R
|
da2df5516b7bbd34bbf6b2c5375ac22218a86272
|
[] |
no_license
|
stat133-sp19/hw-stat133-Zehao1006
|
6bb9367077e24ee8926dd0af78c38314ba2767df
|
17bf42c3b0d93d854fb56d3b46a8f85b3b8efe14
|
refs/heads/master
| 2020-04-28T14:52:57.081374
| 2019-05-01T02:23:01
| 2019-05-01T02:23:01
| 175,352,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
aux_mode.R
|
# Description: calculate the mode of a binomial distribution
# Inputs
#   trials: the number of trials (n)
#   prob: probability of success on each trial (p)
# Output
#   the calculated mode: floor((n + 1) * p) as an integer, or a vector of
#   the two modes c(m, m - 1) when m = (n + 1) * p is itself an integer
#   (the distribution is then bimodal)
aux_mode <- function(trials, prob){
  # m = (n + 1) * p determines the mode(s) of Binomial(n, p).
  m <- trials * prob + prob
  # Compare to the nearest integer with a tolerance instead of an exact
  # as.integer(m) == m test, which fails under floating-point round-off
  # (e.g. trials = 9, prob = 0.1 gives m = 1 + 2e-16, which the exact
  # test wrongly classifies as non-integral).
  if (isTRUE(all.equal(m, round(m)))) {
    k <- as.integer(round(m))
    # Integral m: bimodal at k and k - 1.
    return(c(k, k - 1L))
  } else {
    # Non-integral m: the unique mode is floor(m) (m is non-negative, so
    # floor matches the truncation the original code relied on).
    return(as.integer(floor(m)))
  }
}
|
1104c942d6aa3a8b8fbca26abe79c1bf6e2694f5
|
6c1fcaf8d68f739ae7015c3e2c88a8cc7c50b851
|
/KalmanFilter.R
|
854a39bf5f85eb99519a4927483a66073c2f7999
|
[] |
no_license
|
watermouth/FiringRateEstimation
|
fc0b26704bac9779673ee746f0e32b741fd85464
|
09d01eaddabae2a02bbcf30cb57414ad14fd53ce
|
refs/heads/master
| 2021-01-02T08:40:33.509771
| 2013-01-08T13:13:00
| 2013-01-08T13:13:00
| 7,304,681
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
KalmanFilter.R
|
# kalman filter using fkf
source("FilteringHelper.R")
# Kalman filter factory built on package FKF.
# INPUT (state-space model specification, in FKF's notation)
#  P0:  initial state covariance
#  dt:  state transition intercept
#  ct:  observation intercept
#  Tt:  system model coefficient
#  HHt: system model noize variance matrix ^2
#  Zt:  observation model coefficient
#  GGt: observation model noize variance matrix ^2
# OUTPUT: a function(observation, initialState, check.input) returning the
# fkf object, whose components include
#  att: filtered state
#  at:  predicted state
#  Ptt: variance of att
#  Pt:  variance of at
KalmanFilter <- function(P0=matrix(1), dt=matrix(0), ct=matrix(0), Tt=array(1,c(1,1,1)), Zt=array(1,c(1,1,1)),
                         HHt=array(1,c(1,1,1)), GGt=array(1,c(1,1,1))){
  # Return a closure capturing the model matrices; callers then supply only
  # the data (observation) and the initial state.
  function(observation, initialState, check.input = TRUE){
    # checkInput() comes from FilteringHelper.R (sourced at the top of file).
    if(check.input) checkInput(observation=observation, initialState=initialState)
    # Fail loudly if FKF is unavailable, and call it via its namespace
    # instead of attaching it from inside a function (library() here was a
    # global side effect on the search path).
    if (!requireNamespace("FKF", quietly = TRUE)) {
      stop("package 'FKF' is required for KalmanFilter()")
    }
    FKF::fkf(a0=initialState, P0=P0, dt=dt, ct=ct, Tt=Tt, Zt=Zt,
             HHt=HHt, GGt=GGt, yt=observation, check.input=check.input)
  }
}
|
4ddd1903294ead20ca46a9b5a898141bfb78429b
|
b3d721137efd79d9dc7c02ec647edb7cbb30c5ed
|
/R/nonCentralDunnett.R
|
6c259100ef6bb8893065775b175ce6fabdb0d59c
|
[] |
no_license
|
cran/nCDunnett
|
cf6add1b954da530613d841a958fcce6983ca0a4
|
d4a02ceee7ec1e42445f4d7e00ebee0a14208ace
|
refs/heads/master
| 2016-09-05T21:41:47.641294
| 2015-11-16T16:20:29
| 2015-11-16T16:20:29
| 17,697,805
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,323
|
r
|
nonCentralDunnett.R
|
# Draw N samples from a multivariate normal distribution with mean vector
# mu and (positive semi-definite) covariance matrix Sigma, using the
# eigendecomposition of Sigma to transform i.i.d. standard normals.
# Returns a plain vector when N == 1, otherwise an N x length(mu) matrix.
rmultvariate <- function (N = 1, mu, Sigma, tol = 1e-06)
{
    k <- length(mu)
    if (!all(dim(Sigma) == c(k, k))) {
        stop("incompatible arguments")
    }
    decomp <- eigen(Sigma, symmetric = TRUE)
    lambda <- decomp$values
    # Tolerate tiny negative eigenvalues (numerical noise) relative to the
    # largest eigenvalue; anything worse is rejected.
    if (!all(lambda >= -tol * abs(lambda[1L]))) {
        stop("'Sigma' is not positive definite")
    }
    # k*N standard normal draws, one column per dimension.
    Z <- matrix(rnorm(k * N), N)
    scale.mat <- diag(sqrt(pmax(lambda, 0)), k)
    draws <- drop(mu) + decomp$vectors %*% scale.mat %*% t(Z)
    if (N == 1) {
        drop(draws)
    } else {
        t(draws)
    }
}
# Compute the nodes and weights of the n-point Gauss-Legendre quadrature
# rule on [-1, 1] via the Golub-Welsch algorithm: nodes are the eigenvalues
# of the symmetric tridiagonal Jacobi matrix, and each weight follows from
# the first component of the corresponding eigenvector.
# Returns list(nodes, weights), each of length n.
GaussLegendre <- function(n)
{
  n <- as.integer(n)
  if (n < 0)
    stop("Must be a non-negative number of nodes!")
  if (n == 0)
    # Use the same element names as the main return value (the original
    # returned list(x=, w=) here, inconsistent with nodes/weights below).
    return(list(nodes = numeric(0), weights = numeric(0)))
  mu0 <- 2  # integral of the weight function (w(x) = 1) over [-1, 1]
  # Off-diagonal recurrence coefficients. seq_len() yields an empty vector
  # for n == 1; the original 1:(n-1) produced c(1, 0) there, leading to
  # NaN coefficients and an out-of-bounds assignment.
  j <- seq_len(n - 1L)
  b <- j / (4 * j^2 - 1)^0.5
  # Build the Jacobi matrix in a flat (column-major) vector: place b on the
  # sub- and super-diagonal, zero elsewhere.
  A <- rep(0, n * n)
  A[(n + 1) * (j - 1) + 2] <- b
  A[(n + 1) * j] <- b
  dim(A) <- c(n, n)
  sd <- eigen(A, symmetric = TRUE)
  # eigen() sorts eigenvalues in decreasing order; reverse for ascending
  # nodes. Weight_i = mu0 * (first eigenvector component)^2.
  w <- rev(as.vector(sd$vectors[1, ]))
  w <- mu0 * w^2
  x <- rev(sd$values)
  return(list(nodes = x, weights = w))
}
# CDF of the noncentral unilateral (one-sided) Dunnett statistic with
# infinite degrees of freedom, evaluated at quantile q, given the vector of
# correlations r, the vector of noncentrality parameters delta, the number
# of Gauss-Legendre points n, and the list x of nodes and weights (see
# GaussLegendre()). Returns the cumulative probability.
pNDUD <- function(q, r, delta, n = 32, x)
{
   # Map the quadrature nodes from (-1, 1) onto (-Inf, Inf) via
   # t = u / (1 - u^2).
   t.nodes <- x$nodes / (1 - x$nodes^2)
   # Integrand at one transformed node: product over the comparisons of
   # Phi((sqrt(r) t + q - delta)/sqrt(1-r)) times the standard normal
   # density, accumulated in log space for numerical stability.
   integrand <- function(tv) {
      logPhi <- pnorm((r^0.5 * tv + q - delta) / (1 - r)^0.5, log.p=TRUE)
      exp(sum(logPhi) + dnorm(tv, log=TRUE))
   }
   vals <- vapply(t.nodes, integrand, numeric(1))
   # Jacobian of the change of variables: (1 + u^2) / (1 - u^2)^2.
   vals <- vals * ((1 + x$nodes^2) / (1 - x$nodes^2)^2)
   sum(vals * x$weights)
}
# Function to compute cumulative distribution function of the noncentral
# bilateral (two-sided) Dunnett's test statistic with infinite degrees of
# freedom, given the quantile q, the vector of correlations r, the vector
# of noncentrality parameters delta, the number of points n of the
# Gauss-Legendre quadrature and the list x of nodes and weights of the
# quadrature. It returns the cumulative probability.
pNDBD <- function(q, r, delta, n = 32, x)
{
   k <- length(r)
   # transform from [-1; 1] to (-infty, +infty), t = x/(1-x^2), x in [-1, 1]
   y <- matrix(x$nodes/(1-(x$nodes)^2), n, 1)
   # Integrand at one transformed node: product over the k comparisons of
   # Phi(upper) - Phi(lower), times the standard normal density, with the
   # product accumulated as a sum of logs.
   pArgYi <- function(y, r, delta)
   {
      ti <- pnorm((r^0.5 * y + q - delta)/(1-r)^0.5 , log.p=FALSE)
      ti <- ti - pnorm((r^0.5 * y - q - delta)/(1-r)^0.5, log.p=FALSE)
      # Differences that underflow to <= 0 are replaced by -743.7469 as
      # their log value (near the log of the smallest positive double),
      # so the product is effectively zero without producing -Inf/NaN.
      if (any(ti <= 0))
      {
         ti[ti <= 0] <- -743.7469
         ti[ti > 0] <- log(ti[ti > 0])
      } else ti <- log(ti)
      ti <- exp(sum(ti)+dnorm(y, log=TRUE))
      return(ti)
   }
   I <- apply(y, 1, pArgYi, r, delta)
   # Jacobian of the node transformation: (1 + x^2)/(1 - x^2)^2.
   I <- I * ((1+x$nodes^2)/(1-x$nodes^2)^2)
   I <- sum(I * x$weights)
   return(I)
}
# Function to compute the probability density function of the noncentral
# unilateral Dunnett's test statistic with infinite degrees of freedom,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the number of points n of the
# Gauss-Legendre quadrature and the list x of nodes and weights of the
# quadrature. It returns density values.
# The density is the derivative of pNDUD with respect to q: at each node,
# sum over i of phi_i * prod_{j != i} Phi_j, computed in log space as
# exp(log phi_i + sum(log Phi) - log Phi_i).
dNDUD <- function(q, r, delta, n = 32, x)
{
   k <- length(r)
   # transform from [-1; 1] to (-infty, +infty), t = x/(1-x^2), x in [-1, 1]
   y <- matrix(x$nodes/(1-(x$nodes)^2), n, 1)
   # One row per comparison: (r_i, delta_i, i).
   rgdgi <- cbind(r, delta, (1:k))
   # log Phi term for a single comparison at node ym.
   loopg <- function(rd, ym, q)
   {
      tg <- pnorm((rd[1]^0.5 * ym + q - rd[2]) / (1 - rd[1])^0.5, log.p=TRUE)
      return(tg)
   }
   # log phi term for a single comparison at node ym; the -0.5*log(1-r)
   # is the Jacobian of differentiating Phi(.../sqrt(1-r)) w.r.t. q.
   loopi <- function(rd, ym, q)
   {
      ti <- dnorm((rd[1]^0.5 * ym + q - rd[2]) / (1 - rd[1])^0.5, log=TRUE) - 0.5 * log(1-rd[1])
      return(ti)
   }
   # loop of y's: combine the per-comparison terms at one node.
   loopy <- function(ym, q, rd)
   {
      Phy <- apply(rd, 1, loopg, ym, q) # loopg
      phy <- apply(rd, 1, loopi, ym, q) # loopi
      sPhy <- sum(Phy) # sum of log Phi's
      # Product-rule sum: exp(log phi_i + sum(log Phi) - log Phi_i).
      sTi <- sum(exp(phy + sPhy - Phy)) * dnorm(ym)
      return(sTi)
   }
   I <- apply(y, 1, loopy, q, rgdgi)
   # Jacobian of the node transformation: (1 + x^2)/(1 - x^2)^2.
   I <- I * ((1+x$nodes^2)/(1-x$nodes^2)^2)
   I <- sum(I * x$weights)
   return(I)
}
# Function to compute the probability density function of the noncentral
# bilateral Dunnett's test statistic with infinite degrees of freedom,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the number of points n of the
# Gauss-Legendre quadrature and the list x of nodes and weights of the
# quadrature. It returns density values.
# Same product-rule structure as dNDUD, but each comparison contributes a
# two-sided difference Phi(upper) - Phi(lower) and its derivative
# phi(upper) + phi(lower).
dNDBD <- function(q, r, delta, n = 32, x)
{
   k <- length(r)
   # transform from [-1; 1] to (-infty, +infty), t = x/(1-x^2), x in [-1, 1]
   y <- matrix(x$nodes/(1-(x$nodes)^2), n, 1)
   # One row per comparison: (r_i, delta_i, i).
   rgdgi <- cbind(r, delta, (1:k))
   # log of the two-sided Phi difference for one comparison; underflow to
   # <= 0 is clamped at -743.7469 (near the log of the smallest positive
   # double) so the log-space product stays finite.
   loopg <- function(rd, ym, q)
   {
      tg <- pnorm((rd[1]^0.5 * ym + q - rd[2]) / (1 - rd[1])^0.5) -
         pnorm((rd[1]^0.5 * ym - q - rd[2]) / (1 - rd[1])^0.5)
      if (tg <= 0) tg <- -743.7469 else tg <- log(tg)
      return(tg)
   }
   # log of the derivative of the Phi difference w.r.t. q, with the same
   # underflow clamp; -0.5*log(1-r) is the chain-rule Jacobian.
   loopi <- function(rd, ym, q)
   {
      ti <- dnorm((rd[1]^0.5 * ym + q - rd[2]) / (1 - rd[1])^0.5) +
         dnorm((rd[1]^0.5 * ym - q - rd[2]) / (1 - rd[1])^0.5)
      if (ti <= 0) ti <- -743.7469 else ti <- log(ti)
      aux <- ti - 0.5 * log(1 - rd[1])
      return(aux)
   }
   # loop of y's: product-rule combination at one node.
   loopy <- function(ym, q, rd)
   {
      Phy <- apply(rd, 1, loopg, ym, q) # loopg
      phy <- apply(rd, 1, loopi, ym, q) # loopi
      sPhy <- sum(Phy) # sum of log Phi differences
      sTi <- sum(exp(phy + sPhy - Phy)) * dnorm(ym)
      return(sTi)
   }
   I <- apply(y, 1, loopy, q, rgdgi)
   # Jacobian of the node transformation: (1 + x^2)/(1 - x^2)^2.
   I <- I * ((1+x$nodes^2)/(1-x$nodes^2)^2)
   I <- sum(I * x$weights)
   return(I)
}
# Quantile of the noncentral unilateral Dunnett statistic with infinite
# degrees of freedom for probability p, found by Newton-Raphson on the CDF
# pNDUD() using the density dNDUD() as derivative. r, delta, n and x are
# as in pNDUD(). Returns the quantile.
qNDUD <- function(p, r, delta, n = 32, x)
{
   k <- length(r)
   # Sidak-style starting value: per-comparison upper-tail normal quantile.
   alpha <- 1 - p
   alpha_k <- 1 - (1-alpha)^(1/k)
   q.cur <- qnorm(1 - alpha_k)
   max.iter <- 5000
   tol <- 1e-13
   iter <- 0
   repeat {
      q.next <- q.cur - (pNDUD(q.cur, r, delta, n, x) - p) / dNDUD(q.cur, r, delta, n, x)
      converged <- abs(q.next - q.cur) <= tol
      q.cur <- q.next
      iter <- iter + 1
      if (converged || iter > max.iter) break
   }
   q.cur
}
# Quantile of the noncentral bilateral Dunnett statistic with infinite
# degrees of freedom for probability p, found by Newton-Raphson on the CDF
# pNDBD() using the density dNDBD() as derivative. r, delta, n and x are
# as in pNDBD(). Returns the quantile.
qNDBD <- function(p, r, delta, n = 32, x)
{
   k <- length(r)
   # Sidak-style two-sided starting value, forced non-negative since the
   # bilateral statistic's quantile is positive.
   alpha <- 1 - p
   alpha_k <- 1 - (1-alpha/2)^(1/k)
   q.cur <- qnorm(1 - alpha_k)
   if (q.cur < 0) q.cur <- -q.cur
   max.iter <- 5000
   tol <- 1e-13
   iter <- 0
   repeat {
      q.next <- q.cur - (pNDBD(q.cur, r, delta, n, x) - p) / dNDBD(q.cur, r, delta, n, x)
      converged <- abs(q.next - q.cur) <= tol
      q.cur <- q.next
      iter <- iter + 1
      if (converged || iter > max.iter) break
   }
   q.cur
}
# Function to compute the cumulative distribution of the noncentral
# unilateral Dunnett's test statistic with finite degrees of freedom nu,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the degrees of freedom nu, the number of
# points n of the Gauss-Legendre quadrature and the list xx of nodes and
# weights. It returns the cumulative probability.
# Method: condition on U = S/sigma, whose log-density fx below matches that
# of sqrt(chisq_nu / nu), and integrate pNDUD(u*q) * f(u) du over (0, Inf),
# split into (0, 1] and [1, Inf).
pNDUDF <- function(q, r, nu, delta, n = 32, xx)
{
   k <- length(r)
   x <- xx$nodes
   w <- xx$weights
   # computing integral in [0,1]
   y <- 0.5 * x + 0.5 # from [-1, 1] to [0, 1]
   # log-density of U = sqrt(chisq_nu / nu) evaluated at y.
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) + (nu-1) * log(y) - nu * y * y / 2
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q  # scaled quantiles u*q, one per node
   fy <- 0.5 * apply(y, 1, pNDUD, r, delta, n, xx) * fx  # 0.5 = interval Jacobian
   I <- sum(fy * w)
   # computing integral in [1, infty), using the transformation
   # y <- -ln(x), 0 < x < exp(-1)
   b <- exp(-1)
   a <- 0
   x <- (b - a) / 2 * x + (b + a) / 2 # from [-1,1] to [0, exp(-1)]
   y <- -log(x) # from [0, exp(-1)] to [1, +infty)
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) + (nu-1) * log(y) - nu * y * y / 2
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   # The extra 1/x is the Jacobian of y = -log(x).
   fy <- (b - a) / 2 * apply(y, 1, pNDUD, r, delta, n, xx) / x * fx
   I <- I + sum(fy * w)
   return(I)
}
# Function to compute the cumulative distribution of the noncentral
# bilateral Dunnett's test statistic with finite degrees of freedom nu,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the degrees of freedom nu, the number of
# points n of the Gauss-Legendre quadrature and the list xx of nodes and
# weights. It returns the cumulative probability.
# Same construction as pNDUDF, but mixing the two-sided kernel pNDBD over
# U = sqrt(chisq_nu / nu).
pNDBDF <- function(q, r, nu, delta, n = 32, xx)
{
   k <- length(r)
   x <- xx$nodes
   w <- xx$weights
   # computing integral in [0,1]
   y <- 0.5 * x + 0.5 # from [-1,1] to [0, 1]
   # log-density of U = sqrt(chisq_nu / nu) evaluated at y.
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) + (nu-1) * log(y) - nu * y * y / 2
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q  # scaled quantiles u*q, one per node
   fy <- 0.5 * apply(y, 1, pNDBD, r, delta, n, xx) * fx  # 0.5 = interval Jacobian
   I <- sum(fy * w)
   # computing integral in [1, infty), using the transformation
   # y <- -ln(x), 0 < x < exp(-1)
   b <- exp(-1)
   a <- 0
   x <- (b - a) / 2 * x + (b + a) / 2 # from [-1,1] to [0, exp(-1)]
   y <- -log(x) # from [0, exp(-1)] to [1, +infty)
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) + (nu-1) * log(y) - nu * y * y / 2
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   # The extra 1/x is the Jacobian of y = -log(x).
   fy <- (b - a) / 2 * apply(y, 1, pNDBD, r, delta, n, xx) / x * fx
   I <- I + sum(fy * w)
   return(I)
}
# Function to compute the probability density function of the noncentral
# unilateral Dunnett's test statistic with finite degrees of freedom nu,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the degrees of freedom nu, the number of
# points n of the Gauss-Legendre quadrature and the list xx of nodes and
# weights. It returns density values.
# Derivative of pNDUDF w.r.t. q: mixes u * dNDUD(u*q) over the density of
# U = sqrt(chisq_nu / nu); the extra log(y) in fx is the factor u from the
# chain rule.
dNDUDF <- function(q, r, nu, delta, n = 32, xx)
{
   k <- length(r)
   x <- xx$nodes
   w <- xx$weights
   # computing integral in [0,1]
   y <- 0.5 * x + 0.5 # from [-1,1] to [0, 1]
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) +
      (nu-1) * log(y) - nu * y * y / 2 + log(y) # + jacobian (chain-rule factor u)
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   fy <- 0.5 * apply(y, 1, dNDUD, r, delta, n, xx) * fx
   I <- sum(fy * w)
   # computing integral in [1, infty), using the transformation
   # y <- -ln(x), 0 < x < exp(-1)
   b <- exp(-1)
   a <- 0
   x <- (b - a) / 2 * x + (b + a) / 2 # from [-1,1] to [0, exp(-1)]
   y <- -log(x) # from [0, exp(-1)] to [1, +infty)
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) +
      (nu-1) * log(y) - nu * y * y / 2 + log(y) # + jacobian (chain-rule factor u)
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   # The extra 1/x is the Jacobian of y = -log(x).
   fy <- (b - a) / 2 * apply(y, 1, dNDUD, r, delta, n, xx) / x * fx
   I <- I + sum(fy * w)
   return(I)
}
# Function to compute the probability density function of the noncentral
# bilateral Dunnett's test statistic with finite degrees of freedom nu,
# given the quantile q, the vector of correlations r, the vector of
# noncentrality parameters delta, the degrees of freedom nu, the number of
# points n of the Gauss-Legendre quadrature and the list xx of nodes and
# weights. It returns density values.
# Derivative of pNDBDF w.r.t. q: mixes u * dNDBD(u*q) over the density of
# U = sqrt(chisq_nu / nu); the extra log(y) in fx is the factor u from the
# chain rule.
dNDBDF <- function(q, r, nu, delta, n = 32, xx)
{
   k <- length(r)
   x <- xx$nodes
   w <- xx$weights
   # computing integral in [0,1]
   y <- 0.5 * x + 0.5 # from [-1,1] to [0, 1]
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) +
      (nu-1) * log(y) - nu * y * y / 2 + log(y) # + jacobian (chain-rule factor u)
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   fy <- 0.5 * apply(y, 1, dNDBD, r, delta, n, xx) * fx
   I <- sum(fy * w)
   # computing integral in [1, infty), using the transformation
   # y <- -ln(x), 0 < x < exp(-1)
   b <- exp(-1)
   a <- 0
   x <- (b - a) / 2 * x + (b + a) / 2 # from [-1,1] to [0, exp(-1)]
   y <- -log(x) # from [0, exp(-1)] to [1, +infty)
   fx <- nu/2 * log(nu) - lgamma(nu/2) - (nu/2-1) * log(2) +
      (nu-1) * log(y) - nu * y * y / 2 + log(y) # + jacobian (chain-rule factor u)
   fx <- exp(fx)
   y <- matrix(y, n, 1) * q
   # The extra 1/x is the Jacobian of y = -log(x).
   fy <- (b - a) / 2 * apply(y, 1, dNDBD, r, delta, n, xx) / x * fx
   I <- I + sum(fy * w)
   return(I)
}
# Function to compute quantiles from the noncentral unilateral Dunnett's
# test statistic distribution with finite degrees of freedom nu, given the
# probability p, the vector of correlations r, the vector of noncentrality
# parameters delta, the degrees of freedom nu, the number of points n of
# the Gauss-Legendre quadrature and the list x of nodes and weights.
# Newton-Raphson on pNDUDF() with derivative dNDUDF(); the start is a
# Sidak-corrected t quantile shifted by mean(delta), falling back to the
# infinite-df quantile qNDUD() when that start is too far off.
qNDUDF <- function(p, r, nu, delta, n = 32, x)
{
   k <- length(r)
   alpha <- 1 - p
   alpha_k <- 1 - (1-alpha)^(1/k)
   q0 <- qt(1 - alpha_k, nu) + mean(delta)# initial value
   p0 <- pNDUDF(q0, r, nu, delta, n, x)
   # Poor starting value: restart from the infinite-df quantile.
   if (abs(p0-p) > 0.1) q0 <- qNDUD(p, r, delta, n, x)
   maxIt <- 5000
   tol <- 1e-11  # absolute convergence tolerance on successive iterates
   conv <- FALSE
   it <- 0
   while ((conv == FALSE) & (it <= maxIt))
   {
      q1 <- q0 - (pNDUDF(q0, r, nu, delta, n, x) - p) / dNDUDF(q0, r, nu, delta, n, x)
      if (abs(q1-q0) <= tol) conv <- TRUE
      # Reflect negative overshoots back to the positive axis.
      if (q1 < 0) q1 <- -q1
      q0 <- q1
      it <- it + 1
   }
   return(q1)
}
# Function to compute quantiles from the noncentral bilateral Dunnett's
# test statistic distribution with finite degrees of freedom nu, given the
# probability p, the vector of correlations r, the vector of noncentrality
# parameters delta, the degrees of freedom nu, the number of points n of
# the Gauss-Legendre quadrature and the list x of nodes and weights.
# Newton-Raphson on pNDBDF() with derivative dNDBDF(); unlike qNDUDF this
# uses a RELATIVE convergence tolerance (abs(q0) * tol).
qNDBDF <- function(p, r, nu, delta, n = 32, x)
{
   k <- length(r)
   alpha <- 1 - p
   alphak <- 1 - (1-alpha/2)^(1/k)
   q0 <- qt(1 - alphak, nu) + mean(delta) # initial value
   if (q0 < 0) q0 <- -q0
   p0 <- pNDBDF(q0, r, nu, delta, n, x)
   # Poor starting value: restart from the infinite-df quantile.
   if (abs(p0-p) > 0.1) q0 <- qNDBD(p, r, delta, n, x)
   maxIt <- 5000
   tol <- 1e-11  # relative convergence tolerance
   conv <- FALSE
   it <- 0
   while ((conv == FALSE) & (it <= maxIt))
   {
      p0 <- pNDBDF(q0, r, nu, delta, n, x)
      q1 <- q0 - (p0 - p) / dNDBDF(q0, r, nu, delta, n, x)
      if (abs(q1-q0) <= abs(q0) * tol) conv <- TRUE
      # Reflect negative overshoots back to the positive axis.
      if (q1 < 0) q1 <- -q1
      q0 <- q1
      it <- it + 1
   }
   return(q1)
}
|
67c6f974427a17f544722179cc4a76a9359d91e8
|
5e486717481a8d10db286f67dcfc365568f5b0dc
|
/ui.R
|
5d2f0824a597eeb059c641745d328225e1701aa5
|
[] |
no_license
|
andrewmaher/armed_conflict_app
|
1a8edeb75a8673009b5a9a5d19d6665390a68136
|
f8255cb5e1a312b7e5d2aeff7dd456a20d38d91c
|
refs/heads/master
| 2020-06-04T03:14:33.775258
| 2013-07-30T00:09:02
| 2013-07-30T00:09:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 475
|
r
|
ui.R
|
library(shiny)
# Placeholder for a D3 network rendered by the custom JS binding in
# graph.js: emits a div (containing an empty <svg/>) whose id the
# "shiny-network-output" output binding targets.
reactiveNetwork <- function (outputId)
{
  # paste0() replaces the equivalent paste(..., sep = "") idiom.
  HTML(paste0("<div id=\"", outputId, "\" class=\"shiny-network-output\"><svg /></div>"))
}
# UI definition: a year slider driving the D3 conflict network, which is
# rendered by graph.js into the reactive output "chart".
shinyUI(pageWithSidebar(
  headerPanel("History of Armed Conflict (1946-2012)"),
  sidebarPanel(
    # Integer year slider; format "####." renders years without
    # thousands separators.
    sliderInput("year", "Year:",format="####.",
                min = 1946, max = 2012, value = 1946,step=1)
  ),
  mainPanel(
    # graph.js supplies the JS output binding used by reactiveNetwork().
    includeHTML("graph.js"),
    reactiveNetwork(outputId = "chart")
  )
))
|
a71a154338d5c154cd8415568003068ae634c1f1
|
b7679e307eb2049ea5890d600823d66db6bdf4da
|
/R/matrixplot_gui.R
|
e22b0c56c54fd4ffe121d4ee1ea520ea10b8a65b
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"Unlicense"
] |
permissive
|
ldecicco-USGS/QWToolbox
|
977356dfdf7f0d49524d405edb5b3f77456404e1
|
84e747793ae9a24970a1ef7a80808940d3f37cef
|
refs/heads/master
| 2021-01-17T00:39:51.350305
| 2014-12-04T16:46:30
| 2014-12-04T16:46:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,545
|
r
|
matrixplot_gui.R
|
# Build (or rebuild) the "Matrix plot" tab of the gWidgets GUI: a site
# table, a parameter table, and a "Make plot" button calling matrixplot().
# NOTE(review): relies on globals from the enclosing app — interactive.frame
# (tab container) and qw.data$PlotTable (current data pull) — and writes the
# globals matrix.mainGroup / matrix.vargroup via <<- so they can be deleted
# and recreated on the next data pull.
matrixplot_gui <- function(...){
  ###These exist checks are so that the data is refreshed when the user does a new data pull
  ###Since the tables are dependent on the data, they need to be refreshed
  ## Set up main group, make global so can be deleted
  # Recreate the tab only if it does not exist or its widget was destroyed
  # (isExtant() reports whether the underlying widget is still alive).
  if(exists("matrix.mainGroup"))
  {
    if(!isExtant(matrix.mainGroup))
    {
      matrix.mainGroup <<- ggroup(label="Matrix plot",container=interactive.frame)
    }
  }else(matrix.mainGroup <<- ggroup(label="Matrix plot",container=interactive.frame))
  # Always rebuild the parameter frame so its tables reflect fresh data;
  # delete the old frame first when its widget is still alive.
  if(exists("matrix.vargroup"))
  {
    if(isExtant(matrix.vargroup))
    {
      delete(matrix.mainGroup,matrix.vargroup)
    }
    matrix.vargroup <<- gframe("Parameters",container=matrix.mainGroup,expand=TRUE,horizontal=FALSE)
  }else(matrix.vargroup <<- gframe("Parameters",container=matrix.mainGroup,expand=TRUE,horizontal=FALSE))
  # Hide the tab while populating it to avoid visible redraw flicker.
  visible(matrix.mainGroup) <- FALSE
  ###Parameter browser: multi-select tables of sites and parameters drawn
  ###from the current data pull.
  matrix.site.selection <- gtable(items = na.omit(unique(qw.data$PlotTable[c("SITE_NO","STATION_NM")])),multiple=TRUE,container = matrix.vargroup, expand = TRUE)
  matrix.plotparm <- gtable(items = na.omit(unique(qw.data$PlotTable[c("PARM_CD","PARM_NM","PARM_SEQ_GRP_CD")])),multiple=TRUE,container = matrix.vargroup, expand = TRUE, fill = TRUE)
  ###Plotting handler for table: pass the current selections to matrixplot()
  gbutton("Make plot",container=matrix.vargroup,handler = function(h,...) {
    matrixplot(matrix.site.selection = svalue(matrix.site.selection),
               matrix.plotparm = svalue(matrix.plotparm))
  })
  visible(matrix.mainGroup) <- TRUE
}
|
5a6c4859e9c25bc75d306e3d8eed12ddf3ba3d86
|
95f731dc0dd228779baef99e6e1bd83358f3d858
|
/formatting Monroe data/STEP4 zero offset correct dive data 15_16 March24.R
|
76b5e1f772c3b9f97465dc63cea3a3612787506a
|
[] |
no_license
|
JAP2018/Chinstrap-penguin-analysis
|
c19db976a372b7c8a3dc5dcc85f9e23c0b484f92
|
49b9c98e8d26bc602599d12c40a8014334913b6c
|
refs/heads/main
| 2023-05-11T10:05:47.677827
| 2021-05-29T19:37:58
| 2021-05-29T19:37:58
| 371,833,354
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,523
|
r
|
STEP4 zero offset correct dive data 15_16 March24.R
|
# Zero-offset correct TDR depth records and extract per-dive statistics for
# the 2015-16 Monroe Island chinstrap penguins: one TDR file per bird, one
# row of dive statistics per dive, accumulated over all birds/trips into
# create_df and written to CSV.
library(diveMove)
library(foreach)
registerDoSEQ()  # register the sequential backend for any %dopar% use
setwd("C:/Users/Jessica/Desktop/penguin analysis/15_16 analysis")
trip_info <- read.csv("trip_info_15_16_Dec11.csv")
pg_ids <-unique(trip_info$id) #these give list of individual penguins that go to sea
start_penguin<-1
end_penguin<-length(pg_ids)
trip_number<-0
a<-trip_number           # running trip counter across all birds
create_df<-data.frame()  # accumulator for per-dive statistics
for (i in start_penguin:end_penguin){
  print(paste("i=",i))
  # Round and bird numbers are encoded at fixed positions of the id string
  # (e.g. "15_16_R_BB" — TODO confirm the exact id layout).
  round_num <-substr(pg_ids[i],7,7)
  bird_num <-substr(pg_ids[i],9,10)
  setwd(paste("C:/Users/Jessica/Desktop/penguin data/2015-2016/Monroe Island/Tracking data/Round "
              ,round_num,"/Bird ",bird_num,"/",sep=""))
  print("s'up")
  # Retry the raw read up to 3 times; try() swallows transient failures.
  r <- NULL
  attempt <- 1
  while( is.null(r) && attempt <= 3 ) {
    attempt <- attempt + 1
    try(
      r <- read.csv(list.files(pattern="*00.csv"),skip=2)
    )
  }
  one_tdr<-r
  print("hi")
  #zero offset: re-read the same file as a diveMove TDR object
  read_tdr<-readTDR(list.files(pattern="*00.csv"), skip = 2,dateCol=2,timeCol=3, depthCol=6,
                    speed=FALSE,concurrentCols = 4:5,dtformat = "%d/%m/%Y %H:%M:%S", tz = "GMT")
  one_tdr$id <-paste("15_16",round_num,bird_num,sep="_") #assign this penguin a number
  locs<-which(trip_info$id %in% one_tdr$id) #gives locations of this id in the list
  # One iteration per recorded foraging trip of this bird.
  for (k in 1:length(locs)){
    a=a+1
    print(paste("a=",a))
    one_tdr$POSIXct <- as.POSIXct(paste(one_tdr$Date, one_tdr$Time, sep = " "),
                                  format = "%d/%m/%Y %H:%M:%S", tz = "gmt")
    start <- as.POSIXct(trip_info$leave_land[locs[k]])
    finish <- as.POSIXct(trip_info$backon_land[locs[k]])
    # Map trip start/end timestamps to TDR record indices; the divisor 5
    # presumably reflects a 5-record-per-TDR-sample ratio — TODO confirm.
    #start time
    start_time<-floor((one_tdr$Rec..[one_tdr$POSIXct==start])/5)+1
    #end time
    end_time<-ceiling((one_tdr$Rec..[one_tdr$POSIXct==finish])/5)+1
    test <-read_tdr[start_time:end_time]
    tt <- getTime(test)
    d <- getDepth(test)
    K <- c(10, 50)# specify the smoothing and filtering windows
    P <- c(0.5, 0.05)
    # Zero-offset correction (filter method) and dive detection at a
    # 2 m dive threshold.
    dcalib<- calibrateDepth(test, dive.thr=2,zoc.method="filter",k = K, probs= P, depth.bounds =c(-2,2),descent.crit.q=0.01, ascent.crit.q=0,knot.factor=20)
    diveinfo<- NULL
    try(diveinfo<-diveStats(dcalib, depth.deriv=TRUE))
    if (!is.null(diveinfo)){
      diveinfo$trip_num <-a # total trip number
      diveinfo$id <-paste("15_16",round_num,bird_num,sep="_")
    }
  }
  # NOTE(review): this rbind sits OUTSIDE the k loop, so only the LAST
  # trip's diveinfo per bird is appended; earlier trips of multi-trip
  # birds are discarded. Confirm whether that is intended (the output
  # filename suggests a per-trip run).
  if (!is.null(diveinfo)){
    create_df<-rbind(create_df,diveinfo)
  }
}
setwd("C:/Users/Jessica/Desktop/penguin analysis/15_16 analysis")
write.csv(file = "15_16_diveinfo_March24_trip2_67.csv", create_df)
|
f10b360a2499701d692358bcbb7415d8cb6bf426
|
439322912321e742e7f92374ecb76c76743984cb
|
/man/bootstrapml.Rd
|
170c3e526370d27d8d27465e147901dbf6681ca7
|
[
"MIT"
] |
permissive
|
vbaliga/univariateML
|
62d2c00017b93f2e1ed8357dd5d9062cf66744cd
|
0410647e2528dc61fa7b7b7cce95273777059baf
|
refs/heads/master
| 2020-09-11T19:04:44.904257
| 2019-11-24T19:12:20
| 2019-11-24T19:12:20
| 222,160,994
| 0
| 0
|
NOASSERTION
| 2019-11-16T21:23:07
| 2019-11-16T21:23:07
| null |
UTF-8
|
R
| false
| true
| 2,962
|
rd
|
bootstrapml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bootstrap.R
\name{bootstrapml}
\alias{bootstrapml}
\title{Parametric Bootstrap on Distributions Fitted with Maximum Likelihood}
\usage{
bootstrapml(object, Nreps = 1000, map = identity,
reducer = stats::quantile, ...)
}
\arguments{
\item{object}{A \code{univariateML} object.}
\item{Nreps}{Positive integer. The number of bootstrap samples.}
\item{map}{A function of the parameters of the \code{univariateML} object.
Defaults to the identity.}
\item{reducer}{A reducer function. Defaults to \code{stats::quantile} with
default argument \code{probs = c(0.025, 0.975)}.}
\item{...}{Passed to \code{reducer}.}
}
\value{
The transposed map-reduced bootstrap samples.
}
\description{
The parametric bootstrap is a resampling technique using random variates from
a known parametric distribution. In this function the distribution of the
random variates is completely determined by the \code{unvariateML} object
\code{object}.
}
\details{
For each bootstrap iteration a maximum likelihood estimate is calculated using
the \code{ml***} function specified by \code{object}. The resulting numeric
vector is then passed to \code{map}. The values returned by \code{map} are collected in
an array and the \code{reducer} is called on each row of the array.
By default the \code{map} function is the identity and the default \code{reducer} is
the quantile function taking the argument \code{probs}, which defaults to \code{c(0.025, 0.975)}.
This corresponds to a 95\% basic percentile confidence interval and is also reported
by \code{\link{confint}}
\emph{Note:} The default confidence intervals are percentile intervals, not empirical intervals.
These confidence intervals will in some cases have poor coverage as they are not studentized,
see e.g. Carpenter, J., & Bithell, J. (2000).
}
\examples{
\dontrun{
set.seed(1)
object = mlgamma(mtcars$qsec)
## Calculate c(0.025, 0.975) confidence interval for the gamma parameters.
bootstrapml(object)
# 2.5\% 97.5\%
# shape 68.624945 160.841557
# rate 3.896915 9.089194
## The mean of a gamma distribution is shape/rate. Now we calculate a
## parametric bootstrap confidence interval for the mean with confidence
## limits c(0.05, 0.95)
bootstrapml(object, map = function(x) x[1]/x[2], probs = c(0.05, 0.95))
# 5\% 95\%
# 17.33962 18.31253
## Print a histogram of the bootstrapped estimates from an exponential.
object = mlexp(mtcars$qsec)
hist(bootstrapml(object, reducer = identity))
}
}
\references{
Efron, B., & Tibshirani, R. J. (1994). An introduction to the bootstrap. CRC press.
Carpenter, J., & Bithell, J. (2000). Bootstrap confidence intervals: when, which, what? A practical guide for medical statisticians. Statistics in medicine, 19(9), 1141-1164.
}
\seealso{
\code{\link{confint}} for an application of \code{bootstrapml}.
}
|
cea9735b4d579584633b4a74a1ca2cacf1841d72
|
bc274026b70d78b3cd5223ffb91316b5cb615778
|
/geoestadistcia.R
|
b535543263add9ae6cb08c0dda4978ed59744f8b
|
[] |
no_license
|
maestria-geotel-201902/unidad-0-asignacion-99-mi-proyecto-yurbaez
|
67865e43fc1812fbc9ad96b7f937fae146f1f194
|
6607caa98a4b6acf2463638fa98829aab015c5ff
|
refs/heads/master
| 2020-08-31T01:47:53.635080
| 2020-02-23T21:11:23
| 2020-02-23T21:11:23
| 218,549,029
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,866
|
r
|
geoestadistcia.R
|
## Universidad Autonoma de Santo Domingo
## Maestría Teledeteccion y Ciencias de la Informacion Geografica
## Materia: Analisis Espacial
## Profesor: José Ramón Martínez
## USO DE VARIABLES MORFOMETRICAS EN EL ANALISIS DE LA DENSIDAD DE DRENAJE
## DE LAS MICROCUENCAS DE ORDEN 1 (STRAHLER) DEL RIO OCOA
## EN LA REPUBLICA DOMINICANA
## Maestrantes: Alba Cadete, Mirel Volcán, Yoenny Urbáez
## LIBRERIAS A UTILIZAR
library(sf)
library(tidyverse)
library(gstat)
library(stars)
library(tmap)
library(ez)
library(RColorBrewer)
library (sp)
library(spdep)
library(lmtest)
library(spData)
library(ggplot2)
library(knitr)
library(spatial)
source('lisaclusters.R')
## DATA IMPORT, ORGANISATION AND INTEROPERABILITY
# Load the morphometric-variable layer (outlet parameters of order-1 basins),
# UTM zone 19N (EPSG:32619)
(datos <- st_read('paramsoutlet_orden1.gpkg', crs = 32619))
(datos <- datos %>% st_difference())   # remove overlapping/duplicate geometries
# Strahler order 1-4 basin polygons
(pol1 <- st_read(dsn = 'r_stream_basins_1.geojson', crs = 32619))
pol2 <- st_read(dsn = 'r_stream_basins_2.geojson', crs = 32619)
pol3 <- st_read(dsn = 'r_stream_basins_3.geojson', crs = 32619)
pol4 <- st_read(dsn = 'r_stream_basins_4.geojson', crs = 32619)
# Basins of stream-network order 1 (Strahler classification)
datos %>% dplyr::filter(Max_order_Strahler==1)
# Mean of every numeric variable
datos %>%
select_if(is.numeric) %>%
gather(variable, valor, -geom) %>%
st_drop_geometry() %>%
group_by(variable) %>%
summarise(m=mean(valor, na.rm=T))
# Faceted dot maps, one panel per variable
datos %>%
select_if(is.numeric) %>%
gather(variable, valor, -geom) %>%
tm_shape() + tm_dots(col = 'valor') + tm_facets(by='variable', free.coords = F, free.scales = T)
# Numeric columns only, excluding all-NA and zero-variance columns
datosnum <- datos %>%
st_drop_geometry() %>%
select_if(is.numeric) %>%
select_if(~ sum(!is.na(.))>0) %>%
select_if(function(x) var(x, na.rm=T)!=0)
# Pairwise correlations between variables as a selection criterion
datosnum %>% ezCor(r_size_lims = 2:3, label_size = 2)
datosnum %>% cor
# Spatial join of the selected variables
VARSEL <- datos %>%
dplyr::select(
DD = Drainage_Density_km_over_km2,
SF = Shape_Factor,
ER = Elongation_Ratio,
TS = Total_Stream_Length_km,
MS = Mean_Slope
)
Varselpol1 <- pol1 %>% st_join(left = F, VARSEL)
Varselpol2 <- Varselpol1 %>%
mutate(logDD = log(DD))   # log-transformed drainage density
## XY object with the attributes of Varselpol2,
## coordinates taken from the polygon centroids
xy <- Varselpol2 %>%
st_centroid() %>%
mutate(x=unlist(map(geometry,1)),
y=unlist(map(geometry,2))) %>%
st_drop_geometry() %>%
select(fid, x, y)
## Varselpol3: Varselpol2 joined with the centroid coordinates
Varselpol3 <- Varselpol2 %>%
inner_join(xy)
Varselpol3 %>%
st_drop_geometry()
Varselpol3
# NEIGHBOURHOOD STRUCTURES
## Contiguity-based neighbours
Vecxcont <- poly2nb(Varselpol3)
summary(Vecxcont)
## Neighbours by count: the 5 nearest neighbours
Varselpol3.sp <- as_Spatial(Varselpol3)
coords <- coordinates(Varselpol3.sp)
VecxK <- knn2nb(knearneigh(coords, k=5))
## Spatial weights from the kNN neighbours (default row-standardised "W" style)
PesoW <- nb2listw(VecxK)
PesoW
PesowB <- nb2listw(VecxK, style = 'B')   # binary "B" style weights
PesowB
## Neighbour weights for the full dataset
datos <- datos %>% st_difference()
coords <- coordinates(as_Spatial(datos))
nb <- knn2nb(knearneigh(coords, k = 5))
summary(nb)
# ESDA (exploratory spatial data analysis)
# Choropleth of raw drainage density (Jenks breaks)
p1 <- tm_shape(Varselpol3) +
tm_fill(col = "DD", style = 'jenks', palette = brewer.pal(9, name = 'Reds')) +
tm_borders(lwd = 0.5)
p1
# Choropleth of log drainage density
p2 <- tm_shape(Varselpol3) +
tm_fill(col = "logDD", style = 'jenks',
palette = brewer.pal(9, name = 'Reds'), midpoint = NA) +
tm_borders(lwd = 0.5)
tmap_arrange(p1, p2)
# QQ plots of each variable against the normal distribution
Varselpol3 %>% st_drop_geometry() %>%
gather(variable, valor, -(fid:label)) %>%
ggplot() + aes(sample=valor) +
stat_qq() + stat_qq_line() + theme_bw() +
theme(text = element_text(size = 14)) +
facet_wrap(~variable, scales = 'free')
# Shapiro-Wilk normality p-value per variable
Varselpol3 %>% st_drop_geometry() %>%
gather(variable, valor, -(fid:label)) %>% group_by(variable) %>%
summarise(prueba_normalidad=shapiro.test(valor)$p.value)
# LISA cluster map of logDD (helper sourced from lisaclusters.R)
lisamap(objesp = Varselpol3,
var ='logDD',
pesos = PesoW,
tituloleyenda = 'Significancia\n("x-y", lΓ©ase\ncomo "x"\nrodeado de "y"',
leyenda = T,
anchuratitulo = 1000,
tamanotitulo = 16,
fuentedatos = 'SRTM',
titulomapa = paste0('Clusters LISA de logDD'))
## MODELING
# Requires tidyverse, sf, spdep, lmtest plus the objects created in the
# neighbourhood/autocorrelation analyses:
#   Varselpol3 - sf object with the source data
#   PesoW      - row-standardised weights (style "W")
#   PesowB     - binary weights (style "B")
# Selected variable: drainage density (DD).
# Express every numeric variable as a percentage of DD (suffix _PCT) and as
# the log of that percentage (suffix _PCTLOG).
# NOTE(review): mutate_each()/funs() are deprecated in current dplyr; kept
# because the generated column names (var_PCT / var_PCTLOG) are used below.
Varselpctlog <- Varselpol3 %>% mutate_each(
funs(PCT=round(./DD,4)*100,
PCTLOG=log(round(./DD,4)*100)),
-1, -2, -geometry, -label)
Varselpctlog
# Check global spatial autocorrelation with Moran's I
moran.plot(Varselpol3$logDD, PesoW)
(gmoranw <- moran.test(na.action = na.exclude, zero.policy = TRUE, x = log1p(datos$Drainage_Density_km_over_km2), listw = PesoW))
(gmoranb <- moran.test(na.action = na.exclude, zero.policy = TRUE, x = log1p(datos$Drainage_Density_km_over_km2), listw = PesowB))
gmoranb <- moran.test(na.action = na.exclude, zero.policy = TRUE, x = Varselpctlog$logDD_PCT, listw = PesowB)
gmoranb
gmoranwl <- moran.test(na.action = na.exclude, zero.policy = TRUE, x = Varselpctlog$logDD_PCTLOG, listw = PesoW)
gmoranwl
# BUG FIX: original read "zero.policy = Tx = Varselpctlog$logDD_PCTLOG" (a
# missing comma made it a syntax error); restored "zero.policy = TRUE, x = ...".
gmoranbl <- moran.test(na.action = na.exclude, zero.policy = TRUE, x = Varselpctlog$logDD_PCTLOG, listw = PesowB)
gmoranbl
# Sanity check: Moran's I on pure noise should come out non-significant
(gmoranwale<-moran.test(na.action = na.exclude, zero.policy = TRUE, x=rnorm(3027),listw = PesoW))
# If p is below the significance level (commonly 0.05 or 0.01) we reject the
# null hypothesis of "no spatial autocorrelation". So we conclude there is, a
# priori, spatial autocorrelation, both for the original variable (suffix
# _PCT) and for the transformed version (suffix _PCTLOG).
# Evaluate whether the normality assumption holds:
shapiro.test(Varselpctlog$logDD_PCT)
shapiro.test(Varselpctlog$logDD_PCTLOG)
# Ordinary linear model on the transformed variables; evaluate homoscedasticity.
# Linear model using all selected variables (response: logDD)
modlin <- Varselpol3 %>%
select(logDD, TS, MS, SF, ER) %>%
st_drop_geometry() %>%
lm(logDD ~ ., .)
modlin %>% summary
# Linear model on the _PCTLOG-transformed variables
modlinc <- Varselpctlog %>%
select(contains('_PCTLOG')) %>%
st_drop_geometry() %>%
lm(logDD_PCTLOG ~ ., .)
modlinc %>% summary
modlinc %>% bptest   # Breusch-Pagan test for heteroscedasticity
# Simultaneous autoregressive (SAR) model with all _PCTLOG predictors
sar <- Varselpctlog %>% select(contains('_PCTLOG')) %>%
st_drop_geometry() %>%
spautolm(
formula = logDD_PCTLOG ~ .,
data = .,
listw =PesoW)
summary(sar)
# SAR with the predictors spelled out explicitly
sar2 <- Varselpctlog %>% select(contains('_PCTLOG')) %>%
st_drop_geometry() %>%
spautolm(
formula = logDD_PCTLOG ~ TS_PCTLOG + MS_PCTLOG + SF_PCTLOG + ER_PCTLOG,
data = .,
listw = PesoW)
summary(sar2)
# SAR dropping ER_PCTLOG
Sar3 <- Varselpctlog %>% select(contains('_PCTLOG')) %>%
st_drop_geometry() %>%
spautolm(
formula = logDD_PCTLOG ~ TS_PCTLOG + MS_PCTLOG + SF_PCTLOG,
data = .,
listw = PesoW)
summary(Sar3)
## LOCAL SPATIAL AUTOCORRELATION
Varselpol_lomo <- localmoran(Varselpctlog$'logDD', listw = PesoW)
summary(Varselpol_lomo)
# Standardised logDD (z-scores) and its spatial lag
Varselpctlog$sVarselpctlogDD <- scale (Varselpctlog$'logDD') %>% as.vector()
Varselpctlog$laglogDD <-lag.listw(PesoW, Varselpctlog$'logDD')
summary(Varselpctlog$sVarselpctlogDD)
summary(Varselpctlog$laglogDD)
puntz <- Varselpctlog$sVarselpctlogDD
rezag <- Varselpctlog$laglogDD
df <- data.frame(puntz, rezag)
moran.plot(puntz, PesoW)
# Moran scatterplot in ggplot
ggplot(df, aes(puntz, rezag)) +
geom_point() + geom_smooth(method = 'lm', se = F) +
geom_hline(yintercept = 0, linetype = 'dashed') +
geom_vline(xintercept = 0, linetype = 'dashed')
# New column flagging significance of the local correlation, filled with NAs
Varselpctlog$quad_sig <- NA
## high-high quadrant
# NOTE(review): "$sVarselpctlog" relies on $ partial matching of the column
# "sVarselpctlogDD"; spell it out to avoid a silent NULL if columns change.
Varselpctlog[(Varselpctlog$sVarselpctlog >= 0 &
Varselpctlog$laglogDD >= 0) &
(Varselpol_lomo[ , 4] <= 0.05), "quad_sig"] <- "high-high"
# low-low quadrant
# NOTE(review): the lag condition here is ">= 0", identical to high-high; a
# low-low cluster would normally require laglogDD <= 0 - confirm intended.
Varselpctlog[(Varselpctlog$sVarselpctlog <= 0 &
Varselpctlog$laglogDD >= 0) &
(Varselpol_lomo[, 4] <= 0.05), "quad_sig"] <- "low-low"
# Non-significant observations
Varselpctlog[(Varselpol_lomo[, 4] > 0.05), "quad_sig"] <- "not signif."
# Convert to factor
Varselpctlog$quad_sig <- as.factor(Varselpctlog$quad_sig)
# Cluster map
Varselpctlog %>%
ggplot() +
aes(fill = quad_sig) +
geom_sf(color = "white", size = .05) +
theme_void() + scale_fill_brewer(palette = "Set1")
# Convert to factor (repeats the step above verbatim)
Varselpctlog$quad_sig <- as.factor(Varselpctlog$quad_sig)
# Significance map (repeats the map above verbatim)
Varselpctlog %>%
ggplot() +
aes(fill = quad_sig) +
geom_sf(color = "white", size = .05) +
theme_void() + scale_fill_brewer(palette = "Set1")
# Spatial autoregressive model
# NOTE(review): "varselpol3log" is not defined anywhere in this script
# (probably Varselpctlog was meant), and "slogDD_PCTLOG" is not a column
# created above - this block will error as written.
sar <- varselpol3log %>% select(contains('_PCTLOG')) %>%
st_drop_geometry() %>%
spautolm(
formula = slogDD_PCTLOG ~ .,
data = .,
listw = PesoW)
summary(sar)
modlin %>% bptest()   # Breusch-Pagan on the untransformed linear model
modlin %>% plot()     # standard lm diagnostic plots
# SAR on the untransformed selected variables
modSAR <- Varselpol3 %>%
select(logDD, TS, MS, SF, ER) %>%
st_drop_geometry() %>%
spautolm(logDD ~ ., ., listw = PesoW)
modSAR %>% summary
## ORDINARY KRIGING
## Coordinate system: WGS84 UTM Zone 19
## EPSG: 32619
crsdestino <- 32619
## Sample variograms
## From the sample variogram we fit a model variogram, which is what the
## kriging function uses for the interpolation
## orden1logdd holds the log-transformed drainage density (logDD)
orden1logdd <- datos %>% mutate(logDD=log(Drainage_Density_km_over_km2)) %>% select(logDD)
## Sample (empirical) variogram v
v <- gstat::variogram(logDD~1, orden1logdd)
plot(v)
## Model variogram used by the kriging interpolation.
## A model that rises immediately is needed, hence the candidates below.
## Spherical model variogram with a 1000 m range
v_m <- fit.variogram(v, vgm(model = "Sph", range = 1000))
v_m
plot(v, v_m)
## model psill range
## Sph 0.1658994 988.8336
## Exponential model variogram with a 1000 m range
v_m2 <- fit.variogram(v, vgm(model = "Exp", range = 1000))
v_m2
## model psill range
## Exp 0.1658997 219.8573
plot(v, v_m2)
## Gaussian model variogram
v_m3 <- fit.variogram(v, vgm(model = "Gau", range = 1000))
v_m3
## model psill range
## Gau 0.165893 315.4503
plot(v, v_m3, plot.numbers = T)
plot(v, v_m3)
# Compare goodness of fit via the sum-of-squared-errors attribute
attr(v_m, 'SSErr')
## [1] 2.496814e-07
attr(v_m2, 'SSErr')
## [1] 2.505845e-07
attr(v_m3, 'SSErr')
## [1] 6.398799e-07
# Prediction grid over the bounding box of the data.
# NOTE(review): dx = 500 gives 500 m cells, not the 1 km the original inline
# comment claimed - confirm the intended resolution.
grd <- st_bbox(orden1logdd) %>%
st_as_stars(dx = 500) %>%
st_set_crs(crsdestino)
grd
plot(grd)
## Ordinary kriging of logDD on the grid with the spherical model
k <- krige(formula = logDD~1, locations = orden1logdd, newdata = grd, model = v_m)
plot(k)
## Render the stars prediction object with ggplot
ggplot() +
geom_stars(data = k, aes(fill = var1.pred, x = x, y = y)) +
scale_fill_gradient(low="#deebf7", high="#3182bd") +
geom_sf(data = st_cast(Varselpol3, "MULTILINESTRING")) +
geom_sf(data = orden1logdd) +
geom_sf_text(data = Varselpol3, aes(label=''), check_overlap = T, size = 1) +
theme_bw()
# Same map back-transformed to the DD scale (exp), log10 colour scale
ggplot() +
geom_stars(data = exp(k), aes(fill = var1.pred, x = x, y = y)) +
scale_fill_gradient(low="#deebf7", high="#3182bd", trans = 'log10') +
geom_sf(data = st_cast(Varselpol3, "MULTILINESTRING")) +
geom_sf(data = orden1logdd) +
geom_sf_text(data = Varselpol3, aes(label=''), check_overlap = T, size = 1) +
theme_bw()
|
baa77b398c7a462e585ff218656a9f0668e84415
|
5247d313d1637170b6bbc5e367aba46c88725efd
|
/man/senators_profile.Rd
|
7a0958e4bed99c2bcf0f4a5057621c2257f2cf70
|
[] |
no_license
|
fentonmartin/twitterreport
|
dac5c512eea0831d1a84bef8d2f849eab2b12373
|
5ddb467b8650289322ae83e0525b4ff01fba0d1d
|
refs/heads/master
| 2021-08-22T04:25:01.834103
| 2017-11-29T07:47:43
| 2017-11-29T07:47:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,444
|
rd
|
senators_profile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{senators_profile}
\alias{senators_profile}
\title{US senators twitter profile}
\format{A data.frame with 100 rows and 23 columns}
\source{
https://dev.twitter.com
}
\description{
Extracted from the web using webcrawling, the dataset contains the following
columns:
}
\details{
\itemize{
\item \code{tw_id}
\item \code{tw_name}
\item \code{tw_screen_name}
\item \code{tw_contributors_enabled}
\item \code{tw_created_at}
\item \code{tw_default_profile}
\item \code{tw_default_profile_image}
\item \code{tw_description}
\item \code{tw_favourites_count}
\item \code{tw_followers_count}
\item \code{tw_friends_count}
\item \code{tw_geo_enabled}
\item \code{tw_is_translator}
\item \code{tw_lang}
\item \code{tw_listed_count}
\item \code{tw_location}
\item \code{tw_profile_image_url}
\item \code{tw_profile_image_url_https}
\item \code{tw_protected}
\item \code{tw_statuses_count}
\item \code{tw_time_zone}
\item \code{tw_utc_offset}
\item \code{tw_verified}
}
}
\seealso{
Other example datasets: \code{\link{names_female_en}},
\code{\link{names_female_es}},
\code{\link{names_male_en}}, \code{\link{names_male_es}},
\code{\link{senate_tweets}}, \code{\link{senators}},
\code{\link{sentiment_lexicon_neg_en}},
\code{\link{sentiment_lexicon_pos_en}},
\code{\link{warriner_et_al_en}},
\code{\link{warriner_et_al_es}}
}
\concept{example datasets}
|
8decdb50fb142bbcbeac50e9bfdae6bc0dc319ac
|
74796ee185cf786842d1f2bb53569ed908c2dfac
|
/plot3.R
|
2cd3a0252982a28916b6e96975148b5aa9714e0e
|
[] |
no_license
|
kristkerr/ExData_Plotting1
|
9a626d5f4c53ef66420e06bd3c01f645e40609c7
|
41a179aa75a6ee7328fe92a9570be86eb5891a61
|
refs/heads/master
| 2021-01-02T09:46:18.492575
| 2017-08-04T03:03:14
| 2017-08-04T03:03:14
| 99,296,690
| 0
| 0
| null | 2017-08-04T02:57:41
| 2017-08-04T02:57:40
| null |
UTF-8
|
R
| false
| false
| 933
|
r
|
plot3.R
|
###############################################################################
# plot3.R - Exploratory Data Analysis course project, plot 3.
# Reads two days (1-2 Feb 2007) of the UCI household power consumption data
# and plots the three energy sub-metering series against time.
f <- file.path(getwd(), "household_power_consumption.txt")
# Column names supplied explicitly (the file's own header row is skipped over).
# FIX: corrected the "Global_intesity" typo to "Global_intensity".
colNames <- c("Date", "Time", "Global_active_power",
              "Global_reactive_power", "Voltage", "Global_intensity",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# skip/nrow select the rows covering 1-2 Feb 2007; "?" marks missing values.
# NOTE(review): header = TRUE combined with skip consumes the first row after
# the skip as a (discarded) header line - confirm the row window is correct.
HPC <- read.table(f, sep = ";", col.names = colNames, na.strings = "?",
                  header = TRUE, skip = 66636, nrow = 2880)
library(lubridate)
# Parse d/m/Y dates, then build a full POSIXct timestamp for the x axis.
HPC$Date <- dmy(HPC$Date)
HPC$dateTime <- as.POSIXct(paste(HPC$Date, HPC$Time), format = "%Y-%m-%d %H:%M:%S")
# Line plot of sub-metering 1 (black), 2 (red) and 3 (blue) over time.
plot(HPC$dateTime, HPC$Sub_metering_1, xlab = "", ylab = "Energy sub metering",
     type = "l")
lines(HPC$dateTime, HPC$Sub_metering_2, col = "red")
lines(HPC$dateTime, HPC$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1))
|
3b29a48f3f7fa1b69342929fe84a46f140b4a4a2
|
adabbe94b928132c6cffc27f3eb8bcc4698dbc3c
|
/plot2.R
|
e71e329d2ad7721f9863b813e37759692bf112f6
|
[] |
no_license
|
ninnakin/EDA_Project1
|
018ad043622b84e88f6adc79c3928171bc806be8
|
eb6f03836664eae7a120f400b030c35c0d8728f5
|
refs/heads/master
| 2021-01-01T18:11:31.519250
| 2015-09-11T09:49:52
| 2015-09-11T09:49:52
| 42,301,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
plot2.R
|
## plot2.R - plot 2 of the first Exploratory Data Analysis course project:
## Global Active Power (kW) over 1-2 Feb 2007, written to plot2.png.
## Source: household_power_consumption.txt (semicolon separated). Columns:
##   Date (dd/mm/yyyy), Time (hh:mm:ss), Global_active_power (kW),
##   Global_reactive_power (kW), Voltage (V), Global_intensity (A),
##   Sub_metering_1 (Wh, kitchen: dishwasher/oven/microwave),
##   Sub_metering_2 (Wh, laundry room: washer/drier/fridge/light),
##   Sub_metering_3 (Wh, electric water heater and air conditioner).
# Load the full dataset, then keep only the two target days and free the rest.
importdata <- read.table("exdata-data-household_power_consumption\\household_power_consumption.txt", sep=";",header=TRUE,na.strings = "?")
feb.data <- subset(importdata,Date=="1/2/2007" |Date=="2/2/2007")
rm(importdata)
# Build proper Date and POSIXct date-time columns for plotting.
feb.data$Date <- as.Date(feb.data$Date,"%d/%m/%Y")
feb.data$datetime <- strptime(paste(feb.data$Date,feb.data$Time),"%Y-%m-%d %H:%M:%S")
# Write the figure to a 480x480 pixel PNG device.
png(filename="plot2.png", width=480, height=480, units="px")
# English locale so weekday names on the time axis come out in English.
Sys.setlocale("LC_TIME", "English")
# Margins sized to fit the axis label and title.
par(mar=c(3,5,2,2))
# Global active power against time as a line plot.
plot(feb.data$datetime, feb.data$Global_active_power, type="l", ylab="Global Active Power (kilowatts)",xlab="")
# Close the graphics device.
dev.off()
|
9142c0bd46cf132a43ad2c8dd3938c6ada003d5a
|
35de14603463a45028bd2aca76fa336c41186577
|
/man/LD.get_lead_r2.Rd
|
e41d9f67300cbfbbadaf044a73f83e54f50b3a11
|
[
"MIT"
] |
permissive
|
UKDRI/echolocatoR
|
e3cf1d65cc7113d02b2403960d6793b9249892de
|
0ccf40d2f126f755074e731f82386e4e01d6f6bb
|
refs/heads/master
| 2023-07-14T21:55:27.825635
| 2021-08-28T17:02:33
| 2021-08-28T17:02:33
| 416,442,683
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
LD.get_lead_r2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LD.R
\name{LD.get_lead_r2}
\alias{LD.get_lead_r2}
\title{Find correlates of the lead GWAS/QTL SNP}
\usage{
LD.get_lead_r2(
finemap_dat,
LD_matrix = NULL,
fillNA = 0,
LD_format = "matrix",
verbose = T
)
}
\description{
Find correlates of the lead GWAS/QTL SNP
}
|
00362cf008d6f676d3392fc504d7cb4c04b0363d
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/10422_1/rinput.R
|
00d6b247cadae046bb5121a4fc4f2f865602ddbf
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 10422_1.txt and save the result.
library(ape)
phylo_tree <- read.tree("10422_1.txt")
tree_unrooted <- unroot(phylo_tree)
write.tree(tree_unrooted, file = "10422_1_unrooted.txt")
|
4c6e4488374e6f5bc23bab01dac30c4272d72371
|
f8505119824149e3357fbd530df581264db0aad1
|
/LINREG - SIM STAT IN MED R.1.R
|
d4710741344812fc8c2618add8416b2ee31930c5
|
[] |
no_license
|
vancak/nntx_simulations
|
d5a3502297e574f1323e02a78c1e1d3ca5be6358
|
9e232de61ae9512aa96551256f6f43e174922596
|
refs/heads/main
| 2023-07-13T22:20:02.015071
| 2021-08-25T04:45:37
| 2021-08-25T04:45:37
| 399,691,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,691
|
r
|
LINREG - SIM STAT IN MED R.1.R
|
### LIN REG ###
### Linear-regression NNT simulation.
### Simulates N two-arm datasets of size n, estimates the covariate-adjusted
### NNT at x = 1.2 / 1.3 / 1.4 with nntcalc::nnt_x, and writes the estimates
### plus four CI types (TR, DL, NBS, PBS) to wide- and long-format CSVs.
library(nntcalc)
# rm(list = ls())
set.seed(1)
n <- 200           # observations per simulated dataset (n/2 per arm)
N <- 400           # number of simulation replicates
tr <- rep(1, n/2)  # treatment-arm indicator
cr <- rep(0, n/2)  # control-arm indicator
gr <- c(tr, cr)
sig <- 1           # residual standard deviation
b0 <- 1            # intercept
bt <- 1            # treatment-arm slope
bc <- 1/2          # control-arm slope is bt - 1/2
xxx <- rnorm(n, 3, 1.5)  # covariate, drawn once and reused in every replicate
x_seq <- seq(0.1, 8, length = n)
# NNT transform: g(p) = 1/p for p > 0, Inf otherwise
fun_g <- function(x){ ifelse( x > 0, 1/x, Inf ) }
tau <- 3           # success cutoff on the response
# True success probabilities P(Y > tau | x) per arm
p_t_true <- function(x){ 1 - pnorm( ( tau - b0 - bt * x ) / sig ) }
p_c_true <- function(x){ 1 - pnorm( ( tau - b0 - (bt - 1/2) * x ) / sig ) }
av_ps <- mean( p_t_true(xxx) - p_c_true(xxx) )
NNT_UN_TRUE <- fun_g( av_ps )  # true unadjusted NNT
NNT_UN_TRUE
# True adjusted NNT at covariate value x
NNT_X <- function(x){ fun_g( p_t_true(x) - p_c_true(x) ) }
NNT_X(1.2); NNT_X(1.3); NNT_X(1.4)
x01 <- 1.2; x02 <- 1.3; x03 <- 1.4
# Preallocate one list slot per replicate (originals grew empty lists)
list_1.2 <- vector("list", N)
list_1.3 <- vector("list", N)
list_1.4 <- vector("list", N)
for(j in 1:N){
### simulate responses. The vectorised rnorm(n/2, 0, 1) consumes the RNG
### stream in the same order as the original element-wise rnorm(1, 0, 1)
### loops, so results are identical under the same seed.
yt <- b0 + bt * xxx[1:(n/2)] + rnorm(n/2, 0, 1)              # treatment arm
yc <- b0 + (bt - 1/2) * xxx[(n/2 + 1):n] + rnorm(n/2, 0, 1)  # control arm
y <- c( yt, yc )
d <- c( tr, cr )
dat1 <- data.frame( cbind( y = y, gr = d, x_var = xxx ) )
# Adjusted NNT at each covariate value of interest
list_1.2[[j]] <- nnt_x( model = "linreg",
response = dat1$y,
x = dat1$x_var,
group = dat1$gr,
adj = x01,
cutoff = tau,
decrease = FALSE,
data = dat1 )
list_1.3[[j]] <- nnt_x( model = "linreg",
response = dat1$y,
x = dat1$x_var,
group = dat1$gr,
adj = x02,
cutoff = tau,
decrease = FALSE,
data = dat1 )
list_1.4[[j]] <- nnt_x( model = "linreg",
response = dat1$y,
x = dat1$x_var,
group = dat1$gr,
adj = x03,
cutoff = tau,
decrease = FALSE,
data = dat1 )
print(j)
}
# Collect, per replicate, rows 1-3 of the x01 result (NNTL, NNTML, adjusted
# NNT at x01) plus the adjusted-NNT rows for x02 and x03:
# 5 blocks x 9 columns = 45 columns.
all_est <- data.frame( matrix( NA, ncol = (27 + 9*2), nrow = N ))
for( i in 1:N){
all_est[i,] <- c( list_1.2[[i]][1,],
list_1.2[[i]][2,],
list_1.2[[i]][3,],
list_1.3[[i]][3,],
list_1.4[[i]][3,] )
}
colnames( all_est ) <- c( "NNTL", "NNTL_CI_TR_L", "NNTL_CI_TR_U", "NNTL_CI_DL_L", "NNTL_CI_DL_U", "NNTL_CI_NBS_L", "NNTL_CI_NBS_U", "NNTL_CI_PBS_L", "NNTL_CI_PBS_U",
"NNTML", "NNTML_CI_TR_L", "NNTML_CI_TR_U", "NNTML_CI_DL_L", "NNTML_CI_DL_U", "NNTML_CI_NBS_L", "NNTML_CI_NBS_U", "NNTML_CI_PBS_L", "NNTML_CI_PBS_U",
"NNT1.2", "NNT1.2_CI_TR_L", "NNT1.2_CI_TR_U", "NNT1.2_CI_DL_L", "NNT1.2_CI_DL_U", "NNT1.2_CI_NBS_L", "NNT1.2_CI_NBS_U", "NNT1.2_CI_PBS_L", "NNT1.2_CI_PBS_U",
"NNT1.3", "NNT1.3_CI_TR_L", "NNT1.3_CI_TR_U", "NNT1.3_CI_DL_L", "NNT1.3_CI_DL_U", "NNT1.3_CI_NBS_L", "NNT1.3_CI_NBS_U", "NNT1.3_CI_PBS_L", "NNT1.3_CI_PBS_U",
"NNT1.4", "NNT1.4_CI_TR_L", "NNT1.4_CI_TR_U", "NNT1.4_CI_DL_L", "NNT1.4_CI_DL_U", "NNT1.4_CI_NBS_L", "NNT1.4_CI_NBS_U", "NNT1.4_CI_PBS_L", "NNT1.4_CI_PBS_U" )
write.csv( all_est, "linreg_nnt200_wide_setseed1.csv", row.names = F )
library(tidyr)
# NOTE(review): gather() is superseded by pivot_longer(); kept so the long
# CSV retains its key/value column names.
long_nnt <- all_est %>% gather()
write.csv(long_nnt, "linreg_nnt200_long_setseed1.csv", row.names = F)
|
0b55fe3c570884b237bc52fcb3a23590ec1b05ec
|
ba65d8b42dfce42e1a4594d5a58a815194082112
|
/data-raw/ExampleVcfDatasets.R
|
63a9b6aae41111dba056acddfb0f2fa8dbcdb0af
|
[
"MIT"
] |
permissive
|
acc-bioinfo/TMBleR
|
b4ac594173ecc2ead98fd19696136f0d235065a3
|
f3ded88b111b8db0867222aaa8be4bcd9fe8e80d
|
refs/heads/main
| 2023-06-19T22:21:49.508537
| 2021-07-16T16:42:20
| 2021-07-16T16:42:20
| 378,995,779
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,796
|
r
|
ExampleVcfDatasets.R
|
### Build example datasets shipped with the TMBleR package:
### (1) Hellman_SimulatedFM1Panel_WES - per-patient mutation counts on a
###     simulated FoundationOne (FM1) panel vs whole-exome sequencing (WES);
### (2) ExampleWESvcfs - example WES VCFs (chr7) read into a list.
### Define working directory and genome assembly
inDir <- find.package("TMBleR") %>% file.path(., "data-raw/")
# Read genome
assembly="hg19"
### Read in input the WES dataset by Hellman et al. (Cancer Cell 2018)
## Read file names
infile <- "Hellman_CancerCell_2018__Mutations.txt"
## Add file path
infile <- paste0(inDir, infile)
dataset=read.table(file=infile, sep="\t", header=T)
## Subset the WES dataset so that it will only contain variants in the regions targeted by the FM1 panel
# NOTE(review): "VERO"/"FALSO" look like locale-specific TRUE/FALSE labels
# (Italian Excel export) - confirm the columns really encode logicals this way.
Panel=dataset[dataset$FM1=="VERO",]
Panel$PatientID=factor(Panel$PatientID)
Panel$ClinicalResponse=as.factor(Panel$Responder)
Panel=as.data.frame(Panel[,-24])   # drop column 24 - presumably Responder; verify
# Per-patient mutation counts on the simulated panel
PanelNumMuts <- Panel %>%
dplyr::select(PatientID, ClinicalResponse) %>%
dplyr::group_by(PatientID) %>%
dplyr::summarise(Panel.NumMuts=length(ClinicalResponse), ClinicalResponse=levels(factor(ClinicalResponse)))
# Recode VERO/FALSO levels to responder/nonresponder
PanelNumMuts$ClinicalResponse <- as.factor(PanelNumMuts$ClinicalResponse)
levels(PanelNumMuts$ClinicalResponse)[levels(PanelNumMuts$ClinicalResponse)=="VERO"] <- "responder"
levels(PanelNumMuts$ClinicalResponse)[levels(PanelNumMuts$ClinicalResponse)=="FALSO"] <- "nonresponder"
PanelNumMuts=as.data.frame(PanelNumMuts)
### Select for WES-based TMB quantification only nonsynonymous variants
# NOTE(review): filtering on nonsynonymous=="FALSO" selects rows flagged FALSE,
# which appears to contradict the comment above - confirm the column's meaning.
WES=dataset[dataset$nonsynonymous=="FALSO",]
WES$PatientID=factor(WES$PatientID)
WES$ClinicalResponse=as.factor(WES$Responder)
# NOTE(review): PanelNumMuts has only 3 columns, so "[,-24]" here is a no-op;
# likely a copy-paste of the Panel line above.
PanelNumMuts=as.data.frame(PanelNumMuts[,-24])
WES=as.data.frame(WES[,-24])
# Per-patient mutation counts on the full WES data
WESNumMuts <- WES %>%
dplyr::select(PatientID, ClinicalResponse) %>%
dplyr::group_by(PatientID) %>%
dplyr::summarise(WES.NumMuts=length(ClinicalResponse),
ClinicalResponse=levels(factor(ClinicalResponse)))
WESNumMuts$ClinicalResponse <- as.factor(WESNumMuts$ClinicalResponse)
levels(WESNumMuts$ClinicalResponse)[levels(WESNumMuts$ClinicalResponse)=="VERO"] <- "responder"
levels(WESNumMuts$ClinicalResponse)[levels(WESNumMuts$ClinicalResponse)=="FALSO"] <- "nonresponder"
WESNumMuts=as.data.frame(WESNumMuts)
### Merge simulated panel and WES per-patient counts (inner join on both keys)
Hellman_SimulatedFM1Panel_WES=merge(PanelNumMuts, WESNumMuts, by=c("PatientID", "ClinicalResponse"), all=F)
usethis::use_data(Hellman_SimulatedFM1Panel_WES, internal=FALSE, compress="gzip")
# Read genome
assembly="hg19"
# Read file names of the example chr7 WES VCFs
vcfFiles <- list(Sample1="Sample1_ExampleWES_chr7.vcf", Sample2="Sample2_ExampleWES_chr7.vcf", Sample3="Sample3_ExampleWES_chr7.vcf", Sample4="Sample4_ExampleWES_chr7.vcf")
# Add file path
inDir <- find.package("TMBleR") %>% file.path(., "data-raw/")
vcfFiles <- lapply(vcfFiles, function(x) paste(inDir, x, sep = ""))
# Read vcf files and put in a list (readVcfFiles is a TMBleR helper)
ExampleWESvcfs <- readVcfFiles(vcfFiles=vcfFiles, assembly)
usethis::use_data(ExampleWESvcfs, internal=FALSE, compress="gzip")
|
c42c05a2a5e8f31b3acc5595f8df9d4105b29070
|
3ec39ea137d1aaa0c7106c1ae49ddf395b5fda20
|
/R/settingsMetadata.R
|
b6215237128042a4d8c39341ce613192d7a76f01
|
[] |
no_license
|
mli1/safetyGraphics
|
e48c5bee150926f008d6185c764a179d5a3e5a71
|
165651d98c6894646f84884d1c9f7a24336d25f7
|
refs/heads/master
| 2023-01-22T17:00:10.267847
| 2020-01-16T14:26:14
| 2020-01-16T14:26:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,981
|
r
|
settingsMetadata.R
|
#' Settings Metadata
#'
#' Metadata about the settings used to configure safetyGraphics charts. One record per unique setting
#'
#' @format A data frame with 29 rows and 17 columns
#' \describe{
#' \item{chart_hepexplorer}{Flag indicating if the settings apply to the Hepatic Explorer Chart}
#' \item{chart_paneledoutlierexplorer}{Flag indicating if the settings apply to the Paneled Safety Outlier Explorer Chart}
#' \item{chart_safetyhistogram}{Flag indicating if the settings apply to the Safety Histogram Chart}
#' \item{chart_safetyoutlierexplorer}{Flag indicating if the settings apply to the Safety Outlier Explorer Chart}
#' \item{chart_safetyresultsovertime}{Flag indicating if the settings apply to the Safety Results Over Time Chart}
#' \item{chart_safetyshiftplot}{Flag indicating if the settings apply to the Safety Shift Plot Chart}
#' \item{chart_safetydeltadelta}{Flag indicating if the settings apply to the Safety Delta-Delta Chart}
#' \item{text_key}{Text key indicating the setting name. \code{'--'} delimiter indicates a nested setting}
#' \item{label}{Label}
#' \item{description}{Description}
#' \item{setting_type}{Expected type for setting value. Should be "character", "vector", "numeric" or "logical"}
#' \item{setting_required}{Flag indicating if the setting is required}
#' \item{column_mapping}{Flag indicating if the setting corresponds to a column in the associated data}
#' \item{column_type}{Expected type for the data column values. Should be "character","logical" or "numeric"}
#' \item{field_mapping}{Flag indicating whether the setting corresponds to a field-level mapping in the data}
#' \item{field_column_key}{Key for the column that provides options for the field-level mapping in the data}
#' \item{setting_cat}{Setting category (data, measure, appearance)}
#' \item{default}{Default value for non-data settings}
#' }
#'
#' @source Created for this package
"settingsMetadata"
|
98658cee30c5487bc3685d097248e63135e92e97
|
e207c7cec00ce6ee27d86668969b60d4e569ced7
|
/queries.R
|
8a3107259748e6ef8d2780c46312ab95aa4a1c3d
|
[] |
no_license
|
tardigrado-visual/prueba-rstudio-cloud
|
7dc88a8b4f38b644fab46ce28b82107dbfcb5770
|
74d79fe9c744eb5f405fae42fafa7882ec8c9e36
|
refs/heads/main
| 2023-02-21T07:27:00.556586
| 2021-01-29T03:55:55
| 2021-01-29T03:55:55
| 334,002,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
queries.R
|
# Queries: pull the CountryLanguage table from a public demo MySQL database
# and plot the share of Spanish speakers per country.
library(DBI)
library(RMySQL)
library(ggplot2)
library(dplyr)

# Connect to the publicly available 'shinydemo' sample database (guest access).
MyDataBase <- dbConnect(
  drv = RMySQL::MySQL(),
  dbname = "shinydemo",
  host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
  username = "guest",
  password = "guest")

dbListTables(MyDataBase)

# Fetch the whole table into memory.
DataDB <- dbGetQuery(MyDataBase, "select * from CountryLanguage")
head(DataDB)

# Close the connection as soon as the data is in memory; the original script
# leaked the open connection.
dbDisconnect(MyDataBase)

# Keep only Spanish-speaking countries.
spanish <- filter(DataDB, DataDB$Language == "Spanish")

# Plot: percentage of Spanish speakers per country, colored by whether
# Spanish is an official language.
ggplot(data=spanish, aes(x=CountryCode, y=Percentage, fill=IsOfficial))+
  geom_col()+
  coord_flip()
|
d18dc5c984916020f9e79a8ae6a95913bbd8e1a0
|
1d77ee1419d42d9f521cf5dcaa74497d7a3281a4
|
/scripts/singlem_tune_rf.R
|
f8a0f394b76d8cc069a6b840578f7245b644d63f
|
[
"BSD-3-Clause"
] |
permissive
|
dib-lab/2020-ibd
|
d6f6b8f7908b9075d5dc401c6d255a68532031fd
|
959a2b2e2daf2a4cb0de133adae7af89a4cc591d
|
refs/heads/master
| 2023-04-09T11:50:20.141311
| 2023-01-10T05:16:01
| 2023-01-10T05:16:01
| 219,787,097
| 2
| 1
| null | 2022-06-27T15:33:38
| 2019-11-05T16:06:57
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,227
|
r
|
singlem_tune_rf.R
|
library(readr)
library(dplyr)
library(ggplot2)
library(ranger)
library(mlr)
library(tuneRanger)
source(snakemake@input[['eval_model']])
source(snakemake@input[['ggconfusion']])
# Fix the RNG seed so the stochastic steps (bagging, tuning) are reproducible.
set.seed(1)

# Feature table: first column (X1) holds library names; move it to rownames.
ibd_filt <- read_csv(snakemake@input[['ibd_filt']])
ibd_filt <- as.data.frame(ibd_filt)
rownames(ibd_filt) <- ibd_filt$X1
ibd_filt <- ibd_filt[ , -1]

## read in study metadata
## collapse duplicate libraries so each sample only has one row
info <- read_tsv(snakemake@input[['info']]) %>%
  select(study_accession, library_name, diagnosis) %>%
  filter(library_name %in% rownames(ibd_filt)) %>%
  distinct()

## set validation cohort and remove it from variable selection
info_validation <- info %>%
  filter(study_accession == snakemake@params[['validation_study']])
ibd_validation <- ibd_filt[rownames(ibd_filt) %in% info_validation$library_name, ]
# match order of to ibd_filt
info_validation <- info_validation[order(match(info_validation$library_name, rownames(ibd_validation))), ]
# check names (prints TRUE when metadata rows align with the feature rows)
all.equal(info_validation$library_name, rownames(ibd_validation))
# make diagnosis var
diagnosis_validation <- info_validation$diagnosis

## remove validation cohort from training data
# using tuneRanger, we do not need to use a train/test/validation framework.
# Instead, tuneRanger does not need a test set because each tree is only trained
# on a subset of the data (bag), so we can use the rest (out of bag) to obtain
# an unbiased performance estimation of a single tree and therefore of all trees.
# see: https://github.com/PhilippPro/tuneRanger/issues/8
info_novalidation <- info %>%
  filter(study_accession != snakemake@params[['validation_study']])
ibd_novalidation <- ibd_filt[rownames(ibd_filt) %in% info_novalidation$library_name, ]
# match order of to ibd_filt
info_novalidation <- info_novalidation[order(match(info_novalidation$library_name, rownames(ibd_novalidation))), ]
# check names
all.equal(info_novalidation$library_name, rownames(ibd_novalidation))
# make diagnosis var
diagnosis_novalidation <- info_novalidation$diagnosis

# Include classification vars as cols in df
ibd_novalidation$diagnosis <- diagnosis_novalidation
ibd_validation$diagnosis <- diagnosis_validation

# tune ranger -------------------------------------------------------------
# Make an mlr task with the ibd_train dataset here
# (make.names() because mlr requires syntactically valid column names)
tmp <- ibd_novalidation
colnames(tmp) <- make.names(colnames(tmp))
ibd_task <- makeClassifTask(data = tmp, target = "diagnosis")
# Run tuning process
res <- tuneRanger(ibd_task, num.threads = snakemake@params[['threads']])
# write model parameters to a file
write_tsv(res$recommended.pars, snakemake@output[['recommended_pars']])

# build optimal model ----------------------------------------------------------
# extract model parameters and use to build an optimal RF
# use model parameters to build optimized RF
ibd_novalidation$diagnosis <- as.factor(ibd_novalidation$diagnosis)
optimal_rf <- ranger(
  dependent.variable.name = "diagnosis",
  mtry = res$recommended.pars$mtry,
  num.trees = 10000,
  data = ibd_novalidation,
  sample.fraction = res$recommended.pars$sample.fraction,
  min.node.size = res$recommended.pars$min.node.size,
  seed = 1,
  importance = 'impurity'
)
saveRDS(optimal_rf, file = snakemake@output[['optimal_rf']])

# evaluate the accuracy of the model and generate a confusion matrix
# (evaluate_model / ggconfusion are sourced from the snakemake inputs above)
# training data
evaluate_model(optimal_ranger = optimal_rf,
               data = ibd_novalidation,
               reference_class = diagnosis_novalidation,
               set = "novalidation",
               study_as_validation = snakemake@params[['validation_study']],
               accuracy_csv = snakemake@output[['training_accuracy']],
               confusion_pdf = snakemake@output[['training_confusion']])
# validation data
evaluate_model(optimal_ranger = optimal_rf,
               data = ibd_validation,
               reference_class = diagnosis_validation,
               set = "validation",
               study_as_validation = snakemake@params[['validation_study']],
               accuracy_csv = snakemake@output[['validation_accuracy']],
               confusion_pdf = snakemake@output[['validation_confusion']])
|
79af43ddaa7500d8a29fa874494d594e58d430f2
|
0dad68bd3a28894180f18ea147026c8438912a73
|
/man/countNmers.Rd
|
b2de07ed190a3efbae543f92bbbbf7bcfcd142e8
|
[] |
no_license
|
sherrillmix/dnar
|
1bcc2fac63d8af059215dea6fd3e5cdc7200e81f
|
dead343faebda27057a1e7a5789e853b5b73316d
|
refs/heads/master
| 2022-08-12T14:04:05.052121
| 2022-07-13T18:59:18
| 2022-07-13T18:59:18
| 54,718,599
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
countNmers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dna.R
\name{countNmers}
\alias{countNmers}
\title{Count nMers in a string}
\usage{
countNmers(string, k = 10)
}
\arguments{
\item{string}{string to be searched for nmers}
\item{k}{length of nmers}
}
\value{
a sorted named vector giving the identity and counts for nMers
}
\description{
Count nMers in a string
}
\examples{
countNmers('AATTAATT',2)
countNmers('AATTAATT',3)
}
|
1ebd1df1dbc1e9f43f04ce1a3a0181cd84f3287b
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.compute/man/ec2_delete_snapshot.Rd
|
9759e259e344860cfb0d2e3d17232f026875bfd3
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,811
|
rd
|
ec2_delete_snapshot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_delete_snapshot}
\alias{ec2_delete_snapshot}
\title{Deletes the specified snapshot}
\usage{
ec2_delete_snapshot(SnapshotId, DryRun)
}
\arguments{
\item{SnapshotId}{[required] The ID of the EBS snapshot.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\value{
An empty list.
}
\description{
Deletes the specified snapshot.
When you make periodic snapshots of a volume, the snapshots are
incremental, and only the blocks on the device that have changed since
your last snapshot are saved in the new snapshot. When you delete a
snapshot, only the data not needed for any other snapshot is removed. So
regardless of which prior snapshots have been deleted, all active
snapshots will have access to all the information needed to restore the
volume.
You cannot delete a snapshot of the root device of an EBS volume used by
a registered AMI. You must first de-register the AMI before you can
delete the snapshot.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html}{Deleting an Amazon EBS snapshot}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$delete_snapshot(
SnapshotId = "string",
DryRun = TRUE|FALSE
)
}
}
\examples{
\dontrun{
# This example deletes a snapshot with the snapshot ID of
# `snap-1234567890abcdef0`. If the command succeeds, no output is
# returned.
svc$delete_snapshot(
SnapshotId = "snap-1234567890abcdef0"
)
}
}
\keyword{internal}
|
37968c33862aff2031089665ffbc553cd96bbc32
|
0344efa4a7730c9a8d5f4e17eb994032a112949f
|
/results by timezone.R
|
2829892baa4033cbb2bd6ea6863a032e4c1e4cbe
|
[] |
no_license
|
erbas/NAV-tool
|
8f313a6bc8a97e787e64765c48716cad9551673f
|
e3f5b01cf5f880ffbb20be7e10841c2fe4b7a1b9
|
refs/heads/master
| 2021-03-12T22:35:04.705374
| 2017-01-14T05:17:50
| 2017-01-14T05:17:50
| 16,805,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,396
|
r
|
results by timezone.R
|
# load libraries and functions
# Helper functions (load.all.trades, make.trades.USD, calc.pnl, read.saved.*,
# etc.) come from unreactive.R.
source('unreactive.R')

# load data - currencies only
# path1 <- "CRNCY_Trade File_Model/"
# path2 <- "CMDTY_Trade File_Model/"
# path3 <- "Revaluation rates/"
path1 <- "E:/Cloud Data/Published Returns/Global Currency Program/CRNCY_31 Dec 2013/CRNCY_Trade File_Model/Sub Strategy/"
path2 <- "E:/Cloud Data/Published Returns/Global Commodity Program/Dec 31_2013/CMDTY_Trade File_Model/Gold/"
# path1 <- "E:/Cloud Data/KT"
# path2 <- ""
path3 <- "E:/Cloud Data/Data History/Revaluation rates/"
files.to.load <- c(list.files(path1,pattern="*.csv",full.names=TRUE,recursive=TRUE), list.files(path2,pattern="*.csv",full.names=TRUE,recursive=TRUE))
# # files.to.load <- list.files(path1,pattern="*.csv",full.names=TRUE,recursive=TRUE)

# Full pipeline: load raw trades, convert to USD, split trades spanning
# month-ends, and compute per-leg PnL.
trade.data <- load.all.trades(files.to.load)
reval <- load.reval.files(path3,c("2010-01-01","2014-06-30"))
trades.usd <- make.trades.USD(trade.data, reval)
trades.extended <- split.trades.at.month.ends(trades.usd, reval)
extended.trades.pnl <- calc.pnl(trades.extended, reval)

# NOTE(review): the objects computed above are immediately overwritten here
# from cached copies on disk -- presumably the pipeline only needs to run
# once and these reload its saved output; confirm before re-running.
trade.data <- read.saved.trades()
reval <- read.saved.reval()
trades.usd <- read.saved.usd()
extended.trades.pnl <- read.saved.extended.pnl()

# recombine pnls
# Collapse the month-end-split legs back to one row per TradeId, keeping the
# first leg's currency pair and entry time and summing PnL across legs.
pnl.entry <- data.frame(TradeId=unique(sort(extended.trades.pnl$TradeId)), Ccy.pair="",Entry.time=0, PnL.USD=0,stringsAsFactors=FALSE)
for (i in 1:nrow(pnl.entry)) {
  idx <- which( extended.trades.pnl$TradeId == pnl.entry$TradeId[i])
  x <- extended.trades.pnl[idx,]
  pnl.entry$Ccy.pair[i] <- x[1,]$"Ccy pair"
  pnl.entry$Entry.time[i] <- x[1,]$"Entry time"
  pnl.entry$PnL.USD[i] <- sum(x$"PnL USD")
}
# analysis
# pnl by ccy by four hour time block
# other portfolios
# G7-extended (incl. gold) and plain G7 currency-pair universes.
g7.ext <- c("EURUSD", "AUDUSD", "GBPUSD", "USDJPY", "USDCAD", "USDCHF", "USDSGD", "EURJPY", "EURAUD", "EURCAD", "XAUUSD")
g7 <- c("EURUSD", "AUDUSD", "GBPUSD", "USDJPY", "USDCAD", "USDCHF", "USDSGD", "EURJPY", "EURAUD", "EURCAD")

# PnL series for G7 pairs only, indexed by trade entry time (London time).
pnl.entry.g7 <- pnl.entry[pnl.entry$Ccy.pair %in% g7,]
pnl.entry.xts <- xts(pnl.entry.g7$PnL.USD,as.POSIXct(as.numeric(pnl.entry.g7$Entry.time),origin='1970-01-01 00:00.00 UTC',tz="Europe/London"))
colnames(pnl.entry.xts) <- "PnL"

# Split into six 4-hour entry-time windows. The 10pm-2am window wraps
# midnight, so it is built from two ranges and recombined.
pnl.1 <- pnl.entry.xts['T06/T09:59']
pnl.2 <- pnl.entry.xts['T10/T13:59']
pnl.3 <- pnl.entry.xts['T14/T17:59']
pnl.4 <- pnl.entry.xts['T18/T21:59']
pnl.5.1 <- pnl.entry.xts['T22/T23:59']
pnl.5.2 <- pnl.entry.xts['T00:00/T01:59']
pnl.5 <- rbind(pnl.5.1,pnl.5.2)
pnl.6 <- pnl.entry.xts['T02/T05:59']
pnls <- list(pnl.1,pnl.2,pnl.3,pnl.4,pnl.5,pnl.6)
# collapse intraday timestamps to dates for plotting
pnls.date <- lapply(pnls, function(x) {index(x) <- as.Date(index(x)); x})

# plot number of trades in each 4 hour block
AUM <- 1.e8   # assumed assets under management in USD -- TODO confirm
tz.rtns <- sapply(pnls.date,sum)/AUM*100
tz.str <- paste(format(tz.rtns,digits=2),"%",sep="")
tz.labels <- c("6am-10am",
               "10am-2pm",
               "2pm-6pm",
               "6pm-10pm",
               "10pm-2am",
               "2am-6am")
barplot(sapply(pnls.date,length),names.arg=tz.labels,main="Number of Trades in each 4 hour window",cex.names=0.8)

# plot total returns in each 4 hour block (2x3 panel grid)
layout(t(matrix(1:6,3,2)),respect=FALSE)
cex.m <- 1
cex.lg <- 0.8
for (i in 1:6) {
  txt <- paste("Total Returns ",tz.labels[i])
  if (length(pnls.date[[i]]) > 1)
    plot.zoo(cumsum(pnls.date[[i]])/AUM*100,main=txt,ylab="% AUM",xlab="")
}
# analysis by ccy pair
trades.ccy <- split(pnl.entry, pnl.entry$Ccy.pair)

# plot returns by ccy and timezone
# One PDF per currency pair, with one panel of cumulative returns (% of AUM)
# per 4-hour entry-time window. (A large block of superseded, commented-out
# per-hour splitting code was removed here.)
for (x in trades.ccy) {
  # build an xts of trade PnL indexed by entry time (London time)
  ccy <- x$Ccy.pair[1]
  pnl.xts <- xts(x$PnL.USD, as.POSIXct(as.numeric(x$Entry.time), origin='1970-01-01 00:00.00 UTC', tz="Europe/London"))
  colnames(pnl.xts) <- ccy

  # split by time zone: six 4-hour windows; the 10pm-2am window wraps
  # midnight, so it is built from two ranges and recombined
  pnl.1 <- pnl.xts['T06/T09:59']
  pnl.2 <- pnl.xts['T10/T13:59']
  pnl.3 <- pnl.xts['T14/T17:59']
  pnl.4 <- pnl.xts['T18/T21:59']
  pnl.5.1 <- pnl.xts['T22/T23:59']
  pnl.5.2 <- pnl.xts['T00:00/T01:59']
  pnl.5 <- rbind(pnl.5.1, pnl.5.2)
  pnl.6 <- pnl.xts['T02/T05:59']
  pnls.ccy <- list(pnl.1, pnl.2, pnl.3, pnl.4, pnl.5, pnl.6)
  # collapse intraday timestamps to dates for plotting
  pnls.ccy <- lapply(pnls.ccy, function(x) {index(x) <- as.Date(index(x)); x})

  # make plot: 2x3 grid of cumulative-return panels written to one PDF
  f.name <- paste0("Performance by Hour/", ccy, ".pdf")
  pdf(file=f.name, width=11, height=8, onefile=TRUE, paper="a4r")
  layout(t(matrix(1:6, 3, 2)), respect=FALSE)
  cex.m <- 1
  cex.lg <- 0.7
  for (i in 1:6) {
    txt <- paste(ccy, tz.labels[i], sep=" ")
    # progress trace: ccy, window index, number of trades in the window
    cat(ccy, i, length(pnls.ccy[[i]]), "\n")
    if (length(pnls.ccy[[i]]) > 1)
      plot.zoo(cumsum(pnls.ccy[[i]])/AUM*100, main=txt, ylab="% AUM", xlab="")
  }
  dev.off()
}
# analysis by ccy and timezone - numbers
# Total return (% of AUM) per currency pair per 4-hour entry-time window.
performance.by.tz <- vector("list")
for (x in trades.ccy) {
  # make xts indexed by entry time (London time)
  ccy <- x$Ccy.pair[1]
  pnl.xts <- xts(x$PnL.USD, as.POSIXct(as.numeric(x$Entry.time), origin='1970-01-01 00:00.00 UTC', tz="Europe/London"))
  colnames(pnl.xts) <- ccy
  # split by time zone; BUGFIX: the 10pm-2am window previously used
  # 'T22/T01:59', which does not wrap midnight -- build it from two ranges
  # and recombine, as the plotting code above does
  pnl.1 <- pnl.xts['T06/T09:59']
  pnl.2 <- pnl.xts['T10/T13:59']
  pnl.3 <- pnl.xts['T14/T17:59']
  pnl.4 <- pnl.xts['T18/T21:59']
  pnl.5 <- rbind(pnl.xts['T22/T23:59'], pnl.xts['T00:00/T01:59'])
  pnl.6 <- pnl.xts['T02/T05:59']
  # combine into single data object, collapsing timestamps to dates
  pnls.ccy <- list(pnl.1, pnl.2, pnl.3, pnl.4, pnl.5, pnl.6)
  pnls.ccy <- lapply(pnls.ccy, function(x) {index(x) <- as.Date(index(x)); x})
  # total return per window, in % of AUM
  tmp.rtns <- sapply(pnls.ccy, sum)/AUM*100
  performance.by.tz[[ccy]] <- tmp.rtns
}

# window-by-ccy table of returns (rows = window start times)
rtns.by.tz <- as.data.frame(performance.by.tz)
row.names(rtns.by.tz) <- c("6am","10am","2pm","6pm","10pm","2am")

# raw totals per ccy, plus grand total appended as a final row
raw.total <- as.data.frame(colSums(rtns.by.tz))
colnames(raw.total) <- "Total Return"
raw.total <- rbind(raw.total, sum(rtns.by.tz))

# filtered totals: zero out ccy/window cells returning less than 0.5% of AUM
rtns.filter <- rtns.by.tz
rtns.filter[rtns.filter < 0.5] <- 0
filter.total <- as.data.frame(colSums(rtns.filter))
colnames(filter.total) <- "Total Return (filtered)"
filter.total <- rbind(filter.total, sum(rtns.filter))

rtns.all <- cbind(raw.total, filter.total)
# label the appended grand-total row (was hard-coded as row 17)
row.names(rtns.all)[nrow(rtns.all)] <- "Total"
|
1dc16f4e31b79f152dd33d4ea2191024f33ef4ad
|
14a2cb7f43957fe709e3591fea10d4e35c0e34a3
|
/run_analysis.R
|
212c13cd077213351fc583d59f2efa0722d28030
|
[] |
no_license
|
rtintor/C3_Final
|
426a7711790878506295c94b6dfbd341427f7acf
|
c6bcb7c8b7f717b9a69383728437f8847daddb9d
|
refs/heads/master
| 2021-07-19T16:56:06.821282
| 2017-10-28T04:37:35
| 2017-10-28T04:37:35
| 108,321,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,528
|
r
|
run_analysis.R
|
# run_analysis.R -- tidy the UCI HAR (Samsung accelerometer) dataset.
# Reads the fixed-width train/test feature files, merges them with subject
# and activity labels, keeps the mean/std features, and writes the
# per-subject/per-activity means to dataset.txt.
library(dplyr)
library(reshape2)

# feature names: one per fixed-width column of X_train/X_test
features <- read.delim("../features.txt", sep = " ", header = FALSE, stringsAsFactors = FALSE)
feature_names <- features$V2   # renamed from 'names' (shadowed base::names)

# every value in the X files occupies a 16-character fixed-width field,
# with 561 features per row
nfeatures <- 16
v_fw <- rep(nfeatures, 561)

# training set: features, subject ids, activity labels
training <- read.fwf("../train/X_train.txt", header = FALSE, widths = v_fw)
training_subject <- read.fwf("../train/subject_train.txt", header = FALSE, widths = c(5))
training_activity <- read.fwf("../train/y_train.txt", header = FALSE, widths = c(5))
names(training) <- feature_names
training_subject <- rename(training_subject, Subject = V1)
training_activity <- rename(training_activity, Activity = V1)
training_full <- cbind(training_subject, cbind(training_activity, training))

# test set, assembled the same way
test <- read.fwf("../test/X_test.txt", header = FALSE, widths = v_fw)
test_subject <- read.fwf("../test/subject_test.txt", header = FALSE, widths = c(5))
test_activity <- read.fwf("../test/y_test.txt", header = FALSE, widths = c(5))
names(test) <- feature_names
test_subject <- rename(test_subject, Subject = V1)
test_activity <- rename(test_activity, Activity = V1)
test_full <- cbind(test_subject, cbind(test_activity, test))

# merge train + test
mset <- bind_rows(training_full, test_full)

# keep mean/std features. NOTE: "mean()" is treated as a regex -- "()" matches
# an empty group, so this selects every name containing "mean" (including
# meanFreq), preserving the original behavior.
v_mean <- grepl("mean()", feature_names)
v_std <- grepl("std()", feature_names)
v_all <- v_mean | v_std
need_names <- feature_names[v_all]
need_names <- c("Subject", "Activity", need_names)

# all_of() makes selection by an external character vector explicit
final <- select(mset, all_of(need_names))
final <- arrange(final, Subject, Activity)

# tidy output: mean of every kept feature per subject/activity combination
final_means <- final %>% group_by(Subject, Activity) %>% summarise_all(mean)
# fixed partial argument match: row.name= -> row.names=
write.table(final_means, row.names = FALSE, "dataset.txt")
|
88dbd678a9174d0e0d2222ab97830c60388b5aa5
|
1eefbbd04c5aa78bb4f17d8add39aaadf067af02
|
/BLUP_function.R
|
a1a3679e9421a9b50d5acf8dc18afb51a772ba3a
|
[] |
no_license
|
longmanz/Conjugate_Gradient_Method
|
7f5e97eae58307a8de36e5819506d8582705b233
|
88a1f9b08d1b8c0fdddd174520e8bbd168436efd
|
refs/heads/master
| 2021-05-15T15:38:30.521663
| 2019-02-25T00:01:39
| 2019-02-25T00:01:39
| 107,405,523
| 0
| 1
| null | 2017-10-21T12:25:18
| 2017-10-18T12:30:20
|
R
|
UTF-8
|
R
| false
| false
| 975
|
r
|
BLUP_function.R
|
#' This is the script for BLUP function. It relies on the Conjugate Gradient method in
#' file: "Conjugate_Gradient_Method.R"
#'
#' version 1.05, on Oct 21, 2017.
setwd("~/Documents/GitHub/Conjugate_Gradient_Method/")
source(file = "Conjugate_Gradient_Method.R")
#' Ridge-regression BLUP of marker effects.
#'
#' Solves the mixed-model equations (X'X + lambda*I) b = X'y with
#' lambda = (Ve/Vg) * ncol(geno), using the conjugate gradient solver
#' sourced from "Conjugate_Gradient_Method.R".
#'
#' @param geno n x m genotype matrix (or data frame coercible to one)
#' @param pheno numeric phenotype vector of length n
#' @param Vg genetic variance component; must be positive
#' @param Ve residual variance component
#' @param sig_thres convergence threshold passed to ConjugateGradient
#' @param iter_num maximum number of CG iterations
#' @return numeric vector of estimated marker effects (length m)
BLUP <- function(geno=NULL, pheno = NULL, Vg = 0, Ve = 1, sig_thres = 1e-5, iter_num=50){
  # scalar short-circuit || (not elementwise |) is the right operator in if()
  if(is.null(geno) || is.null(pheno)){
    stop("please input the correct genotype/phenotype data frame.")
  }else if(nrow(geno) != length(pheno)){
    stop("the dimensions of the genotype and the phenotype data do not match.")
  }else if(Vg <= 0){
    # guard: with the default Vg = 0 the shrinkage parameter would be Inf
    stop("Vg must be positive.")
  }

  ## construct the A matrix (X'X plus ridge penalty on the diagonal)
  lambda <- Ve/Vg*ncol(geno)
  geno <- as.matrix(geno)
  A <- t(geno)%*%geno + lambda*diag(x=1, nrow=ncol(geno))

  ## construct the b vector as in Ax = b
  b <- t(geno) %*% pheno
  x0 <- rep(0, ncol(geno))

  blup_vec <- ConjugateGradient(A, b, x0, sig_thres = sig_thres, iter_num = iter_num)
  blup_vec
}
|
85024aa3e7870aa5c7b0e38455f84c7d75907178
|
9a7c905c6f077e2f030ea53c45ef4dfac96e7832
|
/visual_modules/custom_palette.R
|
719df840ddecfeb6f01fc0dd186a0e6462ad0040
|
[] |
no_license
|
dorottyawinter/stock_portfolio
|
f193f313cc7c1a5be9cdd28fd7f27586d4697c0a
|
1d45452788aa2d2266ae1cf430a52a8481e794bd
|
refs/heads/master
| 2022-12-01T14:22:00.715945
| 2020-08-03T21:20:38
| 2020-08-03T21:20:38
| 281,905,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
custom_palette.R
|
library(RColorBrewer)
library(tidyverse)
#' Build a named color palette for the levels of `color_col` in `df`.
#'
#' If any value contains 'PORTFOLIO', that level is pinned to a fixed
#' highlight color and listed first; all other levels draw from the
#' Wes Anderson 'Darjeeling2' palette, interpolated when more than 5
#' colors are needed.
#' NOTE(review): wes_palette() comes from the 'wesanderson' package, which
#' is not loaded in this file -- confirm it is attached elsewhere.
#'
#' @param df data frame containing the grouping column
#' @param color_col quoted/quosure column reference (unquoted with !!)
#' @return named character vector of colors, one per level
get_color_palette <- function(df, color_col){
  # Darjeeling2-based palette of exactly k colors (dedupes the four
  # branches of the original implementation).
  base_palette <- function(k) {
    if(k <= 5){
      wes_palette(name = 'Darjeeling2', n = k)
    }else{
      colorRampPalette(wes_palette(name = 'Darjeeling2', n = 5))(k)
    }
  }

  n = length(unique(df %>% pull(!! color_col)))
  custom_color <- '#d38dcc'   # fixed highlight color for the portfolio itself

  if(any(grepl(pattern = 'PORTFOLIO', x = df %>% pull(!! color_col)))){
    # 'PORTFOLIO' always gets the highlight color and comes first
    if(n == 1){
      custom_palette <- custom_color
    }else{
      custom_palette <- c(custom_color, base_palette(n - 1))
    }
    names(custom_palette) <- c('PORTFOLIO', setdiff(levels(as.factor(df %>% pull(!! color_col))), 'PORTFOLIO'))
  }else{
    custom_palette <- base_palette(n)
    names(custom_palette) <- levels(as.factor(df %>% pull(!! color_col)))
  }
  return(custom_palette)
}
|
07b33239d0ebab3c629057730c6b829629be271f
|
85df540e33b8aa3809b8f783b5eb51b1a407dc8e
|
/cheb/ui.R
|
fcee49cd295bbdb4aea5c4dd2c4594440b6c3c25
|
[] |
no_license
|
ricky1223321/mathematical
|
c65caaff514aadd7cef2a25ed474f721bba60a44
|
2d2cc457d7c1230498f6ed7eccd74fb772f57ac4
|
refs/heads/master
| 2023-06-25T02:28:42.658872
| 2021-07-30T10:31:52
| 2021-07-30T10:31:52
| 382,778,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,789
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
source("sub_ui.R")
# Top-level dashboard layout: header, sidebar navigation, and one tab body
# per distribution. The per-tab UI fragments (nor_ui, uni_ui, exp_ui,
# bin_ui, geo_ui, poi_ui) are defined in sub_ui.R.
dashboardPage(skin="blue",
  dashboardHeader(
    title = "Chebyshev Theorem"
  ),
  # Sidebar: two collapsible groups (continuous / discrete); each leaf
  # menuItem's tabName selects the matching tabItem in the body below.
  dashboardSidebar(
    sidebarMenu(
      menuItem("Continuous Distribution",tabName = "CDistribution", icon=icon("image"),
        menuItem("Normal Distribution",tabName = "nor", icon=icon("braille")),
        menuItem("Uniform Distribution",tabName = "uni", icon=icon("chart-pie")),
        menuItem("Exponential Distribution",tabName = "exp", icon=icon("chart-bar"))
      ),
      menuItem("Discrete Distribution",tabName = "DDistribution", icon=icon("image"),
        menuItem("Binomial Distribution",tabName = "bin", icon=icon("braille")),
        menuItem("Geometric Distribution",tabName = "geo", icon=icon("chart-pie")),
        menuItem("Poisson Distribution",tabName = "poi", icon=icon("chart-bar"))
      )
    )
  ),
  # Body: map each tabName to its UI fragment.
  dashboardBody(
    tabItems(
      tabItem(tabName = "nor",
        nor_ui
      ),
      tabItem(tabName = "uni",
        uni_ui
      ),
      tabItem(tabName = "exp",
        exp_ui
      ),
      tabItem(tabName = "bin",
        bin_ui
      ),
      tabItem(tabName = "geo",
        geo_ui
      ),
      tabItem(tabName = "poi",
        poi_ui
      )
    )
  )
)
|
7ad28e70566e56c29954c5acccc4eab0627d7e09
|
c1832bdea9445795c9a216f97dcdb6e3bbc02c2a
|
/R/summary.tam.jml.R
|
4c5d41370efe6ffad2b4f650771a25ebf61cf570
|
[] |
no_license
|
alexanderrobitzsch/TAM
|
c5b4a04094336adb198916d87590917f9f7f4c51
|
7fd74230a55c8296fd8865db1700009445e4e54f
|
refs/heads/master
| 2022-09-04T10:52:37.928736
| 2022-08-29T09:31:35
| 2022-08-29T09:31:35
| 95,301,463
| 16
| 10
| null | 2020-06-28T07:59:48
| 2017-06-24T14:08:58
|
R
|
UTF-8
|
R
| false
| false
| 1,932
|
r
|
summary.tam.jml.R
|
## File Name: summary.tam.jml.R
## File Version: 9.260
#***** summary for tam object
# S3 summary method for objects fitted with tam.jml (Joint Maximum
# Likelihood estimation). Prints package/session info, fit statistics, and
# the person/item parameter tables. Output can be redirected to a file.
#
# object : fitted tam.jml object
# file   : optional path; when given, output is sunk to this file
# ...    : unused (kept for S3 signature compatibility)
summary.tam.jml <- function( object, file=NULL, ...)
{
    #* open sink (redirect console output to `file` if requested)
    tam_osink(file=file)
    # section separator string reused throughout
    sdisplay <- tam_summary_display()
    cat(sdisplay)
    cat( tam_packageinfo("TAM"), "\n" )
    cat( tam_rsessinfo(), "\n\n")
    # timing information stored on the fitted object
    cat("Start of Analysis:", paste( object$time[1] ), "\n" )
    cat("End of Analysis:", paste( object$time[2] ), "\n" )
    cat("Computation time:", print( object$time[2] - object$time[1] ), "\n\n")
    cat("Joint Maximum Likelihood Estimation in TAM \n\n")
    irtmodel <- object$irtmodel
    cat("IRT Model", irtmodel )
    # print Call
    tam_print_call(object$CALL)
    cat(sdisplay)
    # model fit summary
    cat( "Number of iterations", "=", object$iter, "\n\n" )
    cat( "Deviance", "=", round( object$deviance, 2 ), " | " )
    cat( "Log Likelihood", "=", round( -object$deviance/2, 2 ), "\n" )
    cat( "Number of persons", "=", object$nstud, "\n" )
    # with a design formula (formulaA), items are "generalized items" and the
    # raw item count comes from the original response matrix
    if( ! is.null( object$formulaA) ){
        cat( "Number of generalized items", "=", object$nitems, "\n" )
        cat( "Number of items", "=", ncol(object$resp_orig), "\n" )
    } else {
        cat( "Number of items", "=", object$nitems, "\n" )
    }
    cat( "constraint", "=", object$constraint, "\n" )
    cat( "bias", "=", object$bias, "\n" )
    # person parameter summary (only printed for unidimensional models)
    obji <- object$theta_summary
    if (obji$ndim==1){
        cat(sdisplay)
        cat("Person Parameters xsi\n")
        cat( "M", "=", round( obji$M, 2 ), "\n" )
        cat( "SD", "=", round( obji$SD, 2 ), "\n" )
    }
    # item parameter tables
    cat(sdisplay)
    cat("Item Parameters xsi\n")
    obji <- object$item
    tam_round_data_frame_print(obji=obji, digits=3, from=2)
    cat(sdisplay)
    cat("Item Parameters -A*Xsi\n")
    obji <- object$item1
    tam_round_data_frame_print(obji=obji, from=2, to=ncol(obji), digits=3,
        rownames_null=TRUE)
    #** close sink
    tam_csink(file=file)
}
|
1d983c8bf9acf6a8252603fb630826fef004e2a0
|
10b673159a50495b4698cafd6e781d52cfb73437
|
/ScRNAseq.R
|
0af197cb5ad3c363c0166ac006ac1955222a03b8
|
[
"MIT"
] |
permissive
|
elifesciences-publications/MAP-C
|
abbc6937ead3edd0871365d1d4ea2fd2c379ccb4
|
6cea752c159d533ea964d8dcbf0c2e9428da95ef
|
refs/heads/master
| 2020-05-24T09:18:16.453499
| 2019-05-16T20:51:06
| 2019-05-16T20:51:06
| 187,202,975
| 0
| 0
| null | 2019-05-17T11:15:27
| 2019-05-17T11:15:27
| null |
UTF-8
|
R
| false
| false
| 18,218
|
r
|
ScRNAseq.R
|
# ScRNAseq.R
# plotting and DESeq2 analysis of S. cerevisiae RNA-seq data
# Seungsoo Kim
# directories ----
setwd("/Volumes/shendure-vol8/projects/mutagenesis.3C/nobackup/MAP-C")
dir <- "data"
out <- "figures"
# load libraries ----
library(ggplot2)
library(dplyr)
library(reshape)
library(RColorBrewer)
library(data.table)
library(grid)
library(gplots)
library(scales)
library(permute)
library(gridExtra)
library(DESeq2)
# get gene lengths
genelens <- read.table("nobackup/sacCer3_genes_length.txt")
colnames(genelens) <- c("gene", "length")

# load data
# Sample sheet lists one row per sample; one counts file per sample on disk.
samp.table <- read.table("ScRNAseq_table.txt", header=T, stringsAsFactors = F)
# Read one file to get the gene-id column, then append each sample's count
# column in sample-sheet order.
counts <- read.table(paste(dir,"/WT_1.counts.txt",sep=""),stringsAsFactors = F)
genes <- counts$V1
counts <- data.frame(counts$V1)
for (samp in samp.table$sample) {
  temp <- read.table(paste(dir,"/",samp,".counts.txt",sep=""))
  colnames(temp) <- c("gene",samp)
  temp <- temp[,2]
  counts <- cbind(counts,temp)
}
colnames(counts) <- c("gene",samp.table$sample)

# exclude non-uniquely assigned read counts
# (drops the last 5 rows -- presumably htseq-count's summary rows such as
# __no_feature/__ambiguous; confirm against the counts files)
rawcounts <- counts[-((nrow(counts)-4):(nrow(counts))),-1]
rownames(rawcounts) <- counts[1:nrow(rawcounts),]$gene

# normalize counts by total read count per sample
# then by gene length, scaled by 1e9 -- i.e. FPKM-style values
normed <- sweep(rawcounts,2,colSums(rawcounts),"/")
normed <- sweep(normed,1,genelens$length,"/")
normed <- normed*1000000000
normwgenes <- cbind(counts[1:nrow(rawcounts),1],normed)
colnames(normwgenes) <- c("gene",samp.table$sample)

# single gene bar plots ----
# colors: grey for WT plus three Set1 colors for the deletion strains
brewercols <- brewer.pal(7,"Set1")
cols=c("grey",brewercols[c(2,3,5)])
# Convert a p-value into a significance-star string.
#
# thresh: vector of increasingly stringent (decreasing) p-value cutoffs,
#         e.g. c(.05, .01, .001, .0001)
# pval:   a single p-value
#
# Returns one string with one "*" per cutoff that pval falls below
# ("" when pval clears none of them).
stars <- function(thresh, pval) {
  # vectorized count of cutoffs exceeded replaces the original scalar loop
  n_stars <- sum(pval < thresh)
  paste(rep("*", n_stars), collapse="")
}
# loop through genes: one FPKM bar plot per gene of interest, showing
# mean +/- SEM across 3 replicates per genotype, with t-test stars
genesofinterest <- c("YMR290C","YMR291W","YLR451W","YKL038W","YPR022C")
genelabs <- c("HAS1","TDA1","LEU3","RGT1","SDD4")
for (i in seq_along(genesofinterest)) {
  geneofinterest <- genesofinterest[i]
  genelab <- genelabs[i]
  # subset relevant data: columns 2-13 are WT, leu3D, sdd4D, rgt1D x 3 reps
  sub <- subset(normwgenes,gene==geneofinterest)
  # build a 4 (genotype) x 3 (replicate) table of FPKM values
  # (renamed from 'table', which shadowed base::table; also removed a dead
  # 'table = data.frame' assignment)
  tbl <- sub[,2:4]
  tbl[2,] <- sub[,5:7]
  tbl[3,] <- sub[,8:10]
  tbl[4,] <- sub[,11:13]
  colnames(tbl) <- c("r1","r2","r3")
  tbl$ave <- (tbl$r1 + tbl$r2 + tbl$r3)/3
  tbl$sd <- apply(tbl[,1:3],1,sd)
  genos <- c("WT","leu3D","sdd4D","rgt1D")
  rownames(tbl) <- genos
  tbl$geno <- factor(genos,levels=genos)
  # calculate p-values: two-sample t-test of each deletion strain vs WT,
  # converted to star strings via stars()
  thresh <- c(.05,.01,.001,.0001)
  leu.test <- t.test(tbl[1,1:3],tbl[2,1:3])
  leu.stars <- stars(thresh,leu.test$p.value)
  sdd.test <- t.test(tbl[1,1:3],tbl[3,1:3])
  sdd.stars <- stars(thresh,sdd.test$p.value)
  rgt.test <- t.test(tbl[1,1:3],tbl[4,1:3])
  rgt.stars <- stars(thresh,rgt.test$p.value)
  tbl$stars <- c("",leu.stars,sdd.stars,rgt.stars)

  # bar plot with SEM error bars and significance stars above each bar
  pdf(paste(out,"/ScRNAseq_fpkm_",geneofinterest,".pdf",sep=""),2.2,1.8)
  print(ggplot(tbl) +
    geom_text(aes(x=geno,y=(ave+sd)+max(tbl$ave)*.05,label=stars)) +
    geom_errorbar(aes(x = geno, ymin=ave-sd/sqrt(3), ymax = ave+sd/sqrt(3)),width=.2) +
    geom_bar(aes(x=geno,fill=geno,y=ave),stat="identity",color="black") +
    theme_classic() + scale_y_continuous(limits = c(0,1.1*max(tbl$ave+tbl$sd)), expand=c(0,0)) + scale_fill_manual(values = cols) + theme(text=element_text(size=8),axis.text=element_text(size=8,color="black"),legend.position="none",plot.title = element_text(hjust=0.5,face ="italic",size=8)) + xlab("") + ylab("FPKM") + ggtitle(genelab))
  dev.off()
}
# DESeq2
# Differential expression of each deletion strain vs WT, run on the raw
# (un-normalized) counts as DESeq2 expects.
samples <- read.table("ScRNAseq_table.txt",header=TRUE,stringsAsFactors = TRUE)
dds <- DESeqDataSetFromMatrix(countData = rawcounts,
                              colData = samples,
                              design = ~ genotype)
# drop genes with at most 1 read across all samples
dds <- dds[ rowSums(counts(dds)) > 1, ]
dds <- DESeq(dds)

# One results table per deletion-vs-WT contrast; keep the gene id as a
# column so it survives the merge with the gene-name table below.
leu <- data.frame(results(dds,contrast=c("genotype","leu3D","WT")))
leu$gene <- rownames(leu)
sdd <- data.frame(results(dds,contrast=c("genotype","sdd4D","WT")))
sdd$gene <- rownames(sdd)
rgt <- data.frame(results(dds,contrast=c("genotype","rgt1D","WT")))
rgt$gene <- rownames(rgt)

# attach common gene names
gene.names <- read.table("sacCer3_gene_names.txt",header=F,col.names = c("gene","name"))
leu <- merge(leu,gene.names)
sdd <- merge(sdd,gene.names)
# floor p-values reported as exactly 0 so -log10() stays finite for plotting
sdd[sdd$padj==0 & !is.na(sdd$padj),]$padj <- 10^-38
sdd[sdd$pvalue==0 & !is.na(sdd$pvalue),]$pvalue <- 10^-38
rgt <- merge(rgt,gene.names)
rgt[rgt$padj==0 & !is.na(rgt$padj),]$padj <- 10^-280
rgt[rgt$pvalue==0 & !is.na(rgt$pvalue),]$pvalue <- 10^-280
# volcano plots
# One volcano plot (log2 fold change vs -log10 adjusted p-value) per
# deletion-vs-WT contrast, with the most significant genes labelled by name.

# leu3D vs WT: label everything with -log10(padj) > 20
pdf(paste(out,"/ScRNAseq_leu3D_foldChange_volcano_padj.pdf",sep=""), 2, 2.5)
ggplot(leu) + geom_point(aes(x=log2FoldChange,y=-log10(padj)),size=0.5) + theme_classic() +
  geom_text(data=subset(leu,-log10(padj)>20),aes(x=log2FoldChange+0.2,y=-log10(padj),label=name),hjust=0,size=8*5/14) +
  xlim(min(leu$log2FoldChange),max(leu$log2FoldChange)+1.5) +
  ylab(expression(paste("-", Log[10], " adj. P-value"))) +
  xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()

# sdd4D vs WT: hand-placed labels for selected genes (offsets tuned to
# avoid overlaps)
pdf(paste(out,"/ScRNAseq_sdd4D_foldChange_volcano_padj.pdf",sep=""), 2, 2.5)
ggplot(sdd) + geom_point(aes(x=log2FoldChange,y=-log10(padj)),size=0.5) + theme_classic() +
  geom_text(data=subset(sdd,name=="YPR022C"),aes(x=log2FoldChange+0.2,y=-log10(padj),label="SDD4"),hjust=0,size=8*5/14) +
  geom_text(data=subset(sdd,name=="PCL1"),aes(x=log2FoldChange+0.2,y=-log10(padj)+1,label=name),hjust=0,size=8*5/14) +
  geom_text(data=subset(sdd,name=="COS8"),aes(x=log2FoldChange+0.05,y=-log10(padj)-1.5,label=name),hjust=0,size=8*5/14) +
  geom_text(data=subset(sdd,name=="YHL050C"),aes(x=log2FoldChange-0.2,y=-log10(padj),label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(sdd,name=="HO"),aes(x=log2FoldChange+0.2,y=-log10(padj),label=name),hjust=0,size=8*5/14) +
  geom_text(data=subset(sdd,name=="HIS1"),aes(x=log2FoldChange-0.2,y=-log10(padj),label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(sdd,name=="RPL33B"),aes(x=log2FoldChange+0.2,y=-log10(padj)+1,label=name),hjust=0,size=8*5/14) +
  geom_text(data=subset(sdd,name=="ZEO1"),aes(x=log2FoldChange+0.2,y=-log10(padj)-1,label=name),hjust=0,size=8*5/14) +
  xlim(min(sdd$log2FoldChange),max(sdd$log2FoldChange)+1) +
  ylab(expression(paste("-", Log[10], " adj. P-value"))) +
  xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()

# rgt1D vs WT: label genes with -log10(padj) > 50, with manual offsets for
# the crowded ones
pdf(paste(out,"/ScRNAseq_rgt1D_foldChange_volcano_padj.pdf",sep=""), 3, 2.5)
ggplot(rgt) + geom_point(aes(x=log2FoldChange,y=-log10(padj)),size=0.5) + theme_classic() +
  geom_text(data=subset(rgt,-log10(padj)>50 & !(name %in% c("YKR075C","HXT7","HXT4","HXT6","BOP3","IML2","MIG1","YFL054C"))),aes(x=log2FoldChange+0.25,y=-log10(padj),label=name),hjust=0,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("YKR075C","YFL054C"))),aes(x=log2FoldChange-0.25,y=-log10(padj),label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("BOP3","IML2"))),aes(x=log2FoldChange-0.25,y=-log10(padj)-3,label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("HXT6"))),aes(x=log2FoldChange-0.2,y=-log10(padj)+8,label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("MIG1"))),aes(x=log2FoldChange-0.25,y=-log10(padj)+5,label=name),hjust=1,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("HXT7"))),aes(x=log2FoldChange,y=-log10(padj)+15,label=name),hjust=0.9,size=8*5/14) +
  geom_text(data=subset(rgt,(name %in% c("HXT4"))),aes(x=log2FoldChange,y=-log10(padj)+15,label=name),hjust=0.1,size=8*5/14) +
  xlim(min(rgt$log2FoldChange),max(rgt$log2FoldChange)+2) +
  ylab(expression(paste("-", Log[10], " adj. P-value"))) +
  xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()
# non adjusted P-value volcano plots
# Same volcanoes as above but on raw (unadjusted) P-values, written as
# high-resolution PNGs (the pdf() calls were kept commented for reference).
#pdf(paste(out,"/ScRNAseq_leu3D_foldChange_volcano_pvalue.pdf",sep=""), 2, 2.5)
png(paste(out,"/ScRNAseq_leu3D_foldChange_volcano_pvalue.png",sep=""), 2, 2.5, unit="in",res=1200)
ggplot(leu) + geom_point(aes(x=log2FoldChange,y=-log10(pvalue)),size=0.5) + theme_classic() +
# Label hits above -log10(p) = 10; ALD5 and PCL1 get manual offsets.
geom_text(data=subset(leu,-log10(pvalue)>10 & !(name %in% c("ALD5","PCL1"))),aes(x=log2FoldChange+0.2,y=-log10(pvalue),label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(leu,-log10(pvalue)>10 & name=="PCL1"),aes(x=log2FoldChange+0.2,y=-log10(pvalue)+5,label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(leu,-log10(pvalue)>10 & name=="ALD5"),aes(x=log2FoldChange-0.2,y=-log10(pvalue)-5,label=name),hjust=1,size=8*5/14) +
xlim(min(leu$log2FoldChange),max(leu$log2FoldChange)+1.5) +
ylab(expression(paste("-", Log[10], " P-value"))) +
xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()
#pdf(paste(out,"/ScRNAseq_sdd4D_foldChange_volcano_pvalue.pdf",sep=""), 2, 2.5)
# sdd4-deletion volcano on raw P-values; each labeled gene is positioned
# by hand. YPR022C is relabeled with its standard name "SDD4".
png(paste(out,"/ScRNAseq_sdd4D_foldChange_volcano_pvalue.png",sep=""), 2, 2.5, unit="in",res=1200)
ggplot(sdd) + geom_point(aes(x=log2FoldChange,y=-log10(pvalue)),size=0.5) + theme_classic() +
geom_text(data=subset(sdd,name=="YPR022C"),aes(x=log2FoldChange+0.2,y=-log10(pvalue),label="SDD4"),hjust=0,size=8*5/14) +
geom_text(data=subset(sdd,name=="PCL1"),aes(x=log2FoldChange+0.2,y=-log10(pvalue)+1,label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(sdd,name=="COS8"),aes(x=log2FoldChange+0.05,y=-log10(pvalue)-1.5,label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(sdd,name=="YHL050C"),aes(x=log2FoldChange-0.2,y=-log10(pvalue),label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(sdd,name=="HO"),aes(x=log2FoldChange+0.2,y=-log10(pvalue),label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(sdd,name=="HIS1"),aes(x=log2FoldChange-0.2,y=-log10(pvalue),label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(sdd,name=="RPL33B"),aes(x=log2FoldChange+0.2,y=-log10(pvalue)+1,label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(sdd,name=="ZEO1"),aes(x=log2FoldChange+0.2,y=-log10(pvalue)-1,label=name),hjust=0,size=8*5/14) +
xlim(min(sdd$log2FoldChange),max(sdd$log2FoldChange)+1) +
ylab(expression(paste("-", Log[10], " P-value"))) +
xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()
#pdf(paste(out,"/ScRNAseq_rgt1D_foldChange_volcano_pvalue.pdf",sep=""), 3, 2.5)
# rgt1-deletion volcano on raw P-values; mirrors the padj version above
# with the same per-gene label offsets.
png(paste(out,"/ScRNAseq_rgt1D_foldChange_volcano_pvalue.png",sep=""), 3, 2.5, unit="in",res=1200)
ggplot(rgt) + geom_point(aes(x=log2FoldChange,y=-log10(pvalue)),size=0.5) + theme_classic() +
geom_text(data=subset(rgt,-log10(pvalue)>50 & !(name %in% c("YKR075C","HXT7","HXT4","HXT6","BOP3","IML2","MIG1","YFL054C"))),aes(x=log2FoldChange+0.25,y=-log10(pvalue),label=name),hjust=0,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("YKR075C","YFL054C"))),aes(x=log2FoldChange-0.25,y=-log10(pvalue),label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("BOP3","IML2"))),aes(x=log2FoldChange-0.25,y=-log10(pvalue)-3,label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("HXT6"))),aes(x=log2FoldChange-0.2,y=-log10(pvalue)+8,label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("MIG1"))),aes(x=log2FoldChange-0.25,y=-log10(pvalue)+5,label=name),hjust=1,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("HXT7"))),aes(x=log2FoldChange,y=-log10(pvalue)+15,label=name),hjust=0.9,size=8*5/14) +
geom_text(data=subset(rgt,(name %in% c("HXT4"))),aes(x=log2FoldChange,y=-log10(pvalue)+15,label=name),hjust=0.1,size=8*5/14) +
xlim(min(rgt$log2FoldChange),max(rgt$log2FoldChange)+2) +
ylab(expression(paste("-", Log[10], " P-value"))) +
xlab(expression(paste(Log[2], " Fold Change"))) + theme(text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black"))
dev.off()
# microarray comparisons ----
# load microarray data
# Compare the RNA-seq results (saturated culture, y axis) against
# exponential-phase microarray values (x axis), per the axis labels below.
# NOTE(review): header=T / stringsAsFactors=F — prefer TRUE/FALSE spelled out.
leu3ma <- read.table("microarray/leu3D.txt",header=T,stringsAsFactors = F,comment.char = "#",na.strings = "null")
# Join on gene ID; microarray rows are keyed by ID_REF.
leu3comp <- merge(leu,leu3ma,by.x="gene",by.y="ID_REF")
png(paste(out,"/ScRNAseq_leu3D_sat_v_exp_pvalue.png",sep=""), 2.2, 2.2, units="in",res=1200)
# P-value vs P-value scatter; the deleted gene (LEU3) is highlighted red.
ggplot(leu3comp) + geom_point(aes(x=-log10(P),y=-log10(pvalue),color=name=="LEU3"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated -", Log[10], " P-value"))) +
xlab(expression(paste("Exponential -", Log[10], " P-value")))
dev.off()
png(paste(out,"/ScRNAseq_leu3D_sat_v_exp_fc.png",sep=""), 2.2, 2.2, units="in",res=1200)
# Fold-change vs fold-change scatter (VALUE = microarray log2 fold change
# column — presumed from GEO-style export; verify against the input file).
ggplot(leu3comp) + geom_point(aes(x=VALUE,y=log2FoldChange,color=name=="LEU3"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated ", Log[2], " Fold Change"))) +
xlab(expression(paste("Exponential ", Log[2], " Fold Change")))
dev.off()
write.table(x=leu3comp,file = "nobackup/Leu3D_RNAfc.txt")
# Same saturated-vs-exponential comparison for the sdd4 deletion;
# the deleted gene is tracked by its systematic name YPR022C.
sdd4ma <- read.table("microarray/sdd4D.txt",header=T,stringsAsFactors = F,comment.char = "#",na.strings = "null")
sdd4comp <- merge(sdd,sdd4ma,by.x="gene",by.y="ID_REF")
png(paste(out,"/ScRNAseq_sdd4D_sat_v_exp_pvalue.png",sep=""), 2.2, 2.2, units="in",res=1200)
ggplot(sdd4comp) + geom_point(aes(x=-log10(P),y=-log10(pvalue),color=name=="YPR022C"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated -", Log[10], " P-value"))) +
xlab(expression(paste("Exponential -", Log[10], " P-value")))
dev.off()
png(paste(out,"/ScRNAseq_sdd4D_sat_v_exp_fc.png",sep=""), 2.2, 2.2, units="in",res=1200)
ggplot(sdd4comp) + geom_point(aes(x=VALUE,y=log2FoldChange,color=name=="YPR022C"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated ", Log[2], " Fold Change"))) +
xlab(expression(paste("Exponential ", Log[2], " Fold Change")))
dev.off()
write.table(x=sdd4comp,file = "nobackup/Sdd4D_RNAfc.txt")
# Same saturated-vs-exponential comparison for the rgt1 deletion.
rgt1ma <- read.table("microarray/rgt1D.txt",header=T,stringsAsFactors = F,comment.char = "#",na.strings = "null")
rgt1comp <- merge(rgt,rgt1ma,by.x="gene",by.y="ID_REF")
png(paste(out,"/ScRNAseq_rgt1D_sat_v_exp_pvalue.png",sep=""), 2.2, 2.2, units="in",res=1200)
ggplot(rgt1comp) + geom_point(aes(x=-log10(P),y=-log10(pvalue),color=name=="RGT1"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated -", Log[10], " P-value"))) +
xlab(expression(paste("Exponential -", Log[10], " P-value")))
dev.off()
png(paste(out,"/ScRNAseq_rgt1D_sat_v_exp_fc.png",sep=""), 2.2, 2.2, units="in",res=1200)
ggplot(rgt1comp) + geom_point(aes(x=VALUE,y=log2FoldChange,color=name=="RGT1"),size=0.5) +
scale_color_manual(values=c("black","red")) + theme_classic() +
theme(legend.position="none",text=element_text(size=8,color="black"), axis.text=element_text(size=8,color="black")) +
ylab(expression(paste("Saturated ", Log[2], " Fold Change"))) +
xlab(expression(paste("Exponential ", Log[2], " Fold Change")))
dev.off()
write.table(x=rgt1comp,file = "nobackup/Rgt1D_RNAfc.txt")
# Ad-hoc interactive exploration. Ordering by `gene == <id>` sorts the
# gene of interest last so its point is drawn on top of the others.
rgt1reorder <- rgt[order(rgt$gene=="YMR291W"),]
ggplot(rgt1reorder) + geom_point(aes(x=log2FoldChange,y=-log10(padj),color=gene=="YMR291W"))
rgt1reorder <- rgt[order(rgt$gene=="YMR290C"),]
ggplot(rgt1reorder) + geom_point(aes(x=log2FoldChange,y=-log10(padj),color=gene=="YMR290C"))
sdd4reorder <- sdd[order(sdd$gene=="YMR291W"),]
ggplot(sdd4reorder) + geom_point(aes(x=log2FoldChange,y=-log10(padj),color=gene=="YMR291W"))
# Spot-check normalized expression for individual genes.
subset(normwgenes,gene=="YLR451W")
subset(normwgenes,gene=="YPR022C")
subset(normwgenes,gene=="YKL038W")
# Significance cuts used to pull hit lists interactively.
subset(leu,padj<.0000001)
subset(sdd,padj<.00000001 & log2FoldChange < -0.9)
rgt1_hits <- subset(rgt,padj<.00000001 & log2FoldChange < -2)
GOI <- "YMR291W"
# NOTE(review): the assignment above is dead — immediately overwritten
# below; looks like leftover interactive exploration.
GOI <- "YBR020W"
counts <- counts[order(counts$gene == GOI),]
# Replicate-vs-replicate scatter plots of raw counts, log-log scale.
ggplot(counts) + geom_point(aes(x=asy_r1,y=asy_r2)) + scale_x_log10() + scale_y_log10()
ggplot(counts) + geom_point(aes(x=asy_r1,y=asy_r3)) + scale_x_log10() + scale_y_log10()
ggplot(counts) + geom_point(aes(x=asy_r1,y=gal_r1,color=gene==GOI)) + scale_x_log10() + scale_y_log10()
# Rebuild the raw count matrix from per-sample count files
# (<sample>.counts.txt, two columns: gene id, count).
genes <- read.table("genes2.info",header=TRUE)
data <- data.frame(genes$gene_id)
samples <- read.table("samples.txt",header=TRUE,stringsAsFactors = FALSE)
for (i in samples$samp) {
counts <- read.table(paste(i,".counts.txt",sep=""))
# Successive inner merges keep only genes present in every sample file.
# NOTE(review): repeated merge in a loop is O(n * samples) passes; a
# single Reduce(merge, ...) would be equivalent and clearer.
data <- merge(data,counts,by.x="genes.gene_id",by.y="V1")
}
colnames(data) <- c("gene",samples$samp)
countswgenes <- merge(genes,data,by.x="gene_id",by.y="gene")
# Move gene ids into rownames and drop the id column, leaving a pure
# numeric count matrix for DESeq2.
rownames(data) <- data$gene
data[,1] <- NULL
# joint ----
# Joint DESeq2 model over all samples with design ~ pas + sgn + dg
# (factor semantics defined in samples2.txt).
samples <- read.table("samples2.txt",header=TRUE,stringsAsFactors = FALSE)
dds <- DESeqDataSetFromMatrix(countData = data,
colData = samples,
design = ~ pas + sgn + dg)
# Drop genes with zero or one total read across all samples.
dds <- dds[ rowSums(counts(dds)) > 1, ]
dds <- DESeq(dds)
res <- data.frame(results(dds))
# NOTE(review): the line above is immediately overwritten — only the
# explicit pas n-vs-y contrast below is actually used.
res <- data.frame(results(dds, contrast = c("pas","n","y")))
summary(res)
# raw ----
# RPKM-style normalization of the raw counts (columns 11:34 hold the
# per-sample counts): divide by library size (column sums), then by gene
# length (end - start), then scale by 1e9.
normed <- sweep(countswgenes[,11:34],2,colSums(countswgenes[,11:34]),"/")
normed <- sweep(normed,1,countswgenes$end-countswgenes$start,"/")
normed <- normed*1000000000
normwgenes <- cbind(genes,normed)
|
4b2eea5f271a859335bfbdc34dd9aaac0b3a607a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DMwR/examples/learner-class.Rd.R
|
28b6b392fb6cbae86c31bf6b60ca42fcc7ca196f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
learner-class.Rd.R
|
# Auto-extracted (genthat) example from the DMwR package's help page for
# the "learner" S4 class; simply prints the class definition.
library(DMwR)
### Name: learner-class
### Title: Class "learner"
### Aliases: learner learner-class show,learner-method
### Keywords: classes
### ** Examples
showClass("learner")
|
9b57f5ba779a271bc34d798e19982a609bb6a10b
|
9d0e613597f8829edb92d69aa7edff3a2d403ecc
|
/man/hook_global_step_waiter.Rd
|
8ed156e2b1d028926c1b9869cf58dc7fc56be61e
|
[] |
no_license
|
cran/tfestimators
|
52d81322245381915ac74c556c17f5032defe2f6
|
2daf8fc062f8288fea6a05a5d56b62804fa79e33
|
refs/heads/master
| 2021-08-27T16:24:09.668239
| 2021-08-09T21:30:02
| 2021-08-09T21:30:02
| 114,889,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,096
|
rd
|
hook_global_step_waiter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/session_run_hooks_builtin_wrappers.R
\name{hook_global_step_waiter}
\alias{hook_global_step_waiter}
\title{Delay Execution until Global Step Reaches to \code{wait_until_step}.}
\usage{
hook_global_step_waiter(wait_until_step)
}
\arguments{
\item{wait_until_step}{An integer giving the global step to wait for before execution proceeds.}
}
\description{
This hook delays execution until global step reaches to \code{wait_until_step}. It
is used to gradually start workers in distributed settings. One example usage
would be setting \code{wait_until_step=int(K*log(task_id+1))} assuming that
\code{task_id=0} is the chief.
}
\seealso{
Other session_run_hook wrappers:
\code{\link{hook_checkpoint_saver}()},
\code{\link{hook_history_saver}()},
\code{\link{hook_logging_tensor}()},
\code{\link{hook_nan_tensor}()},
\code{\link{hook_progress_bar}()},
\code{\link{hook_step_counter}()},
\code{\link{hook_stop_at_step}()},
\code{\link{hook_summary_saver}()},
\code{\link{session_run_hook}()}
}
\concept{session_run_hook wrappers}
|
cfb1c0e95035eeb8eeb169f42d110d0cd44f0831
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/codecommit_batch_describe_merge_conflicts.Rd
|
20d2286b950ee11494f8feeb15fccfcd89bda302
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 2,553
|
rd
|
codecommit_batch_describe_merge_conflicts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codecommit_operations.R
\name{codecommit_batch_describe_merge_conflicts}
\alias{codecommit_batch_describe_merge_conflicts}
\title{Returns information about one or more merge conflicts in the attempted
merge of two commit specifiers using the squash or three-way merge
strategy}
\usage{
codecommit_batch_describe_merge_conflicts(
repositoryName,
destinationCommitSpecifier,
sourceCommitSpecifier,
mergeOption,
maxMergeHunks = NULL,
maxConflictFiles = NULL,
filePaths = NULL,
conflictDetailLevel = NULL,
conflictResolutionStrategy = NULL,
nextToken = NULL
)
}
\arguments{
\item{repositoryName}{[required] The name of the repository that contains the merge conflicts you want to
review.}
\item{destinationCommitSpecifier}{[required] The branch, tag, HEAD, or other fully qualified reference used to
identify a commit (for example, a branch name or a full commit ID).}
\item{sourceCommitSpecifier}{[required] The branch, tag, HEAD, or other fully qualified reference used to
identify a commit (for example, a branch name or a full commit ID).}
\item{mergeOption}{[required] The merge option or strategy you want to use to merge the code.}
\item{maxMergeHunks}{The maximum number of merge hunks to include in the output.}
\item{maxConflictFiles}{The maximum number of files to include in the output.}
\item{filePaths}{The path of the target files used to describe the conflicts. If not
specified, the default is all conflict files.}
\item{conflictDetailLevel}{The level of conflict detail to use. If unspecified, the default
FILE_LEVEL is used, which returns a not-mergeable result if the same
file has differences in both branches. If LINE_LEVEL is specified, a
conflict is considered not mergeable if the same file in both branches
has differences on the same line.}
\item{conflictResolutionStrategy}{Specifies which branch to use when resolving conflicts, or whether to
attempt automatically merging two versions of a file. The default is
NONE, which requires any conflicts to be resolved manually before the
merge operation is successful.}
\item{nextToken}{An enumeration token that, when provided in a request, returns the next
batch of the results.}
}
\description{
Returns information about one or more merge conflicts in the attempted merge of two commit specifiers using the squash or three-way merge strategy.
See \url{https://www.paws-r-sdk.com/docs/codecommit_batch_describe_merge_conflicts/} for full documentation.
}
\keyword{internal}
|
331fa3bfaa1d55ccf3809e6db23e93bd0ef55e34
|
30954ed2c633319c48a9542b957dd82fbcbb636b
|
/MilesNesbit/Code/modeling/simulated.R
|
e7f5efcb9493b68762e335df775d9618d63b761c
|
[] |
no_license
|
zongyi2020/CMEEProject
|
ae057530c39a637df2159e44efca6f4af917c0d5
|
1d86ed13218ab1e30e3da31b96321b71f38cea2a
|
refs/heads/master
| 2023-08-02T16:12:43.159300
| 2021-09-20T11:21:49
| 2021-09-20T11:21:49
| 384,176,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 964
|
r
|
simulated.R
|
# Dependencies for the simulated trade-off analysis.
# NOTE(review): prefer library() over require() for mandatory deps —
# require() only returns FALSE on failure instead of erroring.
require("minpack.lm") # for Levenberg-Marquardt nlls fitting
library(tidyverse)
# NOTE(review): ggplot2/dplyr/stringr/broom are already attached by
# tidyverse, and growthcurver is attached twice below — redundant loads.
library(ggplot2)
library(dplyr)
library(growthcurver)
library(stringr)
library(taxize)
library(nls.multstart)
library(zoo)
library(broom)
library(growthcurver)
library(cowplot)
library(psych)
# MASS::select masks dplyr::select once MASS is attached — presumably
# intentional order here; verify if select() misbehaves downstream.
library(MASS)
library(fitdistrplus)
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged —
# they clobber the caller's session and hard-code a machine-local path.
rm(list=ls())
graphics.off()
setwd("/home/nesbit/Desktop/Data_RVK/Code")
# Parameters for the Boltzmann-style curve below: baseline B_0,
# Boltzmann-like constant k, activation energy E, evaluation grid t.
B_0 = 10
k = 1
E = 2.5
t = 1:100
# Boltzmann/Arrhenius-style scaling curve: B_0 * exp(-E / (k * t)).
# Vectorized over its arguments via standard R recycling.
B0_funct <- function(t, k, E, B_0) {
  scaling <- exp(-(E / (k * t)))
  B_0 * scaling
}
# Evaluate and plot the curve over the grid t.
y_pred = B0_funct(t, k, E, B_0)
plot(t, y_pred)
# Intercept, random x draws in 0..100, and slope for the linear trade-off.
# NOTE(review): no set.seed() — the sampled x values are not reproducible.
b=100
x = (sample.int(101,size=100,replace=TRUE)-1)
m=-1
# Linear trade-off line y = m * x + b; vectorized over x.
trade_funct <- function(m, x, b) {
  b + m * x
}
# Predicted K from the linear trade-off, truncated to integers, then a
# jittered scatter with a fitted lm line as the "expected" r-vs-K figure.
y_pred = trade_funct(m,x,b)
y_pred = as.integer(y_pred)
x1= as.integer(x)
fakedata <- data.frame(y_pred, x1)
ggplot(fakedata, aes(x=x1,y=y_pred))+
geom_point(position= 'jitter', size= 3)+
ggtitle("Expected r vs K tradeoff")+
xlab('predicted r')+
ylab('predicted K')+
geom_smooth(method='lm', formula= y~x)
|
2331ade1a3eb9aa5b1891e46cb1495fb3526a8f2
|
9d8b86b2a20d5fd3c31a3bce56e7f52312187be1
|
/tests/tests.R
|
d475b1e9113ce08bb7130178dedf39f0922b4a24
|
[] |
no_license
|
hms-dbmi/Rcupcake
|
d4141be5394de83340f476392defa11477fda1ee
|
2f87f7c771ceb0da7813a90529c973e1f028b6e8
|
refs/heads/master
| 2022-01-17T06:03:15.038438
| 2019-07-02T23:44:11
| 2019-07-02T23:44:11
| 81,849,992
| 2
| 5
| null | 2018-04-06T15:36:32
| 2017-02-13T17:08:40
|
HTML
|
UTF-8
|
R
| false
| false
| 4,504
|
r
|
tests.R
|
#' automated testing script
#'
#' structure for the tests object:
#'
#' list(
#' "https://domain/url/" = list(
#' apiKey = "./path/to/file/containing/apiKey",
#' tests = list(
#' "Title of the test" = list(
#' request = function(url, verbose){ .... return(result)},
#' result = an R object corresponding to the expected result of the request
#' )
#' )
#' )
#'
## Console color helpers --------------------------------------------------
## ANSI SGR codes for each message category.
escape.colors <- list(
  title = "34;4",
  info = "32",
  warn = "33",
  error = "35;1" # "31"
)
## Factory: returns a printer that wraps its argument in the ANSI code
## registered under `color` in escape.colors. (Despite the dotted name,
## this is a plain closure factory, not an S3 print method.)
print.color <- function(color) {
  function(str) {
    cat(paste0("\033[", escape.colors[color], "m", str, "\033[0m\n"))
  }
}
print.title <- print.color("title")
print.info <- print.color("info")
print.warn <- print.color("warn")
print.error <- print.color("error")
## end of helper functions for colored output
## Remove every active output diversion, however deeply sinks are nested,
## leaving sink.number() at zero.
sink.reset <- function() {
  while (sink.number() > 0) {
    sink(NULL)
  }
}
## performs all the tests specified as argument
## `domainsToTest` is a named list keyed by base URL; each entry may carry
## an apiKey and/or token plus a named list of tests, where each test has
## a request function (called with url, verbose) and an expected `result`.
## Relies on setToken/setApiKey/start.session/end.session defined
## elsewhere in this package.
## NOTE(review): verbose = F should be spelled FALSE; sapply() is used
## purely for side effects here (lapply or a for loop would be clearer).
test <- function(domainsToTest, verbose = F){
## for each domain to test:
sapply(names(domainsToTest), function(url){
domain <- domainsToTest[[url]]
print.title(paste("ββββββββββββββββ ", url ," ββββββββββββββββ"))
# Clear any credentials left over from a previous domain.
setToken(NULL)
setApiKey(NULL)
if(!is.null(domain$apiKey)){
cat("Api Key authentication\n")
end.session(url, verbose = FALSE)
# setApiKey(readChar(domain$apiKey, file.info(domain$apiKey)$size))
start.session(url = url, apiKey = domain$apiKey)
}
if(!is.null(domain$token)){
cat("token authentication\n")
setToken(domain$token)
}
## read the apiKey from the specified filepath
## key <- readChar(domain$apiKey, file.info(domain$apiKey)$size)
## start the session
## cat(paste(start.session(url, key),"\n"))
## for each test to be performed for that url:
sapply(names(domain$tests), function(title){
t <- domain$tests[[title]]
cat(paste0("\nTesting '", title, "'..."))
tryCatch({
## suppress any of it's output (we just want the result)
# NOTE(review): "/dev/null" is not portable to Windows ("nul" there);
# sink(nullfile()) would be portable on R >= 3.6.
if(!verbose) sink("/dev/null")
## get the request's result
r <- t$request( url = url , verbose = T)
}, error = function(e){
# NOTE(review): if the request errors before `r` is assigned, this
# superassignment climbs past the closure and writes a global `r`,
# which identical() below then picks up — fragile; confirm intent.
r <<- e ## if there is an error, assign it to the result of the test
})
## disable the "output suppressing"
sink.reset()
## check if the test yielded the same result as what was expected
ok = identical(t$result, r)
## log the result
if(ok){
print.info("ok")
}else{
print.error("failed")
print.error("### Test failed ###")
print.warn("Expected:")
print(t$result)
print.warn("Got:")
print(r)
}
})
print.title("ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ")
})
cat("finished testing.\n")
}
## ## helper function to automatically add url and verbose as parameter to a function,
## ## and to clear the cache so that the tests always start with a clean cache
## f <- function(f, ...) function(url, verbose){
## cache.creation()
## f(..., url=url, verbose=verbose)
## }
## tests <- list(
## "https://pmsdn-dev.hms.harvard.edu/" = list(
## token = "<insert token here>",
## tests = list(
## "Listing the resources" = list(
## request = function(url, verbose){
## get.children.updated("", url = url, verbose = verbose)
## },
## result = c("/PMSDN-dev")
## ),
## "Searching for Demographics" = list(
## request = function(...){
## search.path("Demographics", ...)
## },
## result = "//Demo/demographics/demographics/"
## )
## )
## )
## )
## sampleTest <- function() test(tests)
|
f050ab696ef20720997a0f69fa7445897faf4d2b
|
d75108cdb6fd8a92732dca459a0971a62512e9f8
|
/OnFraudDashboard/config.R
|
d3c00dc41455cf09d5ff85d5fb57438b0a0d7048
|
[] |
no_license
|
datalavidaloca/onfraud
|
cd39a3e7d865533f9dfbd6951045b3b98c5e3d6e
|
a886b73fe5b25fa9d54c100735368022594e89a7
|
refs/heads/master
| 2020-03-09T06:11:56.160897
| 2018-07-07T16:07:09
| 2018-07-07T16:07:09
| 128,632,944
| 0
| 13
| null | 2018-07-07T10:19:02
| 2018-04-08T11:13:21
|
HTML
|
UTF-8
|
R
| false
| false
| 1,265
|
r
|
config.R
|
# This file encapsulates all of the app's inputs and libraries.
# TRUE when `paquete` matches any cell of the installed.packages() matrix
# (in practice, an installed package's name).
is.installed <- function(paquete) {
  paquete %in% installed.packages()
}
# Install any dependency that is not already present (CRAN unless noted).
if(!is.installed('SPARQL'))
  install.packages('SPARQL')
if(!is.installed('ggplot2'))
  install.packages('ggplot2')
if(!is.installed('dplyr'))
  install.packages('dplyr')
if(!is.installed('rJava'))
  install.packages('rJava')
if(!is.installed('shiny'))
  install.packages('shiny')
if(!is.installed('shinydashboard'))
  install.packages('shinydashboard')
if(!is.installed('plotly'))
  install.packages('plotly')
if(!is.installed('devtools'))
  install.packages('devtools')
# BUG FIX: the installed package is named 'bubbles', not 'jcheng5/bubbles'
# (that is only the GitHub repo slug), so the old check was always FALSE
# and the package was re-installed from GitHub on every run.
if(!is.installed('bubbles'))
  devtools::install_github("jcheng5/bubbles")
if(!is.installed('reshape2'))
  install.packages('reshape2')
if(!is.installed('DT'))
  install.packages('DT')
if(!is.installed('properties'))
  install.packages('properties')
# Attach all runtime dependencies.
library (SPARQL)
library(ggplot2)
library (dplyr)
library (rJava)
library(shiny)
library(shinydashboard)
library(plotly)
# NOTE(review): dplyr is attached twice (also above) — harmless but redundant.
library(dplyr)
library(bubbles)
library(reshape2)
library(DT)
library(properties)
# App-wide configuration (endpoint / rdf) read from config.properties;
# consumed by the accessor functions below.
myProps <- read.properties("config.properties")
# Accessor: the SPARQL endpoint URL read from config.properties
# (the global `myProps` loaded above).
devolver.endpoint <- function() {
  myProps$endpoint
}
# Accessor: the RDF location read from config.properties
# (the global `myProps` loaded above).
devolver.rdf <- function() {
  myProps$rdf
}
|
b6cdcbd7f10b9f54abeae351d79812db771022fb
|
a16a90cb2397aad560398dd01654cc6ef05f1e99
|
/man/as.matrices.inits.Rd
|
47bd28f79eb6554ba72fcb68161c8ab12203615a
|
[] |
no_license
|
cran/nonmemica
|
b98d928f4c48516d4b9ed3bf1b03e1c340f22c27
|
78cbbfdd93de9d71a1853ed116c2c0220fd086b8
|
refs/heads/master
| 2023-05-26T07:30:31.476950
| 2023-05-10T06:40:07
| 2023-05-10T06:40:07
| 88,920,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 554
|
rd
|
as.matrices.inits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{as.matrices.inits}
\alias{as.matrices.inits}
\title{Coerce to Matrices from Inits}
\usage{
\method{as.matrices}{inits}(x, ...)
}
\arguments{
\item{x}{inits}
\item{...}{ignored}
}
\value{
matrices
}
\description{
Coerces to matrices from inits. Non-block inits is expanded into list of matrices.
}
\seealso{
Other as.matrices:
\code{\link{as.matrices.records}()},
\code{\link{as.matrices}()}
}
\concept{as.matrices}
\keyword{internal}
|
5c95dee170e74a98b12599b7444eca56b3f3ca07
|
e9d2bae436f3f9486845f75c11835fc327919b62
|
/source/05_interactivos_mapas_cluster.R
|
3323ff426204e3f2adeff2a85d02373f3a19e10a
|
[] |
no_license
|
landerlezcano/robo_transporte
|
7287cb6708eac135c1026c8ffc36255f8730ec49
|
34f0f1e0e911a7f51c677d0fb8797b981e89a766
|
refs/heads/master
| 2022-12-02T02:32:53.391905
| 2020-08-11T15:08:05
| 2020-08-11T15:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,827
|
r
|
05_interactivos_mapas_cluster.R
|
##########INTERACTIVOS Y MAPAS CLUSTERS##########
# Interactive (plotly) map of the worst transit routes by theft count,
# plus a CSV keeping the single worst route per borough
# (fields.alcaldiahechos). Relies on project globals: bbox,
# peores_rutas, lineas_for_ggplot.
mapa_df<-get_stamenmap(bbox = bbox,maptype = "terrain-background")
head(peores_rutas)
# Polygons for the worst routes only; the `text`/`text1` aesthetics feed
# the plotly tooltip selected below.
mapa_peores_rutas<-ggmap(mapa_df,maprange = F)+
geom_polygon(data=lineas_for_ggplot[lineas_for_ggplot$id_dataset %in% peores_rutas$id_dataset,],
aes(x=long, y=lat,group=group,text=paste('Desc. ruta: ', detalle,'Calle: ', fields.calle),
text1=n_robos, fill=fields.alcaldiahechos,color=log(n_robos+1)), alpha=0.3)+
scale_color_viridis(option = 'magma')+
guides(color=F)+
#scale_fill_viridis(option = 'magma')+
theme_void()+
theme(legend.text = element_blank(), legend.title = element_blank())
mapa_peores_rutas<-ggplotly(mapa_peores_rutas,
tooltip = c('text','text1', 'color'))
htmlwidgets::saveWidget(mapa_peores_rutas,file = paste0(getwd(),"/out/", 'mapa_peores_rutas.html'))
# Keep only the single worst route per borough.
peores_rutas<-split(peores_rutas,peores_rutas$fields.alcaldiahechos )
peores_rutas<-lapply(peores_rutas, function(x) head(x, 1))
# BUG FIX: dplyr::rbind_list() is defunct (removed from dplyr);
# bind_rows() is its documented replacement and row-binds the list of
# one-row data frames identically.
peores_rutas<-dplyr::bind_rows(peores_rutas)
write.csv(peores_rutas, './out/peores_rutas.csv')
# Convex hull per cluster of theft points. `data` is a Spatial* object
# (note the @data slot); cluster 0 is treated as noise and excluded.
puntos_clust<-data[data@data$cluster>0,]
# gConvexHull (presumably rgeos — confirm against the script's library
# calls) builds one hull polygon per cluster id.
conv_hull_clust<-lapply(split(puntos_clust,puntos_clust$cluster),
function(x) gConvexHull(x, byid = F,id =unique(x$cluster) ))
conv_hull_clust<-do.call(rbind, conv_hull_clust)
# Static methods figure: hulls colored per cluster, all points underneath.
png(filename = './out/metodo_clusters.png')
plot(conv_hull_clust, col=2:length(conv_hull_clust))
points(puntos_clust,col=rgb(0,0,0,0.3))
points(data,col=rgb(0,0,0,0.1))
dev.off()
data_clusters_lineas<-over(conv_hull_clust, lineas, returnList = T)
n_robos_cluster<-lapply(data_clusters,
function(x) data.frame(n_robos=nrow(x)))
data_clusters<-lapply(data_clusters,
function(x) data.frame(aggregate(data=x,
cbind(diasemana,
hora,
fields.mes,
fields.ao,
fields.coloniahechos,
fields.calle)~1,getmode)))
data_clusters_lineas<-lapply(data_clusters_lineas,
function(x) data.frame(aggregate(data=x,
descrip~1,getmode)))
n_robos_cluster<-rbindlist(n_robos_cluster)
data_clusters<-rbindlist(data_clusters)
data_clusters_lineas<-rbindlist(data_clusters_lineas)
data_clusters<-cbind.data.frame(data_clusters, data_clusters_lineas, n_robos_cluster)
data_clusters$id<-as.character(1:nrow(data_clusters))
conv_hull_clust<-fortify(conv_hull_clust)
conv_hull_clust<-left_join(conv_hull_clust, data_clusters, by='id')
mapa_df<-get_stamenmap(bbox = bbox, maptype = "terrain-lines")
head(conv_hull_clust)
###HTML CLUSTERS###
# Interactive cluster map: hull fill encodes log theft count; the `text`
# aesthetic becomes the plotly tooltip with the per-cluster modes.
mapa_hora_calle<-ggmap(mapa_df)+
geom_polygon(data=conv_hull_clust, aes(x=long, y=lat, group=group,
fill=log(n_robos),
text=paste("AΓ±o con mΓ‘s robos:",fields.ao,
"\n Mes con mΓ‘s robos:", fields.mes,
"\n Hora con mΓ‘s robos:", paste0(hora, ':00'),
"\n DΓa con mΓ‘s robos:", diasemana,
"\n Colonia con mΓ‘s robos:", fields.coloniahechos,
"\n Ruta:", descrip
)))+
scale_fill_viridis(option='magma')+
theme_void()+theme(legend.title = element_blank())+
ggtitle('Clusters de robos en transporte pΓΊblico')
mapa_hora_calle<-ggplotly(mapa_hora_calle, tooltip = c('text', 'text1') )
htmlwidgets::saveWidget(mapa_hora_calle,
paste0(getwd(),"/out/",'info_clusters.html'))
###gif CLUSTERS###
# One PNG frame per cluster (later assembled into a gif): the current
# cluster is highlighted via the alpha aesthetic and its summary table is
# annotated onto the map.
# NOTE(review): the inner subscript below uses the character ids as ROW
# indices — this looks like it was meant to be
# !duplicated(conv_hull_clust$id); verify against the intended output.
info_tabla<-conv_hull_clust[!duplicated(conv_hull_clust[conv_hull_clust$id,]),]
vars_tooltip<-c('fields.ao',
'fields.mes',
'hora',
'diasemana',
'fields.coloniahechos',
'fields.calle',
'descrip')
# NOTE(review): prefer seq_len(nrow(info_tabla)) — 1:nrow() misbehaves
# when the table is empty.
for( i in 1:nrow(info_tabla)){
# Transpose row i into a two-column Variable/Mode table for annotation.
label_tabla<-list(as.data.frame(t(info_tabla[i,vars_tooltip])))
label_tabla[[1]]<-cbind.data.frame(c('AΓ±o con mΓ‘s robos:',
'Mes con mΓ‘s robos:',
'Hora con mΓ‘s robos',
'DΓa con mΓ‘s robos:',
'Colonia con mΓ‘s robos:',
'Calle con mΓ‘s robos:',
'Ruta con mΓ‘s robos:'), label_tabla[[1]])
names(label_tabla[[1]])<-c('Variable','Moda')
mapa_hora_calle_gif<-ggmap(mapa_df)+
# id == i compares character ids to the numeric loop index; R coerces i
# to character, so this works for simple "1".."n" ids.
geom_polygon(data=conv_hull_clust, aes(x=long, y=lat, group=group,
fill=log(n_robos),
alpha=id==i))+
# geom "table" annotation requires ggpmisc (assumed loaded elsewhere).
annotate(geom = "table",
x = min(conv_hull_clust$long),
y = min(conv_hull_clust$lat)-0.02,
label = label_tabla,
vjust = 1, hjust = 0)+scale_fill_viridis(option='magma')+scale_alpha_discrete(guide=F)+
theme_void()+theme(legend.title = element_blank(), legend.text=element_blank())+
ggtitle('Clusters de robos en transporte pΓΊblico')
ggsave(filename = paste0('./out/gif_mapa/', 'mapa_clusters_',i, '.png' ), plot = mapa_hora_calle_gif,device = 'png')
}
|
9f981554f30867912331635c7a824d085f077c76
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LearnBayes/examples/discint.Rd.R
|
9a7406e57d55fb4c9462ca454e0e47772d39f19c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
discint.Rd.R
|
# Auto-extracted (genthat) example from the LearnBayes::discint help page:
# highest-probability interval for a discrete distribution.
library(LearnBayes)
### Name: discint
### Title: Highest probability interval for a discrete distribution
### Aliases: discint
### Keywords: models
### ** Examples
# Binomial(10, 0.3) support and probabilities.
x=0:10
probs=dbinom(x,size=10,prob=.3)
dist=cbind(x,probs)
pcontent=.8
# Interval of support points containing at least 80% probability.
discint(dist,pcontent)
|
0089060f0595cd83e2464f3e102c199f7e509273
|
c26cfdfbd623d89a0b9f9903d2246dd86ad4b828
|
/man/get_list_entries.Rd
|
c88017d3e3d08c4948427b272b756386293efb3f
|
[
"MIT"
] |
permissive
|
jfontestad/msgraphr
|
0699c657e0982ec354afe4d3b2ff4db4d8346345
|
c5c8bcd9b8bf3a3cc4be14e55a64ba9f902901ad
|
refs/heads/main
| 2023-02-24T11:45:18.081728
| 2020-05-31T20:52:51
| 2020-05-31T20:52:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 445
|
rd
|
get_list_entries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lists.R
\name{get_list_entries}
\alias{get_list_entries}
\title{Fetch the items in a given list}
\usage{
get_list_entries(site_id, list_id, token)
}
\arguments{
\item{site_id}{Site ID from `get_sites()`}
\item{list_id}{List ID from `get_lists()`}
\item{token}{httr oauth token}
}
\value{
A tibble
}
\description{
Fetch the items in a given list
}
\examples{
NULL
}
|
f98f20fa622c365511700610eb09a9a99c8e70aa
|
e4b5aeb86e8281cd9e7a06fcac98717531bcdba2
|
/R/Pipes.R
|
e85b8d8d5a58403675073d06cbca7d6964091483
|
[] |
no_license
|
mikebesso/two.laws.big.bang
|
79e1854892003a9e812140bfe4c921f1f40abe46
|
0aa82e0b15c1937e69a6d07848350f1e440f06e5
|
refs/heads/master
| 2021-09-19T13:11:22.009474
| 2018-07-28T03:00:54
| 2018-07-28T03:00:54
| 108,021,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
Pipes.R
|
#' Forward pipe operator, re-exported from magrittr.
#' @export
`%>%` <- magrittr::`%>%`

#' Assignment pipe operator, re-exported from magrittr.
#' @export
`%<>%` <- magrittr::`%<>%`
|
0a71f78b98e0152f0619fc920edefcb21e7772f7
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/oce/inst/testfiles/do_sfm_enu/do_sfm_enu_output/log_f367b07f7396f49b2a1dd8ebc0ac460396a5714e/do_sfm_enu-test.R
|
ae5e03625053381135721463190d0847e373093a
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,742
|
r
|
do_sfm_enu-test.R
|
testlist <- list(forward = c(2.502977741546e+141, 7.45452538180369e+300, 5.18461137311734e-140, 2.74389139535678, 6.60827021883223e-70, 1.35725361359648e+261, 4.21056025972608e+254, 4.23675080103059e-255, -1.53898542807685e+26, -2.55546547685742e+227, -4.13880875640446e+243, -1.03560263150895e+260, 1.28119872607888e-163, 3.36108874752027e-243, -5.50149508981152e-23, 2.4702828717709e-188, 4.48484442441918e-88, 5.89470425781855e+126, -1.41081834474353e+202, Inf, Inf, -5.68895368039673e-79, NA, 5.4501254635534e-275, 3.88477914363064e+109, 3.07566686186827e+178, -1.68625271461472e+163, -5.39454889796411e+95, 4.6924177573991e+21, 2.38909028404242e-21, -1.28480239707346e+72, -1.86710574393593e+32, -8.74602359486842e-144, -2.3428874968234e+39, -107852850.952686, -6.13713690761429e-276, 1.41811630388987e+150, 5.43875721716216e+104, -3.85561289829503e+291, 4.06008639062476e-106, -2.15082279897801e+52, NaN, NaN, 9.03191097812305e+76, 0), heading = c(8.62139113971651e-146, -8.35181569585297e+233, -1.20258242633688e-36, 7.75210114282688e+251, 1.82878386335827e+277, 1.08180815345071e+185, -1.26301129234159e+280, -1.46058451153238e-235, 2.85505054679674e-119, -1.540189795519e-157, -Inf, -8.48347111705065e+228, 1.69861233739197e-160, -2.2752060869096e+148, -0.000457799239889749, 3.14604059735217e-223, 1.55255305699581e-306, 1.52991601510354e+303, 2.06002548888494e+146, -4.43905956026972e-213, -4.62960108669836e-29, -1.29582803454692e+291, 4.84179361572312e-214, 7.10186336412867e-109, -1.38826600124418e-193, 6.19433999022615e-61, -1.93158946649319e-93, 1.55572814505845e+216, -5.81780727906157e-21, -3.42020067944593e-248, 1.39821303531217e-216, 1.92500470499193e-101, 2.5334488236927e-79, 1.04896023332078e-299, 0.00156041819153103, 1.11130495995612e-23, -1.40444614239617e+23, NA, Inf, -1.20258242633688e-36, 4.09398699188861e+299, 2.93823694010376e-170, -1.20856032337344e+159, 0), mast = c(-8.89150803580391e+224, -7.47659190282837e+247, 1.0969845410619e+29, 1.69762171863387e-303, 
-5.04864755680372e-148, 1.35323801676114e-245, -4.94572885057068e+93, -9.47993823167004e-11, 7.89585983106992e-196, 8.11125389534661e-96, -8.28547243406735e-224, -3.81457839205416e+282, -2.394873945438e+189, -2.48309559733213e+50, 3.26223457163231e+68, -Inf, 2.16565722855369e+66, 5.74931744129138e+205, 1.86111493525952e-18, -8.12236126298792e-26, -6.61656620867731e-135, 2.52204942558444e-162, 2.30106099131228e+58, 8.52380228405149e-07, 2.50717042845533e+216, -5.50794861185814e+189, 2.29463729177528e+265, 2.10828560362605e-151, -Inf, 1.43554457078014e+294, 1.06679386072578e-286, 6.17827641270229e+114, -1.30124811167447e+207, -2.84297625945868e+253, 6.52444568379181e-110, NA, 1.02378655557143e-289, -1.12584135867228e-38, 4.52571265668765e+70, -1.57671810403628e-105, -1.2379200683967e-172, 8.79176654178356e+174, 2.28660900605098e+144, -451.423784296419, 7.71086581696097e+125, 1.93563623836942e+226, 4.46739731721526e-274, -1.86926613458982e+69, 4.48861197123203e-214, 8.84194597849751e-265, -2.91102359565316e+137, 1.24829603972435e-113, 4.7041800511905e-31, 4.00925567681481e-307, 1.71223840861478e+251, 3.54794483687798e+53, 3.40413388732935e-69, -3.64724273339852e+55, 1.32875259586659e-228, -1.34156999647176e+54, 0.000763001428431873, -1.05357907778488e-274, -8.49333756792562e+86, 6.36536636761505e+261, 1.32169695347006e-64, -2.38330779888546e-122, -4.0633451758192e+70, 4.54635800941842e-232, -2.33028349617245e+173, 1.16996927250585e+288, 1.45071803816547e+114, -3.51955001284059e-90, -2.67428084809594e-278, 1.04968085066058e-57, 7.8440622037118e+124, 5.81930568375958e+249, 501205131128.794, 6.71415854087589e-300, 3.08149619918387e-174, 4.44114621959349e+201, 6.48561971521085e+144, NA, -1.56084484135639e+170, -3.19271748732218e-57, -1.72458423652777e-87, 8.0561095497035e+117, 5.42762357505888e-199, -321.315369490552, -1.73145022995882e-90, 5.29203866231918e+299, 0), pitch = c(1.48373473466254e-87, -2.41475060315082e+130, 1.630078353713e+218, 3.8077299757652e-107, 
-1.1565820878806e-140, 6.24173697354392e-161, -5.65863433332104e+97, -2.34274285754284e-70, -5.76902811885023e-211, NA, 8.95568067919461e-19, -3.24347794850632e-253, -1.36068810151454e+37, -Inf, -6.31236541455472e-233, -4.65285309511337e-280, 3.37298809244973e+225, -1.07036056731098e-05, 1.71079973966237e-74, -4.36956136033165e-82, 4.19932750770942e-180, 1.63550257089383e+94, -2.76261299646892e+184, -3.59090364629436e+306, -Inf, -1.28218271404175e+306, -3.51045605542958e-149, 3.95750799704263e+257, -1.15370403349377e+247, 1.43676589440789e-189, 2.00120944167022e-177, -7.83303266749557e+298, -9.30080238111955e-125, 1.35747628387762e+172, -2.76625197984398e-05, 8.53152530028094e-225, -2.29344592631656e-231, 6.6417386359657e+154, Inf), roll = c(1.82906621803126e+82, 1.99182501062716e-103, 1.67156068558946e+67, -1.19502614234829e-179, -6.2566746123712e-56, -2.34965338413172e-109, 3.00823005792124e-73, 6.31352130500499e+100, -1.34624255396906e+255, 5.56845181576314e-303, 1.15221969444439e+102, 1.13199429678615e+260, 3.50780101943188e+90, 7.56222012212534e+24, 7.57312608672084e+33, -1.95009391441696e+116, -3.3024070191398e+33, 1.92840372887768e-212, 2.55051837964119e-231, -8.83256577180467e+231, 1.43601855154239e+75, 1.41002528960909e-289, -9.69003770188556e-209, -3.64629996728017e-104, 7.31593896057699e+298, 7.99947332615795e-127, 2.09478500342449e-63, -Inf, -2.81263811220103e-164, -3.28537338603915e-106, 1.03871883424727e+228, 9.3611038180995e+31, 5.36833773155154e+94, -2.64980958939058e-27, 3.59384767482631e-299, -9.23675632755848e+45, 1.98799823299543e-245, -9.95484047786635e-82, 1.38163935558191e-260, -3.21502627798089e-214, 1.82020167655479e-212, NaN, 9.62676772413136e+50, 1.20983633932682e+196, 7.22040558021518e+158, 7.66889200916266e+258, -4.11510762486643, -1.16084770168401e+280, -3.05967806642446e+21, 2.44827714141275e+147, 2.88446565076697e-133, 6.01084826083138e-86, -6.62588812434651e+191, -2.47669404942089e+165, -2.39177038400832e+294, 6.9328190746185e-241, 
-6.46763923323219e-139, NA, NaN, 5.92797846905022e-177, -9.44422225692537e-204, 7.49275039411235e+107, 6.50515938564288e-139, 1.73970846260068e+307, -21233827766879821824, -16.7243324751059, 2.05528653689689e+185, -4.30463300390968e+29, -2.10606412190982e+147, 1.80286838733867e+64, -2.86789134713398e+24, -6.75397508706628e-120, -5.09197389388889e-303, 5.46722393937458e-223, 4.8761405482581e-132, -5.73393933647357e-154, 1.47499979505499e+82, -1.40470252989435e+91, -3.15366281062197e+233, -1.59795853312039e+70, -7.65446987299469e+168, 2.62112534495228e-128, -1.09303495750823e+279, -3.20990599907471e+117, 2.21880256423521e-20, 1.8177332127351e-264, 6.91983061582036e-39, 3.50603921701982e-276, 2.07828368529083e-150, 2.12971947928817e+307, -1.76038036075637e-108, 1.52263916184888e-199, -5.96279243413186e-189, 61678626183682.6, 0), starboard = c(-9.93313504309018e-192, -9.86265330207457e+269, -1.47389160005251e-303, NaN, Inf, -1.71058174811154e-140, 2.20716138767635e+162, 7.61261925450494e-87, -9.87691308284778e+191, Inf, Inf, 9.61043493279053e-122, 3.57553570258615e-291, -6.52112535695878e-204, -5.04654771731155e+124, 1.60743160297872e+71, 3.02118101876876e+91, -Inf, 2.71549806881454e+175, -4.13094912034429e+147, 1.57742948861221e+209, -3.49856977188194e+73, 0.0115590403822605, -2.01202783453036e-171, 1.87292395682105e+291, 1.06352086921855e+257, -5.07454327386354e+51, 2.62034245223914e-292, 1.5329931048056e+249, 7.774981700901e+53, 7.40267253523232e+264, 4.94695581550188e+264, -7.34285413880742e+222, 7.86894840752533e-22, 1.18206694542085e-25, 2.4769600005573e+98, 0))
# Auto-generated fuzz harness: call oce's internal C routine do_sfm_enu with
# the extreme/NA/Inf argument vectors built above, and print the structure of
# whatever comes back (the point is "does not crash", not a specific value).
result <- do.call(oce:::do_sfm_enu,testlist)
str(result)
|
eb95e02a20907b60288b64c501cf956fc45a9bcf
|
a7972df8b00072ee0717bffc523cb9b6f7c96469
|
/man/silhouette_plot.Rd
|
9429353a741e1e7a2b4f3524ec7329fdd0fd3e14
|
[] |
no_license
|
suzanbaert/analysistools
|
7b897fe87b2642e515a3b2e3b0da446d84389aa3
|
10154c9213799f74a1b7ac3efb95442492b40aad
|
refs/heads/master
| 2020-03-22T07:52:34.116960
| 2018-07-12T16:13:59
| 2018-07-12T16:14:42
| 139,730,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 449
|
rd
|
silhouette_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{silhouette_plot}
\alias{silhouette_plot}
\title{Clustering - generate silhouette plot}
\usage{
silhouette_plot(data, kmax = 10)
}
\arguments{
\item{data}{dataframe}
\item{kmax}{number of clusters to check}
}
\description{
Generate a plot to understand the number of clusters using the average silhouette width
}
\examples{
silhouette_plot(iris[-5])
}
|
89abcf9346b67686844c0da19e0e6a906cf1f390
|
2808927b279973e925cfdd0a15112a9a72982490
|
/microchannels/server.R
|
e63d041dbe48c816a6852aab07f84d4abb4f0af4
|
[
"Apache-2.0"
] |
permissive
|
pierrerogy/channels
|
c1fb67fe6aa03a1636d1a88b9c5790964ef05e85
|
3f8c28a257eb64afe21c7b0b24acc724821e9249
|
refs/heads/master
| 2023-07-21T15:05:23.373701
| 2023-07-10T21:40:46
| 2023-07-10T21:40:46
| 221,331,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,359
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(tidyverse)
library(viridis)
library(cowplot)
library(gridGraphics)
library(data.table)
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(here)
source(here::here("functions.R"))
# Lists of mosquito variables unique to experiment 2 -------------------------------
# Per-individual mosquito responses: in the server logic below these are drawn
# as point plots (point_blank_plot) while all other responses get line plots.
mosq_vars <-
  c("Mosquito death", "Mosquito pupation", "Mosquito emergence",
    "Time to death", "Time to pupation", "Time to emergence",
    "Larval length at death (mm)", "Dry mass at emergence (mg)",
    "Average wing length of adult (mm)")
# Build server ------------------------------------------------------------
# Define server logic required to draw a plots
# Server logic: loads the two experiments' data once per session, then wires
# up four identical UI panels (inputs y1..y4 / x1..x4 / facet1..facet4 /
# experiment1..experiment4 feeding outputs plot1..plot4).
#
# The four reactive-dataset + renderPlot pairs were previously four verbatim
# copies differing only in the suffix digit; they are now built from a single
# pair of helpers, so a fix applies to every panel at once. The registered
# output names (plot1..plot4) are unchanged.
shinyServer(function(input, output) {

  # ---- Data ---------------------------------------------------------------
  # Experiment 1
  exp1 <-
    read.csv(here::here("appdata",
                        "bromeliad_tax_exp.csv")) %>%
    dplyr::rename(temperature_C = temp,
                  bromsquito = vessel,
                  pH = ph) %>%
    ## Make dissolved inorganic nitrogen column
    dplyr::mutate(DIN = no2 + no3 + nh4) %>%
    ## Remove bromeliads in second part of the experiment
    ## that received the wrong subsidy
    dplyr::filter(visit_id %in% c("1", "2", "3", "H0", "H1",
                                  "H4", "H8", "H24", "H48") |
                    (visit_id %in% c("4", "5", "6") &
                       subsidy %in% c("litter_only_litter_only",
                                      "litter_feces_litter_feces"))) %>%
    ## Put value of subsidy in subsidy_1 for time series data
    dplyr::mutate(subsidy_1 = ifelse(is.na(subsidy_1),
                                     subsidy, subsidy_1)) %>%
    ## Change name of subsidy column
    dplyr::select(-subsidy) %>%
    dplyr::rename(subsidy = subsidy_1)

  # Experiment 2 (weekly measurements)
  exp2 <-
    read.csv(here::here("appdata",
                        "weekly_measurements_exp2.csv")) %>%
    dplyr::rename(bromsquito = larvae) %>%
    ## Make dissolved inorganic nitrogen column, make subsidy category same as exp1
    dplyr::mutate(DIN = no2 + no3 + nh4,
                  subsidy = ifelse(subsidy == "litter",
                                   "litter_only", subsidy))

  # Individual mosquito measurements
  mosquitoes <-
    read.csv(here::here("appdata",
                        "mosquitoes.csv")) %>%
    ## Add average of two wings
    dplyr::mutate(wing_length = (left_wing_mm + right_wing_mm)/2)

  # ---- Panel helpers ------------------------------------------------------
  # Fetch the input belonging to panel k, e.g. panel_input("y", 2) == input$y2.
  panel_input <- function(name, k) {
    input[[paste0(name, k)]]
  }

  # Reactive dataset for panel k; same role as the former plotK_dats objects.
  panel_dats <- function(k) {
    reactive({
      get_those_dats(
        y          = panel_input("y", k),
        x          = panel_input("x", k),
        facet_par  = panel_input("facet", k),
        experiment = panel_input("experiment", k),
        exp1       = exp1,
        exp2       = exp2,
        mosquitoes = mosquitoes)
    })
  }

  # renderPlot for panel k, drawing from the reactive `dats`.
  panel_plot <- function(k, dats) {
    renderPlot({
      y_sel     <- panel_input("y", k)
      x_sel     <- panel_input("x", k)
      facet_sel <- panel_input("facet", k)
      exp_sel   <- panel_input("experiment", k)
      # Start from a blank plot so nothing errors while inputs are unselected
      p <- blank_plot(dats()) +
        ggtitle(exp_sel)
      # Only draw data once something is actually selected
      if (nrow(dats()) > 1) {
        ## Mosquito-level responses are point plots; everything else is lines
        if (y_sel %notin% mosq_vars) {
          p <- line_blank_plot(p, dats(), y_sel, facet_sel, exp_sel)
        } else {
          p <- point_blank_plot(p, dats(), y_sel, exp_sel)
        }
      }
      # Attach axis labels and return the finished plot
      p +
        ylab(get_y_label(y_sel)) +
        xlab(get_x_label(x_sel, y_sel))
    })
  }

  # ---- Wire up the four panels --------------------------------------------
  # local() + a copied index keeps each iteration's k from being captured by
  # reference (all panels would otherwise see k == 4).
  for (k in 1:4) {
    local({
      kk <- k
      output[[paste0("plot", kk)]] <- panel_plot(kk, panel_dats(kk))
    })
  }
})
|
c745b01388daf3bc37b63166acb3c8b49ac976d3
|
f879702824c2f5aed88b883ae57ce9e3804e44ec
|
/man/mybin.Rd
|
d6f1314e0cb1e8c8de5b38369661a973178782bb
|
[] |
no_license
|
sergiogarcia29/MATH4753PROJ1
|
bbbca4e73d49ad3dfaaff2246ee72651bf7c7759
|
a435b3a09e4e94fd1f9aa304badb2c95805b2270
|
refs/heads/master
| 2023-01-20T19:02:04.106132
| 2020-11-19T16:42:22
| 2020-11-19T16:42:22
| 294,966,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
mybin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mybin.R
\name{mybin}
\alias{mybin}
\title{A binomial simulation}
\usage{
mybin(iter = 100, n = 10, p = 0.5)
}
\arguments{
\item{iter}{the number of iterations}
\item{n}{the number of Bernoulli trials}
\item{p}{the probability of a success in each trial}
}
\value{
returns a barplot of relative frequencies and a table of the same
}
\description{
A binomial simulation
}
\examples{
mybin() # Runs the simulation with the default parameters
}
|
223719280c6d1b4593b3cade29c1214922cdac4a
|
dc07182714f57d992e99687b083e0bcec4091f07
|
/R/subset.R
|
94a861c02e9fda4db6ff7f596fd956881f6c109b
|
[] |
no_license
|
disenodc/projections
|
30a89cb7f767c376ebac9e626ff4531529154658
|
3a5b65da540f19bd84b932afc0771cf30a221f74
|
refs/heads/master
| 2022-10-10T15:52:46.780685
| 2020-06-03T15:28:04
| 2020-06-03T15:28:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,071
|
r
|
subset.R
|
#' Subsetting 'projections' objects
#'
#' Two functions can be used to subset projections objects. The operator "[" can
#' be used as for matrices, using the syntax \code{x[i,j]} where 'i' is a subset
#' of dates, and 'j' is a subset of simulations.
#'
#' @author Thibaut Jombart \email{thibautjombart@@gmail.com}
#'
#'
#' @rdname subset
#'
#' @aliases "subset.projections" "[.projections"
#'
#' @seealso The \code{\link{project}} function to generate the 'projections'
#' objects.
#'
#' @param x An projections object, generated by the function
#' \code{\link{project}}.
#'
#' @param from The starting date; data strictly before this date are discarded.
#'
#' @param to The ending date; data strictly after this date are discarded.
#'
#' @param sim (optional) The simulations to retained, indicated as subsets of
#' the columns of x.
#'
#' @param ... Further arguments passed to other methods (not used).
#'
#' @export
#' @param i a subset of dates to retain
#' @param j a subset of groups to retain
"[.projections" <- function(x, i, j){
  ## Missing indices default to TRUE, i.e. keep everything on that margin.
  if (missing(i)) i <- TRUE
  if (missing(j)) j <- TRUE
  mat <- as.matrix(x)[i, j, drop = FALSE]
  ## Recover the dates matching the retained rows: character indices are
  ## matched against the formatted dates; anything else indexes positionally.
  if (is.character(i)) {
    kept  <- which(as.character(get_dates(x)) %in% rownames(mat))
    dates <- get_dates(x)[kept]
  } else {
    dates <- get_dates(x)[i]
  }
  ## Rebuild a projections object, preserving the 'cumulative' flag.
  new_projections(mat, dates, attr(x, "cumulative"))
}
#' @export
#' @rdname subset
subset.projections <- function(x, ..., from = NULL, to = NULL,
                               sim = TRUE){
  ## We need to make sure the comparison with dates is going to work. As for the
  ## [ operator, 'from' and 'to' are assumed to be expressed in the same way as
  ## the attr(x, "dates").
  dates <- attr(x, "dates")
  if (is.null(from)) {
    from <- min(dates, na.rm = TRUE)
  }
  if (is.null(to)) {
    to <- max(dates, na.rm = TRUE)
  }
  ## NA dates previously yielded NA entries in the logical index, which either
  ## injected NA-filled rows via x[to.keep, ] or made the sum() guard below
  ## error on `if (NA < 1)`. Rows with unknown dates cannot satisfy the date
  ## range, so exclude them explicitly.
  to.keep <- !is.na(dates) & dates >= from & dates <= to
  if (sum(to.keep) < 1) {
    stop("No data retained.")
  }
  x[to.keep, sim]
}
|
8f715af4fea962385fffc3dd9288168880c74e68
|
440ad9e927eee7e0080e05a602eada7b8ca645ac
|
/man/SC_Stats_Capture_Table.Rd
|
7f5ea38f4546308092729e74c1614721ab75f4b7
|
[] |
no_license
|
jae0/SCtagging
|
517de7d5ce6d58153af877d5eb7c828092707344
|
bcf5e885bc932657da43643b367c91541f834408
|
refs/heads/master
| 2023-02-24T17:56:40.806931
| 2021-01-21T13:15:59
| 2021-01-21T13:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
SC_Stats_Capture_Table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TagWebGUI.r
\name{SC_Stats_Capture_Table}
\alias{SC_Stats_Capture_Table}
\title{SC_Stats_Capture_Table}
\usage{
SC_Stats_Capture_Table(are = "", years = "", region = "SoctianShelf")
}
\value{
message to webpage
}
\description{
Function generates stats in Table format for display on webpage
}
|
0f02784c1e1468e076a040cdbd413daecf1ecfee
|
c9638f11bce8e138685a4a3643c59c05c488993b
|
/3_Modeling/21_pargonaut.R
|
f9d9ea39d8a3cd848228fd00cbfba33769bf6eb5
|
[] |
no_license
|
illusive-git/FunctionalCompboostAppendix
|
7856d68bfd23be8168e3ab21af0d167641418892
|
335ccf692acd9e04fe8412efe7ff1a6838c116ce
|
refs/heads/master
| 2020-05-07T20:49:52.847099
| 2019-04-13T13:01:32
| 2019-04-13T13:01:32
| 180,879,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,265
|
r
|
21_pargonaut.R
|
# ======================================================================================= #
library(purrr)
library(data.table)
library(dplyr)
library(ggplot2)
library(FDboost)
library(lubridate)
library(reshape2)
library(pspline)
library(urltools)
library(bda)
library(hexbin)
library(RColorBrewer)
library(parallel)
# ======================================================================================= #
# Parameters
MAX_CPC = 10
TARGET = "SESSIONS" # "SESSIONS", "TRANSACTION_REV", "TRANSACTION"
DATASET = "FULL" # "FULL" "NO_PRODUCT"
CORES = 10L
# ======================================================================================= #
# set up local cluster
# set up colour
rf <- colorRampPalette(rev(brewer.pal(11,'Spectral')))
r <- rf(32)
# ======================================================================================= #
options(stringsAsFactors = TRUE)
# get custom losses
source("x_custom_loss.R")
# get tie_fighter
source("x_tie_fighter.R")
# get filter_by_grid
source("x_filter_by_grid.R")
# get step function from vector
source("x_stepf.R")
# Preparators
source("x_preparators.R")
# Load the dataset matching the requested TARGET. Fail fast on an unsupported
# value: "TRANSACTION" is listed as an option in the parameter section above
# but has no data file, and the old if/if chain silently left `dis` undefined,
# producing a confusing "object 'dis' not found" much later in the script.
dis <- switch(TARGET,
  SESSIONS        = fread("~/data/SESSION_FINAL.csv"),
  TRANSACTION_REV = fread("~/data/TRANSCAT_REV_FINAL.csv"),
  stop("Unsupported TARGET: ", TARGET)
)
gc()
# ======================================================================================= #
# Filtering
# Drop rows above the CPC cap (optional category/device filters left disabled).
dis %>%
  # filter(Category %in% c("TRAVEL","BEAUTY_AND_FITNESS","HEALTHCARE","AUTOMOTIVE")) %>%
  # filter(ga.deviceCategory != "tablet") %>%
  filter(ga.CPC < MAX_CPC) ->
  dis_1
# Feature Engineering: bucket female_ratio into terciles (1, 2, 3).
dis_1 %>%
  mutate(female_ratio_cat = as.numeric(cut_number(female_ratio, 3))) ->
  dis_1
rm(dis)
gc()
# Construct a grid of all unique combinations; runner() below is invoked once
# per row of this grid.
key_list = expand.grid(ga.viewId = unique(dis_1$ga.viewId))
# Cast data.table; var_list drives the per-variable aggregation in runner()
# and the transpose step after the parallel run.
dis_1 = as.data.table(dis_1)
var_list = colnames(dis_1)
# ======================================================================================= #
setwd("plots/")
# Now put build lists, that contain all the values corresponding to the grid values
# i.e. list objects k belong to grid combination in key_list[k,]
# Worker executed (in parallel) once per row of key_list: filters dis_1 down
# to that grid combination, builds a cumulative response curve over CPC, and
# aggregates every other column to a single representative value.
# Returns a named list, or NULL when the combination has too few rows.
# NOTE(review): relies on globals dis_1, key_list, var_list, TARGET and the
# sourced helpers filter_by_grid/tie_fighter/stepf -- confirm they are all in
# scope before running under mclapply (forked workers inherit them on Linux).
runner = function(i){
  # Build container to return everything
  i_result = list()
  # Filter data according to grid elements key_list[i,]
  dis_1 %>%
    filter_by_grid(target = ., v_grid = key_list, v_row = i) %>%
    tie_fighter(tie.var = "ga.CPC") %>% # FIXME add weights for CPC?
    arrange(`ga.CPC`) %>%
    # cumulate
    mutate(ga.sessions = cumsum(ga.sessions),
           ga.transactionRevenue = cumsum(ga.transactionRevenue)) ->
    dis_c
  # Test if number of rows sufficient and enough unique values for spline creation
  if(nrow(dis_c) > 20){
    # Report number of rows in each variable combination
    # NOTE(review): references key_list$ga.campaign, which does not exist when
    # key_list only contains ga.viewId (as built above) -- verify.
    print(paste("key_list:",i,
                "| Website:", suffix_extract(domain(dis_c$Website[1]))[3],
                "| Campaign: <", substr(key_list$ga.campaign[i],1,20),
                "> | Rows:", nrow(dis_c),
                "| NAs.", sum(is.na(dis_c))
    )
    )
    # Smooth targets: step function for counts, spline for revenue
    if(TARGET == "SESSIONS"){
      i_result[["smoother"]] = stepf(input_x = dis_c$ga.CPC, input_y = dis_c$ga.sessions, fun = mean)
    }
    if(TARGET == "TRANSACTION_REV"){
      i_result[["smoother"]] = smooth.spline(x = dis_c$`ga.CPC`, y = dis_c$normalized_revenue, cv = FALSE)
    }
    # Now concatenate all the other variables accordingly
    for(var_i in var_list){
      # mean for numerics - be careful what the current level is!!!
      if(is.numeric(dis_c[1,var_i])){
        i_result[[var_i]] = mean(dis_c[[var_i]], na.rm = TRUE)
      } else{
        # else use most frequent value (a named, length-1 table)
        i_result[[var_i]] = sort(table(dis_c[[var_i]]), decreasing = TRUE)[1]
      }
    }
    # Save used variables
    i_result[["legend"]] = paste(as.character(lapply(key_list[i,], as.character)), collapse = " ")
    i_result[["n_data_points"]] = nrow(dis_c)
  } else{
    return(NULL)
  }
  return(i_result)
}
gc()
################# ########## ########## ########## #################
# Parallel execute: one runner() call per grid row in key_list,    #
# then collect the per-combination records from the forks.         #
################# ########## ########## ########## #################
result_list = mclapply(X = seq_len(nrow(key_list)), FUN = runner,
                       mc.cores = getOption("mc.cores", CORES))
# Drop grid combinations that runner() skipped (returned NULL).
# BUG FIX: the former `result_list[-which(sapply(result_list, is.null))]`
# emptied the ENTIRE list whenever no element was NULL, because
# x[-integer(0)] selects zero elements.
result_list = Filter(Negate(is.null), result_list)
# Transpose the list of records into one entry per variable
# (list-of-records -> record-of-lists), as FDboost expects.
result_list_t = list()
for(i_var in var_list){
  collector = list()
  for(i_obs in seq_along(result_list)){
    collector = c(collector, result_list[[i_obs]][[i_var]])
  }
  if(is.null(names(collector))){
    result_list_t[[i_var]] = unlist(collector)
  } else {
    # Categorical variables were stored as named 1-element tables: keep the
    # level name (the most frequent value), not its count.
    result_list_t[[i_var]] = names(collector)
  }
}
# Add f and t: fun_mat / fun_grid are presumably built by the sourced helper
# scripts (not visible here) -- TODO confirm.
result_list_t$f = fun_mat
result_list_t$t = fun_grid
result_list = result_list_t
# cut out smoother objects
smoothers = result_list$smoother
result_list$smoother = NULL
rm(result_list_t)
rm(dis_1)
gc()
# ======================================================================================= #
# Plot every raw response curve (one line per grid combination) to PDF.
# fun_grid is the common CPC evaluation grid, fun_mat one curve per row;
# both are presumably created by the sourced helpers -- TODO confirm.
pdf("fun_data.pdf", width = 7, height = 4)
plot(fun_grid, fun_mat[1,], type = "l", ylim = c(min(fun_mat),max(fun_mat)), col = 1,
     sub = paste("Dataset:", DATASET),
     xlab = "CPC", ylab = TARGET)
# NOTE(review): 2:nrow(fun_mat) misbehaves if fun_mat has a single row.
for(p in 2:nrow(fun_mat)){
  lines(fun_grid, fun_mat[p,], col = p)
}
dev.off()
# ======================================================================================= #
# Model Call
# Small model Website level
FD_call_1 = list(formula = f ~ 1 +bolsc(Category, df = 2),
                 timeformula = ~bbs(t, df = 4), data = result_list, family = Gaussian())
# Large Model Website level
FD_call_2 = list(formula = f ~ 1 +bolsc(Category, df = 2) + bbsc(uniqueProducts, df = 4) + bbsc(meanBasketSize, df = 4) +
                   bbsc(female_ratio, df = 4) + bbsc(X18.24, df = 4)+ bbsc(X25.34, df = 4)+ bbsc(X35.44, df = 4)+ bbsc(X45.54, df = 4)+
                   bbsc(X55.64, df = 4)+ bbsc(X65., df = 4)+ bbsc(meanBasketVariety, df = 4)+ bolsc(account_level, df = 2) ,
                 timeformula = ~bbs(t, df = 4), data = result_list, family = Gaussian())
# Device specific model
# NOTE(review): the second do.call immediately overwrites the small model;
# only the large model (FD_call_2) is ever used downstream.
FD_mod = do.call(FDboost, FD_call_1)
FD_mod = do.call(FDboost, FD_call_2)
# Selected variables
selected(FD_mod)
# Cross validate mstop (2-fold, single grid point, timed)
folds_sof = cv(weights = model.weights(FD_mod), type = "kfold", B = 2)
start_time = Sys.time()
cvm_sof = cvrisk(FD_mod, folds = folds_sof, grid = c(300), papply = lapply)
end_time = Sys.time()
end_time - start_time
mstop(cvm_sof)
#fd_model_val = applyFolds(FD_mod, folds = cv(rep(1, length(unique(FD_mod$id))), B = 3), papply = mclapply, grid = c(1,5,10))
#mstop(fd_model_val)
# NOTE(review): the cvrisk result above is discarded here and mstop is pinned
# to 300 -- intentional shortcut, apparently, but worth confirming.
cvm_sof = 300
# Final Model call with fixed stopping iteration
FD_call_2$control = boost_control(mstop = 300)
FD_cv = do.call(FDboost, FD_call_2)
sel = selected(FD_cv)
table(names(FD_cv$baselearner)[sel])
# ======================================================================================= #
# Predict & Plot
# BUG FIX: a second setwd("plots/") used to run here. The working directory
# was already changed to plots/ earlier in the script, so this either chdir'd
# into a nested plots/plots or errored outright. All outputs below land in
# the same plots/ directory as before.
var_selected = list()
# build new data of all combinations of the base learners' primary covariates
for(l in seq_along(FD_cv$baselearner)){
  var_selected[[l]] = FD_cv$baselearner[[l]]$get_names()[1]
}
input_df = lapply(var_selected, function(x){a = unlist(unique(result_list[[x]])); return(a)})
names(input_df) = var_selected
# BUG FIX: `input_df[-which(sapply(input_df, is.null))]` emptied input_df
# whenever no element was NULL (x[-integer(0)] selects nothing).
input_df = Filter(Negate(is.null), input_df)
griddata = as.list(expand.grid(input_df))
griddata$t = fun_grid
prediction = predict(FD_cv, griddata)
# Scale to positivity
# prediction = prediction - abs(min(prediction))
# sum up
# prediction = t(apply(X = prediction, MARGIN = 1, FUN = cumsum))
#
# One predicted curve per covariate combination
pdf("fd_model_pred.pdf", width = 7, height = 4)
plot(fun_grid, prediction[1,], type = "l", ylim = c(min(prediction),max(prediction)), col = 1,
     sub = paste("Dataset:", DATASET),
     xlab = "CPC", ylab = TARGET, main = "Predictions")
for(p in 2:nrow(prediction)){
  lines(fun_grid, prediction[p,], col = p)
}
# closer label
# title(ylab = "Transaction Revenue", mgp = c(1,1,0))
# legend
legend("bottomright", legend = unlist(input_df), fill = 1:nrow(prediction), cex = 0.75)
dev.off()
# Dump coefficients and the standard FDboost diagnostic plots
FD_cv_coef = coef(FD_cv)
capture.output(FD_cv_coef,file = "FD_cv_coefs.txt")
pdf("FD_cv.pdf", width = 7, height = 4)
plot(FD_cv, ask = FALSE, pers = FALSE, xlab = "max CPC")
dev.off()
pdf("FD_cv_pred.pdf", width = 7, height = 4)
plotPredicted(FD_cv, ask = FALSE)
dev.off()
pdf("FD_cv_res.pdf", width = 7, height = 4)
plotResiduals(FD_cv, ask = FALSE)
dev.off()
print("Done")
|
f7a561f4820cf80f95af1ca903a836bd3baff665
|
a50c46a2fbfb62c1051f7c24c3b4c359d949840a
|
/R/prompts.R
|
581d357cbb0d7938d5c7b5a8fb114cf1aebc6c6b
|
[] |
no_license
|
rcodo/prompt
|
6958eb88f04f139370dd649edda7613d93990b0b
|
b332c42631c16eb8701f3efbe6efd5794a922ab4
|
refs/heads/master
| 2021-10-24T18:19:12.349287
| 2019-03-27T09:56:36
| 2019-03-27T09:56:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
r
|
prompts.R
|
prompt_runtime_factory <- function() {
  # Closure state: the proc.time() snapshot taken after the previous top-level
  # expression. Seeded with NAs so the very first prompt shows no timing.
  last <- proc.time()
  last[] <- NA_real_
  function(...) {
    spent <- proc.time() - last
    # CPU time = everything in the proc.time vector except wall-clock elapsed
    cpu <- sum(spent) - spent[["elapsed"]]
    last <<- proc.time()
    if (is.na(cpu) || cpu <= 1) {
      "> "
    } else {
      # Only bother showing the time once it exceeds one second
      paste0(round(cpu), "s > ")
    }
  }
}
#' A prompt that shows the CPU time used by the last top level expression
#'
#' @param ... Arguments, ignored.
#'
#' @family example prompts
#' @export
prompt_runtime <- prompt_runtime_factory()
#' A prompt that shows the status (OK or error) of the last expression
#'
#' @param expr Evaluated expression.
#' @param value Its value.
#' @param ok Whether the evaluation succeeded.
#' @param visible Whether the result is visible.
#'
#' @importFrom clisymbols symbol
#' @family example prompts
#' @export
prompt_error <- function(expr, value, ok, visible) {
  # Tick for success, cross for failure, followed by the pointer glyph
  mark <- if (ok) symbol$tick else symbol$cross
  paste0(mark, " ", symbol$pointer, " ")
}
prompt_error_hook <- function() {
  # Refresh the prompt to reflect the failed expression, then fall through to
  # whatever error handler was installed before us (function or quoted call).
  update_prompt(expr = NA, value = NA, ok = FALSE, visible = NA)
  prev <- prompt_env$error
  if (is.function(prev)) {
    prev()
  } else if (is.call(prev)) {
    eval(prev)
  }
}
prompt_memuse_factory <- function() {
  # Closure state: last observed memory size and its unit
  size <- 0
  unit <- "MiB"
  function(...) {
    mem <- memuse::Sys.procmem()[[1]]
    size <<- memuse::mu.size(mem)
    unit <<- memuse::mu.unit(mem)
    paste0(round(size, 1), " ", unit, " ", symbol$pointer, " ")
  }
}
#' Example prompt that shows the current memory usage of the R process
#'
#' @param ... Ignored.
#'
#' @family example prompts
#' @export
prompt_memuse <- prompt_memuse_factory()
|
82b6c2e88ccde83207f78bbf39bbf7a08515e316
|
7c5d573c8ff95422259654a05dcd9c23f79ae7d6
|
/RProject/4.R
|
41bc48f3789632e3dd71b6148cb558a78c7fd228
|
[] |
no_license
|
projBaseball/projBaseball
|
ca3b3d15b75a1cfb8d93a9aed5f95cd5d1dba343
|
9ce1360acb7a3d8499f920341ab6a3bfc1ff2ed8
|
refs/heads/master
| 2020-06-25T11:34:24.571999
| 2016-11-23T22:10:14
| 2016-11-23T22:10:14
| 74,617,393
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,404
|
r
|
4.R
|
# Interactive R tutorial transcript: lists vs data frames, apply family,
# merging, factors, and tapply. Meant to be run line by line at the console.

# --- Lists of data frames and row-binding ---
x <- list(data.frame(name = "foo", value = 1), data.frame(name = "bar", value = 2))
x
unlist(x)
x <- list(data.frame(name = "foo", value = 1), data.frame(name = "bar", value = 2))
# Stack list elements into one data frame
do.call(rbind, x)
y <- list(data.frame(name = "foo", value = 1), data.frame(name = "bar", value = 2))
y
x <- data.frame(name = "foo", value = 1)
x

# --- lapply vs sapply on the iris columns ---
lapply(iris[, 1:4], mean)
str(iris)
iris
head(iris)
tail(iris)
tail(iris, 10)
class(iris)
lapply(iris[, 1:4], mean)
sapply(iris[, 1:4], mean)
# sapply simplifies to a vector; lapply always returns a list
class(sapply(iris[, 1:4], mean))
class(lapply(iris[, 1:4], mean))
x <- sapply(iris[, 1:4], mean)
as.data.frame(x)
# t() transposes, giving a one-row data frame
as.data.frame(t(x))
help(t)
y <- sapply(iris[, 1:4], function(x) {x > 3})
class(y)
head(y)

# --- Data frame indexing: [[ ]], $, [ , ] ---
kids <- c("jack", "kim")
ages <- c(24, 21)
d <- data.frame(kids, ages, stringsAsFactors = FALSE)
d
class(kids)
d[[1]]
class(d[[1]])
d$kids
d[, 1]
# Single-bracket with one index keeps the data-frame class
d[1]
class(d[1])
d[, 2]

# --- merge(): the four join types ---
kids <- c("jack", "jill", "laura")
ages <- c(12, 10, 19)
dfa <- data.frame(kids, ages, stringsAsFactors = FALSE)
kids <- c("alice", "jill", "laura")
state <- c("ma", "ny", "ca")
dfb <- data.frame(kids, state, stringsAsFactors = FALSE)
dfa
dfb
merge(dfa, dfb)# merge on shared key values only (inner join)
merge(dfa, dfb, all = TRUE)# keep every row from both tables (full outer join)
merge(dfa, dfb, all.x = TRUE)# keep all rows of the left table dfa (left join)
merge(dfa, dfb, all.y = TRUE)# keep all rows of the right table dfb (right join)
firstname <- c("alice", "jill", "laura")
state <- c("ma", "ny", "ca")
dfc <- data.frame(firstname, state, stringsAsFactors = FALSE)
dfc
# Join on differently named key columns; the empty third argument skips `by`
# so by.x/by.y are matched positionally (clearer as named arguments)
merge(x = dfa, y = dfc, , "kids", "firstname")
x <- data.frame(name = c("a", "b", "c"), math = c(1, 2, 3))
y <- data.frame(name = c("d", "b", "a"), english = c(4, 5, 6))
merge(x, y)
cbind(x, y)
merge(x, y, all = TRUE)

# --- Factors and levels ---
x <- c(5, 12, 13, 12)
xFactor <- factor(x)
xFactor
str(xFactor)
# A factor is stored as integer codes plus a levels attribute
unclass(xFactor)
length(xFactor)
yFactor <- factor(x, levels = c(5, 12, 13, 88))
yFactor
yFactor[2] <- 88
yFactor
# Assigning a value outside the declared levels yields NA with a warning
yFactor[2] <- 20
yFactor

# --- tapply: grouped aggregation ---
ages <- c(25, 26, 55, 37, 21, 42)
affils <- c("r", "d", "d", "r", "u", "d")
tapply(ages, affils, mean)
gender <- c("M", "M", "F", "M", "F", "F")
age <- c(47, 59, 21, 32, 33, 24)
income <- c(55000, 88000, 32450, 76500, 123000, 45650)
temp <- data.frame(gender, age, income)
temp$over25 <- ifelse(temp$age > 25, 1, 0)
temp
# Two grouping factors give a 2-D table of means
tapply(temp$income, list(temp$gender, temp$over25), mean)
tapply(1:10, rep(1, 10), sum)
tapply(iris$Sepal.Length, iris$Species, mean)
|
d11a2ac0c2b6ccf665a265b5e810c3a2438804c0
|
47278feb821db014764ac072a29def5d5821ef53
|
/01-LinearRegression.R
|
7ae5e2db863cd321b9e2d76bb09e00ac572587f6
|
[] |
no_license
|
paulafortuna/DataMining-MachineLearningStuff
|
11429526d13dcaa5ec3be02008df7c63fcdf0b1e
|
4abdd02dc0fa1f8bb45483865c4c0d3c263787de
|
refs/heads/master
| 2021-01-10T05:27:25.944005
| 2016-03-10T11:34:06
| 2016-03-10T11:34:06
| 52,434,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
01-LinearRegression.R
|
library (MASS) #datasets and functions
library (ISLR)
library (car)
dataset = Boston
#1 LOOK TO THE DATA
# columns:
# medv (median house value) - dependent variable
#2 check some charts: is there any linear relation?
pairs(dataset)
#3 just check if a linear regression fits
lm.fit = lm(medv~lstat,dataset)
summary(lm.fit)
#how to interpret RSE
rse = summary(lm.fit)$sigma
m = mean(dataset$medv)
percentage = rse/m
plot(dataset$lstat, dataset$medv)
abline (lm.fit)
#4 residuals plots
residuals = resid(lm.fit) #List of residuals
plot(dataset$lstat, residuals, ylab="Residuals", xlab="lstat")
abline(0, 0)
#5 multiple regression
lm.fit2 = lm(formula = medv ~ lstat + age , dataset)
summary(lm.fit2) # -> one more variable only increases 1% of explained variance check r^2
#6 multiple regression with all variables (use .)
lm.fit3 =lm(medv ~ .,dataset)
summary(lm.fit3)
#r^2
summary(lm.fit3)$r.sq
#vif -> variance inflation factors -> high values are removed 5-10
vif(lm.fit3)
#7 remove bad variables
lm.fit4 =lm(medv ~ . - rad - tax, dataset)
summary(lm.fit4)
lm.fit4 =lm(medv ~ . - age - indus - rad - tax, dataset)
summary(lm.fit4)
#8 Interaction Terms
lm(formula = medv ~ lstat * age , dataset)
#9 regression with polynomial terms
polyregression = lm(formula = medv ~ poly(lstat,4), dataset)
#9 how to use the model to predict
teste = subset(dataset, rad < 2)
predict (lm.fit,teste)
|
97f8b187af52559dd80cb08bdc437e979d17cbc4
|
4bd57b8501d4326ecc06c1d1ea499935e1668d95
|
/MASH-dev/SeanWu/tiny-mbites/R/MBITES/mbites-survival.R
|
ccc71174608ea161c2a34a1eddc236bdddf803d2
|
[] |
no_license
|
aucarter/MASH-Main
|
0a97eac24df1f7e6c4e01ceb4778088b2f00c194
|
d4ea6e89a9f00aa6327bed4762cba66298bb6027
|
refs/heads/master
| 2020-12-07T09:05:52.814249
| 2019-12-12T19:53:24
| 2019-12-12T19:53:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,497
|
r
|
mbites-survival.R
|
###############################################################################
# __ ___ ____ _____________________
# / |/ / / __ )/ _/_ __/ ____/ ___/
# / /|_/ /_____/ __ |/ / / / / __/ \__ \
# / / / /_____/ /_/ // / / / / /___ ___/ /
# /_/ /_/ /_____/___/ /_/ /_____//____/
#
# MBITES - survival
# Sean Wu
# March 2019
#
###############################################################################
# survival function
# Runs both end-of-bout survival checks for one mosquito: flight-related
# mortality first, then site-specific local hazards (surviveHazards skips
# mosquitoes already marked dead).
# NOTE(review): `mosy` appears to be mutated in place by the callees
# (statenext / hist$cod), which implies reference semantics (environment
# or R6 object) -- confirm against the mosquito class definition.
survival <- function(mosy){
surviveFlight(mosy)
surviveHazards(mosy)
}
# did i survive this bout
# Flight survival: multiplies the state-dependent baseline survival by an
# energy-dependent term; wing-tattering and senescence terms exist but are
# disabled here (commented out). On failure the mosquito is marked dead
# ("D") and the cause of death recorded.
# NOTE(review): the disabled pWingTattering call passes mosy$energy,
# but pWingTattering's parameter is accumulated wing damage -- verify
# (likely should be mosy$damage) before re-enabling.
surviveFlight <- function(mosy){
# WingTattering(mosy)
p <- baselineSurvival(mosy)
p <- p * pEnergySurvival(mosy$energy)
# p <- p * pWingTattering(mosy$energy) # comment out to disable
# p <- p * pSenesce(mosy) # comment out to disable
# death occurs with probability 1 - p
if(runif(1) < 1-p){
mosy$statenext <- "D"
mosy$hist$cod <- "surviveFlight"
}
}
# Baseline per-bout survival probability, looked up from the global
# `parameters` list according to the mosquito's current behavioural
# state code (F, B, L, or O). Returns NULL (switch default) for any
# other state code.
baselineSurvival <- function(mosy){
  params <- get("parameters", .GlobalEnv)
  switch(mosy$state,
    F = params$F_surv,
    B = params$B_surv,
    L = params$L_surv,
    O = params$O_surv
  )
}
# Survival probability as a logistic-style function of energy reserves:
# exp(S_a * energy) / (S_b + exp(S_a * energy)), with S_a and S_b taken
# from the global `parameters` list. Vectorized over `energy`.
pEnergySurvival <- function(energy){
  params <- get("parameters", .GlobalEnv)
  scaled <- exp(params$S_a * energy)
  scaled / (params$S_b + scaled)
}
# accumulative wing damage
# With probability ttsz_p per bout, add a Beta(ttsz_a, ttsz_b) distributed
# increment to the mosquito's accumulated wing damage. All three
# parameters come from the global `parameters` list.
# NOTE(review): `mosy$damage <- ...` only persists if `mosy` has reference
# semantics (environment/R6); confirm against the mosquito class.
WingTattering <- function(mosy){
ttsz_p <- get("parameters",.GlobalEnv)$ttsz_p
if(runif(1) < ttsz_p){
ttsz_a <- get("parameters",.GlobalEnv)$ttsz_a
ttsz_b <- get("parameters",.GlobalEnv)$ttsz_b
mosy$damage <- mosy$damage + rbeta(n=1,ttsz_a,ttsz_b)
}
}
# Survival term for accumulated wing damage:
# (2 + ttr_b)/(1 + ttr_b) - exp(damage * ttr_a)/(ttr_b + exp(damage * ttr_a)),
# a declining function of `damage`, with ttr_a and ttr_b taken from the
# global `parameters` list.
pWingTattering <- function(damage){
  params <- get("parameters", .GlobalEnv)
  grown <- exp(damage * params$ttr_a)
  (2 + params$ttr_b) / (1 + params$ttr_b) - grown / (params$ttr_b + grown)
}
# Senescence survival term: a declining function of mosquito age
# (tNow - bDay),
# (2 + sns_b)/(1 + sns_b) - exp(sns_a * age)/(sns_b + exp(sns_a * age)),
# with sns_a and sns_b taken from the global `parameters` list.
pSenesce <- function(mosy){
  params <- get("parameters", .GlobalEnv)
  age <- mosy$tNow - mosy$bDay
  aged <- exp(params$sns_a * age)
  (2 + params$sns_b) / (1 + params$sns_b) - aged / (params$sns_b + aged)
}
###############################################################################
# Local Hazards Survival
###############################################################################
# Site-specific mortality: if the mosquito is not already marked dead by
# the flight check, it dies with the hazard probability stored for its
# current site in the global `landscape` list.
# NOTE(review): as with surviveFlight, the statenext/hist$cod writes
# assume `mosy` has reference semantics -- confirm.
surviveHazards <- function(mosy){
if(mosy$statenext != "D"){
p <- get("landscape",.GlobalEnv)[[mosy$site]]$haz
if(runif(1) < p){
mosy$statenext <- "D"
mosy$hist$cod <- "surviveHazards"
}
}
}
|
8e7675739ad839411d88bd97f3a9a4929c604156
|
71bd19d1587e8736f9645736a1e61552ae0d543a
|
/man/selectMax.Rd
|
df56f22f0d53f769549c6013d55cfec79bef62f0
|
[] |
no_license
|
cran/cnaOpt
|
55acd34c2ca18f083abee6d7d7722cca356445a6
|
c6e0b904714713f05f6e3d0989ff841d0646f661
|
refs/heads/master
| 2023-03-18T01:38:03.279181
| 2022-07-08T13:00:11
| 2022-07-08T13:00:11
| 236,573,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,983
|
rd
|
selectMax.Rd
|
\name{selectMax}
\alias{selectMax}
\alias{multipleMax}
\title{
Select the con-cov optima from a '\code{conCovOpt}' object that maximize a specified optimality criterion
}
\description{
\code{selectMax} selects the optima from a '\code{conCovOpt}' object that maximize a specified optimality criterion (cf. Baumgartner and Ambuehl 2021).
}
\usage{
selectMax(x, crit = quote(con * cov), cond = quote(TRUE), warn = TRUE)
multipleMax(x, outcome)
}
\arguments{
\item{x}{
An object output by \code{\link{conCovOpt}}.
}
\item{crit}{
Quoted expression specifying a numeric criterion to be maximized when selecting from the con-cov optima that meet criterion \code{cond}, for example, \code{min(con,cov)} or \code{0.8*con + 0.2*cov}, etc.}
\item{cond}{
Quoted expression specifying a logical criterion to be imposed on the con-cov optima in \code{x} before selecting the optima maximizing \code{crit}, for example, \code{con > 0.85} or \code{con > cov}, etc.
}
\item{warn}{
Logical; if \code{TRUE}, \code{selectMax()} returns a warning if no solution is found.
}
\item{outcome}{
A character string specifying a single outcome value in the original data.
}
}
\details{
While \code{\link{conCovOpt}} identifies \emph{all} con-cov optima in an analyzed data set, \code{selectMax} selects those optima from a '\code{conCovOpt}' object \code{x} that comply with a logical condition \code{cond} and fare best according to the numeric optimality criterion \code{crit}. The default is to select so-called \emph{con-cov maxima}, meaning con-cov optima with highest product of consistency and coverage. % : An ordered pair (con, cov) of consistency and coverage scores is a \strong{con-cov maximum} for outcome Y=k in data \eqn{\delta} iff (con, cov) is a con-cov optimum for Y=k in \eqn{\delta} with \strong{highest product of consistency and coverage} (con-cov product).
But the argument \code{crit} allows for specifying any other numeric optimality criterion, e.g. \code{min(con, cov)}, \code{max(con, cov)}, or \code{0.8*con + 0.2*cov}, etc. (see Baumgartner and Ambuehl 2021). If \code{x} contains multiple outcomes, the selection of the best con-cov optima is done separately for each outcome.
As of package version 0.5.0, the function \code{multipleMax} is obsolete. It is kept for backwards compatibility only.
Via the column \code{id} in the output of \code{selectMax} it is possible to select one among many equally good maxima, for instance, by means of \code{\link{reprodAssign}} (see the examples below).
}
%\section{Remark}{
%From version 0.5.0 of the package, \code{selectMax} returns multiple best lines if the highest \code{crit} value is attained in several rows of the '\code{conCovOpt}' object. As a consequence, the function \code{multipleMax} and the argument \code{allConCov} in \code{\link{conCovOpt}} are now obsolete. They are kept in the package to ensure backward compatibility of existing code.
%}
\value{
\code{selectMax} returns an object of class 'selectMax'.
}
\seealso{
\code{\link{conCovOpt}}, \code{\link{reprodAssign}}
See also examples in \code{\link{conCovOpt}}.
}
\references{
Baumgartner, Michael and Mathias Ambuehl. 2021. \dQuote{Optimizing Consistency and Coverage in Configurational Causal Modeling.} \emph{Sociological Methods & Research}.\cr doi:10.1177/0049124121995554.
}
\examples{
dat1 <- d.autonomy[15:30, c("EM","SP","CO","AU")]
(cco1 <- conCovOpt(dat1, outcome = "AU"))
selectMax(cco1)
selectMax(cco1, cond = quote(con > 0.95))
selectMax(cco1, cond = quote(cov > 0.98))
selectMax(cco1, crit = quote(min(con, cov)))
selectMax(cco1, crit = quote(max(con, cov)), cond = quote(cov > 0.9))
# Multiple equally good maxima.
(cco2 <- conCovOpt(dat1, outcome = "AU"))
(sm2 <- selectMax(cco2, cond = quote(con > 0.93)))
# Each maximum corresponds to a different rep-assignment, which can be selected
# using the id argument.
reprodAssign(sm2, "AU", id = 10)
reprodAssign(sm2, "AU", id = 11)
reprodAssign(sm2, "AU", id = 13)
}
|
1764ce98a7d63971467c76d9489c2e219e425c9f
|
015558bad7fedcf6530bb22055e385f9140246bb
|
/test/test-plot.R
|
980cf266f0955c5a92dd51837088f032d1b47098
|
[] |
no_license
|
geogismx/phenofit
|
25e3a79f19465388bdfbc334d7336bbd24c166c4
|
388611631581c0160ee6d4b2f28ab6f6306fd1a3
|
refs/heads/master
| 2020-03-19T09:24:56.718135
| 2018-05-15T09:08:12
| 2018-05-15T09:08:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,685
|
r
|
test-plot.R
|
# Divide a vegetation-index time series into growing seasons.
#
# Iteratively Whittaker-smooths the series, adapting `lambda` (up to 3
# passes) until the number of detected maxima/minima per year falls within
# [1, max_MaxPeaksperyear] / [1, max_MinPeaksperyear]; then cleans up the
# extrema (drops weak minima, merges points too close in date or value,
# patches head/tail break points) and splits the record into
# minimum-to-minimum growing seasons.
#
# INPUT: list with at least t (dates), y (values), w (weights), ylu
#   (valid value range). Returns NULL when y is all-NA; otherwise
#   list(whit = fitted series, pos = extrema table, dt = season dates,
#   di = season indices).
# NOTE(review): depends on package helpers (whitsmw2, findpeaks, plotdata,
# rm_duplicate) and on magrittr/plyr/dplyr/purrr/lubridate being attached;
# `meth` is currently unused because the SG branch is commented out.
season <- function(INPUT, lambda, nptperyear = 46, south = FALSE,
iters = 2, wFUN = wTSM, IsPlot = TRUE,
minpeakdistance = nptperyear/6, ymax_min = 0.5,
TRS = 0.05, meth = c('whit', 'sg'), ...,
max_MaxPeaksperyear = 2, max_MinPeaksperyear = 3)
{
t <- INPUT$t
y <- INPUT$y
ylu <- INPUT$ylu
if (all(is.na(y))) return(NULL)
# npt <- length(y)
# effective number of points = points with positive weight
npt <- sum(INPUT$w > 0)
nyear <- ceiling(npt/nptperyear)
# if (nyear <= 3) nyear <- ceiling(nyear)
frame <- floor(nptperyear/7) * 2 + 1 #13, reference by TSM SG filter
if (missing(lambda)) lambda <- max(nyear*frame, 15)
## 3. weighted whittaker curve fitting
# wfun <- wTSM#wTSM, bisquare
iloop <- 1
while (iloop <= 3){
# if (meth[1] == 'sg'){
# yfits <- sgfitw(INPUT$y, INPUT$w, nptperyear, INPUT$ylu, wFUN, iters, frame, d=2)
# }else if(meth[1] == 'whit'){
# whitsmw(y, w, ylu, wFUN, iters = 1, lambda = 100, ..., d = 2, missval)
yfits <- whitsmw2(INPUT$y, INPUT$w, INPUT$ylu, nptperyear, wFUN, iters, lambda)$data
# }else{
# stop('Invalid method input! Should be "sg" or "whit".')
# }
## 4. find local extreme values
# use the last (most-iterated) fitted column as the working prediction
ypred <- yfits[, ncol(yfits), drop = T]
# ypred <- as.numeric(runmed(ypred, frame))
alpha <- 0.01
ylu <- quantile(ypred, c(alpha/2, 1 - alpha), na.rm = T)
A <- diff(ylu)
threshold <- TRS*A
# For avoid fluctuating in peak of growing season or flat peak growing season,
# like fluxsite: ZM-Mon
# max_slp <- 2*A/nptperyear
# pek_slp <- abs(coefficients(lm(ypred[I]~I))[[2]])
# flatten a very broad plateau (>30% of points in the top 20% of range)
I <- which(ypred > (0.8*A + ylu[1]))
if (length(I)/length(y) > 0.3){
ypred[I] <- median(ypred[I])
}
# local minimum values
# threshold for local extreme values
# peak values is small for minimum values, so can't use threshold here
peaks <- findpeaks(-ypred,
threshold_max = 0.2*A,
minpeakdistance = minpeakdistance, zero = "-", nups = 0)
pos_min <- peaks$X
pos_min[, 1] %<>% multiply_by(-1)
npeak_MinPerYear <- length(peaks$gregexpr)/nyear#max peaks
# local maximum values,
peaks <- findpeaks(ypred, zero = "+",
threshold_max = 0.2*A,
threshold_min = 0*A,
minpeakdistance = minpeakdistance,
minpeakheight = max(0.2*A + ylu[1], ymax_min), nups = 1)
pos_max <- peaks$X
npeak_MaxPerYear <- length(peaks$gregexpr)/nyear#max peaks
cat(sprintf('iloop = %d: lambda = %.1f, npeak_MinPerYear = %.2f, npeak_MaxPerYear = %.2f\n',
iloop, lambda, npeak_MinPerYear, npeak_MaxPerYear))
# maxpeaksperyear <- 2
# too many peaks -> smooth harder; too few -> smooth less; else accept
if (npeak_MaxPerYear > max_MaxPeaksperyear | npeak_MinPerYear > max_MinPeaksperyear){
lambda <- lambda*2
}else if (npeak_MaxPerYear < 1 | npeak_MinPerYear < 1){
lambda <- lambda/2
}else{
break
}
iloop <- iloop + 1
}
# plot curve fitting time-series
if (IsPlot){
plotdata(INPUT, nptperyear)
colors <- c("red", "blue", "green")
for (i in 1:(ncol(yfits) - 1)){
lines(INPUT$t, yfits[, i+1, drop = T], col = colors[i], lwd = 2)
}
}
# plot(ypred, type = "b"); grid()
if (is.null(pos_max) || is.null(pos_min)){
stop("Can't find a complete growing season before trim!")
}
# 1.1 the local minimum value should small than 0.4*A
pos_min %<>% subset((val - ylu[1]) <= 0.7*A)
# add column type: max is 1; min is -1.
# pos column: c("val", "pos", "left", "right", "type")
pos <- rbind(add_column(pos_max, type = 1), add_column(pos_min, type = -1))
pos <- pos[order(pos$pos), ]
# 1.2 remove both points (date or value of min and max too close)
I_date <- which(diff(pos$pos) < (nptperyear/12*1)) #15
I_val <- which(abs(diff(pos$val)) < 0.1*A) #0.15
# I_del <- union(I_date, I_date+1)
I_del <- union(I_date + 1, I_val + 1)
if (length(I_del) > 0) pos <- pos[-I_del, ]
# flag increments whenever consecutive extrema change type (max<->min)
pos$flag <- cumsum(c(1, diff(pos$type) != 0))
# 1.3 remove replicated
pos <- ddply(pos, .(flag), rm_duplicate, y = ypred, threshold = threshold)[, 2:6]
pos$t <- t[pos$pos]
############################################################################
## 5. check head and tail break point, and reform breaks
locals <- pos[, c("pos", "type")]
ns <- nrow(locals)
# check the head and tail minimum values
minlen <- nptperyear/3 #distance from peak point
if (last(pos$type) == 1 && (npt - nth(pos$pos, -2)) > minlen &&
abs(last(ypred) - nth(pos$val, -2)) < 0.15*A )
locals %<>% rbind.data.frame(., data.frame(pos = npt, type = -1))
if (pos$type[1] == 1 && pos$pos[2] > minlen && abs(ypred[1] - pos$val[2]) < 0.15*A)
locals %<>% rbind.data.frame(data.frame(pos = 1, type = -1), .)
# a complete growing season, from minimum to minimum
I <- which(locals$type == -1)
locals <- locals[I[1]:I[length(I)], ]
s <- locals$pos
ns <- length(s)
if (ns < 3) stop("Can't find a complete growing season!")
locals %<>% mutate(val = ypred[pos], t = t[pos])
pos_max <- subset(locals, type == 1)
pos_min <- subset(locals, type == -1)
## 6. divide into multiple growing seasons
di <- data_frame(beg = s[seq(1, ns-1, 2)]+1,
peak = s[seq(2, ns, 2)],
end = s[seq(3, ns, 2)])
dt <- map_df(di, ~t[.x]) %>%
mutate(len = difftime(end, beg, units = "days") + 1, year = year(peak)) %>%
bind_cols(mval = ypred[di$peak], .)
# get the growing season year, not only the calendar year
if (south){
dt %<>% mutate(year = year + as.integer(peak > ymd(sprintf('%d0701', year))) - 1L)
}
dt %<>% group_by(year) %>% dplyr::mutate(season = 1:n(), flag = sprintf("%d_%d", year, season))
## 7. plot
if (IsPlot){
points(pos_max$t, pos_max$val, pch=20, cex = 1.5, col="red")
points(pos_min$t, pos_min$val, pch=20, cex = 1.5, col="blue")
}
# Then curve fitting VI index in every segment, according to local minimum values
# If begin at maximum value, then add the 1th point as min value. Or just begin
# from the original the first minimum value.
return(list(whit = bind_cols(data_frame(t, y), yfits),
pos = pos, dt = dt, di = di))
}
|
1f3a9953133694a72c0501e91529199b68219d1a
|
e37a1d152a1d08f03739d40492665432d15e55fb
|
/study-data/evaluation/errors.R
|
cbee99446fccda013b0a78aafcfa97440a98f6a5
|
[] |
no_license
|
jacr1/molecular-structure-prediction-bayesin-network
|
4450ea07346a9d7080faa9fbf118730744e44268
|
4211ea3528e1385705981fccfe4ed5214d71ee62
|
refs/heads/master
| 2020-03-10T22:49:57.382709
| 2018-04-16T22:27:02
| 2018-04-16T22:27:02
| 129,627,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
errors.R
|
# Evaluation of drawing errors in the molecular-structure study: compares
# participants who used predictions against those who did not, on
# correctness and on total correction actions (rubs + undos).
data = read.csv("final-data-python.csv", sep=",", head=TRUE)
data$prediction_type <- as.factor(data$prediction_type)
data$correctly_drawn <- as.logical(data$correctly_drawn)
# SMILES string length as a rough proxy for structure complexity
data$smiles_length <- nchar(as.character(data$smiles))
# total correction actions taken while drawing
data$total_errors <- data$rubs + data$undos
# split records by whether any predictions were used
pred_used <- data[data$predictions_used > 0, ]
not_pred_used <- data[data$predictions_used <= 0, ]
##############################################################################################
# t-test between predictions and not predictions for if the structure was drawn correctly or not
#
pred_used_correct = nrow(pred_used[pred_used$correctly_drawn == TRUE,])
not_pred_used_correct = nrow(not_pred_used[not_pred_used$correctly_drawn == TRUE,])
cat("predictions used:", pred_used_correct,"out of", nrow(pred_used), "correct")
cat("predictions not used:", not_pred_used_correct,"out of", nrow(not_pred_used), "correct")
t.test(x=not_pred_used$correctly_drawn, y=pred_used$correctly_drawn)
##############################################################################################
# t-test between predictions and not predictions for the total number of rubs and undos used
#
sum(pred_used$total_errors)
sum(not_pred_used$total_errors)
mean(pred_used$total_errors)
mean(not_pred_used$total_errors)
mean(pred_used$smiles_length)
mean(not_pred_used$smiles_length)
t.test(x=not_pred_used$total_errors, y=pred_used$total_errors)
|
c1c78c3bedba948ea5fe45b0712918202743183f
|
c4f324c98487791c39f550645743e2b5dad3b52a
|
/depricated_R/depricated_functions.R
|
f7feb4b327a5d8ae008b35b21af79b4992f90ae1
|
[] |
no_license
|
russelnelson/GeneArchEst
|
06219a1d222087b13f662739f60ebe513aa2bc1f
|
0d126aae50f9b68ee2c36ea6c6d9ba16e7c41a9c
|
refs/heads/master
| 2023-03-13T00:48:25.816747
| 2021-03-04T23:08:15
| 2021-03-04T23:08:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,489
|
r
|
depricated_functions.R
|
# Estimated time until a population begins to decline (growth rate < 1),
# following Burger and Lynch (1995).
#
# Args:
#   g_var:     additive genetic variance
#   e_var:     environmental variance
#   omega:     width of the fitness function (squared internally)
#   k:         rate of environmental change, in phenotypic standard deviations
#   B:         mean number of offspring per individual
#   Ne:        effective population size
#   theta_var: environmental stochasticity
#
# Returns a list with:
#   t1   - time to decline (NA when Bo < 1; Inf when k < kc)
#   kc   - critical rate of environmental change
#   Vs   - strength of stabilizing selection
#   Vlam - total variance load
#   Bo   - reproductive capacity scaled by selection width
B_L_t1_func <- function(g_var, e_var, omega, k, B, Ne, theta_var){
  Vs <- omega^2 + e_var
  V_gt <- Vs / (2 * Ne) + (g_var * theta_var) / (2 * Vs)
  Vlam <- Vs + g_var + V_gt + theta_var
  Bo <- B * omega / sqrt(Vlam)
  if (Bo < 1) {
    # the population declines even without environmental change
    return(list(t1 = NA, kc = NA, Vs = Vs, Vlam = Vlam, Bo = Bo))
  }
  kc <- (g_var / (g_var + Vs)) * sqrt(2 * Vs * log(Bo))
  t1 <- if (k < kc) {
    # change slower than the critical rate: the population never crashes
    Inf
  } else {
    -((g_var + Vs) / g_var) * log(1 - kc / k)
  }
  list(t1 = t1, kc = kc, Vs = Vs, Vlam = Vlam, Bo = Bo)
}
# old gs function
# Forward-in-time simulation of a population under a moving phenotypic
# optimum, using either true or genomically-predicted breeding values.
#
# x: list bundle containing genotypes ($x), marker metadata ($meta),
#   heritability ($h), optionally estimated effects ($e.eff), phenotypes
#   ($phenotypes$p), a prediction model ($output.model), and the
#   prediction program name ($prediction.program).
# gens: number of generations to simulate.
# growth.function / survival.function / selection.shift.function:
#   user-supplied demography, viability-selection, and optimum-shift rules.
# pred.method: "effects" (predict from effect sizes), "model" (predict
#   from the fitted model), or "real" (use true effects from meta).
# Returns list(run_vars = per-generation summary matrix (plus allele
# frequencies when print.all.freqs), x = final genotypes, phenos, BVs).
# NOTE(review): depends on package helpers (get.pheno.vals,
# pred.BV.from.model, e.dist.func, rand.mating) and on data.table,
# reshape2 and ggplot2 (the latter only when plot_during_progress).
gs <- function(x,
gens,
growth.function,
survival.function,
selection.shift.function,
rec.dist,
var.theta = 0,
pred.method = "effects",
plot_during_progress = FALSE,
facet = "group", chr.length = 10000000,
fgen.pheno = FALSE,
intercept_adjust = FALSE,
print.all.freqs = FALSE,
adjust_phenotypes = FALSE,
do.sexes = TRUE,
init = F,
verbose = T){
if(verbose){cat("Initializing...\n")}
#unpack x:
if(pred.method == "effects"){ #unpack estimated effect sizes if provided.
effect.sizes <- x$e.eff[,2]
}
if(fgen.pheno){ #unpack phenotypes if requested
fgen.pheno <- x$phenotypes$p
}
h <- x$h
meta <- x$meta
if(pred.method != "real"){
pred.mod <- x$output.model$mod
pred.dat <- x$output.model$data
model <- x$prediction.program
}
else{
model <- "real"
pred.method <- "effects" #since everything else works the same, just need to change inputs.
effect.sizes <- meta$effect
}
if(pred.method == "effects"){
pred.mod <- NULL
pred.dat <- NULL
}
if(pred.method == "model"){
effect.sizes <- NULL
}
x <- x$x
#=================checks========
if(!pred.method %in% c("model", "effects")){
stop("pred.method must be provided. Options:\n\tmodel: predict phenotypes directly from the model provided.\n\teffects: predict phenotypes from estimated effect sizes.\n")
}
if(!data.table::is.data.table(x)){
x <- data.table::as.data.table(x)
}
if(pred.method == "effects"){
if(nrow(x) != length(effect.sizes) | nrow(x) != nrow(meta)){
stop("Provided x, effect sizes, and meta must all be of equal length!")
}
}
else{
if(nrow(x) != nrow(meta)){
stop("Provided x and meta must be of equal length!")
}
}
if(pred.method == "model"){
if(!model %in% c("JWAS", "BGLR", "ranger")){
stop("To predict from the model, a JWAS, BGLR, or ranger model must be provided.\n")
}
}
else{
if(model == "ranger"){
stop("RF does not estimate effect sizes, so prediction must be done using the ranger model.\n")
}
}
# before doing anything else, go ahead and remove any loci from those provided with no effect! Faster this way.
# don't do this if initializing the population!
if(pred.method == "effects" & !init){
if(any(effect.sizes == 0)){
n.eff <- which(effect.sizes == 0)
x <- x[-n.eff,]
meta <- meta[-n.eff,]
effect.sizes <- effect.sizes[-n.eff]
}
}
#=================get starting phenotypic values and BVs=========
# get starting phenotypes and addative genetic values
## If first gen phenos aren't provided (should be uncommon)
# NOTE(review): ncol(x)/2 individuals because each individual carries two
# gene-copy columns.
if(length(fgen.pheno) != ncol(x)/2){
if(pred.method == "effects"){
if(verbose){cat("Generating representative starting phenotypes from effect sizes.")}
pheno <- get.pheno.vals(x, effect.sizes, h)
a <- pheno$a # BVs
pheno <- pheno$p # phenotypic values
}
else{
if(verbose){cat("Generating representative starting phenotypes from model.")}
a <- pred.BV.from.model(pred.mod, x, pred.method, model)
pheno <- a + e.dist.func(a, var(a), h) #add environmental effects
#working here
}
}
# otherwise use those, but still need to estimate BVs
else{
if(verbose){cat("Using provided phenotypic values.")}
pheno <- fgen.pheno #provded phenotypic values.
a <- pred.BV.from.model(pred.model = pred.mod, g = x, pred.method = pred.method,
model.source = model, h = h, h.av = "fgen", effect.sizes = effect.sizes)$a
}
#================set up BV variation adjustment to correct for drop in variance from GP methods============
# NOTE(review): `adj.a.var` computed here is never used below, while the
# later variance-adjustment branch uses `ad.factor`, which is only defined
# in the commented-out "old version" -- running with
# adjust_phenotypes = TRUE will fail on the missing `ad.factor`; verify.
if(adjust_phenotypes){
reorg_gcs <- rand.mating(x, ncol(x)/2, meta, rec.dist, chr.length, do.sexes, facet) # reorganize chrs once, since this causes one heck of a drop in var(a) in some GP results
reorg_gcs <- rand.mating(reorg_gcs, ncol(x)/2, meta, rec.dist, chr.length, do.sexes, facet)
re_p <- pred.BV.from.model(pred.model = pred.mod, g = reorg_gcs, pred.method = pred.method,
model.source = model, h = h, h.av = "fgen", effect.sizes = effect.sizes) # re-predict BVs
re_a <- re_p$a
re_p <- re_p$p
adj.a.var <- var(re_a) #variance next gen
# now need to adjust a and pheno to fit the variance a gen later
# multiply future phenotypes by the square root of these values, then adjust the mean back to the correct mean.
# old version which adjusts back to the starting phenotypic var every generation.
# reorg_gcs <- rand.mating(x, ncol(x)/2, meta, rec.dist, chr.length, do.sexes, facet) # reorganize chrs once, since this causes one heck of a drop in var(a) in some GP results
# re_p <- pred.BV.from.model(pred.model = pred.mod, g = reorg_gcs, pred.method = pred.method,
# model.source = model, h = h, h.av = "fgen", effect.sizes = effect.sizes) # re-predict BVs
# re_a <- re_p$a
# re_p <- re_p$p
# ad.factor <- var(pheno)/(var(re_a)/h) # here's our adjustment factor
# rm(re_a, re_p, reorg_gcs)
}
#if requested, get the amount to adjust phenotypes by in future gens.
if(intercept_adjust){
i.adj <- mean(pheno)
}
#================print out initial conditions, intiallize final steps, and run===========
#starting optimal phenotype, which is the starting mean addative genetic value.
opt <- mean(a) #optimum phenotype
if(verbose){
cat("\n\n===============done===============\n\nStarting parms:\n\tstarting optimum phenotype:", opt,
"\n\tmean phenotypic value:", mean(pheno), "\n\taddative genetic variance:", var(a), "\n\tphenotypic variance:", var(pheno), "\n\th:", h, "\n")
}
#make output matrix and get initial conditions
out <- matrix(NA, nrow = gens + 1, ncol = 8)
colnames(out) <- c("N", "mu_pheno", "mu_a", "opt", "diff", "var_a", "stochastic_opt", "gen")
N <- ncol(x)/2 #initial pop size
h.av <- var(a) #get the historic addative genetic variance.
h.pv <- var(pheno) #historic phenotypic variance.
out[1,] <- c(N, mean(pheno), mean(a), opt, 0, h.av, opt, 1) #add this and the mean initial additive genetic variance
if(plot_during_progress){
library(ggplot2)
pdat <- reshape2::melt(out)
colnames(pdat) <- c("Generation", "var", "val")
ranges <- data.frame(var = c("N", "mu_pheno", "mu_a", "opt", "diff"),
ymin = c(0, out[1,2]*2, out[1,3]*2, out[1,4]*2, -10),
ymax = c(out[1,1]*1.05, 0, 0, 0, 10))
pdat <- merge(pdat, ranges, by = "var")
print(ggplot(pdat, aes(Generation, val)) + geom_point(na.rm = T) +
facet_wrap(~var, ncol = 1, scales = "free_y", strip.position = "left") +
geom_blank(aes(y = ymin)) +
geom_blank(aes(y = ymax)) +
theme_bw() + xlim(c(0, max(pdat$Generation))) +
theme(strip.placement = "outside", axis.title.y = element_blank(), strip.background = element_blank(),
strip.text = element_text(size = 11)))
}
#initialize matrix to return allele frequencies if requested.
if(print.all.freqs){
a.fqs <- matrix(0, nrow(meta), gens + 1)
a.fqs[,1] <- rowSums(x)/ncol(x)
}
#================loop through each additional gen, doing selection, survival, and fisher sampling of survivors====
if(verbose){
cat("\nBeginning run...\n\n================================\n\n")
}
for(i in 2:(gens+1)){
#=========survival====
# get the optimum phenotype this gen
t.opt <- rnorm(1, opt, var.theta)
#survival:
s <- rbinom(out[(i-1),1], 1, #survive or not? Number of draws is the pop size in prev gen, surival probabilities are determined by the phenotypic variance and optimal phenotype in this gen.
survival.function(pheno, t.opt, hist.var = h.pv)) # calling the function in this way ensures that individuals with phenotypes at the optimum have a survival probability of whatever is set in the function.
#if the population has died out, stop.
if(sum(s) <= 1){
break
}
#what is the pop size after growth?
out[i,1] <- round(growth.function(sum(s)))
#make a new x with the survivors
x <- x[, .SD, .SDcols = which(rep(s, each = 2) == 1)] #get the gene copies of survivors
# # check phenotypic variance...
# temp <- get.pheno.vals(x, effect.sizes, h, hist.a.var = h.av)
# ptemp <- data.frame(val = c(a, temp$a), class = c(rep("T0", length(a)), rep("T1", length(temp$a))))
# temp <- tem$p
# if(intercept_adjust){
# temp <- temp + i.adj
# }
# # adjust variance
# if(adjust_phenotypes != FALSE){
# s.p.mean <- mean(temp)
# temp <- temp*sqrt(ad.factor)
# temp <- temp - (mean(temp) - s.p.mean)
# }
# print(var(temp))
#=============do random mating, adjust selection, get new phenotype scores, get ready for next gen====
y <- rand.mating(x, out[i,1], meta, rec.dist, chr.length, do.sexes, facet)
# check that the pop didn't die due to every individual being the same sex (rand.mating returns NULL in this case.)
if(is.null(y)){
break
}
else{
x <- y
rm(y)
}
#get phenotypic/genetic values
pa <- pred.BV.from.model(pred.model = pred.mod,
g = x,
pred.method = pred.method,
model.source = model,
h = h,
h.av = h.av,
effect.sizes = effect.sizes)
a <- pa$a
pheno <- pa$p
#if requested, adjust the phenotypic values.
# adjust intercept
if(intercept_adjust){
pheno <- pheno + i.adj
}
# adjust variance
if(adjust_phenotypes != FALSE){
s.p.mean <- mean(pheno)
pheno <- pheno*sqrt(ad.factor)
pheno <- pheno - (mean(pheno) - s.p.mean)
}
#adjust selection optima
opt <- selection.shift.function(opt, iv = sqrt(h.av))
#save
out[i,2] <- mean(pheno)
out[i,3] <- mean(a)
out[i,4] <- opt
out[i,5] <- opt - mean(a)
out[i,6] <- var(a)
out[i,7] <- t.opt
if(verbose){
cat("gen:", i-1,
"\tf_opt:", round(out[i-1,4],3),
"\ts_opt", round(out[i-1,7],3),
"\tmean(pheno):", round(out[i,2],3),
"\tmean(a):", round(out[i,3],3),
"\tvar(a):", round(var(a),3),
"\tNs:", sum(s),
"\tN(t+1):", out[i,1],"\n")
}
if(plot_during_progress){
pdat <- reshape2::melt(out)
colnames(pdat) <- c("Generation", "var", "val")
ranges <- data.frame(var = c("N", "mu_pheno", "mu_a", "opt", "diff"),
ymin = c(0, out[1,2]*2, out[1,3]*2, out[1,4]*2, -10),
ymax = c(out[1,1]*1.05, 0, 0, 0, 10))
pdat <- merge(pdat, ranges, by = "var")
print(ggplot(pdat, aes(Generation, val)) + geom_line(na.rm = T) + geom_point(na.rm = T) +
facet_wrap(~var, ncol = 1, scales = "free_y", strip.position = "left") +
geom_blank(aes(y = ymin)) +
geom_blank(aes(y = ymax)) +
theme_bw() + xlim(c(0, max(pdat$Generation))) +
theme(strip.placement = "outside", axis.title.y = element_blank(), strip.background = element_blank(),
strip.text = element_text(size = 11)))
}
#add allele frequencies if requested
if(print.all.freqs){
a.fqs[,i] <- rowSums(x)/ncol(x)
}
gc()
}
#prepare stuff to return
out[,"gen"] <- 1:nrow(out)
out <- out[-nrow(out),]
if(print.all.freqs){
a.fqs <- cbind(meta, a.fqs, stringsAsFactors = F)
out <- list(summary = out, frequencies = a.fqs)
}
return(list(run_vars = out, x = x, phenos = pheno, BVs = a))
}
# from http://www2.univet.hu/users/jreiczig/locScaleTests/
# Lepage test statistic for a joint location-scale comparison of two
# samples: the sum of the squared standardized Wilcoxon rank-sum
# (location) statistic and the squared standardized Ansari-Bradley
# (scale) statistic, each standardized by its null mean and variance.
#
# Args:
#   x1, x2: numeric sample vectors.
# Returns the (non-negative) Lepage statistic as a length-1 numeric.
lepage.stat <- function(x1, x2){
  # removed a leftover browser() debugging call that halted every
  # invocation in interactive sessions
  enne1 <- as.numeric(length(x1))
  enne2 <- as.numeric(length(x2))
  enne <- enne1 + enne2
  # null mean/variance of the Wilcoxon rank-sum statistic
  e.w <- enne1 * (enne + 1) / 2
  v.w <- enne1 * enne2 * (enne + 1) / 12
  # null mean/variance of the Ansari-Bradley statistic (even-n form)
  e.a <- enne1 * (enne + 2) / 4
  v.a <- enne1 * enne2 * (enne + 2) * (enne - 2) / 48 / (enne - 1)
  # wilcox.test reports the Mann-Whitney U; add n1*(n1+1)/2 to recover
  # the rank sum of x1
  w.o <- as.numeric(wilcox.test(x1, x2, exact = FALSE)[1]) + enne1 * (enne1 + 1) / 2
  a.o <- as.numeric(ansari.test(x1, x2, exact = FALSE, alternative = "two.sided")[1])
  wp.o <- (w.o - e.w)^2 / v.w
  ap.o <- (a.o - e.a)^2 / v.a
  wp.o + ap.o
}
# Cucconi test statistic for a joint location-scale comparison of two
# samples, symmetrized by averaging the statistic computed with the
# samples in both orders.
cucconi.stat <- function(x1, x2){
  # one-directional Cucconi statistic: standardized sums of squared ranks
  # (u) and squared contrary ranks (v) of the second sample, combined via
  # their null correlation rho
  one.sided <- function(a, b){
    pooled <- c(a, b)
    n1 <- as.numeric(length(a))
    n2 <- as.numeric(length(b))
    n <- as.numeric(length(pooled))
    # ranks of the second sample within the pooled data
    r2 <- rank(pooled)[(n1 + 1):n]
    centre <- n2 * (n + 1) * (2 * n + 1)
    spread <- (n1 * n2 * (n + 1) * (2 * n + 1) * (8 * n + 11) / 5)^0.5
    u <- (6 * sum(r2^2) - centre) / spread
    v <- (6 * sum((n + 1 - r2)^2) - centre) / spread
    rho <- 2 * (n^2 - 4) / (2 * n + 1) / (8 * n + 11) - 1
    (u^2 + v^2 - 2 * u * v * rho) / 2 / (1 - rho^2)
  }
  0.5 * (one.sided(x1, x2) + one.sided(x2, x1))
}
# Compare an observed and a predicted set of peaks. Returns a numeric
# vector: the absolute difference in peak counts (named "npeak_diff")
# followed by the 31 distribution-distance statistics from
# calc_dist_stats() on the peak values, or 31 NAs when either set is
# empty.
compare_peaks <- function(o, p){
  n_obs <- nrow(o)
  n_pred <- nrow(p)
  # default to all-missing stats; overwritten when both sets are non-empty
  dist_stats <- rep(NA, 31)
  if (all(n_obs > 0 & n_pred > 0)) {
    dist_stats <- calc_dist_stats(o$val, p$val)
  }
  c(npeak_diff = abs(n_obs - n_pred), dist_stats)
}
# Function to calculate the estimated time untill a population begins to crash (growth rate less than one) based on Burger and Lynch 1995.
# NOTE(review): this is an exact duplicate of the B_L_t1_func definition
# that appears earlier in this file; this later copy silently masks the
# earlier one when the file is sourced. Consider deleting one copy.
# g_var: addative genetic variance
# e_var: environmental variance
# omega: width of the fitness function, usually given as omega^2
# k: rate of environmental change in phenotypic standard deviations
# B: mean number of offspring per individual
# Ne: effective population size
# theta_var: environmental stochasticity
B_L_t1_func <- function(g_var, e_var, omega, k, B, Ne, theta_var){
# calc Vs (strength of stabilizing selection)
Vs <- (omega^2) + e_var
# calc Vlam (total variance load)
# simplified: Vlam = (Vs*(1+2*Ne))/2*Ne + (((1+2*Vs)*(g_var+theta_var))/2*Vs)
V_gt <- (Vs/(2*Ne)) + (g_var*theta_var)/(2*Vs)
Vlam <- Vs + g_var + V_gt + theta_var
#calc kc
Bo <- B*omega/sqrt(Vlam)
if(Bo < 1){
# the population declines even without environmental change
return(list(t1 = NA, kc = NA, Vs = Vs, Vlam = Vlam, Bo = Bo))
}
kc <- (g_var/(g_var + Vs))*sqrt(2*Vs*log(Bo))
if(k<kc){
# change slower than the critical rate: the population never crashes
t1 <- Inf
}
else{
t1 <- -((g_var + Vs)/g_var)*log(1-(kc/k))
}
#calc t1
return(list(t1 = t1, kc = kc, Vs = Vs, Vlam = Vlam, Bo = Bo))
}
|
efaa981f27f6959bfeb1aa86a160760fc3118edc
|
f8ad7b7fa9580621cdd56154130ca39fb47d8697
|
/R/SampleCoveragePlots.R
|
00b3160a9bc2fffb11696a5f6a1a6d57fb09ae7c
|
[
"MIT"
] |
permissive
|
colinpmillar/capture_prob_paper
|
683d0cd8009281afcdd0e4b79e5f259d31978d1a
|
66c4bcfe3474e9b90b19018c813189ff0476408b
|
refs/heads/master
| 2020-12-26T01:13:27.932001
| 2015-12-29T21:40:18
| 2015-12-29T21:40:18
| 34,309,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,976
|
r
|
SampleCoveragePlots.R
|
# ------------------------------------------------
#
# Work out crossover between Trust and MSS / SEPA
#
# ------------------------------------------------

# Per-user working directory (interactive analysis script).
if (Sys.info()["user"] == "millaco") {
  setwd("~/Dropbox/SarahColin/PhD/capture_prob_paper")
  library(setwidth)
} else
if (Sys.info()["user"] == "millarc") {
  setwd("C:/work/repos/papers/capture_prop_paper/")
} else
if (Sys.info()["user"] == "Millarc") {
  setwd("C:/work/repos/papers/capture_prop_paper/")
}

# load data: `ef` (electrofishing records), `gis`, `hma`, `redctm` come from
# the saved workspace / CLdata package.
library(CLdata)
library(sp)
load("rData/modelData.rData")

# Keep flagged rows, then drop per-species/lifestage columns and de-duplicate
# so each row is one fishing event rather than one species x lifestage record.
ef <- subset(ef, keep)
ef <- unique(ef[!names(ef) %in% c("Species","Lifestage", "n_R1", "n_R2", "n_R3", "n_R4", "n_R5", "n_R6", "s", "T", "X", "Z", "phi")])

trustpoly <- rgdal::readOGR("mapdata","sa_trust_1110")
trustnames <- read.csv("trust_names.csv")
gis @ proj4string <- trustpoly @ proj4string

# which points are in which trust area:
wk <- sp::over(gis, trustpoly)
# NOTE(review): indexing `wk` rows by Site_OBJECTID assumes the row order of
# `gis` matches OBJECTID — confirm against how `gis` was constructed.
ef <- cbind(ef, wk[ef $ Site_OBJECTID,])

# table trusts against the area they fish in
tab <- table(ef $ TrustName, ef $ Trust)
ord <- order(apply(tab, 2, function(x) max(x)/sum(x)))
write.csv(tab[,rev(ord)], file = "TrustFishings_by_TrustRegions.csv")

# first with hydrometric area
tab <- with(ef, table(HAName, Trust))
ord <- order(apply(tab, 1, function(x) max(x)/sum(x)))
write.csv(tab[rev(ord),], file = "TrustFishings_by_HydroArea.csv")

# Number of trusts with more than one sample in each hydrometric area;
# HAs covered by fewer than two such trusts are listed below.
sngl <- rowSums(tab > 1)
range(sngl)
t(t(sngl[sngl < 2]))
#Annan 1
#Beauly 1
#Firth of Tay Group 1
#Helmsdale Group 1
#Leven (Durnbartonshire) 1
#Lochy (Invernesshire) 1
#Outer Hebrides 1
#Tweed 1
#Ythan Group 1

# Map HAs by trust coverage: 0 trusts (transparent), 1 (red), >1 (lightblue).
hma @ data $ trusts <- sngl[hma @ data $ HAName]
cols <- c("transparent", "red", "lightblue")
colid <- replace(hma @ data $ trusts, hma @ data $ trusts > 1, 2) + 1
#cols <- c("transparent", "red", colorRampPalette(c("lightblue", "darkblue"))(6))
#colid <- hma @ data $ trusts + 1
sp::plot(hma, col = cols[colid])
xy <- coordinates(hma)
text(xy[,1], xy[,2], label = hma @ data $ trusts, cex = 0.5, font = 2)
png(file = "figures/HMA_single_trust_map.png", width = 9, height = 9, units = "in", res = 400)
plot(hma, col = cols[colid])
dev.off()
png(file = "figures/HMA_single_trust_map_wnos.png", width = 9, height = 9, units = "in", res = 400)
plot(hma, col = cols[colid])
text(xy[,1], xy[,2], label = hma @ data $ trusts, cex = 0.5, font = 2)
dev.off()

# look at sites with wide widths from single sampled HAs
HAs <- names(sngl[sngl < 2])[-(4:5)]
ef_sub <- subset(ef, HAName %in% HAs & Water_W > 20)
by(ef_sub, ef_sub $ HAName[drop = TRUE], function(x) unique(x[c("Site_OBJECTID","Site.Name", "Trust","NEAR_X","NEAR_Y")]))
by(ef_sub, ef_sub $ HAName[drop = TRUE], function(x) unique(x[c("Site_OBJECTID","Trust","NEAR_X","NEAR_Y")]))

# plot these up on a map
tweedwater <- rgdal::readOGR("B:/Env_GIS/Tweed_Catchment/Shapes_Tweed","OSMM_Tweed_InWater")
plot(tweedwater)
# NOTE(review): cex = 16 draws very large markers — was pch = 16 (filled
# circle) intended here? Confirm.
points(ef_sub $ NEAR_X, ef_sub $ NEAR_Y, col = "red", cex = 16)
bb <- bbox(cbind(ef_sub $ NEAR_X, ef_sub $ NEAR_Y))

# now with catchments
tab <- with(ef, table(CATCH_ID, Trust))
sngl <- rowSums(tab > 1)
redctm @ data $ trusts <- unname(sngl[paste(redctm @ data $ CATCH_ID)])
redctm @ data $ trusts[is.na(redctm @ data $ trusts)] <- 0
cols <- c("transparent", "red", "lightblue")
colid <- ifelse(redctm @ data $ trusts == 0, 1,
                ifelse(redctm @ data $ trusts == 1, 2, 3))
plot(hma, col = grey(0.9), border = grey(0.9))
plot(redctm, col = cols[colid], add = TRUE)
png(file = "figures/catchment_single_trust_map.png", width = 9, height = 9, units = "in", res = 400)
plot(hma, col = grey(0.8), border = grey(0.8))
plot(redctm, col = cols[colid], add = TRUE)
dev.off()

# now for some barcharts by HA
tab <- with(ef, table(HAName, Trust))
df <- data.frame(HAName = rep(rownames(tab), ncol(tab)),
                 Trust = rep(colnames(tab), each = nrow(tab)),
                 val = c(tab))
df <- df[df $ val > 0,]
library(ggplot2)
library(dplyr)
ggplot(df, aes(HAName, y = val, fill = Trust)) +
  geom_bar(stat = "identity") +
  coord_flip() +
  xlab("") +
  ylab("Number of samples")
library(dplyr)
# Per-HA sample shares; `max` (largest single-trust share) is used to sort
# the bars and later to classify coverage.
df3 <- df %>%
  group_by(HAName) %>%
  mutate(perc=val/sum(val)) %>%
  mutate(max=max(perc))
levls <- paste(unique(df3 $ HAName[order(df3 $ max)]))
df3 $ HANamesort <- factor(df3 $ HAName, levels = levls)
png(file = "figures/coverage1.png", width = 9, height = 6, units = "in", res = 400)
ggplot(df3, aes(HANamesort, y = perc*100, fill = Trust)) +
  geom_bar(stat = "identity", color = "black") +
  coord_flip() +
  xlab("") +
  ylab("Percentage of samples") +
  scale_colour_grey()
dev.off()
png(file = "figures/coverage2.png", width = 9, height = 6, units = "in", res = 400)
ggplot(df3, aes(HANamesort, y = val, fill = Trust)) +
  geom_bar(stat = "identity", color = "black") +
  coord_flip() +
  xlab("") +
  ylab("Number of samples")
dev.off()

# Classify HAs: 1 = single trust supplies all samples, 2 = near-single
# (>95% from one trust and fewer than 10 samples), 3 = well covered,
# 0 = no trust samples at all.
hma @ data $ lev <- 3
tab <- subset(df3, max == 1)
tab $ HAName
hma @ data $ lev[hma @ data $ HAName %in% tab $ HAName] <- 1
tab <- subset(df3, max > 0.95 & val < 10)
tab $ HAName
hma @ data $ lev[hma @ data $ HAName %in% tab $ HAName] <- 2
hma @ data $ lev[hma @ data $ trusts == 0] <- 0
cols <- c("transparent", "red", "orange", "lightblue")
png(file = "figures/HMA_complex_trust_map.png", width = 9, height = 9, units = "in", res = 400)
plot(hma, col = cols[hma @ data $ lev + 1])
dev.off()

# Export the site table for HAs relying on a single (or near-single) sampler.
targets <- as.character(hma @ data $ HAName[hma @ data $ lev %in% 1:2])
keep <- c("Site_OBJECTID", "Dataset", "Width", "Trust", "NEAR_X", "NEAR_Y",
          "Elevation_", "Slope_deg", "Upcatch_km", "Water_A", "Water_W", "Distance_s",
          "Urban", "CTrees", "NCTrees", "Mixed", "Marsh", "Other",
          "DESCRIPTIO", "barrier", "HACode", "HAName", "doy", "totlanduse")
# BUG FIX: the original `subset(ef, HAName %in% targets & keep)` ANDed a
# logical vector with the *character* vector of column names, which errors at
# run time. Filter rows on the logical condition, then select the columns.
wk <- unique(subset(ef, HAName %in% targets)[keep])
write.csv(wk, file = "Sites_1 sampler_per_HydroArea.csv")
|
df2ab8c7d449698481e52dca5a4480fdf918f1c7
|
b7cb6c3b387515f1969278137899a158b75b79ae
|
/json/140.r
|
9c73a3d846c703d1e8bdd65bdc740686885a679f
|
[] |
no_license
|
rweekly/rweekly.org
|
92d8528cde9336cfcf7dfd307de116c61ac73741
|
719d2bff2e16d716d200561384111655d772f829
|
refs/heads/gh-pages
| 2023-09-03T19:17:18.733983
| 2023-09-01T08:49:24
| 2023-09-01T08:49:24
| 59,336,738
| 676
| 559
| null | 2023-09-14T15:33:23
| 2016-05-21T02:03:54
|
R
|
UTF-8
|
R
| false
| false
| 7,808
|
r
|
140.r
|
[
{
"title": "Analyse your bank statements using R",
"href": "https://benjaminlmoore.wordpress.com/2014/01/04/analyse-your-bank-statements-using-r/"
},
{
"title": "Modeling Trick: Impact Coding of Categorical Variables with Many Levels",
"href": "http://www.win-vector.com/blog/2012/07/modeling-trick-impact-coding-of-categorical-variables-with-many-levels/?utm_source=rss&utm_medium=rss&utm_campaign=modeling-trick-impact-coding-of-categorical-variables-with-many-levels"
},
{
"title": "ggplot2: Two Color XY-Area Combo Chart",
"href": "https://learnr.wordpress.com/2009/10/22/ggplot2-two-color-xy-area-combo-chart/"
},
{
"title": "Data science for executives and managers",
"href": "http://www.win-vector.com/blog/2016/10/data-science-for-executives-and-managers/"
},
{
"title": "For fun: Correlation of US State with the number of clicks on online banners",
"href": "https://www.r-statistics.com/2010/02/for-fun-correlation-of-us-state-with-the-number-of-clicks-on-online-banners/"
},
{
"title": "Fun with advanced NBA stats - how to collect data",
"href": "http://junma5.weebly.com/data-blog/fun-with-advanced-nba-stats"
},
{
"title": "Valuation of CDO with equal amount",
"href": "http://mockquant.blogspot.com/2011/01/valuation-of-cdo-with-equal-amount.html"
},
{
"title": "Linear regression from a contingency table",
"href": "http://freakonometrics.hypotheses.org/8580"
},
{
"title": "New data packages",
"href": "https://blog.rstudio.org/2014/07/23/new-data-packages/"
},
{
"title": "Box Plot as Goal Post",
"href": "http://adventuresindata.blogspot.com/2016/05/box-plot-as-goal-post.html"
},
{
"title": "Too Much Parallelism is as Bad",
"href": "http://www.quintuitive.com/2016/05/08/much-parallelism-bad/"
},
{
"title": "Photos of the third Milano R net meeting",
"href": "http://www.milanor.net/blog/photos-of-the-third-milano-r-net-meeting/"
},
{
"title": "Eight Christmas Gift Ideas for the Statistically Interested",
"href": "http://www.sumsar.net/blog/2014/12/christmas-gift-ideas-for-the-statistically-interested/"
},
{
"title": "Testing R Packages",
"href": "http://yihui.name/en/2013/09/testing-r-packages/"
},
{
"title": "Measuring performance of functions in R",
"href": "https://web.archive.org/web/http://www.ai.rug.nl/~jpborst/macsci/files/r_performance.php"
},
{
"title": "Convenience Sample, SRS, and Stratified Random Sample Compared",
"href": "https://r-norberg.blogspot.com/2013/02/in-class-today-we-were-discussing.html"
},
{
"title": "Maps, Geocoding, and the R User Conference 2010",
"href": "http://www.r-chart.com/2010/07/maps-geocoding-and-r-user-conference.html"
},
{
"title": "RSiteCatalyst Version 1.4.1 Release Notes",
"href": "http://randyzwitch.com/rsitecatalyst-version-1-4-1-release-notes/"
},
{
"title": "Documenting Rβs Connections Internals",
"href": "http://biostatmatt.com/archives/551"
},
{
"title": "Hadley Wickhamβs R Development Master Class coming to SF",
"href": "http://blog.revolutionanalytics.com/2011/05/hadley-wickhams-r-development-master-class-coming-to-sf.html"
},
{
"title": "Which countries have Regrexit?",
"href": "http://realizationsinbiostatistics.blogspot.com/2016/06/which-countries-have-regrexit.html"
},
{
"title": "Convenient access to Gapminderβs datasets from R",
"href": "http://factbased.blogspot.com/2012/07/convenient-access-to-gapminder-datasets.html"
},
{
"title": "A statistical project bleg (urgent-ish)",
"href": "http://simplystatistics.tumblr.com/post/34098460201/a-statistical-project-bleg-urgent-ish"
},
{
"title": "Animation in R",
"href": "http://nycdatascience.com/basicanimationinr/"
},
{
"title": "R: k-Means Clustering on Imaging",
"href": "http://alstatr.blogspot.com/2014/09/r-k-means-clustering-on-image.html"
},
{
"title": "the density that did not existβ¦",
"href": "https://xianblog.wordpress.com/2015/01/27/the-density-that-did-not-exist/"
},
{
"title": "Mango EARL Competition entries",
"href": "http://www.mango-solutions.com/wp/2015/08/mango-earl-competition-entries/"
},
{
"title": "Warning: Clusters May Appear More Separated in Textbooks than in Practice",
"href": "http://joelcadwell.blogspot.com/2014/03/warning-clusters-may-appear-more_23.html"
},
{
"title": "N-Way ANOVA",
"href": "http://statistical-research.com/n-way-anova/?utm_source=rss&utm_medium=rss&utm_campaign=n-way-anova"
},
{
"title": "Venturing in text mining with βRβ",
"href": "http://saptarsigoswami.blogspot.com/2012/10/venturing-in-r-for-text-mining.html"
},
{
"title": "DO Something Nifffty with R",
"href": "http://rud.is/b/2015/06/19/do-something-nifffty-with-r/"
},
{
"title": "Block diagonal matrices in R",
"href": "http://chrisladroue.com/2011/04/block-diagonal-matrices-in-r/"
},
{
"title": "Cricket β opinions and facts",
"href": "http://www.rcasts.com/2010/09/cricket-opinions-and-facts.html"
},
{
"title": "help resources for r: the r-help mailing list, ucla academic technology services, and r-bloggers",
"href": "http://www.twotorials.com/2012/03/help-resources-for-r-r-help-mailing.html"
},
{
"title": "Scenario analysis and trading options using R",
"href": "http://quant-day.blogspot.com/2013/06/scenario-analysis-and-trading-options.html"
},
{
"title": "Halo Effects and Multicollinearity: Separating the General from the Specific",
"href": "http://joelcadwell.blogspot.com/2012/08/halo-effects-and-multicollinearity.html"
},
{
"title": "broom: a package for tidying statistical models into data frames",
"href": "http://varianceexplained.org/r/broom-intro/"
},
{
"title": "Thoroughly Unusual Itches",
"href": "http://www.r-chart.com/2010/07/thoroughly-unusual-itches.html"
},
{
"title": "Creating svg graphics for web publishing",
"href": "http://menugget.blogspot.com/2011/07/creating-svg-graphics-for-web.html"
},
{
"title": "Simplifying spatial polygons in R",
"href": "http://www.geotheory.org/"
},
{
"title": "Dynamical systems in R with simecol",
"href": "http://www.magesblog.com/2012/06/dynamical-systems-in-r-with-simecol.html"
},
{
"title": "Using integer programming in R to optimize cargo loads",
"href": "http://blog.revolutionanalytics.com/2012/07/using-integer-programming-in-r-to-optimize-cargo-loads.html"
},
{
"title": "RcppArmadillo 0.2.11",
"href": "http://dirk.eddelbuettel.com/blog/2011/01/08/"
},
{
"title": "Aquamacs customizations (auctex, ESS)",
"href": "https://web.archive.org/web/http://jackman.stanford.edu/blog/?p=1863"
},
{
"title": "Loss Developments via Growth Curves and Stan",
"href": "http://www.magesblog.com/2015/11/loss-developments-via-growth-curves-and.html"
},
{
"title": "IBM DataScientistWorkBench = OpenRefine + RStudio + Jupyter Notebooks in the Cloud, Via Your Browser",
"href": "https://blog.ouseful.info/2015/12/18/ibm-datascientistworkbench-openrefine-rstudio-jupyter-notebooks/"
},
{
"title": "Deep Learning with MXNetR",
"href": "https://web.archive.org/web/http://dmlc.ml/rstats/2015/11/03/training-deep-net-with-R.html"
},
{
"title": "Tools",
"href": "https://theaverageinvestor.wordpress.com/2010/03/18/tools/"
},
{
"title": "NetLogo-R-Extension",
"href": "http://sgsong.blogspot.com/2010/08/netlogo-r-extension.html"
},
{
"title": "Map-reduce in R with Amazon EMR",
"href": "https://feedproxy.google.com/~r/RUserGroups/~3/P6FsOxmi44g/"
}
]
|
f7f7570d3c4d8ebe58def170e1b2d7cd7202d0d3
|
2dcdf36c0659cfba370baf2445addb503aa6153d
|
/R/fips.R
|
c76f066e3c9554190fc85fa5f269000373f8b8e8
|
[] |
no_license
|
shixinliang1996/drat
|
ae911595841858266a6cc85ebfd38260f638d1e8
|
91f16201e577892da9ee0c12e8bf04aeff25ce90
|
refs/heads/master
| 2022-11-07T04:07:47.343522
| 2020-07-07T04:45:31
| 2020-07-07T04:45:38
| 277,713,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
fips.R
|
#' FIPS dataset
#'
#' @description A FIPS dataset with 3 columns: FIPS codes, county names,
#'   and state names.
#'
#' @docType data
#' @usage data(fips)
#' @keywords datasets
#'
#' @examples
#' data(fips)
#'
"fips"
|
a0a2e3fbf5c4f0839c354995b8eefc4727f9007c
|
0f9566f6abf18fecfe701259bdb80d78e77a79ba
|
/misc_demo/Makevars.R
|
b15a7e62e0ad21f163b17ddebcb5be8b6d66345c
|
[] |
no_license
|
mrgsolve/examples
|
97ffd3ead1fcaa8a12e5233d620ed215354b0088
|
dd9c71b9d5510a94ed4f34080404c4bd2ca5adf4
|
refs/heads/master
| 2020-05-21T20:48:16.381728
| 2018-09-03T21:01:21
| 2018-09-03T21:01:21
| 61,402,241
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 51
|
r
|
Makevars.R
|
# Extra preprocessor flags so the compiler can find the Rcpp headers.
# NOTE(review): this is a hard-coded absolute path into one user's library and
# will not build elsewhere — consider `Rcpp:::CxxFlags()` or a LinkingTo entry
# in DESCRIPTION instead. Confirm before changing.
PKG_CPPFLAGS=-I/Users/kyleb/Rlibs/lib/Rcpp/include/
|
3979a92304ecfe8fb3e1007a26bc91f0bf5d6ba6
|
6d78255656cd195f23495a5bd7b8ed9185030e39
|
/man/c.Intervals.Rd
|
fc4f0c1aa0022249f8e84a495babe03869fba384
|
[] |
no_license
|
edzer/intervals
|
e84b15556eeac2013a0f6614d53284c4a6bf506a
|
283fa40dd0179481d5bcaaac940c78680a5b4263
|
refs/heads/master
| 2023-07-10T09:31:05.270554
| 2023-06-27T13:22:13
| 2023-06-27T13:22:13
| 24,235,326
| 9
| 6
| null | 2023-09-06T16:40:13
| 2014-09-19T15:36:50
|
R
|
UTF-8
|
R
| false
| false
| 1,754
|
rd
|
c.Intervals.Rd
|
\name{c}
\alias{c}
\alias{c.Intervals}
\alias{c.Intervals_full}
\title{Combine different interval matrix objects}
\description{
S3 methods for concatenating sets of intervals into a single set.
}
\usage{
\S3method{c}{Intervals}(...)
\S3method{c}{Intervals_full}(...)
}
\arguments{
\item{...}{\code{"Intervals"} or \code{"Intervals_full"} objects.}
}
\details{
All objects are expected to have the same value in the \code{type}
slot. If the \code{closed} slots differ for
\code{"\linkS4class{Intervals}"} objects and \code{type == "Z"}, the
objects will be adjusted to have \code{closed} values matching that of
\code{x}; if \code{type == "R"}, however, then all objects must first
be coerced to class \code{"\linkS4class{Intervals_full}"}, with a
warning. This coercion also occurs when a mixture of object types is
passed in. A \code{NULL} in any argument is ignored.
}
\value{
A single \code{"\linkS4class{Intervals}"} or
\code{"\linkS4class{Intervals_full}"} object. Input objects are
  concatenated in their order of appearance in the argument list.
If any input argument is not a set of intervals, \code{list(...)} is
returned instead.
}
\note{
These methods will be converted to S4 once the necessary dispatch on
\code{...} is supported.
}
\examples{
f1 <- Intervals( 1:2, type = "Z" )
g1 <- open_intervals( f1 + 5 )
# Combining Intervals objects over Z may require closure adjustment
c( f1, g1 )
f2 <- f1; g2 <- g1
type( f2 ) <- type( g2 ) <- "R"
# Combine Intervals objects over R which have different closure requires
# coercion
h <- c( f2, g2 )
# Coercion for mixed combinations as well
c( h, g2 + 10 )
\dontrun{
# Combining different types is not permitted
c( h, g1 + 10 )
}
}
|
eae45272a231e77f9f4c1f18d41d7c23ebb18f8a
|
eaba0d86b6bdc8fb8696f2dc232f67676bfb6019
|
/CLASS/WK1/RCrashCourse.R
|
56fbd269b541671737e0141efec7b234da0dd87c
|
[
"MIT"
] |
permissive
|
andrewcistola/PHC6194-Spatial_Epidemiology
|
c954da0fc9c1d94079131c2d2dfd79630a62166a
|
02b83f0f634ed476a702ec633a7ccaf67608c4f4
|
refs/heads/master
| 2023-04-11T12:32:32.634660
| 2021-04-28T00:01:57
| 2021-04-28T00:01:57
| 329,118,212
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,582
|
r
|
RCrashCourse.R
|
#R crash course
#Adapted from https://billpetti.github.io/Crash_course_in_R/
#setup a working directory
setwd("/home/vmuser/Desktop/PHC6194SPR2021/WK1")
#assign objects
foo <- "hello world!"
foo
#case-sensitive (the next line errors on purpose: Myvar was never defined)
myvar <- 1
Myvar
myvar
#Comments (the next line errors on purpose: the assignment is commented out)
#foo2 <- "hello world!"
foo2
#data structures
#vector
x <- c(1,2,3,4,5)
x
firstNames <- c("Shinji","Aska","Rey","Misato")
firstNames
#access different parts of the vector
firstNames[3]
#explore the structure of a vector
str(firstNames)
#factors
gender <- c("f","f","f","m","m","m")
gender <- as.factor(gender)
str(gender)
#lists - a sequence of elements of different types
myList <- list(x=x,firstNames=firstNames,gender=gender)
myList
#access specific elements within the list
myList[[1]]
myList$x
myList[["x"]]
#data frames
franchise <- c("Mets", "Nationals", "Marlins", "Phillies", "Braves")
city <- c("New York", "Washington, DC", "Miami", "Philadelphia", "Atlanta")
teams <- data.frame(franchise, city)
teams
#names of variables
colnames(teams)
#functions
x <- c(1, 2, 3, 4, 5)
x
our.mean <- function(x){
  return(sum(x) / length(x))
}
x
mean(x) #built-in mean
our.mean(x) #self-defined mean
mean(x) == our.mean(x) #check whether these two values are equivalent
# a more complex function
our.summary <- function(x) {
  mean <- mean(x)
  median <- median(x)
  standard_deviation <- sd(x)
  foo <- cbind(mean, median, standard_deviation)
  return(foo)
}
our.summary(x)
#packages
install.packages("ggplot2") #install
library(ggplot2) #load
detach("package:ggplot2") #unload
#read data
dat1 <- read.csv("dat1.csv",header = TRUE, na.strings="NA")
str(dat1)
dat1 <- read.csv("dat1.csv",header = TRUE, na.strings="NA",stringsAsFactors = FALSE)
str(dat1)
head(dat1)
#write data
write.csv(dat1,"dat2.csv",row.names = F)
#data manipulation
data(iris) #load the built-in data "iris"
head(iris)
head(iris$Sepal.Length)
#an easier way to access more than one variables
head(with(iris,Sepal.Length/Sepal.Width))
iris$sepal_length_width_ratio <- with(iris, Sepal.Length / Sepal.Width)
head(iris)
iris$sepal_length_width_ratio <- round(iris$sepal_length_width_ratio, 2)
head(iris)
#categorize a variable into tertiles
iris$sepal_length_width_ratioG<-cut(iris$sepal_length_width_ratio,breaks=quantile(iris$sepal_length_width_ratio,probs=c(0,0.33,0.67,1)),include.lowest = T)
str(iris)
#convert factor into numeric
iris$sepal_length_width_ratioG<-as.numeric(iris$sepal_length_width_ratioG)
str(iris)
#quick summary of a variable
summary(iris$sepal_length_width_ratio)
#subsetting data
unique(iris$Species)
sub_virginica <- subset(iris, Species == "virginica")
head(sub_virginica)
unique(sub_virginica$Species)
ex_virginica <- subset(iris, Species != "virginica")
unique(ex_virginica$Species)
sub_virginica2 <- subset(iris, Species != "virginica" & sepal_length_width_ratio >= 2)
head(sub_virginica2)
#select specific variables
head(iris[,c(1,3)])
#select specific cases
iris[c(1:6),]
#basic descriptive and summary statistics
data(airquality)
summary(airquality)
#dplyr package
if (!require(dplyr)) {
  # BUG FIX: install.packages() needs a quoted package name; the bare symbol
  # `dplyr` errored with "object 'dplyr' not found". Also attach the package
  # after installing, since the require() above already returned FALSE.
  install.packages("dplyr")
  library(dplyr)
}
with(iris,table(Species,Petal.Width))
table(iris$Species,iris$Petal.Width)
with(iris, table(Species, Petal.Width)) %>% prop.table()
with(iris, table(Species, Petal.Width)) %>% prop.table(margin = 1) #row frequencies
with(iris, table(Species, Petal.Width)) %>% prop.table(margin = 2) #column frequencies
cross_column<-with(iris, table(Species, Petal.Width)) %>% prop.table(margin = 2) %>% as.data.frame.matrix()
#basic statistics and modeling
dat2<-read.csv("survey_sample_data.csv",header = T, stringsAsFactors = F)
str(dat2)
#correlation
cor(dat2$Q1,dat2$Q2,use="pairwise.complete.obs")
cor(dat2[,c(2:19)], use = "pairwise.complete.obs")
round(cor(dat2[,c(2:19)], use = "pairwise.complete.obs"),3)
#linear regression
head(iris)
iris_lm <-lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width+ factor(Species),data=iris)
summary(iris_lm)
iris_lm$coefficients
confint(iris_lm)
#visualization
#histogram
hist(dat2$Q2)
hist(dat2$Q2, main = "Frequency of Responses to Q2", xlab = "Response Value",breaks = c(0.0, 1.0, 2.0, 3.0, 4.0, 5.0))
#scatterplots
plot(Sepal.Length~Sepal.Width,data=iris)
#boxplots
boxplot(iris$sepal_length_width_ratio)
#use ggplot2 to enhance visualizations
if (!require(ggplot2)) {
  # BUG FIX: quote the package name (see dplyr note above) and attach it.
  install.packages("ggplot2")
  library(ggplot2)
}
#better scatterplots for iris
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point()
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species)) #different color by species
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio)) #differnet size proportional to sepal length width ratio
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) #add some transparency
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3))+stat_smooth() #add a trend line
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species) #separate plots by species
#density plots for dat2
if (!require(reshape2)) {
  # BUG FIX: quote the package name (see dplyr note above) and attach it.
  install.packages("reshape2")
  library(reshape2)
}
dat2_melt<-melt(dat2[,c(2:19)])
ggplot(dat2_melt,aes(value))+geom_density()+facet_wrap(~variable)
#boxplots
ggplot(iris,aes(y=sepal_length_width_ratio,x=Species))+geom_boxplot()
#customize
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species)
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species) + ggtitle("Sepal Length versus Sepal Width") + xlab("Sepal Width")+ylab("Sepal Length") #add title and labels
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species) + ggtitle("Sepal Length versus Sepal Width") + xlab("Sepal Width")+ylab("Sepal Length") + theme(axis.title=element_text(face="bold",size=14),plot.title=element_text(face="bold",size=16)) #resize and bold
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species) + ggtitle("Sepal Length versus Sepal Width") + xlab("Sepal Width")+ylab("Sepal Length") + theme(axis.title=element_text(face="bold",size=14),plot.title=element_text(face="bold",size=16))+ theme(panel.background=element_blank()) #remove the shading
ggplot(iris,aes(x=Sepal.Width,y=Sepal.Length))+geom_point(aes(color=Species,size=sepal_length_width_ratio,alpha=.3)) +stat_smooth() +facet_wrap(~Species) + ggtitle("Sepal Length versus Sepal Width") + xlab("Sepal Width")+ylab("Sepal Length") + theme(axis.title=element_text(face="bold",size=14),plot.title=element_text(face="bold",size=16))+ theme(panel.background=element_blank()) + scale_color_brewer(palette="Greens") #use custom color palettes
#correlation heatmap
dat2_cor_melt<-melt(cor(dat2[,c(2:19)],use="pairwise.complete.obs"))
dat2_cor_melt$value<-round(dat2_cor_melt$value,2)
head(dat2_cor_melt)
ggplot(dat2_cor_melt, aes(Var1, Var2)) + geom_tile(aes(fill = value)) + geom_text(aes(label=value), size = 3, fontface = "bold") + scale_fill_gradient2(low = "#67a9cf", high = "#ef8a62") +theme_minimal() +theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(),panel.background = element_blank(), axis.title = element_blank(), axis.text = element_text(size = 12, face = "bold"))
#remove 1d
dat2_cor_melt<-dat2_cor_melt[dat2_cor_melt$value!=1,]
ggplot(dat2_cor_melt, aes(Var1, Var2)) + geom_tile(aes(fill = value)) + geom_text(aes(label=value), size = 3, fontface = "bold") + scale_fill_gradient2(low = "#67a9cf", high = "#ef8a62") +theme_minimal() +theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(),panel.background = element_blank(), axis.title = element_blank(), axis.text = element_text(size = 12, face = "bold"))
#remove the labels
ggplot(dat2_cor_melt, aes(Var1, Var2)) + geom_tile(aes(fill = value)) + scale_fill_gradient2(low = "#67a9cf", high = "#ef8a62") +theme_minimal() +theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.border = element_blank(),panel.background = element_blank(), axis.title = element_blank(), axis.text = element_text(size = 12, face = "bold"))
|
c0e16e06c6cb2f6e2da34b568c870ce7b0dd7ea0
|
8b7b4a096cabade05415edc75f8a5adce62b1576
|
/man/plot_techmix.Rd
|
c79bc148ed956986a69331272b5c93474a74528e
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/r2dii.plot.static
|
0350b9fe0952be30d14bf8868ba3816adde6f63b
|
fcb89716c1668774cda5f0bf1ba10af9a0443057
|
refs/heads/master
| 2023-05-05T22:37:08.046611
| 2021-05-28T15:03:22
| 2021-05-28T15:03:22
| 327,862,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,193
|
rd
|
plot_techmix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_techmix.R
\name{plot_techmix}
\alias{plot_techmix}
\title{Create a techmix chart in a ggplot object}
\usage{
plot_techmix(
data,
metric_type_order = NULL,
metric_type_labels = NULL,
tech_colours = NULL
)
}
\arguments{
\item{data}{Filtered input data, with columns: technology, metric_type,
metric and value.}
\item{metric_type_order}{Vector with the order of bars based on 'metric_type'
values.}
\item{metric_type_labels}{Vector with labels for bars. Order must follow that
in 'metric type order'.}
\item{tech_colours}{Dataframe containing colours per technology, with
columns: technology, colour, label(optional).}
}
\description{
The function returns a ggplot object containing a stacked bar chart showing a
technology mix for different categories (portfolio, scenario, benchmark,
etc.).
}
\examples{
data <- prepare_for_techmix_chart(
process_input_data(example_data),
sector_filter = "power",
years_filter = c(2020, 2025),
region_filter = "global",
scenario_source_filter = "demo_2020",
scenario_filter = "sds",
value_to_plot = "technology_share"
)
print(
plot_techmix(data)
)
}
|
25c22e4d7f63d981614650b75fa54b85a6d083f2
|
75fede61f86b20ac700294f351b29eccbb463964
|
/tests/testthat/test-gwasinput.R
|
e9ea71ba3eb3ac3a8ed817d346bbd069a4dcc261
|
[] |
no_license
|
USCbiostats/GxEScanR
|
253b273041c491ac11bc283fca12b852f1c710b9
|
8ba014f5d4be480a9c3d69caed2f872b934cd271
|
refs/heads/master
| 2021-06-03T17:45:54.660174
| 2020-10-03T19:25:25
| 2020-10-03T19:25:25
| 112,395,986
| 8
| 1
| null | 2021-03-19T18:25:07
| 2017-11-28T22:21:21
|
R
|
UTF-8
|
R
| false
| false
| 12,654
|
r
|
test-gwasinput.R
|
test_that("gwas input", {
##############################################################
# Data sets with good values
##############################################################
# Data for phenotype/covariate data
substouse <- c(1,2,4,8,16,32,64,128,256)
sid <- paste(rep("I", length(substouse)), substouse, sep = "")
fid <- paste(rep("F", length(substouse)), substouse, sep = "")
y = c(0, 0, 0, 0, 1, 1, 1, 1, 1)
x = c(1, 2, 4, 3, 2, 5, 3, 4, 5)
# Subject and genetic data sets using only subject IDs
data <- data.frame(sid = sid,
y = y,
x = x,
stringsAsFactors = FALSE)
bdinfofile <- system.file("extdata", "pdata_4_1.bdinfo", package = "GxEScanR")
bdinfo <- readRDS(bdinfofile)
bdinfo$filename <- system.file("extdata", "pdata_4_1.bdose", package = "GxEScanR")
# Large bdinfo data for testing blksize
bdinfofile <- system.file("extdata", "largebdinfo.rds", package = "GxEScanR")
bdinfobig <- readRDS(bdinfofile)
# Subject and genetic data sets using subject IDs and family IDs
dataf <- data.frame(fid = fid,
sid = sid,
y = y,
x = x,
stringsAsFactors = FALSE)
bdinfof <- bdinfo
bdinfof$usesfid <- TRUE
bdinfof$samples$fid <- paste(rep("F", nrow(bdinfo$samples)),
1:nrow(bdinfo$samples),
sep = "")
# Other data values for testing that are valid
outfile <- ""
minmaf <- 0.05
blksize <- 0
binary <- FALSE
##############################################################
# Testing the genetic data
##############################################################
# Testing if bdinfo has information about genetic data
expect_error(gwas(data = data,
bdinfo = 1,
outfile = outfile),
"bdinfo not a genetic-info class")
# Testing bdinfo is information about a binary dosage file
addinfoclass <- class(bdinfo$additionalinfo)
class(bdinfo$additionalinfo) <- "xyz"
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile),
"bdinfo does not have information about a binary dosage file")
class(bdinfo$additionalinfo) <- addinfoclass
##############################################################
# Testing outfile value
##############################################################
# Testing the outfile value is a string value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = 1),
"outfile must be a character value")
# Testing the outfile value is a single string value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = c("a", "b")),
"outfile must be a character vector of length 1")
# Testing the skipfile value is a string value
expect_error(gwas(data = data,
bdinfo = bdinfo,
skipfile = 1),
"skipfile must be a character value")
# Testing the skipfile value is a single string value
expect_error(gwas(data = data,
bdinfo = bdinfo,
skipfile = c("a", "b")),
"skipfile must be a character vector of length 1")
##############################################################
# Testing minmaf value
##############################################################
# Testing minmaf is a numeric value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
minmaf = "a"),
"minmaf must be a numeric value")
# Testing minmaf is a single numeric value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
minmaf = 1:2),
"minmaf must be a numeric vector of length 1")
# Testing minmaf is a number value from 0 to 0.25 inclusive
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
minmaf = 1.2),
"minmaf must be a value from 0 to 0.25, inclusive")
##############################################################
# Testing blocksize value
##############################################################
# Testing if blocksize is an numeric value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
blksize = "a"),
"blksize must be an integer")
# Testing if blocksize is a single integer value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
blksize = 1:2),
"blksize must be an integer vector of length 1")
# Testing if blksize is an integer value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
blksize = 1.2),
"blksize must be an integer")
# Testing if blksize is an positive integer value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
blksize = -2),
"blksize must be greater than or equal to 0")
# Testing if the blksize is too large
expect_error(gwas(data = data,
bdinfo = bdinfobig,
outfile = outfile,
minmaf = minmaf,
blksize = 10001),
"Requested block size greater than twice the recommended block size")
##############################################################
# Testing binary value
##############################################################
# Checking if binary is a logical value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
binary = 1),
"binary must be a logical value")
# Checking if binary is a single logical value
expect_error(gwas(data = data,
bdinfo = bdinfo,
outfile = outfile,
binary = c(FALSE, TRUE)),
"binary must be a logical vector of length 1")
##############################################################
# Testing the subject data
##############################################################
# Testing data is a data frame
expect_error(gwas(data = 1,
bdinfo = bdinfo,
outfile = outfile),
"data must be a data frame")
# Check if subject data has at least two columns
dataerror <- data.frame(sid = data$sid, stringsAsFactors = FALSE)
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = binary),
"There must me at least two columns in the subject data")
# Check if subject data first column is a character value
dataerror <- data
dataerror$sid <- rep(1, nrow(data))
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = binary),
"First column of subject data must be a character value")
# Check if any subjects have complete data
dataerror <- data
dataerror$y[1] <- NA
dataerror$x[2:nrow(data)] <- NA
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = binary),
"No subjects have complete phenotype/covariate data")
# Check if subject data has at least three columns if family id is used
dataerror <- dataf[,1:2]
expect_error(gwas(data = dataerror,
bdinfo = bdinfof,
outfile = outfile,
binary = binary),
"When using family ID, subject data must have at least 3 columns")
# Check if family id is a character value
dataerror <- dataf[,c(1,3:4)]
expect_error(gwas(data = dataerror,
bdinfo = bdinfof,
outfile = outfile,
binary = binary),
"When using family ID, the first two columns must be character values")
# Check if phenotype and covariate values are numeric
dataerror <- data
dataerror$x[1:(nrow(dataerror) - 1)] <- NA
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = binary),
"No subjects have complete data")
# Check if there are two phenotype values
dataerror <- data
dataerror$y[1] <- 9
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = TRUE),
"When using a binary outcome there must only be two values")
# Check if the two phenotype values are 0,1
dataerror <- data
dataerror$y <- dataerror$y + 1
expect_error(gwas(data = dataerror,
bdinfo = bdinfo,
outfile = outfile,
binary = TRUE),
"When using a binary outcome must be coded 0,1")
# Check if snps is a character or positive integer array
expect_error(subsetsnps(snps = TRUE,
snplist = bdinfo$snps$snpid),
"snps must be a character or integer array")
expect_error(subsetsnps(snps = 1.2,
snplist = bdinfo$snps$snpid),
"snps must be a character or integer array")
expect_error(subsetsnps(snps = 0,
snplist = bdinfo$snps$snpid),
"snp indices must be positive")
expect_error(subsetsnps(snps = 10,
snplist = bdinfo$snps$snpid),
"at least one snp index is greater than the number of SNPs available")
expect_error(subsetsnps(snps = "1:10010",
snplist = bdinfo$snps$snpid),
"No matching SNPs found")
expect_error(subsetsnps(snps = character(),
snplist = bdinfo$snps$snpid),
"No SNPs selected")
dataerror <- data[,1:2]
expect_error(gweis(data = dataerror,
bdinfo = bdinfo),
"Subject data has no covariates")
x <- subsetsnps(snps = c("1:10001", "1:10004", "1:10010"),
snplist = bdinfo$snps$snpid)
expect_equal(x[1], TRUE)
expect_equal(x[2], FALSE)
expect_equal(x[3], FALSE)
expect_equal(x[4], TRUE)
expect_equal(x[5], FALSE)
# Checking subsetting of subject data
expect_error(gwas(data = data,
bdinfo = bdinfof),
"When using family ID, the first two columns must be character values")
dataerror <- dataf
dataerror$y <- TRUE
expect_error(gwas(data = dataerror,
bdinfo = bdinfof),
"Phenotype and covariate values must be numeric")
x <- assignblocks(nsub = 10000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 50)
expect_equal(x$snpsperblk, 50)
x <- assignblocks(nsub = 25000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 0)
expect_equal(x$snpsperblk, 100)
x <- assignblocks(nsub = 50000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 0)
expect_equal(x$snpsperblk, 100)
x <- assignblocks(nsub = 100000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 0)
expect_equal(x$snpsperblk, 100)
x <- assignblocks(nsub = 250000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 0)
expect_equal(x$snpsperblk, 100)
x <- assignblocks(nsub = 500000,
nsnps = 100,
snploc = 1:100,
snpbytes = rep(1,100),
reqblksize = 0)
expect_equal(x$snpsperblk, 50)
})
|
bc616e887abe3c04ee221d8934e47f4a0975c953
|
3ae315b04c16c0b282754e98308cf68ca53f3d40
|
/plot1.R
|
5c12e35c462f30c0f795545620a879bf7eb222c5
|
[] |
no_license
|
statdance/ExData_Plotting1
|
58803cf9a3cc1dabe97d7e476a5cea951086ce85
|
5830a5b5e0ff83d426159bb85c0b603b7921a49b
|
refs/heads/master
| 2020-12-26T21:32:37.977860
| 2015-07-11T22:45:35
| 2015-07-11T22:45:35
| 38,942,283
| 0
| 0
| null | 2015-07-11T22:16:58
| 2015-07-11T22:16:58
| null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
plot1.R
|
#########################################################################
## Generates plot1.png: a histogram of household global active power
## consumption over the two days 2007-02-01 and 2007-02-02.
#########################################################################

#########################################################################
## Cleaning the data set
#########################################################################
## Read the semicolon-separated text file; "?" marks missing values,
## so map it to NA on read.
datain <- read.table("household_power_consumption.txt",
                     sep = ";", header = TRUE, na.strings = "?")
## Convert the Date column from character ("d/m/Y") to Date objects.
datain$Date <- as.Date(datain$Date, "%d/%m/%Y")
## Keep only the two days under analysis. %in% is NA-safe (a raw
## == comparison would yield NA rows for any unparsable dates).
datause <- datain[datain$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]

#########################################################################
## Actually generating the plot
#########################################################################
## Open the png device; 480x480 px per the assignment spec.
png(filename = "plot1.png", width = 480, height = 480, units = "px")
## Histogram of global active power over the two days, per the
## instructions. (hist() draws as a side effect; its return value
## is not needed, so it is not captured.)
hist(datause$Global_active_power, breaks = 20,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
## Close the png device so the file is flushed to disk.
dev.off()
#########################################################################
|
ffc9122bda3855fe74756e33c3813642f0bc9505
|
882adae2d0de14b2803f45bc6878e20df218ff73
|
/R/auxiliary_checker.R
|
51949fc347e119eb93bfda1967f5e985223d44a3
|
[] |
no_license
|
cran/RiemStiefel
|
60058c35ddf9f56b1c3092389a4f7903d15ca46c
|
aba8f9b265f1d6b13b23070ea2b3562a89a5b9e6
|
refs/heads/master
| 2021-03-24T05:20:11.959750
| 2020-03-25T15:10:05
| 2020-03-25T15:10:05
| 247,520,833
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
auxiliary_checker.R
|
## internal checkers -----------------------------------------------------
## (1) check_data : coerce Stiefel-valued input into an (n x p x N) array

# (1) check_data
#' Coerce input to a stacked array of Stiefel matrices
#'
#' Runs the input through \code{RiemBase::riemfactory} (which validates
#' it as Stiefel-manifold data) and repacks the resulting list of
#' matrices into a single 3d array of dimension (n x p x N), where N is
#' the number of observations.
#'
#' @keywords internal
#' @noRd
check_data <- function(x){
  wow <- RiemBase::riemfactory(x, name = "stiefel")
  N <- length(wow$data)   # number of observations
  n <- wow$size[1]        # rows of each Stiefel matrix
  p <- wow$size[2]        # columns of each Stiefel matrix
  output <- array(0, c(n, p, N))
  # seq_len() is safe when N == 0, unlike 1:N (which yields c(1, 0))
  for (i in seq_len(N)){
    output[, , i] <- wow$data[[i]]
  }
  return(output)
}
# x = list()
# for (i in 1:3){
# xx = matrix(rnorm(5*3),ncol=3)
# x[[i]] = qr.Q(qr(xx))
# }
|
afc977bb8cc47df5f66d34aec72bfefa24ab97a0
|
986a548805b3d8a51b0b6def90674315c08950a7
|
/man/lolliplot_dodgeCoordX.Rd
|
a74c6c97fc1bdfc7352911b2bd44c9f0b8e66135
|
[
"CC0-1.0"
] |
permissive
|
zlskidmore/GenVisR
|
412f74a79e81d08ec614f66f4e6f4c0e50c11af9
|
4655a3931e106974adfe0cc46a7283f5dbcd79c9
|
refs/heads/master
| 2020-12-11T06:12:46.386370
| 2017-10-26T20:31:05
| 2017-10-26T20:31:05
| 37,474,167
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 997
|
rd
|
lolliplot_dodgeCoordX.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lolliplot_dodgeCoordX.R
\name{lolliplot_dodgeCoordX}
\alias{lolliplot_dodgeCoordX}
\title{dodge coordinates}
\usage{
lolliplot_dodgeCoordX(x, rep.fact = 5000, rep.dist.lmt = 500,
attr.fact = 0.1, adj.max = 0.1, adj.lmt = 0.5, iter.max = 50000)
}
\arguments{
\item{x}{numeric vector of position coordinates on x axis}
\item{rep.fact}{repulsive factor for plotted mutations observed track}
\item{rep.dist.lmt}{repulsive distance limit for plotted mutations observed
track}
\item{attr.fact}{attraction factor for plotted mutations observed track}
\item{adj.max}{maximum position change for each iteration observed track}
\item{adj.lmt}{position adjustment limit below which the simulation stops,
observed track}
\item{iter.max}{maximum iterations beyond which to stop the simulation
observed track}
}
\value{
numeric vector of dodged position coordinates on x axis
}
\description{
given amino acid position dodge on x axis
}
|
865fba6d0699cc69f3ce634fa60fa88d8e086fd9
|
5fd3ddd30766a4eae04069b44bfe4f85f9dfaa40
|
/man/get.RxCuiViaMayprevent.Rd
|
68030ac09a67fa70754676bc871f89b8d2196e5e
|
[] |
no_license
|
Angelacheese/pharm
|
1a18155194cbc6551f12e28083f2a01a347dd0fb
|
9f7d600752641edb30353f4575d89a9db6cc67ab
|
refs/heads/master
| 2022-10-06T03:13:18.425075
| 2019-07-18T08:09:56
| 2019-07-18T08:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
get.RxCuiViaMayprevent.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRxCuiBasedOnMayprevent.R
\name{get.RxCuiViaMayprevent}
\alias{get.RxCuiViaMayprevent}
\title{Get RxCui based on may prevent}
\usage{
get.RxCuiViaMayprevent(strmayprevent)
}
\arguments{
\item{strmayprevent}{A "may prevent" term used to look up RxCui values}
}
\description{
Get RxCui based on may prevent
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.