| column | dtype | stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–327 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–91 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–134 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 | 19.4k – 671M (nullable) |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 (nullable) |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | lengths 1–141 |
| content | string | lengths 7–9.18M |

Each record below is one row of this table: its metadata fields are listed first, followed by the file content.
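As a hedged sketch of how a local shard of a table with this schema could be inspected (the Parquet filename below is hypothetical; arrow::read_parquet() and the dplyr verbs are real):

library(arrow)
library(dplyr)
shard <- read_parquet("data-00000.parquet") # hypothetical local shard of the dataset
shard %>%
  filter(language == "R", is_generated == FALSE) %>% # keep hand-written R files
  select(repo_name, path, length_bytes) %>%
  head()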
---
blob_id: 8beb51b9268aa6db0527119a56c065f3fc9f7121 | directory_id: b8d4abab24473a1f768de227532510f0052e4610 | path: /man/nearSep.Rd | content_id: 5653e9c6850e90010b3bb01a221c47e8d5972473
detected_licenses: ["MIT"] | license_type: permissive | repo_name: momenulhaque/firthGP | snapshot_id: bdfdee91d86761f8dd207b3b2b3544d201dc6733 | revision_id: 29252b7cff04fab7b2d76f142d75697b106a89ee | branch_name: refs/heads/main
visit_date: 2023-04-16T12:22:37.878898 | revision_date: 2022-12-31T20:34:12 | committer_date: 2022-12-31T20:34:12 | github_id: 582,021,086 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 574 | extension: rd | filename: nearSep.Rd
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nearSep.R
\docType{data}
\name{nearSep}
\alias{nearSep}
\title{Simulated count data with a near-to-quasi-separation problem}
\format{
\subsection{\code{nearSep}}{
A data frame with 100 rows and 3 columns:
\describe{
\item{y}{the response variable, count data}
\item{x1}{a continuous variable}
\item{x2}{a binary variable that causes the near-to-quasi-separation problem}
}
}
}
\usage{
nearSep
}
\description{
Simulated count data with a near-to-quasi-separation problem.
}
\keyword{datasets}
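For intuition, near-to-quasi-separation here means the binary x2 almost perfectly determines whether the count response is zero, which destabilizes standard GLM estimates. A minimal illustrative sketch of data with this structure (not the actual simulation used in firthGP):

set.seed(42)
n  <- 100
x1 <- rnorm(n)
x2 <- rbinom(n, 1, 0.5)
y  <- rpois(n, lambda = exp(-4 + 0.5 * x1 + 6 * x2)) # y is nearly always 0 when x2 == 0
sim <- data.frame(y = y, x1 = x1, x2 = x2)
table(y_zero = sim$y == 0, x2 = sim$x2) # zeros concentrate in the x2 == 0 group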
---
blob_id: 5acc0e97731cedf2b171c735d5d7ca3ce1722159 | directory_id: 4c0394633c8ceb95fc525a3594211636b1c1981b | path: /R/reexports.R | content_id: e1d98ca9e5c19ca74a6960b9b569ac409db0550c
detected_licenses: ["MIT"] | license_type: permissive | repo_name: markfairbanks/tidytable | snapshot_id: 8401b92a412fdd8b37ff7d4fa54ee6e9b0939cdc | revision_id: 205c8432bcb3e14e7ac7daba1f4916d95a4aba78 | branch_name: refs/heads/main
visit_date: 2023-09-02T10:46:35.003118 | revision_date: 2023-08-31T19:16:36 | committer_date: 2023-08-31T19:16:36 | github_id: 221,988,616 | star_events_count: 357 | fork_events_count: 33
gha_license_id: NOASSERTION | gha_event_created_at: 2023-09-12T20:07:14 | gha_created_at: 2019-11-15T19:20:49 | gha_language: R
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 988 | extension: r | filename: reexports.R
content:
# tidyselect ------------------------
#' @export
tidyselect::starts_with
#' @export
tidyselect::contains
#' @export
tidyselect::ends_with
#' @export
tidyselect::everything
#' @export
tidyselect::any_of
#' @export
tidyselect::all_of
#' @export
tidyselect::matches
#' @export
tidyselect::num_range
#' @export
tidyselect::last_col
#' @export
tidyselect::where
# data.table ------------------------
#' @export
data.table::data.table
#' @export
data.table::fwrite
#' @export
data.table::getDTthreads
#' @export
data.table::setDTthreads
#' @export
data.table::`%between%`
#' @export
data.table::`%like%`
#' @export
data.table::`%chin%`
# rlang ------------------------
#' @export
rlang::enexpr
#' @export
rlang::enexprs
#' @export
rlang::enquo
#' @export
rlang::enquos
#' @export
rlang::expr
#' @export
rlang::exprs
#' @export
rlang::quo
#' @export
rlang::quos
#' @export
rlang::sym
#' @export
rlang::syms
# pillar ------------------------
#' @export
pillar::glimpse
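# The "@export" tags above re-export functions from tidyselect, data.table, rlang, and
# pillar, so tidytable users can call them without attaching those packages themselves;
# roxygen2 turns each tag pair into export()/importFrom() directives in the NAMESPACE
# file. A minimal sketch of the effect (assuming tidytable is installed):
library(tidytable)
exists("glimpse", envir = asNamespace("tidytable")) # TRUE: glimpse() is re-exported from pillar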
---
blob_id: e68a420a0678713baf1700393998367d39933264 | directory_id: e33c27c79295195487163817be57bff73c4a6526 | path: /R/tempr.R | content_id: faf050d188404fc718bdc4e600fad87d54959799
detected_licenses: [] | license_type: no_license | repo_name: cran/tempR | snapshot_id: 082e33c6ad7bfb2857f475f0eb1e7c3bc78a1a20 | revision_id: d223bd869d90a0155c4e4a04b4b94ba02587ff40 | branch_name: refs/heads/master
visit_date: 2022-02-28T10:46:59.262280 | revision_date: 2022-02-18T21:20:02 | committer_date: 2022-02-18T21:20:02 | github_id: 62,116,242 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 287 | extension: r | filename: tempr.R
content:
# tempR
#' @name tempR
#' @aliases tempR
#' @docType package
#' @title tempR
#' @description Analysis and visualization of data from temporal sensory methods, including for temporal check-all-that-apply (TCATA) and temporal dominance of sensations (TDS).
#' @encoding UTF-8
NULL
---
blob_id: 6a7701007c23e80284f022887c091df4880c5f7f | directory_id: ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | path: /data/genthat_extracted_code/clusternomics/vignettes/using-clusternomics.R | content_id: 0c7c1c0d54ba305f50427a9cc28bfaaa903a25bc
detected_licenses: [] | license_type: no_license | repo_name: surayaaramli/typeRrh | snapshot_id: d257ac8905c49123f4ccd4e377ee3dfc84d1636c | revision_id: 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | branch_name: refs/heads/master
visit_date: 2023-05-05T04:05:31.617869 | revision_date: 2019-04-25T22:10:06 | committer_date: 2019-04-25T22:10:06 | github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 4,843 | extension: r | filename: using-clusternomics.R
content:
## ---- echo=FALSE, message=FALSE------------------------------------------
# Load dependencies
library(plyr)
library(magrittr)
library(ggplot2)
library(clusternomics)
## ------------------------------------------------------------------------
set.seed(1)
# Number of elements in each cluster, follows the table given above
groupCounts <- c(50, 10, 40, 60)
# Centers of clusters
means <- c(-1.5,1.5)
# Helper function to generate test data
testData <- generateTestData_2D(groupCounts, means)
datasets <- testData$data
## ---- fig.width=6--------------------------------------------------------
qplot(datasets[[1]][,1], datasets[[1]][,2], col=factor(testData$groups)) +
geom_point(size=3) +
ggtitle("Context 1") + xlab("x") + ylab("y") +
scale_color_discrete(name="Cluster")
## ---- fig.width=6--------------------------------------------------------
qplot(datasets[[2]][,1], datasets[[2]][,2], col=factor(testData$groups)) +
geom_point(size=3) +
ggtitle("Context 2") + xlab("x") + ylab("y") +
scale_color_discrete(name="Cluster")
## ------------------------------------------------------------------------
# Setup of the algorithm
dataDistributions <- 'diagNormal'
# Pre-specify number of clusters
clusterCounts <- list(global=10, context=c(3,3))
# Set number of iterations
# The following is ONLY FOR SIMULATION PURPOSES
# Use larger number of iterations for real-life data
maxIter <- 300
burnin <- 200
lag <- 2 # Thinning of samples
## ----runSampling, message=F----------------------------------------------
# Run context-dependent clustering
results <- contextCluster(datasets, clusterCounts,
maxIter = maxIter, burnin = burnin, lag = lag,
dataDistributions = 'diagNormal',
verbose = F)
# Extract resulting cluster assignments
samples <- results$samples
# Extract global cluster assignments for each MCMC sample
clusters <-
laply(1:length(samples), function(i) samples[[i]]$Global)
## ---- fig.width=6--------------------------------------------------------
logliks <- results$logliks
qplot(1:maxIter, logliks) + geom_line() +
xlab("MCMC iterations") +
ylab("Log likelihood")
## ------------------------------------------------------------------------
wrongClusterCounts <- list(global=2, context=c(2,1))
worseResults <-
contextCluster(datasets, wrongClusterCounts,
maxIter = maxIter, burnin = burnin, lag = lag,
dataDistributions = 'diagNormal',
verbose = F)
print(paste('Original model has lower (better) DIC:', results$DIC))
print(paste('Worse model has higher (worse) DIC:', worseResults$DIC))
## ---- fig.width=6--------------------------------------------------------
cc <- numberOfClusters(clusters)
qplot(seq(from=burnin, to = maxIter, by=lag), cc) +
geom_line() + xlab("MCMC iterations") + ylab("Number of clusters")
## ---- fig.width=6--------------------------------------------------------
clusterLabels <- unique(clusters %>% as.vector)
sizes <- matrix(nrow=nrow(clusters), ncol=length(clusterLabels))
for (ci in 1:length(clusterLabels)) {
sizes[,ci] <- rowSums(clusters == clusterLabels[ci])
}
sizes <- sizes %>% as.data.frame
colnames(sizes) <- clusterLabels
boxplot(sizes,xlab="Global combined clusters", ylab="Cluster size")
## ------------------------------------------------------------------------
clusteringResult <- samples[[length(samples)]]
## ---- message=F, fig.width=5, fig.height=5-------------------------------
# Compute the co-clustering matrix from global cluster assignments
coclust <- coclusteringMatrix(clusters)
# Plot the co-clustering matrix as a heatmap
require(gplots)
mypalette <- colorRampPalette(rev(c('#d7191c','#fdae61','#ffffbf','#abd9e9','#4395d2')),
space = "Lab")(100)
h <- heatmap.2(
coclust,
col=mypalette, trace='none',
dendrogram='row', labRow='', labCol='', key = TRUE,
keysize = 1.5, density.info=c("none"),
main="MCMC co-clustering matrix",
scale = "none")
## ------------------------------------------------------------------------
diag(coclust) <- 1
fit <- hclust(as.dist(1 - coclust))
hardAssignments <- cutree(fit, k=4)
## ---- message=FALSE, fig.width=6-----------------------------------------
aris <- laply(1:nrow(clusters),
function(i) mclust::adjustedRandIndex(clusters[i,], testData$groups)) %>%
as.data.frame
colnames(aris) <- "ARI"
aris$Iteration <- seq(from=burnin, to=maxIter, by=lag)
coclustAri <- mclust::adjustedRandIndex(hardAssignments, testData$groups)
aris$Coclust <- coclustAri
ggplot(aris, aes(x=Iteration, y=ARI, colour="MCMC iterations")) +
geom_point() +
ylim(0,1) +
geom_smooth(size=1) +
theme_bw() +
geom_line(aes(x=Iteration, y=Coclust, colour="Co-clustering matrix"), size=1) +
scale_colour_discrete(name="Cluster assignments")
---
blob_id: cead57b7d664303751d2b67a84c90d5227c362fd | directory_id: da69f2c9ba480b42458978b5d6261140041d948b | path: /30-day-heart-attack-mortality-histogram.R | content_id: bea09470e52374f5eb5a86cd38f5159aded7b777
detected_licenses: [] | license_type: no_license | repo_name: talalf/ProgAssignment3 | snapshot_id: c9932feeffeae69efd949abce5bf21bff6e8feed | revision_id: d5c26bd0b16b9fbb91bbee69805e8d2084897c4b | branch_name: refs/heads/master
visit_date: 2022-11-21T10:37:23.790306 | revision_date: 2020-07-17T17:09:59 | committer_date: 2020-07-17T17:09:59 | github_id: 280,279,558 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 272 | extension: r | filename: 30-day-heart-attack-mortality-histogram.R
content:
##make histogram of 30-day death rates
outcome <- read.csv("data/outcome-of-care-measures.csv",
colClasses = "character")
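# Note: colClasses = "character" is used because the raw column mixes numbers with
# non-numeric placeholders such as "Not Available"; as.numeric() below coerces those
# placeholders to NA (with a coercion warning).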
outcome[ , 11] <- as.numeric(outcome[ , 11])
hist(outcome[ ,11], xlab = "30 Day Mortality", main = "30 Day Mortality Histogram")
---
blob_id: d4dc6660437585785403b8d80caaa01bed36c488 | directory_id: 9e8936a8cc7beae524251c8660fa755609de9ce5 | path: /man/bart-internal.Rd | content_id: 2665bd38069e9b965add5903906ee4010429c23e
detected_licenses: ["MIT"] | license_type: permissive | repo_name: tidymodels/parsnip | snapshot_id: bfca10e2b58485e5b21db64517dadd4d3c924648 | revision_id: 907d2164a093f10cbbc1921e4b73264ca4053f6b | branch_name: refs/heads/main
visit_date: 2023-09-05T18:33:59.301116 | revision_date: 2023-08-17T23:45:42 | committer_date: 2023-08-17T23:45:42 | github_id: 113,789,613 | star_events_count: 451 | fork_events_count: 93
gha_license_id: NOASSERTION | gha_event_created_at: 2023-08-17T23:43:21 | gha_created_at: 2017-12-10T22:48:42 | gha_language: R
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 1,105 | extension: rd | filename: bart-internal.Rd
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bart.R
\name{bart-internal}
\alias{bart-internal}
\alias{bartMachine_interval_calc}
\alias{dbart_predict_calc}
\title{Developer functions for predictions via BART models}
\usage{
bartMachine_interval_calc(new_data, obj, ci = TRUE, level = 0.95)
dbart_predict_calc(obj, new_data, type, level = 0.95, std_err = FALSE)
}
\arguments{
\item{new_data}{A rectangular data object, such as a data frame.}
\item{obj}{A parsnip object.}
\item{ci}{Confidence interval (\code{TRUE}) or prediction interval (\code{FALSE}).}
\item{level}{Confidence level.}
\item{type}{A single character value or \code{NULL}. Possible values
are \code{"numeric"}, \code{"class"}, \code{"prob"}, \code{"conf_int"}, \code{"pred_int"},
\code{"quantile"}, \code{"time"}, \code{"hazard"}, \code{"survival"}, or \code{"raw"}. When \code{NULL},
\code{predict()} will choose an appropriate value based on the model's mode.}
\item{std_err}{Attach column for standard error of prediction or not.}
}
\description{
Developer functions for predictions via BART models
}
\keyword{internal}
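These internals are normally reached through predict() on a fitted parsnip model rather than called directly. A hedged sketch of that path (assuming the parsnip and dbarts packages are installed):

library(parsnip)
spec <- set_engine(bart(mode = "regression"), "dbarts")
fit_bart <- fit(spec, mpg ~ ., data = mtcars)
# predict() picks an appropriate type and dispatches to internals such as dbart_predict_calc()
predict(fit_bart, new_data = mtcars[1:3, ], type = "numeric")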
---
blob_id: 82fe3643336d5a2035693add3cd340a10948a843 | directory_id: 6795df050498702c6ea6440b3907c49e7f425fc4 | path: /R/addl_functions.R | content_id: c81e9e95f4492b1a8aeca02e713495c1bdbd13ab
detected_licenses: [] | license_type: no_license | repo_name: AFIT-R/OPER782 | snapshot_id: a59b566f6309afa24026137df8d526e0273a2eda | revision_id: ccdd58ee60d6b75eefeda0ba655eaa9bf2d9e9fa | branch_name: refs/heads/master
visit_date: 2021-10-24T11:30:39.199842 | revision_date: 2019-03-25T16:45:01 | committer_date: 2019-03-25T16:45:01 | github_id: 112,660,382 | star_events_count: 4 | fork_events_count: 17
gha_license_id: null | gha_event_created_at: 2018-02-26T05:46:27 | gha_created_at: 2017-11-30T21:05:10 | gha_language: HTML
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 369 | extension: r | filename: addl_functions.R
content:
variance <- function(x) {
n <- length(x)
m <- mean(x)
(1/(n - 1)) * sum((x - m)^2)
}
std_dev <- function(x) {
sqrt(variance(x))
}
std_error <- function(x) {
n <- length(x)
sqrt(variance(x) / n)
}
skewness <- function(x) {
n <- length(x)
v <- variance(x)
m <- mean(x)
third.moment <- (1 / (n - 2)) * sum((x - m)^3)
third.moment / (v^(3 / 2))
}
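# Quick illustrative check of the helpers above against base R: std_dev() should
# match sd(), since both use the n - 1 variance.
set.seed(1)
x <- rnorm(50)
c(std_dev(x), sd(x)) # should be identical
std_error(x)         # equals sd(x) / sqrt(length(x))
skewness(x)          # note the 1 / (n - 2) scaling of the third moment above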
---
blob_id: adddcbfa902dcebf10841c9edea33d7a22611e8f | directory_id: c6c975b706b0553ae887507f088ebe1271383bac | path: /Simulation_Bayesian_interim.R | content_id: b9268c7bf07255f771a932121b5332d1b9b3eb4d
detected_licenses: [] | license_type: no_license | repo_name: Jin93/PreliminaryPoC | snapshot_id: 03e5e9aea5ab326fc76653224b723f40b0cccbd6 | revision_id: 20791e7c3125dbe157f37e9c32b4759667afe4cb | branch_name: refs/heads/master
visit_date: 2020-03-31T00:44:54.792926 | revision_date: 2018-10-17T21:14:59 | committer_date: 2018-10-17T21:14:59 | github_id: 151,753,080 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 11,916 | extension: r | filename: Simulation_Bayesian_interim.R
content:
########### This code implements the proposed Bayesian approach after adding an interim analysis for futility stopping.
########### The approach was considered but not adopted in the end; the results were not shown in the manuscript.
library(rjags) # jags.model(), update(), and coda.samples() below come from rjags
load("simulationdata.RData")
K=3
q0=0.2 # historical benchmark response rate
q1=0.3 # target response rate
num.sim=2000 # number of simulations per setting
alpha=0.1 #level of false positive rate we wish to control.
p.sce=t(sapply(0:K,FUN=function(x){c(rep(q1,x),rep(q0,K-x))}))
######## obtained from the minimax Simon's two-stage design from
######## http://cancer.unc.edu/biostatistics/program/ivanova/SimonsTwoStageDesign.aspx
bayesinterimtable=matrix(NA,12,5+2*K+2)
bayesinterimtable[1,1:5]=c(17,35,4,11,21.4)
bayesinterimtable[2,1:5]=c(13,27,2,9,20)
bayesinterimtable[3,1:5]=c(12,27,2,9,18.6)
bayesinterimtable[4,1:5]=c(14,24,3,8,17)
bayesinterimtable[5,1:5]=c(8,24,1,8,15.9)
bayesinterimtable[6,1:5]=c(10,20,2,7,13.2)
bayesinterimtable[7,1:5]=c(11,17,3,6,12)
bayesinterimtable[8,1:5]=c(6,17,1,6,9.8)
bayesinterimtable[9,1:5]=c(3,13,0,5,7.9)
bayesinterimtable[10,1:5]=c(5,10,1,4,6.3)
bayesinterimtable[11,1:5]=c(3,7,0,3,5)
bayesinterimtable[12,1:5]=c(2,7,0,3,3.8)
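# The futility rule used in the simulation loops below (drop indication k at the
# interim when its stage-1 responder count is <= r1) can be sketched as a small
# helper; illustrative only, not part of the original script:
futility_stop <- function(responders_stage1, r1) {
  which(responders_stage1 <= r1) # indices of indications stopped for futility
}
# e.g. futility_stop(c(2, 5, 1), r1 = 2) returns c(1, 3)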
Decision.bayesinterim=list()
for (x in 1:nrow(bayesinterimtable))
{
Ni1=bayesinterimtable[x,1]
Ni=bayesinterimtable[x,2]
r1=bayesinterimtable[x,3]
r2=bayesinterimtable[x,4]
samplesize.bayesinterim=ceiling(bayesinterimtable[x,5])
nik=matrix(NA,2,K) # number of patients in indication k at state i
rik=matrix(NA,2,K) # number of responders in indication k at state i
nik[1,]=rep(Ni1,K) # number of patients enrolled at stage 1
Decision.bayesinterim[[samplesize.bayesinterim]]=rep(0,K+1)
###### tuning:
decision.bayesinterim=Tstat=numeric()
p0=rep(q0,K)
tp=which(p0>=q1)
tn=which(p0<q1)
#samplesize=0
for (sim in 1:num.sim)
{
##### Stage 1:
rik[1,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[1,x],prob=p0[x])})
## Futility stop:
stage2.stop=which(rik[1,]<=r1)
stage2.cont=which(rik[1,]>r1)
nik[2,]=sapply(1:K,FUN=function(x){ifelse(is.element(x,stage2.cont),Ni-Ni1,0)})
if (length(stage2.stop)>0)
{
Tstat[sim]=0
}
## Stage 2:
if (length(stage2.cont)>0)
{
rik[2,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[2,x],prob=p0[x])})
ri=colSums(as.matrix(rik[,stage2.cont]))
ni=colSums(as.matrix(nik[,stage2.cont]))
K1=length(stage2.cont)
q=rep(log(((q0+q1)/2)/(1-(q0+q1)/2)),K) ## can consider different settings based on histological data
############ Jags model for BHM:
jags.data <- list("n"=ni, "Y"=ri, "K"=K1, "q"=q,"g1"=log(q1/(1-q1))-log((q1+q0)/2/(1-(q1+q0)/2)),"g0"=-log((q1+q0)/2/(1-(q1+q0)/2))+log(q0/(1-q0)))
jags.fit <- jags.model(file = "~/Jin/Signal Detection Project/a-ina.txt",data = jags.data,
n.adapt=1000,n.chains=1,quiet=T)
update(jags.fit, 4000)
bayes.out <- coda.samples(jags.fit,variable.names = c("p","d","pi","delta","tausq","mu1","mu2"),n.iter=10000)
### Interim analysis:
if (K1 == 1)
{
Tstat[sim]=sum(bayes.out[[1]][,"d"]>0)/nrow(bayes.out[[1]])
}
if (K1 > 1)
{
Tstat[sim]=sum(apply(bayes.out[[1]][,sapply(1:K1,FUN=function(x){paste("d[",x,"]",sep="")})],1,sum)>0)/nrow(bayes.out[[1]])
}
}
print(sim)
}
c.bayesinterim=quantile(Tstat,1-alpha)
###################### Simulations:
for (scenario in 2:(nrow(p.sce)))
{
p0=p.sce[scenario,]
decision.bayesinterim=numeric()
tp=which(p0>=q1)
tn=which(p0<q1)
decision.bayesinterim=matrix(NA,num.sim,K)
samplesize=0
for (sim in 1:num.sim)
{
##### Stage 1:
rik[1,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[1,x],prob=p0[x])})
## Futility stop:
stage2.stop=which(rik[1,]<=r1)
stage2.cont=which(rik[1,]>r1)
nik[2,]=sapply(1:K,FUN=function(x){ifelse(is.element(x,stage2.cont),Ni-Ni1,0)})
if (length(stage2.stop)>0)
{
Tstat[sim]=0
decision.bayesinterim[sim]=0
}
## Stage 2:
if (length(stage2.cont)>0)
{
rik[2,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[2,x],prob=p0[x])})
ri=colSums(as.matrix(rik[,stage2.cont]))
ni=colSums(as.matrix(nik[,stage2.cont]))
K1=length(stage2.cont)
q=rep(log(((q0+q1)/2)/(1-(q0+q1)/2)),K) ## can consider different settings based on histological data
############ Jags model for BHM:
jags.data <- list("n"=ni, "Y"=ri, "K"=K1, "q"=q,"g1"=log(q1/(1-q1))-log((q1+q0)/2/(1-(q1+q0)/2)),"g0"=-log((q1+q0)/2/(1-(q1+q0)/2))+log(q0/(1-q0)))
jags.fit <- jags.model(file = "~/Jin/Signal Detection Project/a-ina.txt",data = jags.data,
n.adapt=1000,n.chains=1,quiet=T)
update(jags.fit, 4000)
bayes.out <- coda.samples(jags.fit,variable.names = c("p","d","pi","delta","tausq","mu1","mu2"),n.iter=10000)
### Interim analysis:
if (K1 == 1)
{
Tstat[sim]=sum(bayes.out[[1]][,"d"]>0)/nrow(bayes.out[[1]])
}
if (K1 > 1)
{
Tstat[sim]=sum(apply(bayes.out[[1]][,sapply(1:K1,FUN=function(x){paste("d[",x,"]",sep="")})],1,sum)>0)/nrow(bayes.out[[1]])
}
decision.bayesinterim[sim]=ifelse(Tstat[sim]>c.bayesinterim,1,0)
}
Decision.bayesinterim[[samplesize.bayesinterim]][scenario]=Decision.bayesinterim[[samplesize.bayesinterim]][scenario] + ifelse((length(tp)>0)&(decision.bayesinterim[sim]==0),1,0)
print(sim)
print(Decision.bayesinterim[[samplesize.bayesinterim]][scenario])
samplesize=samplesize+sum(nik)
}
bayesinterimtable[x,5+scenario]=Decision.bayesinterim[[samplesize.bayesinterim]][scenario]/num.sim
bayesinterimtable[x,9+scenario]=samplesize/num.sim
}
save(bayesinterimtable,Decision.bayesinterim,file="bayesinterim_simu_fn_q0=0.3_k3.RData")
}
K=6
q0=0.2
q1=0.3
p.sce=t(sapply(0:K,FUN=function(x){c(rep(q1,x),rep(q0,K-x))}))
######## obtained from the minimax Simon's two-stage design from
######## http://cancer.unc.edu/biostatistics/program/ivanova/SimonsTwoStageDesign.aspx
bayesinterimtable=matrix(NA,13,5+2*K+2)
bayesinterimtable[1,1:5]=c(17,36,4,12,21.6)
bayesinterimtable[2,1:5]=c(15,32,3,11,21)
bayesinterimtable[3,1:5]=c(11,32,2,11,19)
bayesinterimtable[4,1:5]=c(11,28,2,10,17.5)
bayesinterimtable[5,1:5]=c(13,25,3,9,16)
bayesinterimtable[6,1:5]=c(11,21,2,8,14.8)
bayesinterimtable[7,1:5]=c(10,21,2,8,13.5)
bayesinterimtable[8,1:5]=c(7,18,1,7,11.7)
bayesinterimtable[9,1:5]=c(3,15,0,6,8.9)
bayesinterimtable[10,1:5]=c(5,15,1,6,7.6)
bayesinterimtable[11,1:5]=c(2,12,0,5,5.6)
bayesinterimtable[12,1:5]=c(2,9,0,4,4.5)
bayesinterimtable[13,1:5]=c(2,6,0,3,3.4)
colnames(bayesinterimtable)=c("n1","n2","r1","r2","ss1","type1",
sapply(2:(K+1),FUN=function(x){paste("type2-",x,sep="")}),
sapply(1:(K+1),FUN=function(x){paste("ss",x,sep="")}))
Decision.bayesinterim=list()
for (x in 1:nrow(bayesinterimtable))
{
Ni1=bayesinterimtable[x,1]
Ni=bayesinterimtable[x,2]
r1=bayesinterimtable[x,3]
r2=bayesinterimtable[x,4]
samplesize.bayesinterim=ceiling(bayesinterimtable[x,5])
nik=matrix(NA,2,K) # number of patients in indication k at state i
rik=matrix(NA,2,K) # number of responders in indication k at state i
nik[1,]=rep(Ni1,K) # number of patients enrolled at stage 1
Decision.bayesinterim[[samplesize.bayesinterim]]=rep(0,K+1)
###### tuning:
decision.bayesinterim=Tstat=numeric()
p0=rep(q0,K)
tp=which(p0>=q1)
tn=which(p0<q1)
#samplesize=0
for (sim in 1:num.sim)
{
##### Stage 1:
rik[1,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[1,x],prob=p0[x])})
## Futility stop:
stage2.stop=which(rik[1,]<=r1)
stage2.cont=which(rik[1,]>r1)
nik[2,]=sapply(1:K,FUN=function(x){ifelse(is.element(x,stage2.cont),Ni-Ni1,0)})
if (length(stage2.stop)>0)
{
Tstat[sim]=0
}
## Stage 2:
if (length(stage2.cont)>0)
{
rik[2,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[2,x],prob=p0[x])})
ri=colSums(as.matrix(rik[,stage2.cont]))
ni=colSums(as.matrix(nik[,stage2.cont]))
K1=length(stage2.cont)
q=rep(log(((q0+q1)/2)/(1-(q0+q1)/2)),K) ## can consider different settings based on histological data
############ Jags model for BHM:
jags.data <- list("n"=ni, "Y"=ri, "K"=K1, "q"=q,"g1"=log(q1/(1-q1))-log((q1+q0)/2/(1-(q1+q0)/2)),"g0"=-log((q1+q0)/2/(1-(q1+q0)/2))+log(q0/(1-q0)))
jags.fit <- jags.model(file = "~/Jin/Signal Detection Project/a-ina.txt",data = jags.data,
n.adapt=1000,n.chains=1,quiet=T)
update(jags.fit, 4000)
bayes.out <- coda.samples(jags.fit,variable.names = c("p","d","pi","delta","tausq","mu1","mu2"),n.iter=10000)
### Interim analysis:
if (K1 == 1)
{
Tstat[sim]=sum(bayes.out[[1]][,"d"]>0)/nrow(bayes.out[[1]])
}
if (K1 > 1)
{
Tstat[sim]=sum(apply(bayes.out[[1]][,sapply(1:K1,FUN=function(x){paste("d[",x,"]",sep="")})],1,sum)>0)/nrow(bayes.out[[1]])
}
}
print(sim)
}
c.bayesinterim=quantile(Tstat,1-alpha)
###################### Simulations:
for (scenario in 2:(nrow(p.sce)))
{
p0=p.sce[scenario,]
decision.bayesinterim=numeric()
tp=which(p0>=q1)
tn=which(p0<q1)
decision.bayesinterim=matrix(NA,num.sim,K)
samplesize=0
for (sim in 1:num.sim)
{
##### Stage 1:
rik[1,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[1,x],prob=p0[x])})
## Futility stop:
stage2.stop=which(rik[1,]<=r1)
stage2.cont=which(rik[1,]>r1)
nik[2,]=sapply(1:K,FUN=function(x){ifelse(is.element(x,stage2.cont),Ni-Ni1,0)})
if (length(stage2.stop)>0)
{
Tstat[sim]=0
decision.bayesinterim[sim]=0
}
## Stage 2:
if (length(stage2.cont)>0)
{
rik[2,]=sapply(1:K,FUN=function(x){rbinom(n=1,size=nik[2,x],prob=p0[x])})
ri=colSums(as.matrix(rik[,stage2.cont]))
ni=colSums(as.matrix(nik[,stage2.cont]))
K1=length(stage2.cont)
q=rep(log(((q0+q1)/2)/(1-(q0+q1)/2)),K) ## can consider different settings based on histological data
############ Jags model for BHM:
jags.data <- list("n"=ni, "Y"=ri, "K"=K1, "q"=q,"g1"=log(q1/(1-q1))-log((q1+q0)/2/(1-(q1+q0)/2)),"g0"=-log((q1+q0)/2/(1-(q1+q0)/2))+log(q0/(1-q0)))
jags.fit <- jags.model(file = "~/Jin/Signal Detection Project/a-ina.txt",data = jags.data,
n.adapt=1000,n.chains=1,quiet=T)
update(jags.fit, 4000)
bayes.out <- coda.samples(jags.fit,variable.names = c("p","d","pi","delta","tausq","mu1","mu2"),n.iter=10000)
### Interim analysis:
if (K1 == 1)
{
Tstat[sim]=sum(bayes.out[[1]][,"d"]>0)/nrow(bayes.out[[1]])
}
if (K1 > 1)
{
Tstat[sim]=sum(apply(bayes.out[[1]][,sapply(1:K1,FUN=function(x){paste("d[",x,"]",sep="")})],1,sum)>0)/nrow(bayes.out[[1]])
}
decision.bayesinterim[sim]=ifelse(Tstat[sim]>c.bayesinterim,1,0)
}
Decision.bayesinterim[[samplesize.bayesinterim]][scenario]=Decision.bayesinterim[[samplesize.bayesinterim]][scenario] + ifelse((length(tp)>0)&(decision.bayesinterim[sim]==0),1,0)
print(sim)
print(Decision.bayesinterim[[samplesize.bayesinterim]][scenario])
samplesize=samplesize+sum(nik)
}
bayesinterimtable[x,5+scenario]=Decision.bayesinterim[[samplesize.bayesinterim]][scenario]/num.sim
bayesinterimtable[x,12+scenario]=samplesize/num.sim
}
save(bayesinterimtable,Decision.bayesinterim,file="bayesinterim_simu_fn_q0=0.3_k6.RData")
}
---
blob_id: cac7fba884944434c25b2faf83ffcde8054fdc9a | directory_id: a1f157aef70d937ad92285edbfd97ee20ed54bdd | path: /04_compare_candidates.R | content_id: 97137d084d42a9927ec2371aa6e9312d68898aa3
detected_licenses: [] | license_type: no_license | repo_name: BrianWeinstein/presidential-debate-nlp | snapshot_id: 582de261da3ec9966f2d348211a3ddfd39ee337d | revision_id: 4ffb18b0e5656f7f5a597f95d1e4ca93ad14be8f | branch_name: refs/heads/master
visit_date: 2021-05-15T03:09:29.085303 | revision_date: 2016-12-02T16:58:03 | committer_date: 2016-12-02T16:58:03 | github_id: 69,831,469 | star_events_count: 6 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 8,459 | extension: r | filename: 04_compare_candidates.R
content:
# Initialize ############################################################################
# load packages
library(readr)
library(dplyr)
library(tidyr)
library(stringr)
library(broom)
library(forcats)
library(ggplot2); theme_set(theme_bw())
# formatting for scientific notation
options(scipen = 50, digits = 7)
# set working directory
setwd("~/Documents/presidential-debate-nlp/")
# load LogisticRegression helper function
source("logistic_helper.R")
# load colors definitions
source("colors.R")
# Load data ############################################################################
# load transcript and sentiment data
transcript <- read_csv("data/vox_transcript.csv")
annotations.tokens <- read_csv("data/annotations_tokens.csv")
tokens.ngrams <- read_csv("data/tokens_ngrams.csv")
# join dataframes
annotations.tokens <- left_join(transcript %>% mutate(text=NULL), annotations.tokens, by="text_id")
tokens.ngrams <- inner_join(transcript %>% mutate(text=NULL), tokens.ngrams, by="text_id")
# examine lemma replacements
annotations.tokens %>%
group_by(lemma) %>%
summarize(words=paste0(unique(text.content), collapse=", "),
num_words=length(unique(text.content))) %>%
arrange(-num_words)
# Compare nominal subjects ############################################################################
# calculate the most common subjects of each candidate's sentences
subjects <- annotations.tokens %>%
filter(dependencyEdge.label=="NSUBJ") %>%
mutate(lemma=str_to_lower(lemma)) %>%
group_by(speaker, lemma) %>%
summarize(num=n()) %>%
spread(key=speaker, value=num, fill = 0) %>%
mutate(Trump_pct=Trump/sum(Trump),
Trump_pct_rank=min_rank(-Trump_pct),
Clinton_pct=Clinton/sum(Clinton),
Clinton_pct_rank=min_rank(-Clinton_pct),
Holt=NULL) %>%
mutate(Clinton_sum=sum(Clinton),
Trump_sum=sum(Trump)) %>%
arrange(Trump_pct_rank)
# # z test for equality of candidates' subject proportions # http://stats.stackexchange.com/questions/2391/what-is-the-relationship-between-a-chi-squared-test-and-test-of-equal-proportion
subjects.compare <- subjects %>%
filter(Clinton >= 10 & Trump >= 10) %>% # prop.test only accurate for the larger counts
group_by(lemma) %>%
do(tidy(prop.test(x=c(.$Clinton, .$Trump),
n=c(.$Clinton_sum, .$Trump_sum))))
# fisher.test more accurate for smaller counts # http://stats.stackexchange.com/questions/123609/exact-two-sample-proportions-binomial-test-in-r-and-some-strange-p-values
# fisher.test(
# matrix(c(subjects$Clinton[15],
# subjects$Clinton_sum[15]-subjects$Clinton[15],
# subjects$Trump[15],
# subjects$Trump_sum[15]-subjects$Trump[15]),
# ncol=2))
# calculate the most common adjectives for each candidate
adjectives <- annotations.tokens %>%
filter(partOfSpeech.tag=="ADJ") %>%
mutate(lemma=str_to_lower(lemma)) %>%
group_by(speaker, lemma) %>%
summarize(num=n()) %>%
spread(key=speaker, value=num, fill = 0) %>%
mutate(Trump_pct=Trump/sum(Trump),
Trump_pct_rank=min_rank(-Trump_pct),
Clinton_pct=Clinton/sum(Clinton),
Clinton_pct_rank=min_rank(-Clinton_pct),
Holt=NULL) %>%
mutate(Clinton_sum=sum(Clinton),
Trump_sum=sum(Trump)) %>%
arrange(Trump_pct_rank)
# Compare each candidates' adjectives # http://stats.stackexchange.com/questions/2391/what-is-the-relationship-between-a-chi-squared-test-and-test-of-equal-proportion
adjectives.compare <- adjectives %>%
filter(Clinton >= 10 & Trump >= 10) %>% # prop.test only accurate for the larger counts
group_by(lemma) %>%
do(tidy(prop.test(x=c(.$Clinton, .$Trump),
n=c(.$Clinton_sum, .$Trump_sum))))
# plot
adjectives.plot <- adjectives %>%
select(lemma, Clinton, Trump) %>%
gather(key = speaker, value = mentions, c(Clinton, Trump)) %>%
group_by(speaker) %>%
filter(row_number(-mentions) <= 10) %>%
rbind(data.frame(lemma="braggadocios", speaker="Trump", mentions=1)) %>%
as.data.frame() %>%
group_by(speaker) %>%
arrange(-mentions)
ggplot(adjectives.plot %>% filter(speaker=="Clinton"),
aes(x=fct_reorder(lemma, mentions), y=mentions, fill=speaker)) +
geom_bar(stat = "identity") +
scale_fill_manual(values=colors$cp1, guide=FALSE) +
labs(x=NULL, y="Frequency", title = "Clinton Adjective Frequency") +
coord_flip() +
# geom_text(aes(label=mentions), hjust=-0.0) +
theme(panel.border = element_blank())
ggsave(filename = "plots/adjectives_clinton.png", width = 5, height = 3, units = "in")
ggplot(adjectives.plot %>% filter(speaker=="Trump"),
aes(x=fct_reorder(lemma, mentions), y=mentions, fill=speaker)) +
geom_bar(stat = "identity") +
scale_fill_manual(values=colors$tp1, guide=FALSE) +
labs(x=NULL, y="Frequency", title = "Trump Adjective Frequency") +
coord_flip() +
# geom_text(aes(label=mentions), hjust=-0.0) +
theme(panel.border = element_blank())
ggsave(filename = "plots/adjectives_trump.png", width = 5, height = 3, units = "in")
# Common bigrams, trigrams ############################################################################
words.bigrams <- tokens.ngrams %>%
filter(speaker != "Holt" & !is.na(bigram)) %>%
group_by(speaker, bigram) %>%
summarize(count=n()) %>%
spread(key = speaker, value=count) %>%
arrange(-Clinton)
words.trigrams <- tokens.ngrams %>%
filter(speaker != "Holt" & !is.na(trigram)) %>%
group_by(speaker, trigram) %>%
summarize(count=n()) %>%
spread(key = speaker, value=count) %>%
arrange(-Clinton)
# Classify Clinton vs Trump ############################################################################
# create a document term matrix
dtm.df <- annotations.tokens %>%
filter(partOfSpeech.tag %in% c("ADJ", "ADV", "NOUN", "PRON", "VERB")) %>%
filter(speaker != "Holt") %>%
mutate(speaker = ifelse(speaker=="Clinton", 1, 0)) %>%
rename(speaker_clinton=speaker) %>%
mutate(lemma=str_to_lower(lemma)) %>%
group_by(text_id, speaker_clinton, lemma) %>%
summarize(occurrences=n()) %>%
ungroup() %>%
spread(key = lemma, value = occurrences, fill = 0) %>%
mutate(text_id=NULL)
# perform logistic regression with lasso
logistic.compare.candidates <- LogisticRegression(input.data = dtm.df,
positive.class = "1",
type.measure="class",
sparsity = 0.98)
# get coefficients
logistic.compare.candidates$coefs %>% View
# get performance metrics
logistic.compare.candidates$confusion.matrix
# Classify Clinton vs Trump, with bigrams ############################################################################
# create a document term matrix
dtm.df.bi <- tokens.ngrams %>%
filter(speaker!="Holt" & !is.na(bigram)) %>%
count(text_id, speaker, bigram) %>%
ungroup() %>%
mutate(speaker = ifelse(speaker=="Clinton", 1, 0)) %>%
rename(speaker_clinton=speaker) %>%
spread(key = bigram, value = n, fill = 0) %>%
mutate(text_id=NULL)
# perform logistic regression with lasso
logistic.compare.candidates.bi <- LogisticRegression(input.data = dtm.df.bi,
positive.class = "1",
type.measure="class",
sparsity = 0.994)
# get coefficients
logistic.compare.candidates.bi$coefs %>% View
# get performance metrics
logistic.compare.candidates.bi$confusion.matrix
# Classify Clinton vs Trump, with trigrams ############################################################################
# create a document term matrix
dtm.df.tri <- tokens.ngrams %>%
filter(speaker!="Holt" & !is.na(trigram)) %>%
count(text_id, speaker, trigram) %>%
ungroup() %>%
mutate(speaker = ifelse(speaker=="Clinton", 1, 0)) %>%
rename(speaker_clinton=speaker) %>%
spread(key = trigram, value = n, fill = 0) %>%
mutate(text_id=NULL)
# perform logistic regression with lasso
logistic.compare.candidates.tri <- LogisticRegression(input.data = dtm.df.tri,
positive.class = "1",
type.measure="class",
sparsity = 0.996)
# get coefficients
logistic.compare.candidates.tri$coefs %>% View
# get performance metrics
logistic.compare.candidates.tri$confusion.matrix
---
blob_id: 6fd1792516e4efc38a88234e13674537e93e014e | directory_id: d7e29a6a1cb9fbe7f4f2eeab61835843f3e6cfe5 | path: /bayesian_fm/integration/melting.R | content_id: 3d4eb4fbe582ec995b079222009bb35f259b33c1
detected_licenses: [] | license_type: no_license | repo_name: MRCIEU/eczema_gwas_fu | snapshot_id: a8b6dc9d213b13d89a5074d8470823181854a694 | revision_id: 9c56ad47a827cd406002c1b00794e44a36442771 | branch_name: refs/heads/master
visit_date: 2022-04-26T18:17:41.315147 | revision_date: 2020-04-29T13:29:30 | committer_date: 2020-04-29T13:29:30 | github_id: 255,891,011 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-04-15T11:07:07 | gha_created_at: 2020-04-15T11:07:06 | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 374 | extension: r | filename: melting.R
content:
library(tools)
args = commandArgs(trailingOnly=TRUE)
library(reshape2)
my_jam <- read.delim(args[1])
my_jam <- melt(my_jam,id.vars=c("X"))
colnames(my_jam) <- c("study_id", "rsid", "PP")
my_jam <- my_jam[!is.na(my_jam$PP),]
my_jam$PP <- as.numeric(my_jam$PP)
my_jam <- my_jam[my_jam$PP >= 0.05,]
write.table(my_jam, args[2], quote=FALSE, row.names=FALSE, sep="\t")
---
blob_id: c347cc46c94de62b38b71f8408dd1bd1116bb90d | directory_id: 78e656557b5cc6b77f8a30a3792e41b6f79f2f69 | path: /aslib/man/plotAlgoCorMatrix.Rd | content_id: dae982fe337a79b5a12cf403ed84ddf97b730f76
detected_licenses: [] | license_type: no_license | repo_name: coseal/aslib-r | snapshot_id: f7833aa6d9750f00c6955bade2b8dba6b452c9e1 | revision_id: 2363baf4607971cd2ed1d784d323ecef898b2ea3 | branch_name: refs/heads/master
visit_date: 2022-09-12T15:19:20.609668 | revision_date: 2022-09-02T17:48:51 | committer_date: 2022-09-02T17:48:51 | github_id: 27,724,280 | star_events_count: 6 | fork_events_count: 7
gha_license_id: null | gha_event_created_at: 2021-10-17T17:34:54 | gha_created_at: 2014-12-08T16:38:21 | gha_language: R
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 1,901 | extension: rd | filename: plotAlgoCorMatrix.Rd
content:
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotAlgoCorMatrix.R
\name{plotAlgoCorMatrix}
\alias{plotAlgoCorMatrix}
\title{Plots the correlation matrix of the algorithms.}
\usage{
plotAlgoCorMatrix(
asscenario,
measure,
order.method = "hclust",
hclust.method = "ward.D2",
cor.method = "spearman"
)
}
\arguments{
\item{asscenario}{[\code{\link{ASScenario}}]\cr
Algorithm selection scenario.}
\item{measure}{[\code{character(1)}]\cr
Measure to plot.
Default is first measure in scenario.}
\item{order.method}{[\code{character(1)}]\cr
Method for ordering the algorithms within the plot.
Possible values are \dQuote{hclust} (for hierarchical clustering order),
\dQuote{FPC} (first principal component order), \dQuote{AOE} (angular order
of eigenvectors), \dQuote{original} (original order) and \dQuote{alphabet}
(alphabetical order).
See \code{\link[corrplot]{corrMatOrder}}.
Default is \dQuote{hclust}.}
\item{hclust.method}{[\code{character(1)}]\cr
Method for hierarchical clustering. Only useful, when \code{order.method}
is set to \dQuote{hclust}, otherwise ignored.
Possible values are: \dQuote{ward.D2}, \dQuote{single},
\dQuote{complete}, \dQuote{average}, \dQuote{mcquitty}, \dQuote{median} and
\dQuote{centroid}.
See \code{\link[corrplot]{corrMatOrder}}.
Default is \dQuote{ward.D2}.}
\item{cor.method}{[\code{character(1)}]\cr
Method to be used for calculating the correlation between the algorithms.
Possible values are \dQuote{pearson}, \dQuote{kendall} and \dQuote{spearman}.
See \code{\link{cor}}.
Default is \dQuote{spearman}.}
}
\value{
See \code{\link[corrplot]{corrplot}}.
}
\description{
If NAs occur, they are imputed (before aggregation) by
\code{base + 0.3 * range}.
\code{base} is the cutoff value for runtimes scenarios with cutoff or
the worst performance for all others.
Stochastic replications are aggregated by the mean value.
}
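For intuition, the imputation rule described above can be sketched in a few lines (illustrative only; the actual implementation lives in the aslib package):

impute_na <- function(perf, base) {
  perf[is.na(perf)] <- base + 0.3 * diff(range(perf, na.rm = TRUE))
  perf
}
impute_na(c(1.2, NA, 3.4), base = 3.4) # the NA becomes 3.4 + 0.3 * 2.2 = 4.06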
---
blob_id: fefe4b64123b2ad0c8b27a8411b682f0724890b5 | directory_id: ded276ce3370b533a2863ab7a4f2298d303e9dd7 | path: /KPI_KNN_5_all_obs_DataPartition 75_25.R | content_id: 78e4bc98ebca161c000872a57ecd17787825dd81
detected_licenses: [] | license_type: no_license | repo_name: NatasjaFortuin/Wifi-locationing | snapshot_id: a7db46c1299746da31019a084ae72a2a14a9fd73 | revision_id: 51286391c4ea977105f9828f2a1a0d57bfb2d7c3 | branch_name: refs/heads/master
visit_date: 2020-09-16T21:29:36.104767 | revision_date: 2019-12-19T13:39:59 | committer_date: 2019-12-19T13:39:59 | github_id: 223,891,945 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 10,743 | extension: r | filename: KPI_KNN_5_all_obs_DataPartition 75_25.R
content:
library(readr)
library(caret)
library(lattice)
library(ggplot2)
library(tidyverse)
library(gmodels)
library(vcd)
library(grid)
#### KNN ####
## load saved KNN models----
KNNB0Lat <- read_rds("KNN_Fit_lat_B0.rds")
KNNB1Lat <- read_rds("KNN_Fit_lat_B1.rds")
KNNB2Lat <- read_rds("KNN_Fit_lat_B2.rds")
KNNB0Long <- read_rds("KNN_Fit_long_B0.rds")
KNNB1Long <- read_rds("KNN_Fit_long_B1.rds")
KNNB2Long <- read_rds("KNN_Fit_long_B2.rds")
KNNB0Floor <- read_rds("KNN_Fit_floor_B0.rds")
KNNB1Floor<- read_rds("KNN_Fit_floor_B1.rds")
KNNB2Floor <- read_rds("KNN_Fit_floor_B2.rds")
# load training data----
training_B0_lat <- read_rds("training_B0_lat.rds")
training_B0_lat <- training_B0_lat %>%
select(starts_with("WAP"), LATITUDE)
training_B1_lat <- read_rds("training_B1_lat.rds")
training_B1_lat <- training_B1_lat %>%
select(starts_with("WAP"), LATITUDE)
training_B2_lat <- read_rds("training_B2_lat.rds")
training_B2_lat <- training_B2_lat %>%
select(starts_with("WAP"), LATITUDE)
training_B0_long <- read_rds("training_B0_long.rds")
training_B0_long <- training_B0_long %>%
select(starts_with("WAP"), LONGITUDE)
training_B1_long<- read_rds("training_B1_long.rds")
training_B1_long <- training_B1_long %>%
select(starts_with("WAP"), LONGITUDE)
training_B2_long <- read_rds("training_B2_long.rds")
training_B2_long <- training_B2_long %>%
select(starts_with("WAP"), LONGITUDE)
training_B0_floor <- read_rds("training_B0_floor.rds")
training_B0_floor <- training_B0_floor %>%
select(starts_with("WAP"), FLOOR)
training_B1_floor <- read_rds("training_B1_floor.rds")
training_B1_floor <- training_B1_floor %>%
select(starts_with("WAP"), FLOOR)
training_B2_floor <- read_rds("training_B2_floor.rds")
training_B2_floor <- training_B2_floor %>%
select(starts_with("WAP"), FLOOR)
# load test data----
Test_B0_lat <- read_rds("testing_B0_lat.rds")
Test_B1_lat <- read_rds("testing_B1_lat.rds")
Test_B2_lat <- read_rds("testing_B2_lat.rds")
Test_B0_long <- read_rds("testing_B0_long.rds")
Test_B1_long <- read_rds("testing_B1_long.rds")
Test_B2_long <- read_rds("testing_B2_long.rds")
Test_B0_Floor <- read_rds("testing_B0_floor.rds")
Test_B1_Floor <- read_rds("testing_B1_floor.rds")
Test_B2_Floor <- read_rds("testing_B2_floor.rds")
#### PREDICT & CHECK KPI on test data ####
# B0----
## Lat----
predictions_KNNB0Lat= predict(object = KNNB0Lat, newdata = Test_B0_lat)
# Lat KPI----
postResample(pred = predictions_KNNB0Lat, obs = Test_B0_lat$LATITUDE)
# RMSE Rsquared MAE
# 5.982871 0.967413 3.168045
error_KNNB0Lat <- predictions_KNNB0Lat - Test_B0_lat$LATITUDE
rmse_KNNB0Lat <- sqrt(mean(error_KNNB0Lat^2))
rmse_KNNB0Lat
rsquared_KNNB0Lat <- 1 - (sum(error_KNNB0Lat^2) /
sum((Test_B0_lat$LATITUDE-mean(Test_B0_lat$LATITUDE))^2))
rsquared_KNNB0Lat <- rsquared_KNNB0Lat * 100
rsquared_KNNB0Lat
MAE_KNN_B0Lat <- MAE(predictions_KNNB0Lat, Test_B0_lat$LATITUDE)
## Long----
predictions_KNNB0Long= predict(object = KNNB0Long, newdata = Test_B0_long)
# Long KPI----
postResample(pred = predictions_KNNB0Long, obs = Test_B0_long$LONGITUDE)
# RMSE Rsquared MAE
# 5.6387300 0.9480333 3.0377195
error_KNNB0Long <- predictions_KNNB0Long - Test_B0_long$LONGITUDE
rmse_KNNB0Long <- sqrt(mean(error_KNNB0Long^2))
rmse_KNNB0Long
rsquared_KNNB0Long <- 1 - (sum(error_KNNB0Long^2) /
sum((Test_B0_long$LONGITUDE-mean(Test_B0_long$LONGITUDE))^2))
rsquared_KNNB0Long <- rsquared_KNNB0Long * 100
rsquared_KNNB0Long
MAE_KNN_B0Long <- MAE(predictions_KNNB0Long, Test_B0_long$LONGITUDE)
## Floor----
predictions_KNNB0Floor= predict(object = KNNB0Floor, newdata = Test_B0_Floor)
KNNB0Floor
#kmax Accuracy Kappa
#13 0.9223176 0.8961545
#Confusion matrix & KPI----
CF_B0_Floor <- confusionMatrix(KNNB0Floor)
CF_B0_Floor
table_CF_B0_Floor <- table(predictions_KNNB0Floor, Test_B0_Floor$FLOOR)
accuracy_KNNB0Floor <- (sum(diag(table_CF_B0_Floor))) / sum(table_CF_B0_Floor)
accuracy_KNNB0Floor <- accuracy_KNNB0Floor * 100
accuracy_KNNB0Floor
CF_B0_Floor <- confusionMatrix(table_CF_B0_Floor)
CF_B0_Floor
# B1----
## Lat----
predictions_KNNB1Lat= predict(object = KNNB1Lat, newdata = Test_B1_lat)
# Lat KPI----
postResample(pred = predictions_KNNB1Lat, obs = Test_B1_lat$LATITUDE)
# RMSE Rsquared MAE
# 5.8600480 0.9735459 2.9833834
error_KNNB1Lat <- predictions_KNNB1Lat - Test_B1_lat$LATITUDE
rmse_KNNB1Lat <- sqrt(mean(error_KNNB1Lat^2))
rmse_KNNB1Lat
rsquared_KNNB1Lat <- 1 - (sum(error_KNNB1Lat^2) /
sum((Test_B1_lat$LATITUDE-mean(Test_B1_lat$LATITUDE))^2))
rsquared_KNNB1Lat <- rsquared_KNNB1Lat * 100
rsquared_KNNB1Lat
MAE_KNN_B1Lat <- MAE(predictions_KNNB1Lat, Test_B1_lat$LATITUDE)
## Long----
predictions_KNNB1Long= predict(object = KNNB1Long, newdata = Test_B1_long)
# Long KPI----
postResample(pred = predictions_KNNB1Long, obs = Test_B1_long$LONGITUDE)
# RMSE Rsquared MAE
# 6.6183878 0.9819367 3.3280642
error_KNNB1Long <- predictions_KNNB1Long - Test_B1_long$LONGITUDE
rmse_KNNB1Long <- sqrt(mean(error_KNNB1Long^2))
rmse_KNNB1Long
rsquared_KNNB1Long <- 1 - (sum(error_KNNB1Long^2) /
sum((Test_B1_long$LONGITUDE-mean(Test_B1_long$LONGITUDE))^2))
rsquared_KNNB1Long <- rsquared_KNNB1Long * 100
rsquared_KNNB1Long
MAE_KNN_B1Long <- MAE(predictions_KNNB1Long, Test_B1_long$LONGITUDE)
## Floor----
predictions_KNNB1Floor= predict(object = KNNB1Floor, newdata = Test_B1_Floor)
KNNB1Floor
#kmax Accuracy Kappa
#5 0.867244 0.8226709
#Confusion matrix & KPI----
CF_B1_Floor <- confusionMatrix(KNNB1Floor)
CF_B1_Floor
table_CF_B1_Floor <- table(predictions_KNNB1Floor, Test_B1_Floor$FLOOR)
accuracy_KNNB1Floor <- (sum(diag(table_CF_B1_Floor))) / sum(table_CF_B1_Floor)
accuracy_KNNB1Floor <- accuracy_KNNB1Floor * 100
accuracy_KNNB1Floor
# B2----
## Lat----
predictions_KNNB2Lat= predict(object = KNNB2Lat, newdata = Test_B2_lat)
#Lat KPI----
postResample(pred = predictions_KNNB2Lat, obs = Test_B2_lat$LATITUDE)
#RMSE Rsquared MAE
#4.9402155 0.9682882 2.5529108
error_KNNB2Lat <- predictions_KNNB2Lat - Test_B2_lat$LATITUDE
rmse_KNNB2Lat <- sqrt(mean(error_KNNB2Lat^2))
rmse_KNNB2Lat
rsquared_KNNB2Lat <- 1 - (sum(error_KNNB2Lat^2) /
sum((Test_B2_lat$LATITUDE-mean(Test_B2_lat$LATITUDE))^2))
rsquared_KNNB2Lat <- rsquared_KNNB2Lat * 100
rsquared_KNNB2Lat
MAE_KNN_B2Lat <- MAE(predictions_KNNB2Lat, Test_B2_lat$LATITUDE)
## Long----
predictions_KNNB2Long= predict(object = KNNB2Long, newdata = Test_B2_long)
#Long KPI----
postResample(pred = predictions_KNNB2Long, obs = Test_B2_long$LONGITUDE)
# RMSE Rsquared MAE
# 7.3556032 0.9404916 3.3675497
error_KNNB2long <-predictions_KNNB2Long - Test_B2_long$LONGITUDE
rmse_KNNB2Long <- sqrt(mean(error_KNNB2long^2))
rmse_KNNB2Long
rsquared_KNNB2Long <- 1 - (sum(error_KNNB2long^2) /
sum((Test_B2_long$LONGITUDE-mean(Test_B2_long$LONGITUDE))^2))
rsquared_KNNB2Long <- rsquared_KNNB2Long * 100
rsquared_KNNB2Long
MAE_KNN_B2Long <- MAE(predictions_KNNB2Long, Test_B2_long$LONGITUDE)
## Floor----
predictions_KNNB2Floor= predict(object = KNNB2Floor, newdata = Test_B2_Floor)
KNNB2Floor
#kmax Accuracy Kappa
#7 0.9603923 0.9494419
#Confusion matrix & KPI----
CF_B2_Floor <- confusionMatrix(KNNB2Floor)
CF_B2_Floor
table_CF_B2_Floor <- table(predictions_KNNB2Floor, Test_B2_Floor$FLOOR)
accuracy_KNNB2Floor <- (sum(diag(table_CF_B2_Floor))) / sum(table_CF_B2_Floor)
accuracy_KNNB2Floor <- accuracy_KNNB2Floor * 100
accuracy_KNNB2Floor
# CREATE DF's for KPI check & PLOTS----
## LATITUDE
## All Lat KPI's per Floor----
Combi_StatSum_Lat <- data.frame( RMSE = c(rmse_KNNB0Lat,
rmse_KNNB1Lat,
rmse_KNNB2Lat),
RSQ = c(rsquared_KNNB0Lat,
rsquared_KNNB1Lat,
rsquared_KNNB2Lat),
MAE = c(MAE_KNN_B0Lat,
MAE_KNN_B1Lat,
MAE_KNN_B2Lat),
row.names = c("B0","B1","B2"))
## All Long KPI's per Floor----
Combi_StatSum_Long <- data.frame( RMSE = c(rmse_KNNB0Long,
rmse_KNNB1Long,
rmse_KNNB2Long),
RSQ = c(rsquared_KNNB0Long,
rsquared_KNNB1Long,
rsquared_KNNB2Long),
MAE = c(MAE_KNN_B0Long,
MAE_KNN_B1Long,
MAE_KNN_B2Long),
row.names = c("B0","B1","B2"))
## KPI RESULTS Lat & Long----
Combi_StatSum_Lat
# RMSE RSQ MAE
#B0 5.982871 96.71153 3.168045
#B1 5.982871 97.21531 2.983383
#B2 4.940216 96.81935 2.552911
Combi_StatSum_Long
# RMSE RSQ MAE
#B0 5.638730 94.79819 3.037719
#B1 6.618388 98.19182 3.328064
#B2 7.355603 94.04222 3.367550
## KPI RESULTS Floor----
Combi_StatSum_Floor_acc <- data.frame(acc = c(
accuracy_KNNB0Floor,
accuracy_KNNB1Floor,
accuracy_KNNB2Floor),
row.names = c("B0","B1","B2"))
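# NOTE: the kappa_KNNB*Floor objects used below are created later in this script
# (in the "PLOT CF as Crosstable" section); run that section before this block.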
Combi_StatSum_Floor_kappa <- data.frame(kappa = c(kappa_KNNB0Floor$Unweighted,
kappa_KNNB1Floor$Unweighted,
kappa_KNNB2Floor$Unweighted))
Combi_StatSum_Floor_acc #B1 has a low accuracy, needs check
#B0 91.29771
#B1 32.25558
#B2 96.92243
Combi_StatSum_Floor_kappa
#kappa
#B0 Weighted 0.883266749
#B0 ASE 0.010453337
#B1 Weighted 0.088937612
#B1 ASE 0.012252931
#B2 Weighted 0.960692577
#B2 ASE 0.004528678
#### PLOT CF as Crosstable ####
CrossTable(table_CF_B0_Floor, prop.chisq = FALSE, dnn = c('predicted', 'actual'))
CrossTable(table_CF_B1_Floor, prop.chisq = FALSE, dnn = c('predicted', 'actual'))
CrossTable(table_CF_B2_Floor, prop.chisq = FALSE, dnn = c('predicted', 'actual'))
kappa_KNNB0Floor <- Kappa(table_CF_B0_Floor)
kappa_KNNB0Floor
kappa_KNNB1Floor <- Kappa(table_CF_B1_Floor)
kappa_KNNB1Floor
kappa_KNNB2Floor <- Kappa(table_CF_B2_Floor)
kappa_KNNB2Floor
#### IF MORE MODELS COMPARE THEM WITH RESAMPLING ####
#In order to use resamples on your three trained models you should use the
#following format:
#ModelData <- resamples(list(KNN = KNNB0Lat, SVM = ----, RF = -----))
#summary(ModelData)
#Here is an example of the output showing the respective metrics for each model:
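# A hedged sketch of that comparison, where svm_fit and rf_fit are hypothetical caret
# models trained with the same resampling indices as KNNB0Lat:
# ModelData <- resamples(list(KNN = KNNB0Lat, SVM = svm_fit, RF = rf_fit))
# summary(ModelData) # per-model RMSE / Rsquared / MAE across resamples
# bwplot(ModelData)  # lattice box-and-whisker comparison of the metrics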
---
blob_id: 43c84c76ebddd75ec3358806b68a9e9941b27892 | directory_id: 890f5f1b70a8ec82548b5bdec8db74441b9644ab | path: /R/onload.R | content_id: d031959014bb4cdd372d7e0d85140247814e2e47
detected_licenses: ["MIT"] | license_type: permissive | repo_name: adam-gruer/bortles | snapshot_id: 1e144ca4fe1d4a110bdcc1aea6d2757607b0783e | revision_id: 068a1b12bf3ba9aac9cef291e0c3f4e129eb95f1 | branch_name: refs/heads/master
visit_date: 2020-04-10T22:34:56.693908 | revision_date: 2018-12-13T05:22:16 | committer_date: 2018-12-13T05:22:16 | github_id: 161,327,365 | star_events_count: 22 | fork_events_count: 1
gha_license_id: NOASSERTION | gha_event_created_at: 2018-12-12T20:49:58 | gha_created_at: 2018-12-11T12:00:23 | gha_language: R
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 457 | extension: r | filename: onload.R
content:
.onLoad <- function(...){
copy_gifs_to_temp()
}
pkg_gif_paths <- function(){
pkg_gif_dir <- system.file("gifs", package = "bortles")
gif_files <- list.files(pkg_gif_dir, pattern = ".+\\.gif$")
fs::path(pkg_gif_dir, gif_files)
}
copy_gifs_to_temp <- function(){
gif_temp <- fs::path(tempdir(), "bortles_gifs")
if (!dir.exists(gif_temp)) dir.create(gif_temp)
file.copy(pkg_gif_paths(),
gif_temp,
overwrite = TRUE)
}
---
blob_id: 746e93a88dd914e2ef9846dffcc06de624f5efd1 | directory_id: ed9cd5469879a3b9bc63279b28aaa2344fc0e535 | path: /Codes/remotely_sensed_data_extraction.R | content_id: 158ac23856fdc67f0b527ea392e96fb466551c8c
detected_licenses: [] | license_type: no_license | repo_name: jms5151/Coral_times_series_Gulf_of_Mannar | snapshot_id: 4cd63ecaf5f44f01aa6de33709ab67dc48ab26d5 | revision_id: ee62c2e993e7ed1f9b578fed7a6a693d81ac9993 | branch_name: refs/heads/master
visit_date: 2022-05-28T02:01:14.517060 | revision_date: 2022-05-20T20:58:57 | committer_date: 2022-05-20T20:58:57 | github_id: 253,721,938 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,686 | extension: r | filename: remotely_sensed_data_extraction.R
content:
# Extract DHW and Chl-a data for bleaching years ----------------------
rm(list=ls()) #remove previous variable assignments
# load libraries
library(raster)
library(ncdf4)
library(tidyverse)
# load site data
gom_coords <- read.csv("Data/GoM_GPS_coordinates.csv", head=T, stringsAsFactors = F)
# degree heating week data ------------------------------------------------------
# ftp://ftp.star.nesdis.noaa.gov/pub/sod/mecb/crw/data/5km/v3.1/nc/v1.0/annual
# create a new file to store the annual maximum DHW data
newFileName <- "Data/GoM_max_dhw_2010_2016.csv"
sst.df <- data.frame(matrix(ncol=3, nrow=0))
colnames(sst.df) <- c("Island", "Year", "Max_DHW")
write.csv(sst.df, newFileName, row.names = F)
# extract annual max DHW for all sites from the CoralTemp DHW netCDF files
ncFiles <- list.files("Data/DHW/")
for (j in 1:length(ncFiles)){
ncFileName <- paste0("Data/DHW/", ncFiles[j])
ncTempBrick <- brick(ncFileName)
surveySST <- extract(ncTempBrick, cbind(gom_coords$Longitude, gom_coords$Latitude))
temp.df <- data.frame("Island"=gom_coords$Island, "Year"=substr(ncFiles[j],20,23), "Max_DHW"=surveySST[,])
write.table(temp.df, file=newFileName, row.names = F, sep = ",", col.names = !file.exists(newFileName), append = T)
}
# chlorophyll-a data ------------------------------------------------------------
# https://oceandata.sci.gsfc.nasa.gov/MODIS-Aqua/Mapped/Monthly/4km/chlor_a/
# replace lat/lon for some Islands
gom_coords$Longitude[gom_coords$Island == "Pullivasal"] <- 79.190392
gom_coords$Latitude[gom_coords$Island == "Pullivasal"] <- 9.230677
gom_coords$Longitude[gom_coords$Island == "Manoliputi"] <- 79.167717
gom_coords$Latitude[gom_coords$Island == "Manoliputi"] <- 9.208859
gom_coords$Longitude[gom_coords$Island == "Manoli"] <- 79.128234
gom_coords$Latitude[gom_coords$Island == "Manoli"] <- 9.205270
gom_coords$Longitude[gom_coords$Island == "Upputhanni"] <- 78.494096
gom_coords$Latitude[gom_coords$Island == "Upputhanni"] <- 9.083069
# list chlorophyll-a data files
chl_files <- list.files("Data/Chla/")
chl.df <- data.frame()
# data frame of chl-a values
for (k in 1:length(chl_files)){
ncFileName <- paste0("Data/Chla/", chl_files[k])
ncChlBrick <- brick(ncFileName)
surveyChl <- extract(ncChlBrick, cbind(gom_coords$Longitude, gom_coords$Latitude))
chl_tmp_df <- data.frame("Island"=gom_coords$Island, "Year"=substr(chl_files[k],2,5), "MMM_Chla"=surveyChl[,])
chl.df <- rbind(chl.df, chl_tmp_df)
}
# summarize max chl-a value by island and bleaching year
chla_max <- chl.df %>%
group_by(Island, Year) %>%
summarize(MMM_Chla = max(MMM_Chla, na.rm=T))
# save data
write.csv(chla_max, "Data/GoM_chla_mmm_2010_2016.csv", row.names = F)
---
blob_id: 228792d64c68c1c68fad1a6dad1b73fbf134db33 | directory_id: e4237bc0070f3f4b7928630a475ed8c2c3204d83 | path: /code/cellar/POAR_county_records.R | content_id: c2fdaa8839671b0381c7bba3b39692d46f2f3815
detected_licenses: [] | license_type: no_license | repo_name: texmiller/POAR-range-limits | snapshot_id: 5f85a880387f0c19c07c98e8ce5361e926f1e79e | revision_id: 912b9b987abf97da3eceb6d8120effda0175e247 | branch_name: refs/heads/master
visit_date: 2022-01-30T14:18:22.070257 | revision_date: 2022-01-19T15:27:06 | committer_date: 2022-01-19T15:27:06 | github_id: 133,686,004 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,614 | extension: r | filename: POAR_county_records.R
content:
## I searched all POAR records on TORCH on 4/12/2020
## Downloaded these search results: http://portal.torcherbaria.org/portal/collections/listtabledisplay.php?db=all&taxa=Poa+arachnifera&usethes=1&taxontype=2
library(tidyverse)
torch <- read.csv("TORCH_POAR_records.csv")
levels(torch$stateProvince)
torch_counties <- torch %>%
select(stateProvince,county) %>%
subset(stateProvince %in% c("Texas","TEXAS","Oklahoma","Kansas")) %>%
unique() %>%
arrange(stateProvince,county) %>%
mutate(POAR = 1)
##are there any duplicated county names between states?
TX_OK <- which(torch_counties$county[torch_counties$stateProvince=="Texas"|torch_counties$stateProvince=="TEXAS"] %in% torch_counties$county[torch_counties$stateProvince=="Oklahoma"])
torch_counties$county[torch_counties$stateProvince=="Texas"|torch_counties$stateProvince=="TEXAS"][TX_OK]
## there is an Ellis county TX and Ellis county OK
TX_KS <- which(torch_counties$county[torch_counties$stateProvince=="Texas"|torch_counties$stateProvince=="TEXAS"] %in% torch_counties$county[torch_counties$stateProvince=="Kansas"])
## no TX-KS overlap
OK_KS <-which(torch_counties$county[torch_counties$stateProvince=="Oklahoma"] %in% torch_counties$county[torch_counties$stateProvince=="Kansas"])
torch_counties$county[torch_counties$stateProvince=="Oklahoma"][OK_KS]
## Comanche and Kiowa counties in OK and KS
write_csv(torch_counties,"POAR_county_records.csv")
## now go through the geodatabase feature by hand to make sure these counties have historical==yes in the attribute table
torch %>% filter(stateProvince=="Kansas") %>% select(county)
---
blob_id: 6e9c6f9b1420f7b34baebfc946de3fe8fdff3c1f | directory_id: 3da549338dc72005d5b905ba5c1319d708b21f47 | path: /SerumSamples.R | content_id: bf586aab37201f13d51eb10fd93239290a5bdae6
detected_licenses: [] | license_type: no_license | repo_name: smdecripan/RIpred | snapshot_id: 84f0042f5730008090d9700082b0acdadb31b2dd | revision_id: a86344a8fe1de2b26a151010cad7282e580a5cd8 | branch_name: refs/heads/main
visit_date: 2023-04-30T20:43:52.500705 | revision_date: 2021-05-24T10:37:52 | committer_date: 2021-05-24T10:37:52 | github_id: 370,274,354 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 6,531 | extension: r | filename: SerumSamples.R
content:
library(erah)
load("id_eRah.rda")
load("gmdRi.rda")
db <- data.frame(matrix(vector(), nrow = length(gmdRi), ncol = 2,
dimnames = list(c(), c("Name", "RI.VAR5.ALK"))))
for (i in 1:length(gmdRi@database)) {
db[i,1] <- gmdRi@database[[i]]$Name
db[i,2] <- as.numeric(gmdRi@database[[i]]$RI.VAR5.ALK)
}
idF_info <- data.frame(Nrow = 1:nrow(idF),
idF$Name.1,
idF$Name.2,
idF$Name.3,
idF$Exp.RI)
testNames <- data.frame(Names = unique(sort(c(idF_info$idF.Name.1,
idF_info$idF.Name.2,
idF_info$idF.Name.3))))
load("fgSet/fgGmd_dragonTMS.rda")
dataTMS <- as.data.frame(matrix(unlist(fgGmd),
ncol = ncol(fgGmd), nrow = nrow(fgGmd)),
stringsAsFactors = FALSE)
colnames(dataTMS) <- colnames(fgGmd)
db <- db[dataTMS$Name,]
db$Nrowdb <- 1:nrow(db)
db$Nameid <- rownames(db)
dataF <- merge(db, testNames,
by.x = "Name", by.y = "Names",
all.x = FALSE, all.y = FALSE)
idF_info <- merge(idF_info, db[,1:2], by.x = "idF.Name.1", by.y = "Name",
all.x = TRUE, all.y = FALSE, sort = FALSE, suffixes = 1)
idF_info <- merge(idF_info, db[,1:2], by.x = "idF.Name.2", by.y = "Name",
all.x = TRUE, all.y = FALSE, sort = FALSE, suffixes = 2)
idF_info <- merge(idF_info, db[,1:2], by.x = "idF.Name.3", by.y = "Name",
all.x = TRUE, all.y = FALSE, sort = FALSE, suffixes = 3)
colnames(idF_info) <- c("idF.Name.3", "idF.Name.2","idF.Name.1", "Nrow",
"Exp.RI", "RI.1", "RI.2", "RI.3")
idF_info <- idF_info[order(idF_info$Nrow),c(4,3,2,1,6,7,8,5)]
test_data <- dataTMS[dataF$Nrowdb,-(1:2)]
test_labels <- dataTMS[dataF$Nrowdb,2]*10
train_data <- dataTMS[-dataF$Nrowdb,-(1:2)]
train_labels <- dataTMS[-dataF$Nrowdb,2]*10
library(e1071)
set.seed(123)
linear.SVM <- NULL
linear.SVM <- svm(x = train_data, y = train_labels,
kernel = "linear",
type = "eps-regression",
cost = 100,
epsilon = 0.1)
testPred <- predict(linear.SVM, newdata = test_data)
resSVMlinear <- data.frame("EmpiricalRI" = test_labels,
"PredictedRI" = testPred,
"AE" = c(abs(testPred - test_labels)),
"APE" = c(abs((testPred - test_labels) / test_labels)))
resSVMtransf <- resSVMlinear
resSVMtransf[,1:3] <- resSVMtransf[,1:3]/10
resMet <- cbind(dataF, resSVMtransf)
idF_info <- merge(idF_info, resMet[,c(1,6)], by.x = "idF.Name.1", by.y = "Name",
all.x = TRUE, all.y = FALSE)
idF_info <- merge(idF_info, resMet[,c(1,6)], by.x = "idF.Name.2", by.y = "Name",
all.x = TRUE, all.y = FALSE)
idF_info <- merge(idF_info, resMet[,c(1,6)], by.x = "idF.Name.3", by.y = "Name",
all.x = TRUE, all.y = FALSE)
colnames(idF_info) <- c("idF.Name.3", "idF.Name.2", "idF.Name.1", "Nrow",
"RI.1", "RI.2", "RI.3", "Exp.RI", "RI.Pred.1",
"RI.Pred.2", "RI.Pred.3")
idF_info <- idF_info[order(idF_info$Nrow),c(4,3,2,1,5,6,7,9:11,8)]
idF_info <- cbind(idF_info, idF[,c(19:21,23)])
idF_info$RI.Pred.error.1 <- round(abs((idF_info$RI.Pred.1 - idF_info$Exp.RI)/idF_info$Exp.RI)*100,
digits = 2)
idF_info$RI.Pred.error.2 <- round(abs((idF_info$RI.Pred.2 - idF_info$Exp.RI)/idF_info$Exp.RI)*100,
digits = 2)
idF_info$RI.Pred.error.3 <- round(abs((idF_info$RI.Pred.3 - idF_info$Exp.RI)/idF_info$Exp.RI)*100,
digits = 2)
idF_infoFilt <- idF_info[-which(apply(idF_info[,15:17], 1, function(x){length(which(is.na(x) == TRUE))})>= 2),]
idF_infoFilt$Rank.Pred <- unname(apply(idF_infoFilt[,16:18], 1, which.min))
s <- summary(lm(unname(apply(idF_infoFilt[,c("RI.1", "RI.2", "RI.3", "Rank")], 1,
function(x){x[x[4]]}))~idF_infoFilt$Exp.RI))
s.Pred <- summary(lm(unname(apply(idF_infoFilt[,c("RI.Pred.1", "RI.Pred.2", "RI.Pred.3", "Rank.Pred")],
1, function(x){x[x[4]]}))~idF_infoFilt$Exp.RI))
layout(matrix(c(1,1,2,3,3,4), 3, 2, byrow = FALSE))
plot(x = unname(apply(idF_infoFilt[,c("RI.1", "RI.2", "RI.3", "Rank")], 1, function(x){x[x[4]]})),
y = idF_infoFilt$Exp.RI,
pch = 19, cex = 1.5,
#col = "#67001f",
xlab = "Reference RI", ylab = "Experimental RI",
cex.axis = 1.5, cex.lab = 1.5,
main = "")
abline(coef = c(0,1), lty = 4, lwd = 1.3)
abline(lm(unname(apply(idF_infoFilt[,c("RI.1", "RI.2", "RI.3", "Rank")], 1, function(x){x[x[4]]}))~idF_infoFilt$Exp.RI),
col = "red", lwd = 1.3
)
text(x = 1500, y = 2000,
paste("R2 =", round(s$adj.r.squared, 3)),
cex = 2)
#legend("topleft", inset = .02, legend=c("Match 1", "Match 2", "Match 3"),
#fill=c("#67001f", "#053061", "#d6604d")
#)
boxplot(unname(apply(idF_infoFilt[,c("RI.error.1", "RI.error.2", "RI.error.3", "Rank")], 1, function(x){x[x[4]]})),
horizontal = TRUE, col = "red",
outline = TRUE, frame.plot = FALSE,
ylim = c(0,1.5),
sub = "RI error (%)",
boxlty = 0, whisklty = 3, whisklwd = 3,
staplelwd = 2,cex.axis = 1.2)
plot(x = unname(apply(idF_infoFilt[,c("RI.Pred.1", "RI.Pred.2", "RI.Pred.3", "Rank.Pred")], 1, function(x){x[x[4]]})),
y = idF_infoFilt$Exp.RI,
pch = 19, cex = 1.5,
#col = "#67001f",
xlab = "Predicted RI", ylab = "Experimental RI",
cex.axis = 1.5, cex.lab = 1.5,
main = "")
abline(coef = c(0,1), lty = 4, lwd = 1.3)
abline(lm(unname(apply(idF_infoFilt[,c("RI.Pred.1", "RI.Pred.2", "RI.Pred.3", "Rank.Pred")], 1, function(x){x[x[4]]}))~idF_infoFilt$Exp.RI),
col = "red", lwd = 1.3)
text(x = 1500, y = 2000,
paste("R2 =", round(s.Pred$adj.r.squared, 3)),
cex = 2)
#legend("topleft", inset = .02, legend=c("Match 1", "Match 2", "Match 3"),
#fill=c("#67001f", "#053061", "#d6604d")
#)
boxplot(unname(apply(idF_infoFilt[,c("RI.Pred.error.1", "RI.Pred.error.2", "RI.Pred.error.3", "Rank.Pred")],
1, function(x){x[x[4]]})),
horizontal = TRUE, col = "red",
outline = TRUE, frame.plot = FALSE,
ylim = c(0,10),
sub = "RI error (%)",
boxlty = 0, whisklty = 3, whisklwd = 3,
staplelwd = 2, cex.axis = 1.2)
|
ea9fc1e7a5fc79af4fd12e464cb0265dc6043adb
|
3ed96bb9a7e7e0ed668b2fc05d29311ac4756a5d
|
/clases/M2_clase2_dplyr.R
|
f27d251328a10eb3c4292dd333e29cabc3305775
|
[] |
no_license
|
JulioCursos/analisis_reproducible_iibp
|
ccababc0e55f205d0bee2e37e107063f37e36c56
|
5fb83e1e6e7908fab7873288a483e007da4248d5
|
refs/heads/main
| 2023-05-03T06:49:14.143767
| 2021-05-26T17:59:58
| 2021-05-26T17:59:58
| 371,774,381
| 0
| 0
| null | 2021-05-28T17:31:01
| 2021-05-28T17:31:00
| null |
UTF-8
|
R
| false
| false
| 8,239
|
r
|
M2_clase2_dplyr.R
|
############-----------MODULE 2: CLASS 2. Data manipulation with dplyr----------############
# CONTENTS:
# 2.1. dplyr grammar.
# 2.2. Structure of dplyr functions
# 2.3. Select
# 2.4. Filter
# 2.5. Logical and boolean operators
# 2.6. arrange()
# 2.7. rename()
# 2.8. mutate()
# 2.9. Grouping with summarize(); group_by(); count()
# 2.10. Pipes %>%
#### 2.1. dplyr grammar.
select() # select columns
filter() # select rows
%>% # pipe operator to chain operations
mutate() # create new variables
summarize(); group_by(); count() # grouping options
arrange() # sort by a column
rename() # rename column headers
#### 2.2. Structure of dplyr functions
# 1. data: a data.frame
# 2. variables: columns referenced without $ or []
# 3. the result is a tidy tibble
#### 2.3. Select: function to extract subsets of columns
# Load libraries
# install.packages("tidyverse")
library(tidyverse)
# Download data
#download.file("http://bit.ly/MS_trafficstops_bw_age",
#              "./datos/MS_trafficstops_bw_age.csv")
stops <- read_csv("./datos/MS_trafficstops_bw_age.csv")
names(stops)
# some operations with select()
select(stops, id, police_department, officer_id, driver_age)# select by name
select(stops, - (id)) # everything except the id variable
# comparison with base R
select(stops, -(id:stop_date)) # with dplyr
i <- match("id", names(stops))
j <- match("stop_date", names(stops))
head(stops[, -(i:j)]) # with base R
head(stops[,-(1:2)])# simpler in base R
# some helper functions that work inside select()
# starts_with(), ends_with(), contains()
select(stops, starts_with("driver"))
select(stops, contains("id"))
#### 2.4. Filter: function to extract subsets of rows
filter(stops, driver_gender == "female") # keep only female drivers
filter(stops, driver_age > 50)
#### 2.5. Logical and boolean operators
## <  (less than)
## >  (greater than)
## &  (and)
## |  (or)
## !  (not)
## == (equal to)
## != (not equal to)
filter(stops, driver_age > 50 & driver_gender == "female")
# Filter Black male drivers over 30 stopped for a seat-belt violation
filter(stops, driver_gender == "male") # men
filter(stops, driver_race == "Black") # Black drivers
filter(stops, driver_age > 30) # over 30
# Black men over 30
b_male_30 <- filter(stops, driver_gender == "male" & driver_race == "Black" & driver_age > 30)
output <- filter(b_male_30, violation == "Seat belt" )
head(output)
# More examples and exercises with the (datos) package
library(datos)
data(package= "datos")
vuelos # dataset inside datos
# inspect
class(vuelos)
glimpse(vuelos)
# exercise 1. Flights that departed in November and December
filter(vuelos, mes == 11 | mes == 12)
filter(vuelos, mes %in% c(11, 12))
# exercise 2. Flights not delayed (arrival or departure) by more than 2 hours.
filter(vuelos, !(atraso_salida > 120 | atraso_llegada > 120))
# Some exercises from https://es.r4ds.hadley.nz/transform.html; find flights that:
# 1. Had an arrival delay of two or more hours
# 2. Flew to Houston (IAH or HOU)
# 3. Were operated by United, American or Delta
# 4. Departed in southern-hemisphere winter (July, August and September)
# (hedged solution sketches follow below)
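# Hedged solution sketches (column names assumed from datos::vuelos; the
# carrier codes "UA", "AA", "DL" are assumptions about the aerolinea column):
# filter(vuelos, atraso_llegada >= 120)              # 1.
# filter(vuelos, destino %in% c("IAH", "HOU"))       # 2.
# filter(vuelos, aerolinea %in% c("UA", "AA", "DL")) # 3.
# filter(vuelos, mes %in% c(7, 8, 9))                # 4.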
#### 2.6. arrange(): reorder rows
# examples from https://es.r4ds.hadley.nz/transform.html
arrange(vuelos, anio, mes, dia)
arrange(vuelos, desc(mes)) # desc() sorts a column in descending order.
#### 2.7. rename(): rename variables
vuelos_renamed <- rename(vuelos, h_sal = horario_salida,
s_p = salida_programada,
a_s = atraso_salida)
names(vuelos) # before renaming
names(vuelos_renamed) # after renaming
# NOTE: select() can also rename, but then it drops the unselected variables
#### 2.8. mutate()
# create a new variable; operations on existing columns are allowed
vuelos_duracion <- mutate(vuelos,
duracion_vuelo = horario_llegada - horario_salida)
# another example with mutate
vuelos_sml <- select(vuelos,
anio:dia,
starts_with("atraso"),
distancia,
tiempo_vuelo)
vuelos_sml <- mutate(vuelos_sml,
ganancia = atraso_salida - atraso_llegada,
velocidad = distancia / tiempo_vuelo * 60)
# NOTE: use transmute() if you only want to keep the new variables
#### 2.9. Grouping with summarize(); group_by(); count()
# group_by(): groups rows, collapsing to one row per group
# summarise(): used together with grouping to compute summary operations
vuelos_mes <- group_by(vuelos, mes)
summarise(vuelos_mes, atraso_mensual_promedio = mean(atraso_salida, na.rm= TRUE) )
# NOTE: NAs deserve worked examples; see the sketch below
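# A minimal sketch of why na.rm matters (toy vector, not the course data):
mean(c(1, 2, NA))               # NA: the missing value propagates
mean(c(1, 2, NA), na.rm = TRUE) # 1.5: NA is dropped before averaging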
#### 2.10. Pipes %>%
vuelos %>% group_by(mes) %>%
summarise(atraso_mensual_promedio = mean(atraso_salida, na.rm= TRUE) )
# another pipe example
stops %>%
group_by(driver_race) %>%
summarize(mean_age = mean(driver_age, na.rm= TRUE))
stops %>%
  filter(!is.na(driver_race)) %>% # excluding NAs in driver_race
group_by(driver_race) %>%
summarize(mean_age = mean(driver_age, na.rm= TRUE))
# Imagine we want to explore the relationship between distance and
# average delay for each destination
por_destino <- group_by(vuelos, destino)
atraso <- summarise(por_destino,
conteo = n(),
distancia = mean(distancia, na.rm = TRUE),
atraso = mean(atraso_llegada, na.rm = TRUE)
)
atraso <- filter(atraso, conteo > 20, destino != "HNL")
# three steps for the example above
# 1. Group flights by destination
# 2. Summarise to compute the distance, average delay and number of flights per group.
# 3. Filter to remove noisy points and the Honolulu airport
atrasos <- vuelos %>%
group_by(destino) %>%
summarise(
conteo = n(),
distancia = mean(distancia, na.rm = TRUE),
atraso = mean(atraso_llegada, na.rm = TRUE)
) %>%
filter(conteo > 20, destino != "HNL")
# Examples to better understand group_by and summarize
# group_by() :
stops %>%
group_by(driver_race) %>%
  summarise(n= n()) # number of drivers per race
stops %>%
  count(violation) # count observations per group
stops %>%
  count(violation, name = "n_infracciones")# if we want to name the count column
# Number of women per violation
stops %>%
filter(driver_gender== "female") %>%
group_by(violation) %>%
summarise(n_infracciones_mujeres = n())
# Number of white women per violation
stops %>%
filter(driver_gender== "female" & driver_race == "White") %>%
group_by(violation) %>%
summarise(n_infracciones = n())
# handling missing / not-available values, NA (not available)
# Question: why do NAs complicate things?
# Answer: because NAs are contagious. Any operation with an unknown value
# yields an unknown value
NA + 10
NA / 2
NA > 5
10 == NA
# Application
# Let x be Maria's age. We don't know how old she is.
x <- NA
# Let y be Juan's age. We don't know how old he is.
y <- NA
# Are Juan and Maria the same age?
x == y
# We don't know!
# example with an imported Excel file
library(readxl)
data <- read_excel("./datos/datasets_NA.xlsx")
View(data) # R turns all empty cells into NA
# check for missing values
is.na(data) # logical operation
data %>% filter(is.na(Sepal.Length))# applied in a filter
# !is.na(Sepal.Length) keeps all the non-missing rows
# remove missing values
library(tidyr)
data_sin_na <- drop_na(data)# drops the rows with missing values
# But sometimes cells are filled in manually with 0 or dashes "-"
# na_if()
data <- na_if(data, "-")
data <- na_if(data, 0)
# save that file
library(openxlsx)
openxlsx::write.xlsx(data, file = "./datos/datasets_NA.xlsx") # no problems with this library
write.csv(data, "./datos/datasets_NA.csv", row.names = FALSE)# exports filling blanks with NA
# pass na = "" if you want the cells left empty (one-liner sketch below)
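# e.g., a hedged one-liner writing blanks instead of the NA string:
# write.csv(data, "./datos/datasets_NA.csv", row.names = FALSE, na = "")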
|
9c68da78ac51c028fe5a3f3ac8c75de66cf2e0ec
|
66f8711bc942a1bc635a6deea253e9a49c718094
|
/R/uniqnameCheck.R
|
1a0d0a615ff810dc85d22c60d6f3dfa22b996e35
|
[
"MIT"
] |
permissive
|
seanrsilver/novnet
|
bd179476c48a8dd809757c60488dde7193a4145b
|
85107cfbbabc68c603134db5b5fc8bbf9219624b
|
refs/heads/master
| 2020-06-05T18:20:58.057024
| 2019-06-18T14:29:45
| 2019-06-18T14:29:45
| 192,495,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,696
|
r
|
uniqnameCheck.R
|
#' uniqnameCheck()
#'
#' Checks for duplicate rownames in Char.csv uniqname, and returns a matrix
#' Run if txtDisambig fails with "duplicate row names not allowed"
#'
#' @param filename File name as character string, i.e. "Crusoe".
#' @param local Default = FALSE. If FALSE, searches for file in google drive. If TRUE, seeks file in folder filename/.
#' @keywords Text Preparation
#'
#' @import googledrive
#'
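#' @examples
#' \dontrun{
#' ## hypothetical call, assuming data/Crusoe/CrusoeChar.csv exists locally
#' uniqnameCheck("Crusoe", local = TRUE)
#' }
#'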
#' @export
uniqnameCheck <- function(filename,
local = FALSE){
## a) pull *Char.csv spreadsheet of name alternates
if(local == FALSE){
cat("Downloading character data from Google Drive.\n")
drive_download(file = paste0(filename, "Char.csv"),
overwrite = TRUE,
type = "csv",
path = paste0("data/", filename, "/", filename, "Char.csv"))
}
char.data.df <- read.csv(file = paste0("data/", filename, "/", filename, "Char.csv"),
header = TRUE, sep = ",",
skip = 7,
stringsAsFactors = FALSE,
blank.lines.skip = TRUE)
char.data.df[is.na(char.data.df)] <- "" # removes NA from completely blank columns
## Pull vector of uniqnames-- char.data.df[, 1]
uniq.names.v <- char.data.df[, 1]
uniq.names.t <- table(uniq.names.v)
dup.uniqnames.t <- uniq.names.t[which(uniq.names.t > 1)]
## Return to screen as matrix, or report no duplicates found
dup.uniqnames.df <- data.frame(uniqname = names(dup.uniqnames.t), numOfDups = as.numeric(dup.uniqnames.t))
if(nrow(dup.uniqnames.df) == 0){
cat("No duplicate uniqnames have been found.")
} else {
dup.uniqnames.df
}
}
|
478e8047c067d596919ac32938d0c17fe8a03a3f
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/R/xlabel_-.R
|
8ddcba6b76195d2a87d3a41a1d544b0d4b770a67
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 102
|
r
|
xlabel_-.R
|
`xlabel<-` <-
function (data.ld, value)
{
attr(data.ld, "xlabel") <- value
return(data.ld)
}
|
ac8a00571acaf1c5f3c020acb5db9bc813fa1ea1
|
287c1be93b3d5786f221973a0705ab66bfc4dd03
|
/Projects/smd_2016/metrics/code/functions/fiber.R
|
9825fff0e61ceece612b087d09007783d3014df6
|
[] |
no_license
|
mtejas88/esh
|
c6c6c8e6d7ee2b47891421f2736f7e97d43c4dc5
|
e28651449ce5be492c8eea8a75e66ddafe2b87bc
|
refs/heads/master
| 2021-07-11T02:32:51.307530
| 2017-10-10T16:46:57
| 2017-10-10T16:46:57
| 106,446,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,814
|
r
|
fiber.R
|
## =========================================================================
##
## REFRESH STATE METRICS:
## FIBER
##
## 2015 SAMPLE: exclude_from_analysis == FALSE
## 2016 SAMPLE: exclude_from_ia_analysis == FALSE
##
## hist: percent of schools on fiber
## targets: sourced, 2016
## ranking: unweighted/weighted, campuses 2016
##
## =========================================================================
fiber <- function(sots.2015, dd.2015, ds.2015, dd.2016, ds.2016, dta, states.with.schools){
## METRIC ACROSS TIME
## save a version of dd.2016 all
dd.2016.all <- dd.2016
ds.2016.all <- ds.2016
  ## subset to districts "fit for analysis"
  dd.2015 <- dd.2015[dd.2015$exclude_from_analysis == FALSE,]
  ds.2015 <- ds.2015[ds.2015$exclude_from_analysis == FALSE,]
#dd.2016 <- dd.2016[dd.2016$exclude_from_ia_analysis == FALSE,]
states.with.schools.dta <- data.frame(postal_cd=states.with.schools)
## create subsets for the new fiber metric -- just for 2016
## group A: dirty for both ia analysis and fiber analysis
dd.2016.A <- dd.2016[dd.2016$exclude_from_ia_analysis == TRUE & dd.2016$exclude_from_current_fiber_analysis == TRUE,]
## group B: dirty for ia analysis and clean for fiber analysis
dd.2016.B <- dd.2016[dd.2016$exclude_from_ia_analysis == TRUE & dd.2016$exclude_from_current_fiber_analysis == FALSE,]
## group C: clean for both ia analysis and fiber analysis
dd.2016.C <- dd.2016[dd.2016$exclude_from_ia_analysis == FALSE & dd.2016$exclude_from_current_fiber_analysis == FALSE,]
## create subsets for the new fiber metric -- just for 2016 schools
## group A: dirty for both ia analysis and fiber analysis
ds.2016.A <- ds.2016[ds.2016$exclude_from_ia_analysis == TRUE & ds.2016$exclude_from_current_fiber_analysis == TRUE,]
## group B: dirty for ia analysis and clean for fiber analysis
ds.2016.B <- ds.2016[ds.2016$exclude_from_ia_analysis == TRUE & ds.2016$exclude_from_current_fiber_analysis == FALSE,]
## group C: clean for both ia analysis and fiber analysis
ds.2016.C <- ds.2016[ds.2016$exclude_from_ia_analysis == FALSE & ds.2016$exclude_from_current_fiber_analysis == FALSE,]
## 1) Campuses on Fiber (Count): "_campuses_on_fiber"
##-----------------------------------------------------------
## sots 2015:
sots.2015[,'sots15_campuses_on_fiber_perc'] <- sots.2015$X..of.schools..campuses..that.have.fiber.connections..or.equivalent.
## 2015 current:
## aggregate across the districts level
dd.2015$counter <- dd.2015$current_known_scalable_campuses + dd.2015$current_assumed_scalable_campuses
campuses.on.fiber.2015 <- aggregate(dd.2015$counter, by=list(dd.2015$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2015) <- c('postal_cd', 'current15_campuses_on_fiber')
## 2015 current: -- schools
## aggregate across the districts level
ds.2015$counter <- ds.2015$current_known_scalable_campuses + ds.2015$current_assumed_scalable_campuses
campuses.on.fiber.2015.sch <- aggregate(ds.2015$counter, by=list(ds.2015$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2015.sch) <- c('postal_cd', 'current15_campuses_on_fiber')
campuses.on.fiber.2015.sch <- merge(campuses.on.fiber.2015.sch, states.with.schools.dta, all=T)
## 2016 current:
## New Fiber Metric:
## num_scalable_campuses == [#scalable(B & C) + (%scalable C)*(#campuses A)]
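  ## e.g., with purely illustrative numbers: 40 scalable campuses in B, 60 of 80
  ## scalable in C (75%), and 20 campuses in A gives 40 + 60 + 0.75*20 = 115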
dd.2016.B$counter <- dd.2016.B$current_known_scalable_campuses + dd.2016.B$current_assumed_scalable_campuses
campuses.on.fiber.2016.B <- aggregate(dd.2016.B$counter, by=list(dd.2016.B$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2016.B) <- c('postal_cd', 'current16_campuses_on_fiber.B')
dd.2016.C$counter <- dd.2016.C$current_known_scalable_campuses + dd.2016.C$current_assumed_scalable_campuses
campuses.on.fiber.2016.C <- aggregate(dd.2016.C$counter, by=list(dd.2016.C$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2016.C) <- c('postal_cd', 'current16_campuses_on_fiber.C')
## find percent scalable C * #campuses in A for each state
dd.2016.C$counter.all <- dd.2016.C$num_campuses
all.campuses.2016.C <- aggregate(dd.2016.C$counter.all, by=list(dd.2016.C$postal_cd), FUN=sum, na.rm=T)
names(all.campuses.2016.C) <- c("postal_cd", "current16_campuses_all.C")
## merge
campuses.on.fiber.2016.C <- merge(campuses.on.fiber.2016.C, all.campuses.2016.C, by='postal_cd', all=T)
## calculate percentage scalable
campuses.on.fiber.2016.C$percentage.scalable.C <- campuses.on.fiber.2016.C$current16_campuses_on_fiber.C / campuses.on.fiber.2016.C$current16_campuses_all.C
## calculate number of campuses in A
dd.2016.A$counter.all <- dd.2016.A$num_campuses
all.campuses.2016.A <- aggregate(dd.2016.A$counter.all, by=list(dd.2016.A$postal_cd), FUN=sum, na.rm=T)
names(all.campuses.2016.A) <- c("postal_cd", "current16_campuses_all.A")
## merge
all.campuses.2016.A <- merge(all.campuses.2016.A, campuses.on.fiber.2016.C, by='postal_cd', all=T)
all.campuses.2016.A$extrapolated.campuses.on.fiber.2016.A <- all.campuses.2016.A$current16_campuses_all.A * all.campuses.2016.A$percentage.scalable.C
campuses.on.fiber.2016 <- merge(campuses.on.fiber.2016.B, campuses.on.fiber.2016.C, by='postal_cd', all=T)
campuses.on.fiber.2016 <- merge(campuses.on.fiber.2016, all.campuses.2016.A[,c('postal_cd', 'extrapolated.campuses.on.fiber.2016.A')], by='postal_cd', all=T)
campuses.on.fiber.2016$current16_campuses_on_fiber <- rowSums(campuses.on.fiber.2016[,c('current16_campuses_on_fiber.B', 'current16_campuses_on_fiber.C',
'extrapolated.campuses.on.fiber.2016.A')], na.rm=T)
## 2016 current -- schools:
## New Fiber Metric:
  ## num_scalable_campuses == [#scalable(B & C) + (%scalable C)*(#campuses A)]
ds.2016.B$counter <- ds.2016.B$current_known_scalable_campuses + ds.2016.B$current_assumed_scalable_campuses
campuses.on.fiber.2016.B <- aggregate(ds.2016.B$counter, by=list(ds.2016.B$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2016.B) <- c('postal_cd', 'current16_campuses_on_fiber.B')
ds.2016.C$counter <- ds.2016.C$current_known_scalable_campuses + ds.2016.C$current_assumed_scalable_campuses
campuses.on.fiber.2016.C <- aggregate(ds.2016.C$counter, by=list(ds.2016.C$postal_cd), FUN=sum, na.rm=T)
names(campuses.on.fiber.2016.C) <- c('postal_cd', 'current16_campuses_on_fiber.C')
## find percent scalable C * #campuses in A for each state
ds.2016.C$counter.all <- ds.2016.C$num_campuses
all.campuses.2016.C <- aggregate(ds.2016.C$counter.all, by=list(ds.2016.C$postal_cd), FUN=sum, na.rm=T)
names(all.campuses.2016.C) <- c("postal_cd", "current16_campuses_all.C")
## merge
campuses.on.fiber.2016.C <- merge(campuses.on.fiber.2016.C, all.campuses.2016.C, by='postal_cd', all=T)
## calculate percentage scalable
campuses.on.fiber.2016.C$percentage.scalable.C <- campuses.on.fiber.2016.C$current16_campuses_on_fiber.C / campuses.on.fiber.2016.C$current16_campuses_all.C
## calculate number of campuses in A -- none for schools-level
if (nrow(ds.2016.A) > 0){
ds.2016.A$counter.all <- ds.2016.A$num_campuses
all.campuses.2016.A <- aggregate(ds.2016.A$counter.all, by=list(ds.2016.A$postal_cd), FUN=sum, na.rm=T)
names(all.campuses.2016.A) <- c("postal_cd", "current16_campuses_all.A")
## merge
all.campuses.2016.A <- merge(all.campuses.2016.A, campuses.on.fiber.2016.C, by='postal_cd', all=T)
all.campuses.2016.A$extrapolated.campuses.on.fiber.2016.A <- all.campuses.2016.A$current16_campuses_all.A * all.campuses.2016.A$percentage.scalable.C
campuses.on.fiber.2016.sch <- merge(campuses.on.fiber.2016.B, campuses.on.fiber.2016.C, by='postal_cd', all=T)
campuses.on.fiber.2016.sch <- merge(campuses.on.fiber.2016.sch, all.campuses.2016.A[,c('postal_cd', 'extrapolated.campuses.on.fiber.2016.A')], by='postal_cd', all=T)
campuses.on.fiber.2016.sch$current16_campuses_on_fiber <- rowSums(campuses.on.fiber.2016.sch[,c('current16_campuses_on_fiber.B', 'current16_campuses_on_fiber.C',
'extrapolated.campuses.on.fiber.2016.A')], na.rm=T)
campuses.on.fiber.2016.sch <- merge(campuses.on.fiber.2016.sch, states.with.schools.dta, all=T)
} else{
all.campuses.2016.A <- NULL
## merge
campuses.on.fiber.2016.sch <- merge(campuses.on.fiber.2016.B, campuses.on.fiber.2016.C, by='postal_cd', all=T)
campuses.on.fiber.2016.sch$current16_campuses_on_fiber <- rowSums(campuses.on.fiber.2016.sch[,c('current16_campuses_on_fiber.B', 'current16_campuses_on_fiber.C')], na.rm=T)
campuses.on.fiber.2016.sch <- merge(campuses.on.fiber.2016.sch, states.with.schools.dta, all=T)
}
## merge in stats to dta
dta <- merge(dta, sots.2015[,c('postal_cd', 'sots15_campuses_on_fiber_perc')], by='postal_cd', all=T)
dta <- merge(dta, campuses.on.fiber.2015[,c('postal_cd', 'current15_campuses_on_fiber')], by='postal_cd', all.x=T)
dta <- merge(dta, campuses.on.fiber.2016[,c('postal_cd', 'current16_campuses_on_fiber')], by='postal_cd', all.x=T)
## add in national level population
cols <- c('current15_campuses_on_fiber', 'current16_campuses_on_fiber')
for (j in 1:length(cols)){
dta[dta$postal_cd == 'ALL', names(dta) == cols[j]] <- sum(dta[,names(dta) == cols[j]], na.rm=T)
}
## merge in schools-level metrics for the states with schools
## order the datasets the same
dta <- dta[order(dta$postal_cd),]
campuses.on.fiber.2016.sch <- campuses.on.fiber.2016.sch[order(campuses.on.fiber.2016.sch$postal_cd),]
dta[dta$postal_cd %in% states.with.schools, 'current16_campuses_on_fiber'] <-
campuses.on.fiber.2016.sch[campuses.on.fiber.2016.sch$postal_cd %in% states.with.schools, 'current16_campuses_on_fiber']
campuses.on.fiber.2015.sch <- campuses.on.fiber.2015.sch[order(campuses.on.fiber.2015.sch$postal_cd),]
dta[dta$postal_cd %in% states.with.schools, 'current15_campuses_on_fiber'] <-
campuses.on.fiber.2015.sch[campuses.on.fiber.2015.sch$postal_cd %in% states.with.schools, 'current15_campuses_on_fiber']
## 2) Campuses on Fiber (%):
##---------------------------------------------------------------------------------------------------------------
## for each dataset, aggregate through dta and calculate the percentage of the samples
datasets <- c('current15', 'current16')
for (j in 1:length(datasets)){
new.col.name <- paste(datasets[j], "campuses_on_fiber_perc", sep='_')
## don't round the percentage yet, so can calculate the ranking first
if (datasets[j] == 'current15'){
dta[,new.col.name] <- (dta[,paste(datasets[j], "campuses_on_fiber", sep='_')] / dta[,paste(datasets[j], "campuses_sample", sep='_')]) * 100
} else{
dta[,new.col.name] <- (dta[,paste(datasets[j], "campuses_on_fiber", sep='_')] / dta[,paste(datasets[j], "campuses_pop", sep='_')]) * 100
}
}
## hard code SotS % with fiber -- 88%
dta$sots15_campuses_on_fiber_perc[dta$postal_cd == 'ALL'] <- 88
##************************************************************************************************************************************
## TARGETS
## first, aggregate the number of targets and potential targets at the state level
## define function to append the 4 types of target counts
append.targets <- function(dta, col, campus.flag, states.with.schools){
if (campus.flag == 1){
dd.2016.all$counter <- dd.2016.all[,col] * (dd.2016.all$current_known_scalable_campuses + dd.2016.all$current_assumed_scalable_campuses)
targets <- aggregate(dd.2016.all$counter, by=list(dd.2016.all$postal_cd), FUN=sum)
col.name <- paste("num_campuses", col, sep='_')
} else{
targets <- aggregate(dd.2016.all[,col], by=list(dd.2016.all$postal_cd), FUN=sum)
col.name <- paste("num", col, sep="_")
}
names(targets) <- c('postal_cd', col.name)
## merge in dta
dta <- merge(dta, targets, by='postal_cd', all.x=T)
## add national number
dta[dta$postal_cd == 'ALL', col.name] <- sum(dta[!dta$postal_cd %in% states.with.schools, col.name], na.rm=T)
return(dta)
}
## make counters for the 4 types:
## Targets, Clean Targets, Potential Targets, Clean Potential Targets
dd.2016.all$fiber_targets <- ifelse(dd.2016.all$fiber_target_status == "Target", 1, 0)
dd.2016.all$fiber_targets_clean <- ifelse(dd.2016.all$fiber_target_status == "Target" & dd.2016.all$exclude_from_ia_analysis == FALSE, 1, 0)
dd.2016.all$fiber_po_targets <- ifelse(dd.2016.all$fiber_target_status == "Potential Target", 1, 0)
dd.2016.all$fiber_po_targets_clean <- ifelse(dd.2016.all$fiber_target_status == "Potential Target" & dd.2016.all$exclude_from_ia_analysis == FALSE, 1, 0)
## call function for each
dta <- append.targets(dta, "fiber_targets", 0, states.with.schools)
dta <- append.targets(dta, "fiber_targets_clean", 0, states.with.schools)
dta <- append.targets(dta, "fiber_po_targets", 0, states.with.schools)
dta <- append.targets(dta, "fiber_po_targets_clean", 0, states.with.schools)
dta <- append.targets(dta, "fiber_targets", 1, states.with.schools)
dta <- append.targets(dta, "fiber_targets_clean", 1, states.with.schools)
dta <- append.targets(dta, "fiber_po_targets", 1, states.with.schools)
dta <- append.targets(dta, "fiber_po_targets_clean", 1, states.with.schools)
## then, create target subset to be displayed in the tool
## create an indicator for no data district
dd.2016.all$no_data <- ifelse(dd.2016.all$lines_w_dirty == 0, TRUE, FALSE)
## create number of circuits field
dd.2016.all$num_circuits <- dd.2016.all$non_fiber_lines + dd.2016.all$fiber_wan_lines + dd.2016.all$fiber_internet_upstream_lines
## create total number of unknown campuses field
dd.2016.all$total_unknown_campuses <- dd.2016.all$current_assumed_scalable_campuses + dd.2016.all$current_assumed_unscalable_campuses
fiber.targets <- dd.2016.all[dd.2016.all$fiber_target_status == 'Target' | dd.2016.all$fiber_target_status == 'Potential Target',]
fiber.targets <- fiber.targets[,c('esh_id', 'postal_cd', 'name', 'locale', 'district_size',
'num_students', 'num_campuses', 'num_circuits',
'bundled_and_dedicated_isp_sp', 'most_recent_ia_contract_end_date',
'ia_bandwidth_per_student_kbps', 'ia_bw_mbps_total',
'current_known_scalable_campuses', 'current_assumed_scalable_campuses',
'current_assumed_unscalable_campuses', 'current_known_unscalable_campuses', 'total_unknown_campuses',
'fiber_target_status', 'no_data', names(dd.2016.all)[grepl('exclude', names(dd.2016.all))])]
names(fiber.targets)[names(fiber.targets) == 'bundled_and_dedicated_isp_sp'] <- 'bundled_and_dedicated_isp_sp_2016'
names(fiber.targets)[names(fiber.targets) == 'most_recent_ia_contract_end_date'] <- 'most_recent_ia_contract_end_date_2016'
## merge in 2015 data
names(dd.2015)[names(dd.2015) == 'bundled_and_dedicated_isp_sp'] <- 'bundled_and_dedicated_isp_sp_2015'
names(dd.2015)[names(dd.2015) == 'most_recent_ia_contract_end_date'] <- 'most_recent_ia_contract_end_date_2015'
fiber.targets <- merge(fiber.targets, dd.2015[,c('esh_id', 'bundled_and_dedicated_isp_sp_2015',
'most_recent_ia_contract_end_date_2015')], by='esh_id', all.x=T)
## round out variables
fiber.targets$ia_bandwidth_per_student_kbps <- round(fiber.targets$ia_bandwidth_per_student_kbps, 0)
fiber.targets$ia_bw_mbps_total <- round(fiber.targets$ia_bw_mbps_total, 0)
## order the dataset
fiber.targets <- fiber.targets[order(fiber.targets$current_assumed_unscalable_campuses, decreasing=T),]
fiber.targets <- fiber.targets[,c('esh_id', 'postal_cd', 'name', 'locale',
'district_size', 'num_students', 'num_campuses', 'num_circuits',
'bundled_and_dedicated_isp_sp_2015', 'bundled_and_dedicated_isp_sp_2016',
'most_recent_ia_contract_end_date_2015', 'most_recent_ia_contract_end_date_2016',
'ia_bandwidth_per_student_kbps', 'ia_bw_mbps_total',
'current_known_scalable_campuses', 'current_assumed_scalable_campuses',
'current_assumed_unscalable_campuses', 'current_known_unscalable_campuses', 'total_unknown_campuses',
'fiber_target_status', 'no_data', names(fiber.targets)[grepl('exclude', names(fiber.targets))])]
## add in IRT links
fiber.targets$irt_link <- paste("<a href='http://irt.educationsuperhighway.org/districts/", fiber.targets$esh_id, "'>",
"http://irt.educationsuperhighway.org/districts/", fiber.targets$esh_id, "</a>", sep='')
## also record average number of campuses with assumed or known unscalable
fiber.targets$sum.unscalable <- fiber.targets$current_assumed_unscalable_campuses + fiber.targets$current_known_unscalable_campuses
agg.states.mean <- aggregate(fiber.targets$sum.unscalable, by=list(fiber.targets$postal_cd), FUN=mean, na.rm=T)
names(agg.states.mean) <- c('postal_cd', 'mean_num_campuses_unscalable_targets')
dta <- merge(dta, agg.states.mean, by='postal_cd', all.x=T)
dta$mean_num_campuses_unscalable_targets[dta$postal_cd == 'ALL'] <- mean(fiber.targets$sum.unscalable, na.rm=T)
fiber.targets$sum.unscalable <- NULL
dta$mean_num_campuses_unscalable_targets <- round(dta$mean_num_campuses_unscalable_targets, 2)
## CLICK-THROUGH DATA -- those not meeting goals in 2016
## create data subset to be displayed in the tool
## add in target status indicator
## combine schools level and district level for this click-through
dd.2016 <- dd.2016[!dd.2016$postal_cd %in% states.with.schools,]
dd.2016 <- rbind(dd.2016, ds.2016)
fiber.click.through <- dd.2016[,c('postal_cd', 'esh_id', 'name', 'num_campuses',
'bundled_and_dedicated_isp_sp', 'most_recent_ia_contract_end_date',
'current_known_scalable_campuses', 'current_assumed_scalable_campuses',
'current_assumed_unscalable_campuses', 'current_known_unscalable_campuses',
names(dd.2016)[grepl('exclude', names(dd.2016))], names(dd.2016)[grepl('flag', names(dd.2016))],
names(dd.2016)[grepl('tag', names(dd.2016))])]
names(fiber.click.through)[names(fiber.click.through) == 'bundled_and_dedicated_isp_sp'] <- 'bundled_and_dedicated_isp_sp_2016'
names(fiber.click.through)[names(fiber.click.through) == 'most_recent_ia_contract_end_date'] <- 'most_recent_ia_contract_end_date_2016'
## merge in 2015 data
fiber.click.through <- merge(fiber.click.through, dd.2015[,c('esh_id', 'bundled_and_dedicated_isp_sp_2015',
'most_recent_ia_contract_end_date_2015')], by='esh_id', all.x=T)
#sots.names <- names(fiber.click.through)[grepl('sots', names(fiber.click.through))]
#fiber.click.through <- fiber.click.through[,!names(fiber.click.through) %in% sots.names]
fiber.click.through$target <- ifelse(fiber.click.through$esh_id %in% fiber.targets$esh_id, TRUE, FALSE)
## order the dataset
fiber.click.through <- fiber.click.through[order(fiber.click.through$current_assumed_unscalable_campuses, decreasing=T),]
fiber.click.through <- fiber.click.through[,c('postal_cd', 'esh_id', 'name', 'num_campuses',
'bundled_and_dedicated_isp_sp_2015', 'bundled_and_dedicated_isp_sp_2016',
'most_recent_ia_contract_end_date_2015', 'most_recent_ia_contract_end_date_2016',
'current_known_scalable_campuses', 'current_assumed_scalable_campuses',
'current_assumed_unscalable_campuses', 'current_known_unscalable_campuses',
names(dd.2016)[grepl('exclude', names(dd.2016))], names(dd.2016)[grepl('flag', names(dd.2016))],
names(dd.2016)[grepl('tag', names(dd.2016))])]
## add in IRT links
fiber.click.through$irt_link <- paste("<a href='http://irt.educationsuperhighway.org/districts/", fiber.click.through$esh_id, "'>",
"http://irt.educationsuperhighway.org/districts/", fiber.click.through$esh_id, "</a>", sep='')
##************************************************************************************************************************************
## NUMBER OF CAMPUSES ON FIBER (EXTRAPOLATED)
## multiply percentage of students meeting to total population of students
#dta$num_campuses_on_fiber_extrap <- round((dta$current16_campuses_on_fiber_perc/100), 2)*dta$current16_campuses_pop
dta$num_campuses_on_fiber_extrap <- (dta$current16_campuses_on_fiber_perc/100)*dta$current16_campuses_pop
##************************************************************************************************************************************
## NATIONAL RANKING
dta <- national.ranking(dta, "current16_campuses_on_fiber_perc", "fiber")
assign("dta", dta, envir = .GlobalEnv)
assign("fiber.targets", fiber.targets, envir = .GlobalEnv)
assign("fiber.click.through", fiber.click.through, envir=.GlobalEnv)
}
|
8399ab01a53a84efb176927ba0fcbe8c5bd99ab1
|
bc6bd59c8507bdd4ff4d45fd10185115f2a068c9
|
/Live Scripts/Live Coding.R
|
4fea8513947f6434f7d5c2b5d599ee7eb8f85cb1
|
[] |
no_license
|
Paulgbullard/NHSR-Forecasting-Webinars
|
a799d00c689878f815a6b3bf165996b4d0c04b94
|
eb4113fa8e2742cdc801e213f813763daa284c98
|
refs/heads/master
| 2022-11-10T20:37:34.179646
| 2020-06-30T13:50:38
| 2020-06-30T13:50:50
| 273,520,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,088
|
r
|
Live Coding.R
|
library(fpp3)
library(readr)
#------- Lab Session 1---------------
#1.1-Import data into R
ae_uk_original <- readr::read_csv("Data/ae_uk.csv",
col_types = cols(
arrival_time=col_datetime(format = "%d/%m/%Y %H:%M"),
gender=col_character(),
type_injury=col_character()))
#2.1- check duplications and fix it
ae_uk_original %>%
duplicated() %>%
sum()#check duplicates
ae_wd <- ae_uk_original %>%
dplyr::distinct(.)# remove duplicates and get a distinct tibble
nrow(ae_uk_original)-nrow(ae_wd) #check the number of duplicates if you want
#3.1- create tsibble
ae_tsb <- ae_wd %>%
as_tsibble(key = c(gender,type_injury),
index = arrival_time,
regular=FALSE)
# if you start working with an irregular index, you need to use `regular=FALSE` in as_tsibble
# regularise an irregular index, create a new tsibble
ae_hourly <- ae_tsb %>%
group_by(gender,type_injury) %>%
index_by(arrival_1h = lubridate::floor_date(arrival_time, "1 hour")) %>%
summarise(n_attendance=n())
# 4.1. check implicit NA / gaps in time
has_gaps(ae_hourly)#check gaps
scan_gaps(ae_hourly)# show the gaps
count_gaps(ae_hourly)# count the gaps
# if there is any gap, them fill it with zero
ae_hourly <- ae_tsb %>%
group_by(gender, type_injury) %>%
index_by(arrival_1h = lubridate::floor_date(arrival_time, "1 hours")) %>%
summarise(n_attendance=n()) %>%
fill_gaps(n_attendance=0L) %>%
ungroup()
#you can use `index_by()` and `summarise()` to regularise index
# ae_hourly is a tsibble with a regular 1-hour spacing; you can change it to any interval, e.g. "2 hours", "3 hours", etc., or build any coarser granularity from the hourly series, such as daily or weekly
# create a daily series to work with a single time series; tsibble can handle many time series at once, see lab session 12 for more information
ae_daily <- ae_hourly %>%
index_by(year_day=as_date(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_daily_keys <- ae_hourly %>% group_by(gender, type_injury) %>%
index_by(year_day=as_date(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_weekly <- ae_hourly %>%
index_by(weekly=yearweek(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_weekly_keys <- ae_hourly %>% group_by(gender, type_injury) %>%
index_by(weekly=yearweek(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_monthly <- ae_hourly %>%
index_by(monthly=yearmonth(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_monthly_keys <- ae_hourly %>% group_by(gender, type_injury) %>%
index_by(monthly=yearmonth(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_quarterly <- ae_hourly %>%
index_by(quarter=yearquarter(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_quarterly_keys <- ae_hourly %>% group_by(gender, type_injury) %>%
index_by(quarter=yearquarter(arrival_1h)) %>%
summarise(n_attendance=sum(n_attendance))
ae_daily %>%autoplot()
ae_weekly %>% autoplot()
ae_monthly %>% autoplot()
#save ae_daily
write_rds(ae_daily, "Data/ae_daily.rds")
write_rds(ae_hourly, "Data/ae_hourly.rds")
#######
#Live coding here:
#######
#seasons
ae_daily %>% gg_season(n_attendance)
ae_daily %>% gg_season(n_attendance, period = "week")
ae_daily %>% gg_season(n_attendance, period = "month")
ae_daily %>% gg_subseries(n_attendance, period = "week")
#ACF
ae_daily %>% gg_lag(n_attendance, lags = c(1:14), geom = "point")
ae_daily %>% ACF(lag_max = 14)
ae_daily %>% ACF(lag_max = 21) %>% autoplot()
#Show all...
ae_daily %>% gg_tsdisplay()
#Significant ACF? Small p-value means significant.
ae_daily %>% features(n_attendance, ljung_box, dof = 0)
#########################
#Simple forecasting methods
#fit model
ae_fit <- ae_daily %>%
model(mean = MEAN(n_attendance),
naive = NAIVE(n_attendance),
snaive = SNAIVE(n_attendance, lag = "week"),
drift = RW(n_attendance ~ drift())
)
model2 <- ae_daily_keys %>%
model(mean = MEAN(n_attendance),
naive = NAIVE(n_attendance),
snaive = SNAIVE(n_attendance, lag = "week"),
drift = RW(n_attendance ~ drift())
)
#view mable (model table)
ae_fit
#View some information
ae_fit %>% select(mean) %>% glance()
#Forecast!
all_forecast <- ae_fit %>%
forecast(h = 42)
all_forecast %>% autoplot(filter_index(ae_daily, "2016" ~ .))
#Residual diagnostics
ae_fit %>% augment() %>% filter(.model == "snaive") %>% select(.resid) %>% ACF() %>% autoplot()
ae_fit %>% select(mean) %>% gg_tsresiduals()
ae_fit %>% select(naive) %>% gg_tsresiduals()
ae_fit %>% select(snaive) %>% gg_tsresiduals()
ae_fit %>% select(drift) %>% gg_tsresiduals()
#Time series cross validation
f_horizon <- 42
ae_daily_test <- ae_daily %>% slice((n()-(f_horizon-1)):n())
ae_daily_train <- ae_daily %>% slice(1:(n()-f_horizon))
train_XV <- ae_daily_train %>%
slice(1:(n()-f_horizon)) %>%
stretch_tsibble(.init = 5 * 365, .step = 7)
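# stretch_tsibble builds cumulative training windows: .id 1 covers the first
# 5*365 days (.init), .id 2 adds .step = 7 more days, and so on, so each model
# below is re-fit on every expanding window.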
ae_fit_XV <- train_XV %>%
model(mean = MEAN(n_attendance),
naive = NAIVE(n_attendance),
snaive = SNAIVE(n_attendance, lag = "week"),
drift = RW(n_attendance ~ drift()),
#automatic_ets=ETS(n_attendance),
my_ets=ETS(n_attendance ~ error("A")+trend("A", alpha = 0.2)+season("M", gamma = 0.2))
)
all_forecast_XV <- ae_fit_XV %>%
forecast(h = 42)
fc_accuracy <- all_forecast_XV %>%
accuracy(ae_daily_train, measures = list(point_accuracy_measures, interval_accuracy_measures))
fc_accuracy %>% select(.model, RMSE, MAE, winkler)
###### Follow along coding
ae_monthly
f_horizon = 6
test <- ae_monthly %>% slice((n() - (f_horizon-1)):n())
train <- ae_monthly %>% slice(1:(n()-f_horizon))
nrow(ae_monthly) ==(nrow(test)+nrow(train))
train_XV <- train %>%
slice(1:(n()-f_horizon)) %>%
stretch_tsibble(.init = 4*12, .step = 1)
test_fit <- train_XV %>%
model(ets1=ETS(n_attendance ~ error("A")+trend("N")+season("A")),
ets2=ETS(n_attendance ~ error("A")+trend("N", alpha = 0.2)+season("N"))
)
ae_fc <- test_fit %>%
forecast(h = f_horizon)
ae_acc <- ae_fc %>%
accuracy(train, measures = list(point_accuracy_measures, interval_accuracy_measures))
ae_acc %>% select(.model, RMSE, MAE, winkler) #ets1 has the smallest error
ae_fit1 <- train %>% model(ets1 = ETS(n_attendance ~ error("A")+trend("N")+season("A")))
ae_fit1 %>% gg_tsresiduals()
ae_fit1 %>% report()
ae_fit1 %>% components() %>% autoplot()
ae_fc1 <- ae_fit1 %>% forecast(h=f_horizon)
ae_fc1 %>% hilo(level=99) %>% mutate(lower = `99%`$lower,
upper = `99%`$upper)
ae_fc1 %>% autoplot(ae_monthly)
#########
#ARIMA
ar1 <- arima.sim(n=1000, list(ar=.9))
acf(ar1)
pacf(ar1)
fit <- ae_daily %>%
  model(arima = ARIMA(n_attendance)) #automatically selects p,d,q,P,D,Q
fit %>% report()
fcst_arima <- fit %>% forecast(h = 42)
fcst_arima %>% autoplot()
##############
#Regression
library(GGally)
|
268eabb54528f47f240633bea961ae46907665fc
|
c1cf117422895fc60fa045805c14f3e46f0ccead
|
/script/C4_fit_remko.R
|
b7a6d56b175a189818318c00928911303c379cad
|
[] |
no_license
|
bvsonawane/C4_modelling
|
508602ae30fec79978fd9e26d8926ab8ca464905
|
1f1ca9746bde21f100a6947b411759a296cd88f2
|
refs/heads/master
| 2021-05-29T06:42:27.973593
| 2015-06-11T06:53:15
| 2015-06-11T06:53:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,895
|
r
|
C4_fit_remko.R
|
rm(list=ls())
require(plyr)
require(dplyr)
require(plantecophys)
require(minpack.lm)
#source("functions/bala_c4test.R") ## this file contain all the function created for C4 fitting
source("functions/functions.R")
# source("functions/load packages.R")
## read dataframe
df<- read.csv("rawdata/aci_expt2.csv")
df<- df[, c("code","growth","Photo","Cond", "Ci","Tleaf", "PARi","CO2R", "RH_S")]
df$spp<- lab_spp(df)
df$treat<- df$growth
df$PPFD<- df$PARi
df$ALEAF<- df$Photo
df$spp<- as.factor(df$spp)
c4<- droplevels(df%>%
filter(!spp== "P. bisulcatum ")%>%
filter(!spp== "P. milliodes "))
c3<- droplevels(df%>% filter(spp== "P. bisulcatum " | spp== "P. milliodes "))
c425<- c4%>% filter(Tleaf<30)
c435<-c4%>% filter(Tleaf>30)
c325 <- c3%>% filter(Tleaf<30)
c335<-c3%>% filter(Tleaf>30)
vpmax_guess<- 70
vcmax_guess<-30
gbs_guess<- 0.003
## need to subset dat to remove ciliaris cool-grown code e1.2; work on it later
mz<- filter(c425, code== "m1.1")
dat<- mz
dat$Ci<- c(12.47992, 30.13439, 47.90162, 69.87553, 90.45599, 140.02482, 195.58920, 360.14771, 505.34163, 820.89045, 900.29349)
fitmz<- fitc4(mz)
fitmz<- fitc4(dat)
summary(fitmz)
plot(fitmz, add= T, lwd= 9, lty=5)
par(mfrow= c(2,2))
fit<- dlply(c425, .(code),function(x) fitc4(x))
const <- ldply(fit, function(x) coef(x))
## In the 35C ACI data there is no problem with the data from warm-grown plants
## need to subset dat to remove ciliaris code e1.2, and coloratum f1.1
n<- filter(c435, !spp=="P. coloratum ")
n1<- filter(n, !spp== "C. ciliaris ")
unique(n1$spp)
fit_w<- dlply(n1, .(code),function(x) fitc4(x))
const <- ldply(fit_w, function(x) coef(x))
const
## fit aci for C3 grasses
fit_c3<- dlply(c3, .(code),function(x) fitaci(x))
const <- ldply(fit_c3, function(x) coef(x))
pdf("tmp.pdf")
for(i in 1:12)plot(fit_w[[i]], main=names(fit_w)[i])
dev.off()
|
6a85e62438913d3d3dff15a1b4629681c68a0e94
|
788e31a5c894ef28df014cebf88bd00546b32760
|
/regressao_quasi_poisson.R
|
fa5f538b81b9d857c9e06e58fd84d7df3a64d781
|
[] |
no_license
|
lucianogaldino/Curso-Estatistica-Analises-de-Regressoes-com-Linguagem-R
|
d786b4a6eecd21e58f899b7844b5399198809cf7
|
d9fece22230b7a0e90fefc5055e2ac733fd7a563
|
refs/heads/main
| 2023-04-21T21:37:36.765229
| 2021-05-12T02:08:48
| 2021-05-12T02:08:48
| 366,565,372
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,578
|
r
|
regressao_quasi_poisson.R
|
###################################
###  QUASI-POISSON REGRESSION   ###
###################################
library(dplyr) # data manipulation
# SET THE WORKING DIRECTORY (FOLDER WITH THE FILES)
setwd("C:/Users/Luciano/Desktop/regressoes_R")
# Goal: analyze complaints at a new internet service provider
# OPEN FILE
library(readxl)
reclamacoes <- read_xlsx('reclamacoes.xlsx')
View (reclamacoes)
# FIT THE QUASI-POISSON MODELS
modelo_quasi1 <- glm(velocidade ~ dia, data = reclamacoes,
family = "quasipoisson")
summary(modelo_quasi1)
# Equation: velocidade = e^(3.767909+0.034464*dia)
reclamacoes$modelo_veloc <- modelo_quasi1$fitted.values
modelo_quasi2 <- glm(instabilidade ~ dia, data = reclamacoes,
family = "quasipoisson")
summary(modelo_quasi2)
# Equation: instabilidade = e^(3.61161-0.098534*dia)
reclamacoes$modelo_insta <- modelo_quasi2$fitted.values
View(reclamacoes)
modelo_quasi3 <- glm(conexao ~ dia, data = reclamacoes,
family = "quasipoisson")
summary(modelo_quasi3)
# Equation: conexao = e^(2.860358+0.005103*dia)
reclamacoes$modelo_con <- modelo_quasi3$fitted.values
View(reclamacoes)
modelo_quasi4 <- glm(velocidade ~ dia+instabilidade, data = reclamacoes,
family = "quasipoisson")
summary(modelo_quasi4)
# Equation: velocidade = e^(3.7560318+0.0350997*dia+0.0003691*instabilidade)
reclamacoes$modelo_veloc2 <- modelo_quasi4$fitted.values
View(reclamacoes)
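# A minimal prediction sketch (the day value 30 is an illustrative assumption):
predict(modelo_quasi1, newdata = data.frame(dia = 30), type = "response")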
|
60cdc5a215404fa8c6608f869994cbb542a74d4b
|
567bcd8300c7c7e2bafbf45b378797de111406fd
|
/man/psi.plot.stepfun.Rd
|
d0e41220dc4408bf7c67a6a0cb824eaf7ce69b60
|
[] |
no_license
|
GeoBosh/psistat
|
996eb4fd5e551e8a8d7f1ab56f2ec801e52f3fd1
|
81306a78241c6a984e95a835975bc324f48ec3b3
|
refs/heads/master
| 2021-05-01T06:31:14.356997
| 2020-11-17T12:54:41
| 2020-11-17T12:54:41
| 121,145,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,986
|
rd
|
psi.plot.stepfun.Rd
|
\name{psi.plot.stepfun}
\alias{psi.plot.stepfun}
\title{ Plot piecewise constant functions }
\description{
Plot a stepfun object. The function is a modification of
plot.stepfun from package stats but has an additional argument to
force the domain of x-values rigidly.
}
\usage{
psi.plot.stepfun(x, xval, xlim, ylim = range(c(y, Fn.kn)),
xlab = "x", ylab = "f(x)", main = NULL,
add = FALSE, verticals = TRUE, do.points = TRUE,
pch = par("pch"), col.points = par("col"), cex.points = par("cex"),
col.hor = par("col"), col.vert = par("col"),
lty = par("lty"), lwd = par("lwd"),
rigid.xlim = FALSE, ...)
}
\arguments{
\item{x}{ an R object inheriting from '"stepfun"', usually created by
\code{stepfun} }
\item{xlim}{ numeric(2), range of 'x' values to use, has sensible defaults. }
\item{add}{ logical; if 'TRUE' only \emph{add} to an existing plot. }
\item{verticals}{ logical; if 'TRUE', draw vertical lines at steps. }
\item{rigid.xlim}{ If \code{TRUE} respect \code{xlim} rigidly, see details. }
\item{xval}{ see help page of \code{plot.stepfun}. }
\item{ylim}{ see help page of \code{plot.stepfun}. }
\item{xlab}{ see help page of \code{plot.stepfun}. }
\item{ylab}{ see help page of \code{plot.stepfun}. }
\item{main}{ see help page of \code{plot.stepfun}. }
\item{do.points}{ see help page of \code{plot.stepfun}. }
\item{pch}{ see help page of \code{plot.stepfun}. }
\item{col.points}{ see help page of \code{plot.stepfun}. }
\item{cex.points}{ see help page of \code{plot.stepfun}. }
\item{col.hor}{ see help page of \code{plot.stepfun}. }
\item{col.vert}{ see help page of \code{plot.stepfun}. }
\item{lty}{ see help page of \code{plot.stepfun}. }
\item{lwd}{ see help page of \code{plot.stepfun}. }
\item{\dots}{ see help page of \code{plot.stepfun}. }
}
\details{
The default method for plotting \code{stepfun} objects extends
slightly the domain requested by \code{xlim}. This is not always
desirable, especially if the function is not defined outside the
specified limits. This function has all the arguments of
\code{plot.stepfun} and does the same job with the additional option
to force the use of \code{xlim} as given by setting the argument
\code{rigid.xlim} to \code{TRUE}.
}
\value{
A list with two components
\item{t}{abscissa (x) values, including the two outermost ones.}
\item{y}{y values `in between' the `t[]'.}
}
\references{ R package "stats" for the code of the original
\code{plot.stepfun}.}
\author{Georgi N. Boshnakov (to be blamed for bugs; the credits should
go to the \R core team)}
\note{%
This function is a modification of \code{plot.stepfun} from the
\code{stats} package as supplied with R~2.8.1. Some of the text in
this help page has been taken from the help page of
\code{plot.stepfun}.
}
\seealso{\code{\link{plot.stepfun}}, \code{\link{stepfun}}}
\examples{
# define the empirical quantile function as a step function.
eqf <- function(x) stepfun((1:(length(x)))/length(x),c(x,NA),right=TRUE)
# create eqf for a random sample.
x <- sort(rnorm(10))
f1 <- eqf(x)
# plot f1
psi.plot.stepfun(f1,xlim=c(0,1),rigid.xlim=TRUE)
psi.plot.stepfun(f1,xlim=c(0,1),rigid.xlim=TRUE,verticals=FALSE)
psi.plot.stepfun(f1,xlim=c(0,1),rigid.xlim=TRUE,verticals=FALSE,
main="An emprirical qf")
psi.plot.stepfun(f1,xlim=c(0,1),rigid.xlim=TRUE,pch=19,verticals=FALSE)
# plot(f1) would give an error because of the NA, so modify.
eqf2 <- function(x) stepfun((1:(length(x)))/length(x),c(x,0),right=TRUE)
f2 <- eqf2(x)
# the default method for stepfun plots outside the domain of eqf.
plot(f2,xlim=c(0,1),verticals=FALSE)
# eqf's with overlaid qf's
psi.plot.stepfun(f1,xlim=c(0,1),rigid.xlim=TRUE,pch=19,verticals=FALSE)
curve(qnorm,add=TRUE, col="red")
psi.plot.stepfun(eqf(sort(rnorm(100))),xlim=c(0,1),rigid.xlim=TRUE,
pch=19,verticals=FALSE,do.points=FALSE)
curve(qnorm,add=TRUE, col="red")
}
\keyword{ hplot }
|
887636b7530cf55ef79f8a85dc8c9d401cecca76
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSpincalc/examples/EVrandom.Rd.R
|
dc01950ca7463fa762502417210418cc6a20d541
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
EVrandom.Rd.R
|
library(RSpincalc)
### Name: EVrandom
### Title: Generate uniform random Euler Vectors
### Aliases: EVrandom
### Keywords: programming
### ** Examples
EVrandom()
EVrandom(5)
|
e1ec479146c7fb2c367ffc05475213394f75cf91
|
29e978889657cc65a089d418ed4c889fa6aa7257
|
/dataPrep.R
|
b9b303f4f06e4d05b6f52a5fb50efa96caf86af5
|
[] |
no_license
|
klkinsch/ExData_Plotting1
|
e83b3028e9c83834d4f078f086bbf07ee5e4df46
|
9730c1561f51dd1e54526087f1adcd804f4e1b6d
|
refs/heads/master
| 2021-01-15T22:57:05.474628
| 2016-06-05T19:13:54
| 2016-06-05T19:13:54
| 60,428,300
| 0
| 0
| null | 2016-06-04T20:08:53
| 2016-06-04T20:08:52
| null |
UTF-8
|
R
| false
| false
| 1,161
|
r
|
dataPrep.R
|
##dataPrep.R assumes the following
# 1. data file household_power_consumption.txt resides in the data folder in the
# same path that the r script is run
# 2. packages dplyr and data.table are installed
##
##dataPrep.R does the following
# 1. Loads packages dplyr and data.table
# 2. Loads Electric Power Consumption data
# Use fread Fast and friendly file finagler - recognize ? as NA
# Subsets data for 2007-02-01 and 2007-02-02
# Make data tidy
library(dplyr)
library(data.table)
# Read data with Fast and friendly file finagler and then subset for 2007-02-01 and 2007-02-02
fastRead <- fread("./data/household_power_consumption.txt", na.strings="?",stringsAsFactors = FALSE)
epcdata <- filter(fastRead, grepl("^[12]/2/2007", Date))
# Transform data
#Set date and time data
epcdata$dateTime <-paste(epcdata$Date, epcdata$Time)
epcdata$Date <- as.Date(epcdata$Date, format = "%d/%m/%Y" )
#Tidy variable names
names(epcdata) <- gsub("_","",names(epcdata))
names(epcdata) <- tolower(names(epcdata))
#Convert to numeric
##epcdata[,c(3:9)]= apply(epcdata[,c(3:9)], 2, function(x) as.numeric(as.character(x)))
|
5af07e2391d9cc61ebec5204fc19105d8ea65b41
|
510d092d3344d53755946f110e92147fd00184c7
|
/Code/Simulationen/Auswertung.R
|
ea1c200b58d5a31d36d09aabfe80d37052fb213e
|
[] |
no_license
|
maamuse/MT-Maximization-Voter-Representation
|
71cc6a355c90fc2db5af3ebf680ebc29242abf8f
|
fa5f587ec446cd8567c39e94a12b4f8d17c299ef
|
refs/heads/main
| 2023-04-18T08:56:14.889256
| 2021-05-07T13:45:04
| 2021-05-07T13:45:04
| 363,432,310
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,796
|
r
|
Auswertung.R
|
#############################################################################################################
# Voting behaviour Accuracy
#############################################################################################################
#setwd
setwd("C:\\Users\\Marco\\Desktop\\Master\\Master\\Master Thesis\\Master Thesis\\Data")
#libraries
library(gridExtra)
############################################################
# Load Data
############################################################
#extract filenames of candidate csv files
councillor_names = list.files(path = "Smartvote Downloads\\candidates Votes transformes\\Q1", pattern = NULL, all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
#import first questionnaire
councillor_quest = read.csv2(file = paste0("Smartvote Downloads\\candidates Votes transformes\\Q1\\", councillor_names[1]), stringsAsFactors = FALSE)
councillor_quest = councillor_quest[2]
#cbind remaining questionnaires to councillor_quest
for(i in 2:length(councillor_names)){
councillor_quest_2 = read.csv2(file = paste0("Smartvote Downloads\\candidates Votes transformes\\Q1\\", councillor_names[i]), stringsAsFactors = FALSE)
councillor_quest_2 = councillor_quest_2[2]
councillor_quest = cbind(councillor_quest, councillor_quest_2)
}
#use councillor names as header
names(councillor_quest) <- councillor_names
#Map the answer no (0) to -1 to allow for neutral answers
councillor_quest[councillor_quest==0] =-1
rm(councillor_quest_2)
#import councillor voting data
councillor_votes = read.csv2(file = "Abstimmungen Nationalrat\\Unique Votes for R.csv", stringsAsFactors = FALSE)
#translate voting data into numerical representation
councillor_votes[councillor_votes=="Ja"] = 1
councillor_votes[councillor_votes=="Nein"] = -1
councillor_votes[councillor_votes == "Enthaltung"] = 0
councillor_votes[councillor_votes == "Hat nicht teilgenommen"] = 0
councillor_votes[councillor_votes == "Entschuldigt"] = 0
councillor_votes[councillor_votes == "Der Präsident stimmt nicht"] = 0
#import matching table that relates parliamentary businesses to the smartvote questionnaire
matters_catalogue = read.csv2(file = "Vorlagenkategorisierung\\Vorlagenkategorisierung R.csv", stringsAsFactors = FALSE, check.names = FALSE)
#ids of parliamentary businesses that were matched to the Smartvote questionnaire.
matter = c("16.055", "16.3111", "15.3803", "08.432", "13.468",
"17.3047", "17.047", "18.075", "16.3006", "17.3971",
"16.489", "16.3865", "16.3007", "13.074", "14.319",
"14.320", "16.056", "18.096", "15.3714", "17.429",
"15.3933", "18.3797", "15.3559")
#initialize empty lists
question = c()
row_question = c()
neg_row_question = c()
#For each matter listed above, extract the question and its row number.
counter = 0
for(i in 1:length(matter)){
question[i] = matters_catalogue[!is.na(matters_catalogue[matter[i]]),1]
row_question[i] = which(matters_catalogue[matter[i]]==1|matters_catalogue[matter[i]]==-1)
if(!is.na(match(TRUE, matters_catalogue[matter[i]]==-1))){
counter = counter + 1
neg_row_question[counter] = match(TRUE, matters_catalogue[matter[i]]==-1)
}
}
#reverse the voting sign in cases where the Smartvote question is the inverse of the parliamentary vote
councillor_quest[neg_row_question,] = councillor_quest[neg_row_question,]*(-1)
############################################################
# Extract relevant Data Points
############################################################
#Create data extraction for voting data of first parliamentary business listed in matter
row_councillor_votes = which(councillor_votes$ID == matter[1])
extr_councillor_votes = councillor_votes[row_councillor_votes,]
extr_councillor_votes$ID = NULL
#rbind voting data of remaining parliamentary businesses listed in matter
for(i in 2:length(matter)){
row_councillor_votes2 = which(councillor_votes$ID == matter[i])
extr_councillor_votes2 = councillor_votes[row_councillor_votes2,]
extr_councillor_votes2$ID = NULL
extr_councillor_votes = rbind(extr_councillor_votes, extr_councillor_votes2)
}
#extract answers to parliamentary businesses from the Smartvote questionnaire
extr_councillor_quest = councillor_quest[row_question,]
#clean environment
rm(councillor_votes,i, row_councillor_votes2, row_councillor_votes, councillor_quest)
############################################################
# Data Description
############################################################
#Check number of politicians (must be 134)
number_politicians = length(councillor_names)
# number_politicians
#Check number of matched businesses (must be 23)
# number_bus = length(matter)
# number_bus
#percentage of votes that were answered with yes in the questionnaire
# y_share_quest = sum(extr_councillor_quest == 1)/(dim(extr_councillor_quest)[1]*dim(extr_councillor_quest)[2])
#percentage of votes that were answered with yes in parliament
# y_share_vote_vote = sum(extr_councillor_votes == 1)/(dim(extr_councillor_votes)[1]*dim(extr_councillor_votes)[2])
#percentage of neutral votes in parliament
# zero_share_vote_vote = sum(extr_councillor_votes == 0)/(dim(extr_councillor_votes)[1]*dim(extr_councillor_votes)[2])
############################################################
# Base Accuracy - Individual Voter Representability
############################################################
#Deviation from expected Smartvote Score.
#1 means 100% as indicated by smartvote.
#0 means the exact opposite as indicated by smartvote.
#change from char to num value
extr_councillor_votes <- data.frame(apply(extr_councillor_votes, 2, function(x) as.numeric(as.character(x))))
#calculate the overlap between parliament votes and Smartvote answers on parliamentary businesses
overlap = 1 - (abs(extr_councillor_quest-extr_councillor_votes)/2)
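# e.g. quest = 1 vs vote = -1 gives overlap 0 (opposite); quest == vote gives 1
# (full agreement); an abstention or absence (vote = 0) scores 0.5 either way.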
bus_overlap = rowSums(overlap)/number_politicians
#summary stats overlap
mean(bus_overlap)
median(bus_overlap)
sd(bus_overlap)
hist(bus_overlap)
############################################################
# Base Accuracy - Individual Politicians Overlap
############################################################
#Deviation from expected Smartvote Score.
#1 means 100% as indicated by smartvote.
#0 means the exact opposite as indicated by smartvote.
#calculate overlap between parliament and smartvote for each politician
indiv_overlap = colSums(overlap)/length(matter)
#summary stats overlap
mean(indiv_overlap)
median(indiv_overlap)
sd(indiv_overlap)
hist(indiv_overlap)
#list of all politicians inc overlap
indiv_overlap = as.data.frame(round(indiv_overlap,2))
grid.table(indiv_overlap)
|
9c6419800aaab34c13145106bc2ec8474b5b1803
|
d31979485b23b98a22d0402cf9c1cd54857538cd
|
/Functions.R
|
377dbce896c0ab0a0502744f59b441cfe06fbfce
|
[] |
no_license
|
p-schaefer/RWorkshop
|
1a17b5f91f7afdeadd97890e1245658d1882183f
|
f637cfeffa431335fd6170110a4db30624833b46
|
refs/heads/master
| 2021-01-12T09:53:43.201702
| 2017-01-12T16:08:45
| 2017-01-12T16:08:45
| 76,288,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,414
|
r
|
Functions.R
|
###################################################################
#
#Functions for use in the OBBN R workshop - January 18 2017
#
#Created by: Patrick Schaefer (pschaefer@creditvalleyca.ca)
#
###################################################################
"Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
1) Identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if designated);
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
####################################################################
ci_fun2<-function(model,model.variable,data=NULL,col.variable=NULL,data.variable,plot=T,stat="median",level=0.95,oddsRatio=F,nsim=1000,sig.only=FALSE,...){
require(plotrix)
require(merTools)
sim<-REsim(model,n.sim=nsim,...)
output<-sim[sim$term==model.variable,]
output$upper<-output[,stat]+output[,"sd"]*qnorm(1-((1-level)/2))
output$lower<-output[,stat]-output[,"sd"]*qnorm(1-((1-level)/2))
output$sig<- output[, "lower"] > 0 | output[, "upper"] < 0
hlineInt<-0
if (oddsRatio == TRUE) {
output[, "ymax"] <- exp(output[, "upper"])
output[, stat] <- exp(output[, stat])
output[, "ymin"] <- exp(output[, "lower"])
hlineInt <- 1
}
if (plot==T) {
if (sig.only==T){
plot.output<-output[order(eval(parse(text=paste0('output$',stat)))),]
plot.output<-plot.output[plot.output$sig==T,]
sitenum<-nrow(plot.output)
if (!is.null(data) & !is.null(col.variable)){
colours<-data[sapply(as.character(plot.output$groupID),function(x) match(x,as.character(eval(parse(text=paste0('data$',data.variable)))))),col.variable]
}
plotCI(x=1:sitenum,y=plot.output[, stat],
li=as.numeric(plot.output$lower),
ui=as.numeric(plot.output$upper),
lwd=1,xlab="",ylab="site-specific slope (95% CI)",xaxt="n",las=1,main=paste0(colnames(attr(model,"frame"))[1]),
col=if (!is.null(data)& !is.null(col.variable)) {col=colours} else {"black"})
axis(1,at=1:sitenum,labels=plot.output$groupID,cex.axis=0.6,las=2)
abline(h=hlineInt,lty=2,lwd=1)
if (!is.null(data) & !is.null(col.variable)){
legend("topleft",c("Lower","Middle","Upper"),pch=c(21),pt.bg=c(1:3),bty="n",cex=1)
}
}
if (sig.only==F) {
plot.output<-output[order(eval(parse(text=paste0('output$',stat)))),]
sitenum<-nrow(plot.output)
if (!is.null(data) & !is.null(col.variable)){
colours<-data[sapply(as.character(plot.output$groupID),function(x) match(x,as.character(eval(parse(text=paste0('data$',data.variable)))))),col.variable]
}
plotCI(x=1:sitenum,y=plot.output[, stat],
li=as.numeric(plot.output$lower),
ui=as.numeric(plot.output$upper),
lwd=1,xlab="",ylab="site-specific slope (95% CI)",xaxt="n",las=1,main=paste0(colnames(attr(model,"frame"))[1]),
col=if (!is.null(data) & !is.null(col.variable)) {col=colours} else {"black"})
axis(1,at=1:sitenum,labels=plot.output$groupID,cex.axis=0.6,las=2)
abline(h=hlineInt,lty=2,lwd=1)
if (!is.null(data) & !is.null(col.variable)){
legend("topleft",c("Lower","Middle","Upper"),pch=c(21),pt.bg=c(1:3),bty="n",cex=1)
}
}
}
return(output)
}
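# Minimal usage sketch (an illustration, not part of the workshop handout):
# ci_fun2() expects a fitted lme4 model; here we assume lme4's built-in
# sleepstudy data with a random slope for Days by Subject. Wrapped in
# if (FALSE) so that sourcing this file stays side-effect free.
if (FALSE) {
  library(lme4)
  fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
  ci <- ci_fun2(model = fm, model.variable = "Days") # plots subject-specific slopes
  head(ci)
}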
xyplot_fun2<-function(model,model.variable,data,data.variable="Site",...) {
require(lattice)
data$Site<-as.factor(as.character(eval(parse(text=paste0('data$',data.variable)))))
temp.data<-data
ci.mod<-ci_fun2(model=model,model.variable=model.variable,data.variable=data.variable,data=data,plot=F,...)
temp.data$Site.sig<-temp.data$Site
if (any(ci.mod$upper>0&ci.mod$lower>0)){
levels(temp.data$Site.sig)[which(levels(temp.data$Site.sig)%in%ci.mod$groupID[(ci.mod$upper>0&ci.mod$lower>0)])]<-unique(paste0(unlist(levels(temp.data$Site.sig)[which(levels(temp.data$Site.sig)%in%ci.mod$groupID[(ci.mod$upper>0&ci.mod$lower>0)])]), "*+*"))
}
if (any(ci.mod$upper<0&ci.mod$lower<0)){
levels(temp.data$Site.sig)[which(levels(temp.data$Site.sig)%in%ci.mod$groupID[(ci.mod$upper<0&ci.mod$lower<0)])]<-unique(paste0(unlist(levels(temp.data$Site.sig)[which(levels(temp.data$Site.sig)%in%ci.mod$groupID[(ci.mod$upper<0&ci.mod$lower<0)])]), "*-*"))
}
xyplot(formula(paste0(colnames(model@frame)[1],"~ Year | Site.sig")),
data=temp.data,auto.key=T,type=c("p","r"))
}
|
465543394d4b549740e8e32923d21c674a425a79
|
47967b9f3870fd0aa07115fa9f327a1a440bebab
|
/Reddit_Analysis/Network_Analysis/network_visulalisation_script.R
|
8da30fb9f610909bfdfe8f53ec9986001cff6225
|
[
"MIT"
] |
permissive
|
zachHFT/Optimal-Cryptocurrency-Trading-Strategies-Step2
|
ec290c99800daf777cfc008b44e240ad49318132
|
5880839d1b139b837d914279964a61ed53e4939d
|
refs/heads/main
| 2023-09-05T13:58:35.855259
| 2021-11-22T12:18:10
| 2021-11-22T12:18:10
| 423,907,146
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,183
|
r
|
network_visulalisation_script.R
|
library(igraph)
library(ggplot2)
library(dplyr)
library(influential)
library(ggraph)
library(graphlayouts)
library(visNetwork)
largest_connected_component_g_reduced <- read_graph("largest_connected_component_g_reduced",
format="gml")
ggraph(largest_connected_component_g_reduced,
layout = "manual",
x = V(largest_connected_component_g_reduced)$x,
y = V(largest_connected_component_g_reduced)$y) +
geom_edge_link(aes(end_cap = circle(1.5, 'mm')),
edge_colour = "#A8A8A8",
edge_width = 0.3,
edge_alpha = 1,
arrow = arrow(angle = 30,
length = unit(1.5, "mm"),
ends = "last",
type = "closed")) +
geom_node_point(aes(size = IVIcentrality),
fill = "#FF4500",
colour = "white",
shape = 21,
stroke = 1) +
#geom_node_text(aes(label= ifelse(IVIcentrality > 4.5,V(largest_connected_component_g_reduced)$names,""),
#family = 'Palatino')) +
scale_size(range = c(0, 25)) +
theme_graph() +
theme(legend.position = "right")
################ visNetwork ############################
#visN <- visIgraph(largest_connected_component_g_reduced)
#visN
G <- largest_connected_component_g_reduced
nodes <- data.frame(id=as.vector(V(G)),
                    #label=as.vector(V(G)$names),
                    value=1e7*V(G)$IVIcentrality,
                    x=V(G)$x, # node coordinates stored on the graph (used for the layout above)
                    y=V(G)$y)
edges <- data.frame(from=as.vector(tail_of(G,E(G))),
to=as.vector(head_of(G,E(G))),
arrows="to")
visN <- visNetwork(nodes=nodes,edges=edges) %>%
visIgraphLayout() %>%
visNodes(color=list(background="#FF4500", border='white', hover=list(background='#7FFFD4')),
scaling=list(min=0.1,max=250)) %>%
visEdges(color=list(color='grey', hover='#7FFFD4'),
width=8) %>%
visInteraction(dragNodes = T,
dragView = T,
zoomView = T,
hover=T) %>%
visLegend()
visN
|
599c92a89ccbc808111859f6a5e7f53bbbf090a7
|
40e318d095bb5eca2f763321acc121748b472749
|
/man/single.mod.Rd
|
fe3047d7f210386afc9a4affe2f2adc93e13fdc6
|
[] |
no_license
|
tyut2018/FusedPCA
|
17c5ffee51607471d9349fc30f039805f38565e2
|
4c0c3d220ed478f234108cf1e1e08e48937f5ee8
|
refs/heads/master
| 2020-04-05T17:38:53.528437
| 2013-11-10T00:00:00
| 2013-11-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,471
|
rd
|
single.mod.Rd
|
\name{single.mod}
\alias{single.mod}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Modularity based on DCBM and SBM assumptions
}
\description{
Get the modularity values based on DCBM and SBM assumptions for a single community detection estimator.
}
\usage{
single.mod(A, clusters, K = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{A}{
input matrix -- adjacency matrix of an observed graph based on the non-isolated nodes, of dimension \code{n.noniso} x \code{n.noniso}, where \code{n.noniso} is the number of the non-isolated nodes.
}
\item{clusters}{
input vector -- the estimator of the community labels of the non-isolated nodes in the network, of dimension \code{n.noniso}, values taken from 1 to K, where K is the number of communities.
}
\item{K}{
the number of the communities, with 2 as the default value.
}
}
\value{
\item{mod.dcbm}{the modularity value based on the DCBM assumption.}
\item{mod.sbm}{the modularity value based on the SBM assumption.}
%% ...
}
\references{
Yang Feng, Richard J. Samworth and Yi Yu, Community Detection via Fused Principal Component Analysis, manuscript.
}
\author{
Yang Feng, Richard J. Samworth and Yi Yu
}
\examples{
## to generate an adjacency matrix
A = matrix(c(0,1,1,1,0,0,1,0,0), byrow = TRUE, ncol = 3)
## have a look at A
A
## modularity values based on the DCBM and SBM assumptions
## given the community labels 1, 1 and 2 to nodes 1, 2 and 3
single.mod(A, c(1,1,2))
}
|
d124870c51854b99bad8067c020b8b7f6808689a
|
9159f5c0c900dcbf446c36d941a1ed455187d0d5
|
/크롤링_워드클라우드.R
|
fe70f3b4b588ef40a3b13dc99fff5b8627f51faa
|
[] |
no_license
|
joy3968/R_project
|
658d134881ec836cfc3458f45ae53100750384db
|
c55e1d1ade8a1240d9cf8615051d89c18a055dc3
|
refs/heads/main
| 2022-12-20T01:07:21.151938
| 2020-10-13T07:47:50
| 2020-10-13T07:47:50
| 303,286,996
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 4,322
|
r
|
크롤링_워드클라우드.R
|
## fetch the data via web crawling
install.packages('rvest')
library(rvest)
html <- read_html("https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=134963&target=after&page=1")
html
# check the encoding of the HTML
guess_encoding(html)
comment <- html_nodes(html,'.title')%>%
html_text()
comment <- gsub('\n','',comment)
comment <- gsub('\t','',comment)
comment
## extract the ratings
rate <- html_nodes(html,'.list_netizen_score')%>%
html_text()
rate
library(stringr)
x <- unlist(str_extract_all(rate,'[[:digit:]]{1,}'))
x
rrate <- c()
j <- 0
for(i in 1:length(x)){
if(i%%2==0){
j <- j+1
rrate[j] <- x[i]
}
}
rrate
point <- c()
for (i in 1:10){
point <- c(point, str_extract_all(rate,'[[:digit:]]{1,}')[[i]][2])
}
point
## extract the writer / date (separately)
wridate <- html_nodes(html,'.num')%>%
html_text()
wridate
wridate1 <- c()
j <- 1
for(i in 1:length(wridate)){
if(i%%2==0){
wridate1[j] <- wridate[i]
j <- j+1
}
}
wridate1 <- wridate1[1:10]
wridate
wridate1
# regular expressions allow an even simpler extraction
x <- unlist(str_extract_all(wridate,'\\w{1,}\\*{1,}\\d{2}\\.\\d{2}\\.\\d{2}'))
id <- unlist(str_extract_all(wridate,'\\w{1,}\\*{1,}'))
id
date <- unlist(str_extract_all(wridate,'\\d{2}\\.\\d{2}\\.\\d{2}'))
date
## data extraction using XPath
# example XPath: //*[@id="old_content"]/table/tbody/tr[1]/td[1]/text()
comment <- html_nodes(html,xpath='//*[@id="old_content"]/table/tbody/tr[1]/td[2]/text()')
comment
## extract the ratings (using XPath)
html_nodes(html, xpath='//*[@id="old_content"]/table/tbody/tr/td[2]/div/em')%>%
html_text()
## build a data.frame more simply (html_table)
Sys.setlocale("LC_ALL", "English") # switch to an English locale (otherwise an error occurs)
html <- read_html("https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=134963&target=after&page=1")
t <- html_nodes(html, 'table')
# show the contents of the table tags
View(html_table(t[[2]]))
# ---> turn the 2nd element of t into a table and display it
review <- html_table(t[[2]])
names(review) <- c('no', 'comment', 'id.date')
View(review)
## fetch data from several pages
review <- NULL
for(i in 1:3) {
html <- read_html(paste0("https://movie.naver.com/movie/point/af/list.nhn?st=mcode&sword=134963&target=after&page=",i),encoding="CP949")
t <- html_nodes(html, 'table')
review <- rbind(review, html_table(t[[2]]))
}
View(review)
## data cleaning & word cloud
# 1) set the column names
names(review) <- c('no', 'comment', 'id.date')
# 2) remove \n and \t
review$comment <- gsub('\n','', review$comment)
review$comment <- gsub('\t','', review$comment)
# 3) delete the last two characters (the 'report' label)
co <- NULL
for(i in review$comment){
co <- rbind(co,substr(i, 1, nchar(i)-2))
}
co
review$comment
review$comment <- co
# 4) remove the leading '라라랜드별점 - 총 10점 중' ("La La Land rating - out of 10") prefix
review$comment <- gsub('라라랜드별점 - 총 10점 중','',review$comment)
# 5) strip the leading score (treating the 10 case and the single-digit case separately)
comment <- NULL
for(i in review$comment){
if(substr(i,1,2)=='10'){
comment <- rbind(comment,substr(i,3,nchar(i)))
} else {
comment <- rbind(comment, substr(i,2,nchar(i)))
}
}
review$comment <- comment
# 6) remove special characters
text <- review$comment
text
##### steps for extracting nouns (added) ####
library(KoNLP)        # provides buildDictionary() and extractNoun()
library(RColorBrewer) # provides brewer.pal()
buildDictionary(ext_dic = "woorimalsam") # load the 'Woorimalsam' Korean dictionary
pal <- brewer.pal(8, "Dark2") # create a colour palette
noun <- sapply(text, extractNoun, USE.NAMES=F) # extract nouns
noun
noun2 <- unlist(noun)
noun2
noun2 <- noun2[nchar(noun2)>1] # remove one-character words
noun2
#################################
test <- str_replace_all(text,"[[:punct:]]","")
# 7) split into words
word <- unlist(str_split(test,' '))
word <- word[nchar(word)>1] # keep words with more than one character
word
#df <- data.frame(table(word))
df <- data.frame(table(noun2))
df
# 8) word cloud
install.packages("wordcloud")
library(wordcloud)
# wordcloud(df$word ,df$Freq,random.order=TRUE, min.freq = 2)
wordcloud(df$noun2 ,df$Freq,random.order=TRUE, min.freq = 2)
|
72a9eac8b2eb6f5ada00fb30e3ff48b0814211c5
|
64dc0c6f076051d1c9db94371492d04ecc686d4a
|
/Competition_dynamics.R
|
8b39c8a455dce8d24a011b8e8738bb8ba96393d4
|
[] |
no_license
|
jdyeakel/DiffusingForager
|
8c232054c4f59cf1a97d3eda2ee9b5387b0ad4f9
|
77b0e8709b00b1f85977a77306ad9c579d9a10d1
|
refs/heads/master
| 2020-04-06T05:24:25.900411
| 2017-12-13T17:40:53
| 2017-12-13T17:40:53
| 27,739,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,833
|
r
|
Competition_dynamics.R
|
library(deSolve)
#library(rgl)
library(RColorBrewer)
library(wesanderson)
source("R/filled_contour.r")
source("R/smooth_pal.R")
#Starving forager ODE
source("R/comp_forage_ode.R")
state <- c(
R = 0.5,
X1 = 0.5,
Y1 = 0.5,
X2 = 0.5,
Y2 = 0.5)
parameters <- c(
alpha = 1,
epsilon = 0.8,
sigma1 = 0.5,
sigma2 = 0.2,
rho1 = 0.8,
rho2 = 0.8,
gamma1 = 0.8,
gamma2 = 0.8,
mu = 0.02
)
time <- seq(0,300, by = 0.1)
out <- ode(y = state, times = time, func = comp_forage_ode, parms = parameters)
#Plot ALL
colors <- brewer.pal(5,"Set1")
plot(out[,1],out[,2], xlab = "time", ylab = "-",type="l",lwd=2,col=colors[2],
ylim=c(0,max(as.numeric(cbind(out[,2],out[,3],out[,4],out[,5],out[,6]))))) #Blue
lines(out[,1],out[,3],col=colors[5],type="l",lwd=2) #Orange
lines(out[,1],out[,4],col=colors[3],type="l",lwd=2) #Green
lines(out[,1],out[,5],col=colors[5],type="l",lwd=2,lty=3) #Orange
lines(out[,1],out[,6],col=colors[3],type="l",lwd=2,lty=3) #Green
#Who wins sigma1 vs. sigma2
source("R/comp_forage_ode.R")
sigma1_vec <- seq(0,1,0.05)
sigma2_vec <- seq(0,1,0.05)
l_sigma1 <- length(sigma1_vec)
l_sigma2 <- length(sigma2_vec)
pr_m <- matrix(0,l_sigma1,l_sigma2)
pr_c <- matrix(0,(l_sigma1*l_sigma2),3)
pr_pa <- matrix(0,(l_sigma1*l_sigma2),3)
threshold <- 0.05
tic <- 0
for (i in 1:l_sigma1) {
for (j in 1:l_sigma2) {
tic <- tic + 1
state <- c(
R = 0.5,
X1 = 0.5,
Y1 = 0.5,
X2 = 0.5,
Y2 = 0.5)
parameters <- c(
alpha = 1,
epsilon = 0.8,
sigma1 = sigma1_vec[i],
sigma2 = sigma2_vec[j],
rho1 = 0.8,
rho2 = 0.8,
gamma1 = 0.8,
gamma2 = 0.8,
mu = 0.02
)
time <- seq(0,300, by = 0.1)
out <- ode(y = state, times = time, func = comp_forage_ode, parms = parameters)
pop1 <- out[,3] + out[,4]
pop2 <- out[,5] + out[,6]
pop_ratio <- pop1/pop2 # needed below for pr_m / pr_c
pop1_term <- median(pop1[(length(pop1)-100):(length(pop1))])
pop2_term <- median(pop2[(length(pop2)-100):(length(pop2))])
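    # Classify the terminal state: 0 = both populations below threshold,
    # 1 = at least one persists, 2 = coexistence (the later, stricter
    # condition deliberately overwrites the earlier code when both persist).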
if ((pop1_term < threshold) && (pop2_term < threshold)) {
pr_pa[tic,] <- c(sigma1_vec[i],sigma2_vec[j],0)
}
if ((pop1_term > threshold) || (pop2_term > threshold)) {
pr_pa[tic,] <- c(sigma1_vec[i],sigma2_vec[j],1)
}
if ((pop1_term > threshold) && (pop2_term > threshold)) {
pr_pa[tic,] <- c(sigma1_vec[i],sigma2_vec[j],2)
}
pr_m[i,j] <- median(pop_ratio[(length(pop_ratio)-100):(length(pop_ratio))])
pr_c[tic,] <- c(sigma1_vec[i],sigma2_vec[j],pr_m[i,j])
}
}
bw <- pr_c[,3] > 1
#Pop1 vs. Pop2 ratio
plot(pr_c[,1],pr_c[,2],pch=16,col=bw)
points(pr_c[,1],pr_c[,2])
#Presence Absence Coexistence
pal <- brewer.pal(3,"Set2")
plot(pr_pa[,1],pr_pa[,2],pch=16,col=pal[pr_pa[,3]],xlab="sigma_1",ylab="sigma_2")
|
37a6646734a9d016aebf8326d038672d980bdeb6
|
585c347c8c2b307f658333b75daa64d9ef076ec4
|
/data_mining/preprocessing.R
|
46db86b75c8e8f9d47b4a03a8a4df554a10cb05f
|
[] |
no_license
|
davidjudilla/csc177_data_mining_warehousing_student_performance
|
1e329dba0af27f856fdf39d90e177ab291fcdef5
|
eb461d353b287bbbb881e17121d5c6b09b29d45d
|
refs/heads/master
| 2021-01-20T00:35:13.465346
| 2017-05-15T21:21:17
| 2017-05-15T21:21:17
| 89,156,731
| 2
| 2
| null | 2017-05-15T21:21:18
| 2017-04-23T16:40:05
|
Python
|
UTF-8
|
R
| false
| false
| 1,938
|
r
|
preprocessing.R
|
mathStud=read.table("student/student-mat.csv",sep=";",header=TRUE)
portStud=read.table("student/student-por.csv",sep=";",header=TRUE)
# Change labels of columns such as ("yes", "no") -> (1, 0)
#factor(select(mathStud, schoolsup:romantic), levels = c("yes", "no"), labels = c(1,0))
mathStud$schoolsup = factor(mathStud$schoolsup, levels = c("yes", "no"), labels = c(1,0))
mathStud$famsup = factor(mathStud$famsup, levels = c("yes", "no"), labels = c(1,0))
mathStud$paid = factor(mathStud$paid, levels = c("yes", "no"), labels = c(1,0))
mathStud$activities = factor(mathStud$activities, levels = c("yes", "no"), labels = c(1,0))
mathStud$nursery = factor(mathStud$nursery, levels = c("yes", "no"), labels = c(1,0))
mathStud$higher = factor(mathStud$higher, levels = c("yes", "no"), labels = c(1,0))
mathStud$internet = factor(mathStud$internet, levels = c("yes", "no"), labels = c(1,0))
mathStud$romantic = factor(mathStud$romantic, levels = c("yes", "no"), labels = c(1,0))
portStud$schoolsup = factor(portStud$schoolsup, levels = c("yes", "no"), labels = c(1,0))
portStud$famsup = factor(portStud$famsup, levels = c("yes", "no"), labels = c(1,0))
portStud$paid = factor(portStud$paid, levels = c("yes", "no"), labels = c(1,0))
portStud$activities = factor(portStud$activities, levels = c("yes", "no"), labels = c(1,0))
portStud$nursery = factor(portStud$nursery, levels = c("yes", "no"), labels = c(1,0))
portStud$higher = factor(portStud$higher, levels = c("yes", "no"), labels = c(1,0))
portStud$internet = factor(portStud$internet, levels = c("yes", "no"), labels = c(1,0))
portStud$romantic = factor(portStud$romantic, levels = c("yes", "no"), labels = c(1,0))
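# A more compact equivalent of the per-column calls above (a sketch; left
# commented out so the recoding is not applied twice):
# ynCols <- c("schoolsup", "famsup", "paid", "activities",
#             "nursery", "higher", "internet", "romantic")
# mathStud[ynCols] <- lapply(mathStud[ynCols], factor,
#                            levels = c("yes", "no"), labels = c(1, 0))
# portStud[ynCols] <- lapply(portStud[ynCols], factor,
#                            levels = c("yes", "no"), labels = c(1, 0))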
# Look at all students, not just those who take BOTH math and portugese
# Do later
unionStud <- merge(mathStud, portStud,
all = TRUE
)
unionStud$id = 1:nrow(unionStud)
write.csv(unionStud, file = "data_mining/students.csv")
library(dplyr) # only needed for the commented-out select() idea above
|
c926fc8574ba487356563b1b93f995b8bcd7b301
|
6d8ba0201575eb0ae25aa6b7f94ff516a88f6295
|
/man/insertPseudocounts.Rd
|
3cac5e452966579f380a8a13778ee94899f64487
|
[] |
no_license
|
vreuter/SwissR
|
bdd2f9070309995943f45ac252f37724c390804b
|
eba4932eea7514a80daf82fe720646c711ba0528
|
refs/heads/master
| 2021-03-27T18:52:53.391769
| 2018-02-13T21:44:13
| 2018-02-13T21:44:13
| 94,234,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 599
|
rd
|
insertPseudocounts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats-utils.R
\name{insertPseudocounts}
\alias{insertPseudocounts}
\title{Assurance that each value of a vector is nonzero.}
\usage{
insertPseudocounts(observations, pseudocount = 1)
}
\arguments{
\item{observations}{Vector of values to ensure nonzero.}
\item{pseudocount}{Value with which to replace zeros.}
}
\value{
Updated observations vector.
}
\description{
\code{insertPseudocounts} takes an observation vector and a
\code{pseudocount}, replacing all zeros in \code{observations} with the
\code{pseudocount}.
}
|
01667d199d7fcbed18cc268fe15bc7407c7fa359
|
a6f2c3deb38a1e9ae51866e2e06a3f51d8a936fa
|
/R/map_elements.R
|
6e21ded880df6b3f0d8fb67953d0ad77bb870da1
|
[] |
no_license
|
mpio-be/bib2
|
d39ede015568ceb18aada2993a1aa4cad127b202
|
cf06cc7baab1e8610cfbdab271d6ea5927dc347a
|
refs/heads/master
| 2021-12-15T02:31:29.472265
| 2021-12-02T08:07:20
| 2021-12-02T08:07:20
| 76,011,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,069
|
r
|
map_elements.R
|
#' @name maps
#' @title maps
NULL
#' @export
#' @rdname maps
theme_bib2 <- function() {
theme(
axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid = element_blank(),
panel.spacing = unit(0, "lines"),
plot.background = element_blank(),
plot.margin = unit(c(0,0,0,0), "in")
)
}
#' @export
#' @rdname maps
#' @return a list of geoms defining the legend around the box
map_legend <- function(size = 2.5, right = 'box', left = 'checked days ago',
top = 'stage age|days to hatch', bottom = 'eggs|chicks',
x = 543 , y = 735 ) {
isp = data.frame( x = x, y = y, right, left, top, bottom)
list(
geom_point(data = isp, aes(x = x, y = y), pch = 19, size = size*.5) ,
geom_text(data = isp, aes(x = x, y = y, label = right), hjust = 'left', nudge_x = 5) ,
geom_text(data = isp, aes(x = x, y = y, label = left), hjust = 'right', nudge_x = -5) ,
geom_text(data = isp, aes(x = x, y = y, label = top), vjust = 'bottom', nudge_y = 7) ,
geom_text(data = isp, aes(x = x, y = y, label = bottom), vjust = 'top', nudge_y = -7) )
}
#' @export
#' @rdname maps
print_ann <- function(color = 'grey', x = Inf, y = Inf, vjust = 'bottom', hjust = 'top', angle = 90) {
z = data.frame(
x = x,
y = y,
annlabel = paste( 'Printed on',format(Sys.time(), "%a, %d %b %y %H:%M") ),
color = color)
geom_text(data = z ,color = z$color, vjust =vjust, hjust = hjust, angle =angle,
aes(x = x, y = y, label = annlabel) )
}
#' @export
#' @rdname maps
#' @examples
#' \donttest{
#' map_empty()
#' }
map_empty <- function() {
ggplot() +
geom_polygon(data = map_layers[nam == 'buildings'] , aes(x=long, y=lat, group=group), size = .2, fill = 'grey97', col = 'grey97' ) +
geom_path(data = map_layers[nam == 'streets'], aes(x=long, y=lat, group=group) , size = .2, col = 'grey60' ) +
coord_equal(ratio=1) +
scale_x_continuous(expand = c(0,0), limits = map_layers[nam == 'streets', c( min(long), max(long) )] ) +
scale_y_continuous(expand = c(0,0)) +
theme_bib2()
}
#' @export
#' @rdname maps
#' @examples
#' \donttest{
#' map_base(family = 'sans')
#' }
map_base <- function(size = 2.5, family = 'sans', fontface = 'plain',printdt = FALSE) {
g =
map_empty() +
geom_point(data = boxesxy, color = 'grey', pch = 21, size = size,
aes(x = long, y = lat) ) +
geom_text(data = boxesxy,family = family, fontface = fontface, size= size, nudge_x = 10,
aes(x = long, y = lat, label = box) ) +
theme( legend.justification = c(0, 1),legend.position = c(0,1) ) +
if(printdt) print_ann() else NULL
g
}
#' @export
#' @rdname maps
#' @param n a data.table returned by nests()
#' @param title goes to ggtitle (should be the reference date)
#' @param notes notes under legend annotations
#' @param nx notes x location
#' @param ny notes y location
#' @examples
#' \donttest{
#' x = nests(Sys.Date() - 1 )
#' notes = c('note 1: this is note 1\nnote 99: this is note 99\nnote 9999+1: this is note 9999+1')
#' n = nest_state(x, hatchingModel = predict_hatchday_model(Breeding(), rlm) )
#' map_nests(n)
#' map_nests(n, notes = notes) + print_ann()
#'}
#'
map_nests <- function(n, size = 2.5, family = 'sans', fontface = 'plain',
title = paste('made on:', Sys.Date() ), notes = '', nx = -20, ny = 650) {
legend = nest_legend(n)
nxy = merge(n, boxesxy, by= 'box')
# frame
map_empty()+
theme( legend.justification = c(0, 1),
legend.position = c(0,1) ) +
ggtitle(title) + map_legend() +
# boxes
geom_point(data = boxesxy, color = 'grey', pch = 21, size = size,
aes(x = long, y = lat) ) +
geom_text( data = boxesxy, hjust = 'left', nudge_x = 5, family = family, fontface = fontface, size = size,
aes(x = long, y = lat, label = box) )+
# nest stage
geom_point(data = nxy, pch = 19, size = size,
aes(x = long, y = lat, color = nest_stage), na.rm = TRUE ) +
scale_colour_manual(values = legend$col , labels = legend$labs ) +
# last check
geom_text(data = nxy,
aes(x = long, y = lat, label = lastCheck),
hjust = 'right', nudge_x = -5,size = size, family = family) +
# nest stage age
geom_text(data = nxy,
aes(x = long, y = lat, label = AGE), vjust = 'bottom', nudge_y = 5,
size = size, family = family)+
# clutch | chicks
geom_text(data = nxy[!is.na(ECK)] ,
aes(x = long, y = lat, label = ECK), vjust = 'top', nudge_y = -5,
size = size, family = family) +
guides( color = guide_legend(title = NULL, ncol = 3)) +
annotate('text', size = size+1, x = nx, y = ny,
hjust = 'left', vjust = 'top',
label= notes)
}
#' @export
#' @rdname maps
#' @param exp_id the id of the experiment as defined in the experiments table.
#' @return a list of geoms to append to map_nests()
#' @examples
#' \donttest{
#' x = map_experiment(2)
#' }
map_experiment <- function(exp_id) {
x = bibq( paste('SELECT * FROM EXPERIMENTS
WHERE ID = ', exp_id) )$R_script
x = stringr::str_replace_all(x, '\r\n', '\n')
fallback = glue('function() {{
list(ggtitle("Experiment {exp_id} cannot be shown. Review the EXPERIMENTS table!")) }}')
if( length(x)>0 && nchar(x) > 0) {
f = glue('function() {{
{x}
}}' )
} else
f = fallback
o = try( eval(parse( text= f ) ), silent = TRUE)
if(inherits (o, 'try-error')) o = eval(parse( text= fallback ) )
o
}
|
87a95cbc459b6ff9b5801b19294bf450f39fdd1e
|
918e48a1d361f5f10be6ba157a0b80f889a8726e
|
/man/plot_bidev.Rd
|
457355e50f4adc45d2991bc46b97d9cd3584644b
|
[] |
no_license
|
riatelab/MTA
|
b1dd731c0856b6266bbe1445ec531265611c3a5a
|
eb198c04fd32c45257945fa2185f1f8bf79fc5f3
|
refs/heads/master
| 2022-06-17T05:14:00.022031
| 2022-06-03T08:43:07
| 2022-06-03T08:43:07
| 83,419,887
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,200
|
rd
|
plot_bidev.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_bidev.R
\name{plot_bidev}
\alias{plot_bidev}
\title{Plot Multiscalar Typology (2 deviations)}
\usage{
plot_bidev(
x,
dev1,
dev2,
breaks = c(25, 50, 100),
dev1.lab = NULL,
dev2.lab = NULL,
lib.var = NULL,
lib.val = NULL,
cex.lab = 1,
cex.axis = 0.7,
cex.pt = 0.5,
cex.names = 0.8,
pos.names = 4
)
}
\arguments{
\item{x}{a sf object or a dataframe including 2 pre-calculated deviations.}
\item{dev1}{column name of the first relative deviation in x.}
\item{dev2}{column name of the second relative deviation in x.}
\item{breaks}{distance to the index 100 (average of the context), in
percentage. A vector of three values. Default c(25,50,100). 25 % corresponds
to indexes 80 and 125, 50 % to indexes 67 and 150, and 100 % to indexes 50
and 200.}
\item{dev1.lab}{label to be put in x-axis of the scatter plot (default: NULL).}
\item{dev2.lab}{label to be put in y-axis of the scatter plot (default: NULL).}
\item{lib.var}{column name of x including territorial units name/code we
want to display on the plot.}
\item{lib.val}{a vector of territorial units included in lib.label we want
to display on the plot.}
\item{cex.lab}{size of the axis label text (default = 1).}
\item{cex.axis}{size of the tick label numbers (default = 0.7).}
\item{cex.pt}{size of the dot used for extract specific territorial units
(default 0.5).}
\item{cex.names}{size of the territorial units labels if selected
(default 0.8).}
\item{pos.names}{position of territorial units labels (default 4, to the right).}
}
\value{
A scatter plot displaying the 13 bidev categories, which synthesize
the position of territorial units according to 2 deviations and their
respective distances to the average. The X and Y axes are expressed on a
logarithmic scale (25 % above the average corresponds to index 125 and 25 %
below the average to index 80).
\itemize{bidev typology values :
\item{ZZ: Near the average for the two selected deviations, in grey}
\item{A1: Above the average for dev1 and dev2, distance to the average: +,
in light red}
\item{A2: Above the average for dev1 and dev2, distance to the average: ++,
in red}
\item{A3: Above the average for dev1 and dev2, distance to the average: +++,
in dark red}
\item{B1: Above the average for dev1 and below for dev2, distance to the
average: +, in light yellow}
\item{B2: Above the average for dev1 and below for dev2, distance to the
average: ++, in yellow}
\item{B3: Above the average for dev1 and below for dev2, distance to the
average: +++, in dark yellow}
\item{C1: Below the average for dev1 and dev2, distance to the average: +,
in light blue}
\item{C2: Below the average for dev1 and dev2, distance to the average: ++,
in blue}
\item{C3: Below the average for dev1 and dev2, distance to the average: +++,
in dark blue}
\item{D1: Below the average for dev1 and above for dev2, distance to the
average: +, in light green}
\item{D2: Below the average for dev1 and above for dev2, distance to the
average: ++, in green}
\item{D3: Below the average for dev1 and above for dev2, distance to the
average: +++, in dark green}
}
}
\description{
Visualizing bidev and selecting specific territorial units on it.
}
\examples{
# Load data
library(sf)
com <- st_read(system.file("metroparis.gpkg", package = "MTA"), layer = "com", quiet = TRUE)
# Prerequisite - Compute 2 deviations
com$gdev <- gdev(x = com, var1 = "INC", var2 = "TH")
com$tdev <- tdev(x = com, var1 = "INC", var2 = "TH", key = "EPT")
# EX1 standard breaks with four labels
plot_bidev(x = com,
dev1 = "gdev",
dev2 = "tdev",
dev1.lab = "General deviation (MGP Area)",
dev2.lab = "Territorial deviation (EPT of belonging)",
lib.var = "LIBCOM",
lib.val = c("Marolles-en-Brie", "Suresnes",
"Clichy-sous-Bois", "Les Lilas"))
# EX2, change breaks, enlarge breaks
plot_bidev(x = com,
breaks = c(75, 150, 300),
dev1 = "gdev",
dev2 = "tdev",
dev1.lab = "General deviation (MGP Area)",
dev2.lab = "Territorial deviation (EPT of belonging)")
}
|
b341f52efc2b3dca52db1d23cb28e68fcdda4a5f
|
968681bc2010c74b83235daa83d4b8f344bbd73f
|
/R/BODCbiometrics.R
|
36be0beaa78cc549701ab643f131ccb27981a45d
|
[] |
no_license
|
EMODnet/EMODnetBiocheck
|
bdae6c6e40c78bf8300c8667dbf3b7640a8cfbfc
|
2b6e5380c6826e4c42431d0ab270dc6c0081314e
|
refs/heads/master
| 2023-08-31T10:26:35.348008
| 2023-08-28T09:38:09
| 2023-08-28T09:38:09
| 151,568,194
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
BODCbiometrics.R
|
#' BODC vocabulary terms related to Biometric or other Biotic data.
#'
#' A character vector containing URIs of BODC vocabulary terms related to Biometric or other Biotic data.
#' These will be added to the duplicates check if they exist in the dataset.
#'
"BODCbiometrics"
|
dda4b5a8f1eea61bce96df20a5315b5540ad643f
|
0a94bc20c15cb0c310e8f4df140af91968faabb4
|
/diningdata.R
|
e3b2997ef8be4f7ce5657f63bfb572d1685fb36a
|
[] |
no_license
|
kkorn99/sys2202
|
e5a931805b629d79263d76395988eb84ce0db62d
|
4e52e40c628583efe38586e1182091e0f768f1d4
|
refs/heads/master
| 2022-05-10T06:09:47.212334
| 2020-04-27T00:19:33
| 2020-04-27T00:19:33
| 259,130,770
| 0
| 0
| null | 2020-04-26T20:52:14
| 2020-04-26T20:52:14
| null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
diningdata.R
|
# Script created by Matt Thompson (mlt2we)
library(gcookbook)
library(tidyverse)
library(farver)
library(dplyr, quietly = T)
library(anytime)
d = read.table(file = 'dining/txt/u01.txt', header = FALSE, col.names = c('datetime','diningHall','meal'), sep = ',')
?read.table
view(d)
write_csv(d, 'diningcsv/u01.csv')
print(file.exists('dining/txt/u00.txt'))
for(i in 0:59){
if(i<10){
currentfile = paste('dining/txt/u','0',as.character(i),'.txt', sep = '')
} else {
currentfile = paste('dining/txt/u',as.character(i),'.txt', sep = '')
}
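  # (equivalently, the zero-padded name could be built in one call:
  #  currentfile = sprintf('dining/txt/u%02d.txt', i))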
if(file.exists(currentfile)){
currentcsv = read.table(file = currentfile, header = FALSE, col.names = c('datetime','diningHall','meal'), sep = ',')
currentcsv$datetime = as.POSIXct(currentcsv$datetime, format="%Y-%m-%d%H:%M:%OS")
currentcsv$diningHall = as.character(currentcsv$diningHall)
currentcsv$meal = as.character(currentcsv$meal)
newfile = paste('dining/csvraw/',substr(currentfile,12,14),'.csv',sep='')
write_csv(currentcsv, newfile)
} else {
print(paste('file does not exist: ', currentfile, sep = ''))
}
}
d = read_csv('dining/csvraw/u01.csv')
view(d)
# Relabel "53 Commons"; only scan character columns, since comparing the
# POSIXct datetime column against a string yields NA and breaks the if().
for (j in which(sapply(d, is.character))){
  d[[j]][d[[j]] == "53 Commons"] <- "bad dining hall"
}
|
15913e517563265714822835168d12362c3d68a4
|
13db7f8bc01c0d2a51bf1bca03a007caf49e9b8e
|
/R/xy2SpatialPolygon.r
|
550065df0135eecae07b04dd1ca369440c3fb600
|
[
"MIT"
] |
permissive
|
PEDsnowcrab/aegis
|
e8fbe3b81b8b072371749c1a7497a9fb5818ef40
|
92d4b045c17773c184df4ff3ed47c226ba4eddbf
|
refs/heads/master
| 2022-06-08T06:53:15.019770
| 2019-12-18T16:37:25
| 2019-12-18T16:37:25
| 257,596,288
| 0
| 0
|
MIT
| 2020-04-21T12:58:58
| 2020-04-21T12:58:57
| null |
UTF-8
|
R
| false
| false
| 228
|
r
|
xy2SpatialPolygon.r
|
xy.to.SpatialPolygon = function( xy, id=1, crs=NA ) {
#\\Convert xy matrix of coordinates (lon,lat) to a spatialpolygon
SpatialPolygons( list(
Polygons( list( Polygon( coords=xy)), id ) ), proj4string=sp::CRS(crs) )
}
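# usage sketch (hypothetical unit square; any proj4 string known to sp works):
# sq = xy.to.SpatialPolygon( cbind(c(0,1,1,0,0), c(0,0,1,1,0)), crs="+proj=longlat +datum=WGS84" )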
|
4affb4ebbafec7d69bc0ee0d2439b5b397904107
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/synlik/R/I_extractHessian.R
|
3de5538bbb21e952496168898b628632c2d7ed13
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
r
|
I_extractHessian.R
|
.extractHessian <- cmpfun(function(secondDeriv, nPar)
{
hessian <- matrix(NA, nPar, nPar)
# Calculate indexes to store coefficients in the Hessian
if(nPar == 1){
indexes <- matrix(1, 1, 1)
}else{
# Create a matrix of indexes to manage the second derivatives stored in beta
indexes <- diag(seq(1:nPar))
entries <- seq(nPar + 1, nPar + factorial(nPar)/(factorial(2)*factorial(nPar-2)))
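  # factorial(nPar)/(factorial(2)*factorial(nPar-2)) == choose(nPar, 2),
  # i.e. one entry per off-diagonal pair of parameters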
zz <- 1
for(jj in 1:nPar){
indexes[jj, -(1:jj)] <- entries[zz:(zz + nPar - jj - 1)]
zz <- zz + nPar - jj
}
}
for(iRow in 1:nPar)
for(iCol in iRow:nPar)
hessian[iRow, iCol] <- hessian[iCol, iRow] <- secondDeriv[ indexes[iRow, iCol] ]
return( hessian )
})
|
41da21e442eced7bbc697ca9659c1c169171d483
|
bbd73e22684497a51ec61013277dcce89d417485
|
/src/basis.R
|
76b6a39c94b64cf45f027f1a328787313fb5160c
|
[] |
no_license
|
tlaepple/paleolibrary
|
a432c6405388474ca3295fdc2aadd65f9f6edbac
|
daed039f08854dcbb99b8194fd99ce7fec662842
|
refs/heads/master
| 2021-01-23T08:34:51.242448
| 2011-10-31T18:27:44
| 2011-10-31T18:27:44
| 2,659,270
| 1
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 6,591
|
r
|
basis.R
|
#Test plan:
#build the sorting into the loading of the files!
#+ - scale, detrend [], range and point selection, plot, summary, cor
#general NA handling with a 30% threshold (later, as it is not needed for the poster)
addhistory<-function(x,newhist)
{
newhist<-paste(date(),newhist)
attr(x,"history")<-c(attr(x,"history"),newhist)
return(x)
}
#Coordinate conversion from 1D<->2D
c1t2<-function(x,nLon)
{
x<-x-1
lat<-x%/%nLon+1
lon<-x%%nLon+1
return(list(lat=lat,lon=lon))
}
c2t1<-function(lat,lon,nLon)
{
return(nLon*(lat-1)+(lon))
}
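#e.g. with nLon=4: c1t2(6,4) gives lat=2, lon=2, and c2t1(2,2,4) gives back 6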
mergeattr <- function(data,source1,source2,newhistory='')
{
result<-data
temp1<-attributes(source1)
temp2<-attributes(source2)
attr(result,'lat')<-c(temp1$lat,temp2$lat)
attr(result,'lon')<-c(temp1$lon,temp2$lon)
attr(result,'name')<-c(temp1$name,temp2$name)
attr(result,'history')<-c(temp1$history,paste(date(),newhistory))
return(result)
}
copyattr <- function(data,source,newhistory='',cclass=TRUE)
{
temp<-attributes(source)
attr(data,'lat')<-temp$lat
attr(data,'lon')<-temp$lon
attr(data,'name')<-temp$name
attr(data,'history')<-c(temp$history,paste(date(),newhistory))
if (cclass) class(data)<-class(source)
return(data)
}
is_pTs <- function(data) (sum(class(data) == 'pTs')>0)
is_pField <- function(data) (sum(class(data) == 'pField')>0)
summary.pTs <- function(x, ...)
{
temp<-attributes(x)
print('Proxy timeseries object')
print(paste('Names: ',paste(temp$name,collapse=' / ')))
print('History')
print(temp$history)
print("")
cat("Time range: ",min(time(x))," - ",max(time(x)), "N:",length(time(x)),"\n")
cat("Data range: ",min(x)," - ",max(x),"\n")
}
summary.pField <- function(x, ...)
{
temp<-attributes(x)
print('Proxy field object')
print(paste('Names: ',paste(temp$name,collapse=' / ')))
print('History')
print(temp$history)
print("")
cat("Time range: ",min(time(x))," - ",max(time(x)), "N:",length(time(x)),"\n")
cat("Data range: ",min(x)," - ",max(x),"\n")
print("spatial extent ")
cat('lat: ',min(temp$lat)," - ",max(temp$lat),"N:",length(temp$lat),"\n")
cat('lon: ',min(temp$lon)," - ",max(temp$lon),"N:",length(temp$lon),"\n")
}
#remove the points which are only containing NA's
prcompNA.pField <- function(data,nPc=2,center=TRUE,scale=TRUE, ...)
{
temp<-attributes(data)
class(data)<-"matrix"
dat<-data[,!is.na(colSums(data))]
result<-prcomp(dat,center=center,scale=scale)
tm<-matrix(NA,ncol(data),ncol(result$rotation))
tm[!is.na(colSums(data)),]<-result$rotation
pc<-pTs(result$x[,1:nPc],time(data),paste("PC",1:nPc,temp$name),c(temp$history,"prcomp"))
eof<-pField(tm[,1:nPc],1:nPc,temp$lat,temp$lon,paste("EOF",temp$name),c(temp$history,"prcomp"))
sdev<-result$sdev[1:nPc]
sdsum<-sum(result$sdev)
return(list(pc=pc,eof=eof,sdev=sdev,sdsum<-sdsum))
}
#apply a function on fields containing complete NA sets...
na.apply<-function(x,FUN,... )
{
index<-!is.na(colSums(x))
x[,index]<-FUN(x[,index], ...)
return(x)
}
getlat <- function(data) return(attr(data,"lat"))
getlon <- function(data) return(attr(data,"lon"))
getname <- function(data) return(attr(data,"name"))
gethistory <- function(data) return(attr(data,"history"))
maxpoint <- function(data)
{
pos<-which(data==max(data))
value<-max(data)
lat<-getlat(data)
lon<-getlon(data)
pos2d<-c1t2(pos,length(lon))
return(list(lat=lat[pos2d$lat],lon=lon[pos2d$lon],value=value))
}
minpoint <- function(data)
{
pos<-which(data==min(data))
value<-min(data)
lat<-getlat(data)
lon<-getlon(data)
pos2d<-c1t2(pos,length(lon))
return(list(lat=lat[pos2d$lat],lon=lon[pos2d$lon],value=value))
}
#apply FUN(field->scalar) for each timestep and gives back a timeseries
applyspace<-function(data,FUN)
{
index<-!is.na(colSums(data))
ts<-apply(data[,index],1,FUN)
return(pTs(ts,time(data),name=getname(data)))
}
#apply FUN(field->scalar) for each gridbox and gives back a single field
applytime<-function(data,FUN,newtime=NULL)
{
if (is.null(newtime)) newtime<-mean(time(data))
field<-apply(data,2,FUN)
return(pField(field,newtime,getlat(data),getlon(data),name=getname(data)))
}
#return 2D Fields filled with lats and lons
latlonField <- function(data)
{
lat<-getlat(data)
lon<-getlon(data)
nlat<-length(lat)
nlon<-length(lon)
lon2d<-rep(lon,nlat)
lat2d<-rep(lat,each=nlon)
return(list(lat2d=lat2d,lon2d=lon2d))
}
schwerpunkt<-function(data)
{
#nicht allgemien !
t<-latlonField(data)
t$lon2d[t$lon2d>180]<- t$lon2d[t$lon2d>180]-360
lat<-weighted.mean(t$lat2d,data)
lon<-weighted.mean(t$lon2d,data)
if (lon < 0) lon<-lon+360
return(list(lat=lat,lon=lon))
}
#removed as it causes problems with newer R-versions (likely as cbind.ts is not there anymore)
#combine timeseries
#cbind.pTs <- function(..., deparse.level = 1)
# {
# print("cbind.pTs")
# result<-cbind(..., deparse.level=deparse.level)
# args <- list(...)
# lat<-NULL
# lon<-NULL
# name<-NULL
# for (a in args)
# {
# lat<-c(lat,getlat(a))
# lon<-c(lon,getlon(a))
# name<-c(name,getname(a))
# }
# return(pTs(result,time(result),lat,lon,name,"cbind"))
# }
scale_space <- function(data)
{
data[,]<-scale(as.vector(data))
return(data)
}
rollmean.pTs <- function(x, k, na.pad = TRUE, align = c("center", "left", "right"), ...)
{
return(applyData(x,rollmean,k,na.pad, align, ...))
}
applyData<-function(x,fun,... )
{
x[]<-fun(as.vector(x),... )
return(x) }
## Converts a list of single pTs timeseries to one pTs object
## x = list containing the pTs objects; all need to have the same length
## returns a pTs object with all the timeseries of x, including the
#lat/lon and name information
list2pTs<-function(x)
{
TOLERANCE = 0.01 #tolerance for different time steps
N<-length(x) #Number of timeseries in the list
newTime<-time(x[[1]])
names<-lapply(x,getname) #get all names
lat<-lapply(x,getlat) #get the latitudes
lon<-lapply(x,getlon) #get the longitudes
result<-pTs(NA,newTime,lat,lon,names)
for (i in 1:length(x))
{
if (sum((newTime-time(x[[i]]))^2) > TOLERANCE) stop("time steps are different in the different timeseries")
result[,i]<-x[[i]]
}
return(result)
}
|
0460a4476f3f3380b9c8abc2c08ccf56d916f183
|
1d2806b6d2a7a889ff23a7c474c7c81acec4b375
|
/sess-4.R
|
16210e8946850da5f937b394483fd838df5aeecc
|
[] |
no_license
|
aljrico/math-for-bd
|
4169f81d6c22f8a7a7ff0548f0629c4368ac1d5d
|
96984f358828fcf389744e7c12b3a266d5aeb985
|
refs/heads/master
| 2020-03-13T14:22:38.367588
| 2018-07-05T11:35:43
| 2018-07-05T11:35:43
| 131,157,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37
|
r
|
sess-4.R
|
# SESSION 4: PERFORMANCE ASSESSMENT
|
71d400d893dd4fd2ca7400a5794a00daa948b599
|
0678bb13d5cf2f3c3c53ddf506771a8fd4ecedc5
|
/tests/testthat/test_convert.R
|
751f2fc2d2f2fffb7d2dd60823ec8f0180ffd6a1
|
[] |
no_license
|
Swarchal/fridayDemo
|
edfbbfa24855d8bd1808adb19d896f5ff764e10a
|
50dc0672a59b9b9691f20e56d64710849b9f30d1
|
refs/heads/master
| 2021-01-10T15:06:45.366684
| 2016-01-22T17:17:34
| 2016-01-22T17:17:34
| 50,136,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
test_convert.R
|
context("sqrt_minus_one and square_plus_one convert from one to the other")
# this should fail!
test_that("convert vector from one to the other",{
original_vals <- seq(1, 10)
sq_p_1 <- square_plus_one(original_vals)
rev_vals <- sqrt_minus_one(sq_p_1)
expect_equal(original_vals, rev_vals)
})
|
646058ee86683d4461dd7d37c9be1647d1b2790b
|
7d46827baf192311807e0a44820bb7acdd637429
|
/man/myColorRamp.Rd
|
4efc01fcd9c82f5104b3dcab95215fa95d53c119
|
[] |
no_license
|
castudil/RegressionLibs
|
2822c1ffe1771ad34881af64f7ea68aabf8e83c0
|
d57f0218eaa29c6e3a10fe4e43b734ab0f99dedf
|
refs/heads/master
| 2021-01-24T16:01:59.843257
| 2015-11-09T03:55:16
| 2015-11-09T03:55:16
| 45,933,955
| 0
| 0
| null | 2015-11-10T19:06:39
| 2015-11-10T19:06:39
| null |
UTF-8
|
R
| false
| false
| 704
|
rd
|
myColorRamp.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RegressionLibs.R
\name{myColorRamp}
\alias{myColorRamp}
\title{Color Ramp}
\source{
http://stackoverflow.com/questions/10413678/how-to-assign-color-scale-to-a-variable-in-a-3d-scatter-plot
}
\usage{
myColorRamp(colors, values)
}
\arguments{
\item{colors}{a list of name colors.}
\item{values}{an object of class data frame with a dependent variable.}
}
\value{
a list colors in HEX format.
}
\description{
Function that maps a list of values to their corresponding colors along the
given color ramp.
}
\examples{
iris.y <- iris[,4]
cols <- myColorRamp(c("darkred", "yellow", "darkgreen"), iris.y)
}
\seealso{
PlotPC3D
}
|
2cfca4e465e1a7067e81b7ca2dc56808ee597f7c
|
eaef4dd2d510ff30ac2ee45460687224583ce028
|
/code/plotting/remove_tmp_obj.R
|
cd0661f2603c7e7648a3e27d526ec1a942bd4170
|
[] |
no_license
|
guizar/carbon-stocks
|
d6f1dc6e39dd39e6c4c5110b92fedbfac78fbb6c
|
b075f9dcc9aabf05372aa2e461278dbbc22907bd
|
refs/heads/master
| 2020-06-03T15:15:18.034093
| 2017-10-26T15:32:16
| 2017-10-26T15:32:16
| 30,377,221
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
r
|
remove_tmp_obj.R
|
## Remove tmp objects
rm(rec_lab)
rm(shot_lab)
rm(gaussAxis)
rm(i)
rm(heightAxis)
rm(d_gpCntRngOff_m)
rm(matchedTable_rown)
rm(positive)
rm(negative)
rm(zero)
rm(m)
rm(r_rng_wf_m)
rm(d_Gamp_m)
rm(df)
rm(rec)
rm(shot)
rm(waveform)
rm(vertical_maxGauss)
rm(vertical_waveform)
rm(d_SigBegOff_m)
rm(d_SigEndOff_m)
rm(maxGaussVolts)
rm(maxGaussHeight)
rm(waveformHeight)
rm(waveformVolts)
rm(maxGauss)
rm(d_Gsigma_m)
rm(cropToSig)
rm(savePlot)
rm(writeCsv)
rm(plotModelWave)
rm(plotWave)
rm(ltype)
rm(lwidth)
rm(modelwave)
rm(pal)
rm(y1)
rm(y2)
rm(y3)
rm(y4)
rm(y5)
rm(y6)
rm(ind_gauss)
rm(d_maxRecAmp_m)
rm(d_maxSmAmp_m)
rm(check_d_gpCntRngOff)
|
d354603ef5763545cb6953c543024fc4c2128431
|
a339a2618e68c666bccd7cbdea5882ed96994115
|
/keras.r
|
c6e94186b8a9b7972e0fc2abfa2eed07366ef12e
|
[] |
no_license
|
daanvandermaas/raspberry
|
e3c0f8e6960e0b453ad120cf8f18fdd1badd2e63
|
35687e6e16ca39f63dd15a41b8ea42fdc19727ca
|
refs/heads/master
| 2020-03-16T15:41:28.966757
| 2018-09-06T07:59:58
| 2018-09-06T07:59:58
| 132,754,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,444
|
r
|
keras.r
|
library(keras)
library(jpeg)
library(abind)
######Parameters
epochs = 280
batch_size = 2L
h = as.integer(512) #height of the image crop (full image dim = 2448)
w = as.integer(1024) #width image
channels = 3L
class = 2L
drop = 0.5
#####
source('model.r')
opt<-optimizer_adam( lr= 0.0001 , decay = 0, clipnorm = 1 )
compile(model, loss="categorical_crossentropy", optimizer=opt, metrics = "accuracy")
#data loading
train_yes = readRDS( 'db/train_yes.rds' )
train_no = readRDS( 'db/train_no.rds' )
i= 1
#Train the network
for (epoch in 11:epochs){
order = sample(c(1:nrow(train_yes) ), nrow(train_yes), replace = FALSE)
train_yes = train_yes[order,]
order = sample(c(1:nrow(train_no) ), nrow(train_no), replace = FALSE)
train_no = train_no[order,]
for(i in 1:nrow(train_yes)){
im_yes = readJPEG(train_yes$images[i])[257:768,,]
im_no = readJPEG(train_no$images[i])[257:768,,]
im_yes = array(im_yes, dim = c(1, dim(im_yes)))
im_no = array(im_no, dim = c(1, dim(im_no)))
input_im = abind(im_yes, im_no, along = 1)
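    # im_yes/im_no each carry a leading batch dimension of 1; abind along=1
    # stacks them into a single (2, h, w, channels) training batch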
input_lab = matrix(c(1,0,0,1), nrow = 2, ncol = 2, byrow = TRUE)
model$fit( x= input_im, y= input_lab, batch_size = batch_size, epochs = 1L )
}
print(paste('epoch:', epoch))
model$save( paste0('db/model/model_small_', epoch) )
}
#model$evaluate(x = batch_files, y = batch_labels)
#model = keras::load_model_hdf5('db/model/model_big_10')
|
371358a76ec54c629d24ee11a80b4a8c008dce82
|
56fb62e298e2a112d5acc6a07c50e060f868ed49
|
/GenerateDictionaries.R
|
a088b8a2034135ff8b001bc06497b517b61fe165
|
[] |
no_license
|
Stella1017/CourseraCapstone
|
e9e39beee066e49adf5cdbf3244338d99879ffb6
|
fd4927c52c265d53e671c08e614bd81fae910ef7
|
refs/heads/master
| 2021-08-14T12:46:00.714938
| 2017-11-15T19:13:58
| 2017-11-15T19:13:58
| 110,382,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,430
|
r
|
GenerateDictionaries.R
|
setwd("~/Google Drive/Coursera-DataScience/Capstone Project/final/en_US/")
con.twt <- file("en_US.twitter.txt", "r"); twt <- readLines(con.twt); close(con.twt)
con.news <- file("en_US.news.txt","r"); news <- readLines(con.news); close(con.news)
con.blogs <- file("en_US.blogs.txt", "r"); blogs <- readLines(con.blogs); close(con.blogs)
rm(con.twt); rm(con.news); rm(con.blogs)
set.seed(1101)
index.test <- sample(1:length(blogs),270000) #30% of length of Blogs
train.twt <- twt[-index.test]
train.news <- news[-index.test]
train.blogs <- blogs[-index.test]
test.twt <- twt[index.test]
test.news <- news[index.test]
test.blogs <- blogs[index.test]
source(file="GetCleanedText.R")
source(file="GetDictionary.R")
voc.twt <- GetVoc(train.twt, smpsize = 0.05)
voc.news <- GetVoc(train.news, smpsize = 0.05)
voc.blogs <- GetVoc(train.blogs, smpsize = 0.05)
voc.twt.test <- GetVoc(test.twt, smpsize = 0.1)
voc.news.test <- GetVoc(test.news, smpsize = 0.1)
voc.blogs.test <- GetVoc(test.blogs, smpsize = 0.1)
voc.train <- c(voc.twt, voc.news, voc.blogs)
voc.test <- c(voc.blogs.test, voc.news.test, voc.twt.test)
uni.train <- GetDict(voc.train)
uni.test <- GetDict(voc.test)
bi.cor.train <- vapply(ngrams(voc.train, 2), paste, "", collapse = " ")
bi.train <- GetDict(bi.cor.train)
bi.cor.test <- vapply(ngrams(voc.test, 2), paste, "", collapse = " ")
bi.test <- GetDict(bi.cor.test)
tri.cor.train <- vapply(ngrams(voc.train, 3), paste, "", collapse = " ")
tri.train <- GetDict(tri.cor.train)
tri.cor.test <- vapply(ngrams(voc.test, 3), paste, "", collapse = " ")
tri.test <- GetDict(tri.cor.test)
quad.cor.train <- vapply(ngrams(voc.train, 4), paste, "", collapse = " ")
quad.train <- GetDict(quad.cor.train)
uni.dict <- uni.train[1:1000, ] #top 1000, cover 71%
bi.dict <- bi.train[1:177556, ] #all the instances that appeared more than twice, cover 66%
tri.dict <- tri.train[1:278900, ] #all the instances that appeared more than once, cover 31%
quad.dict <- quad.train[1:107105, ] #all the instances that appeared more than once, only cover 8.4%
bi.dict$element <- strsplit(as.character(bi.dict$Vocabulary), split=" ")
tri.dict$element <- strsplit(as.character(tri.dict$Vocabulary), split=" ")
quad.dict$element <- strsplit(as.character(quad.dict$Vocabulary), split=" ")
save(uni.dict, file="unigram.RData")
save(bi.dict, file="bigrams.RData")
save(tri.dict, file="trigrams.RData")
save(quad.dict, file="quadgrams.RData")
|
dbe10dbb43af87a30b62ff67f2a9050205339097
|
c0bce42fcea5993c3d9976248c157f4a4433db0b
|
/figure_paired_cd34_pbmc/code/09_viz_anecdotes.R
|
b3b5d0aa00d33e715f8c8a78707a80ba0f8b366f
|
[] |
no_license
|
ChenPeizhan/mtscATACpaper_reproducibility
|
a01b22f43c6222a56e04e731d68de7440c3cfc76
|
e543610bf29dbac1094994c54d3e7edd41609d5a
|
refs/heads/master
| 2022-12-11T11:31:24.877462
| 2020-08-29T18:40:36
| 2020-08-29T18:40:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,530
|
r
|
09_viz_anecdotes.R
|
library(BuenColors)
library(dplyr)
library(ggrastr)
# Import processed data
cd34_clone_df <- readRDS("../output/CD34_clone_DF.rds")
pbmc_clone_df <- readRDS("../output/PBMC_clone_DF.rds")
cd34_mut_se <- readRDS("../output/filteredCD34_mgatk_calls.rds")
pbmc_mut_se <- readRDS("../output/filteredpbmcs_mgatk_calls.rds")
tb <- theme(legend.position = "none",
panel.grid = element_blank(),
axis.title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
panel.background = element_blank())
make_4plot_grid <- function(clone_one, variant, variant_name){
# Make plots of clones
cd34_clone_df$color_clone <- cd34_clone_df$mito_cluster == clone_one
p_CD34_clone <- ggplot(cd34_clone_df %>% arrange(color_clone), aes(x= X1, y = X2, color = color_clone)) +
geom_point_rast(size = 3, raster.dpi = 500) +
tb + scale_color_manual(values = c("lightgrey", "dodgerblue3"))
pbmc_clone_df$color_clone <- pbmc_clone_df$mito_cluster == clone_one
p_PBMC_clone <- ggplot(pbmc_clone_df %>% arrange(color_clone), aes(x= UMAP_1, y = UMAP_2, color = color_clone)) +
geom_point_rast(size = 3, raster.dpi = 500) +
tb + scale_color_manual(values = c("lightgrey", "dodgerblue3"))
cd34_clone_df$color_AF <- assays(cd34_mut_se)[["allele_frequency"]][variant,]
p_CD34_AF <- ggplot(cd34_clone_df %>% arrange(color_AF), aes(x= X1, y = X2, color = color_AF)) +
geom_point_rast(size = 3, raster.dpi = 500) +
tb + scale_color_gradientn(colors = c("lightgrey", "firebrick"))
pbmc_clone_df$color_AF <- assays(pbmc_mut_se)[["allele_frequency"]][variant,]
p_PBMC_AF <- ggplot(pbmc_clone_df %>% arrange(color_AF), aes(x= UMAP_1, y = UMAP_2, color = color_AF)) +
geom_point_rast(size = 3, raster.dpi = 500) +
tb + scale_color_gradientn(colors = c("lightgrey", "firebrick"))
cowplot::ggsave2(cowplot::plot_grid(p_CD34_AF,p_PBMC_AF,p_CD34_clone,p_PBMC_clone, nrow = 2),
filename = paste0("../plots/raster_clones_",variant_name,".png"),
width = 2.0, height = 2.0, units = "in", dpi = 500)
}
make_4plot_grid("119", "12868G>A", "12868G-A")
make_4plot_grid("008", "2788C>A", "2788C-A")
make_4plot_grid("032", "3209A>G", "3209A-G")
sum(cd34_clone_df$mito_cluster == "119")
sum(cd34_clone_df$mito_cluster == "008")
sum(cd34_clone_df$mito_cluster == "032")
sum(pbmc_clone_df$mito_cluster == "119")
sum(pbmc_clone_df$mito_cluster == "008")
sum(pbmc_clone_df$mito_cluster == "032")
|
0b2c6bc2aa8b3cd7eef60d83c4a6383f13a30c6a
|
078bf836f420c94805ea22214f952752dca611c1
|
/xship/server/tab-enginemonitoring.R
|
a00297d19083e479c42ae1e532f627751015b6fe
|
[] |
no_license
|
nikhadharman/shiny
|
20e8cdc3e4e6b6d7b463c7cd494a5f301e945906
|
e461d48b7d5f3a1e350298468b103067947ddc70
|
refs/heads/master
| 2021-09-09T23:59:30.755964
| 2018-03-20T09:26:29
| 2018-03-20T09:26:29
| 111,196,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88,300
|
r
|
tab-enginemonitoring.R
|
#ENGINE ANALYSIS ...................................
#enginedata =readWorksheetFromFile( "data/ENGINE ANALYSIS.xlsx", sheet = 1, header = FALSE )
enginedata =readWorksheetFromFile( "data/Engine Analysis_test.xlsx", sheet = 1, header = TRUE )
shoptrial = readWorksheetFromFile( "data/SHOPTRIAL DATA.xlsx", sheet = 1, header = TRUE ) # read excel file
#ENGINE TEMP @ CYLINDER......................
output$enginedate <- renderUI({
r=enginedata
vessel = input$engineVessel
r=subset(r,r[,1]== vessel)
r <- r[order(as.Date(r$Date,"%d-%m-%y"),decreasing = T),]
date_List = unique(as.Date(r[,6],"%d-%m-%y"), incomparables = FALSE)
selectInput("enginedate", label="Test Date Selection", choices = date_List, selected = as.Date(date_List[1],"%d-%m-%y"), multiple = FALSE, selectize = TRUE, width = "50%", size = NULL)
})
output$enginevessel <- renderUI({
r=enginedata
Vessel_List = unique(as.character(r[,1]), incomparables = FALSE)
selectInput("engineVessel", label="Vessel", choices = Vessel_List, selected = "STRATEGIC ALLIANCE", multiple = FALSE, selectize = TRUE, width = "50%", size = NULL)
})
output$enginemonth <- renderUI({
numericInput("monthno",label = "Enter Number of Months",value = 4,width = "50%",min = 1,max = 12)
})
enginedatatable = reactive({
r=enginedata
vessel = input$engineVessel
r=subset(r,r[,1]== vessel)
date = input$enginedate
r$Date = as.Date(r$Date,"%d-%m-%y")
r = subset(r,Date <= date)
r <- r[order(r$Date,decreasing = T),]
r = head(r,input$monthno)
r
})
shoptrialdata = reactive({
r=shoptrial
vess = input$engineVessel
r=subset(r,r[,3]== vess)
r
})
output$vessel_name = renderText({
paste("Vessel Selection :",input$engineVessel)
})
output$Engine_Date=renderText({
paste("Date:",input$enginedate)
})
output$vessel_name1=renderText({
paste("Vessel Selection :",input$engineVessel)
})
output$Engine_Date1=renderText({
paste("Date:",input$enginedate)
})
output$vessel_name11=renderText({
paste("Vessel Name :",input$engineVessel)
})
output$Engine_Date11=renderText({
paste("Date:",input$enginedate)
})
#Texh---------------------------------------------------------------------------------------------------------------------------------------------
exhaustdata= reactive({
r = enginedatatable()
Exhaust.Temp = c(r$ExTemp1[1],r$ExTemp2[1],r$ExTemp3[1],r$ExTemp4[1],r$ExTemp5[1],r$ExTemp6[1])
Cylinder = c(01:6)
tableET = data.frame(Cylinder,Exhaust.Temp)
tableET=subset(tableET, !is.na(tableET$Exhaust.Temp))
tableET
})
output$exhausttemp = renderPlotly({
validate(
need(try(exhaustdata()),"Please wait or NO DATA AVAILABLE...")
)
m = exhaustdata()
m=subset(m, !is.na(m$Exhaust.Temp))
x = enginedatatable()
avg = x$ExTempAvg[1]
p <- plot_ly(m,
x =~Cylinder ,
y = ~Exhaust.Temp,
name = "Cyl Exhaust Temp",
type = "bar",
marker=list(color="#9B59B6"))%>%
add_trace(x=c(head(m$Cylinder,1),tail(m$Cylinder,1)),y=c(avg,avg),name="Average Line",type="scatter",mode="lines+markers",marker= list(color="#FE0707",size=8,opacity = 0),showlegend = FALSE)
p=p%>%layout(title="Texh", titlefont = s,xaxis=list(title="Cylinder No", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Exh. Temp at Cyl. out(deg.C)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF")
p
})
output$ETtable1 = renderDataTable({
validate(
need(try(exhaustdata()),"Please wait or NO DATA AVAILABLE...")
)
m = exhaustdata()
normal = 400
r=subset(m, !is.na(m$Exhaust.Temp))
mean = NA
m$mean_value <- mean
m$diff <- mean-m$Exhaust.Temp
m$result <- ifelse(m$Exhaust.Temp<normal, "Normal", "High Value")
m[1,3] = round(mean(as.numeric(m$Exhaust.Temp ),na.rm = TRUE),digits = 1)
m[,4] = round((m[1,3]-m$Exhaust.Temp ),digits = 1)
names(m)<-c("Cylinder Number","Measured Value (deg.C)","Average Value","(Measured)-(Average)","Result")
datatable(m, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(m),color="#000", backgroundColor = "white")
})
output$ETtable2 = renderDataTable({
validate(
need(try(exhaustdata()),"Please wait or NO DATA AVAILABLE...")
)
m=exhaustdata()
mean = NA
devtableET = data.frame(mean,abs(mean-m$Exhaust.Temp))
names(devtableET)<-c("Average Value","(Measured)-(Average)")
devtableET
devtableET[1,1] = round(mean(as.numeric(m$Exhaust.Temp),na.rm = TRUE),digits = 1)
devtableET[,2] = round((devtableET[1,1]-m$Exhaust.Temp),digits = 1)
datatable(devtableET, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(devtableET),color="#000",backgroundColor = "white")
})
output$ETtable3 = renderDataTable({
validate(
need(try(exhaustdata()),"Press Wait or NO DATA AVAILABLE..........")
)
      m = exhaustdata()
      normal = 400
      resulttableET = data.frame(m$Cylinder,
                                 ifelse(m$Exhaust.Temp < normal, "Normal", "High Value!"))
      names(resulttableET) <- c("Cylinder Number","Result")
datatable(resulttableET, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(resulttableET),color="#000",backgroundColor = "white")
})
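  # The Normal / High Value classification above recurs for pump mark, Pcomp and
  # Pmax below. A minimal vectorised helper could centralise the threshold logic
  # (illustrative sketch; the name `flag_high` is ours and the outputs below keep
  # their inline ifelse() calls):
  flag_high <- function(values, limit) {
    # returns "Normal" where the reading is below the limit, "High Value!" otherwise
    ifelse(values < limit, "Normal", "High Value!")
  }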
#PUMP MARK
pumpmark= reactive({
r = enginedatatable()
Pump.Mark = c(r$PumpMark1[1],r$PumpMark2[1],r$PumpMark3[1],r$PumpMark4[1],r$PumpMark5[1],r$PumpMark6[1])
Cylinder = c(01:6)
tablePM = data.frame(Cylinder,Pump.Mark)
tablePM=subset(tablePM, !is.na(tablePM$Pump.Mark))
tablePM
})
output$pumpmark = renderPlotly({
validate(
      need(try(pumpmark()),"Press Wait or NO DATA AVAILABLE..........")
)
m=pumpmark()
m=subset(m, !is.na(m$Pump.Mark))
x = enginedatatable()
avg = x$PumpMarkAvg[1]
p <- plot_ly(m,
x =~Cylinder ,
y = ~Pump.Mark,
name = "Pump Mark",
type = "bar",
marker=list(color="#4DD0E1"))
p=p%>%layout(title="Pump Mark", titlefont = s,xaxis=list(title="Cylinder No", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Pump Mark", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF")%>%
add_trace(x=c(head(m$Cylinder,1),tail(m$Cylinder,1)),y=c(avg,avg),name="Average Line",type="scatter",mode="lines+markers",marker= list(color="#FE0707",size=8,opacity = 0),showlegend = FALSE)
p
})
output$PMtable1 = renderDataTable({
validate(
need(try(pumpmark()),"Press Wait or NO DATA AVAILABLE..........")
)
      m = pumpmark()
      normal = 55
      avg = round(mean(as.numeric(m$Pump.Mark), na.rm = TRUE), digits = 1)
      m$mean_value <- NA
      m$mean_value[1] <- avg
      m$diff <- round(m$Pump.Mark - avg, digits = 1)
      m$result <- ifelse(m$Pump.Mark < normal, "Normal", "High Value")
names(m)<-c("Cylinder Number","Measured Value","Average Value","(Measured)-(Average)","Result")
datatable(m, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(m),color="#000",backgroundColor = "white")
})
output$PMtable2 = renderDataTable({
validate(
need(try(pumpmark()),"Press Wait or NO DATA AVAILABLE..........")
)
      m = pumpmark()
      avg = round(mean(as.numeric(m$Pump.Mark), na.rm = TRUE), digits = 1)
      devtablePM = data.frame(c(avg, rep(NA, nrow(m) - 1)),
                              round(m$Pump.Mark - avg, digits = 1))
      names(devtablePM) <- c("Average Value","(Measured)-(Average)")
datatable(devtablePM, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(devtablePM),color="#000",backgroundColor = "white")
})
output$PMtable3 = renderDataTable({
validate(
need(try(pumpmark()),"Press Wait or NO DATA AVAILABLE..........")
)
      m = pumpmark()
      normal = 55
      resulttablePM = data.frame(m$Cylinder,
                                 ifelse(m$Pump.Mark < normal, "Normal", "High Value!"))
      names(resulttablePM) <- c("Cylinder Number","Result")
datatable(resulttablePM, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(resulttablePM),color="#000",backgroundColor = "white")
})
# COMPRESSION PRESSURE
pcomp= reactive({
r = enginedatatable()
PComp = c(r$PressComp1[1],r$PressComp2[1],r$PressComp3[1],r$PressComp4[1],r$PressComp5[1],r$PressComp6[1])
Cylinder = c(01:6)
table = data.frame(Cylinder,PComp)
table=subset(table, !is.na(table$PComp))
table
})
output$PComp = renderPlotly({
validate(
      need(try(pcomp()),"Press Wait or NO DATA AVAILABLE..........")
)
m= pcomp()
m=subset(m, !is.na(m$PComp))
x = enginedatatable()
avg = x$PressCompAvg[1]
p <- plot_ly(m,
x =~Cylinder ,
y = ~PComp ,
type = "bar",
marker=list(color="#9334E6"))%>%
add_trace(x=c(head(m$Cylinder,1),tail(m$Cylinder,1)),y=c(avg,avg),name="Average Line",type="scatter",mode="lines+markers",marker= list(color="#FE0707",size=8,opacity = 0),showlegend = FALSE)
p=p%>%layout(title="PComp", titlefont = s,xaxis=list(title="Cylinder No", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Compression Pressure(bar)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF")
p
})
output$PCtable1 = renderDataTable({
      m = pcomp()
      normal = 75
      avg = round(mean(as.numeric(m$PComp), na.rm = TRUE), digits = 1)
      m$mean_value <- NA
      m$mean_value[1] <- avg
      m$diff <- round(m$PComp - avg, digits = 1)
      m$result <- ifelse(m$PComp < normal, "Normal", "High Value")
names(m)<-c("Cylinder Number","Measured Value (bar)","Average Value","(Measured)-(Average)","Result")
datatable(m, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(m),color="#000",backgroundColor = "white")
})
output$PCtable2 = renderDataTable({
      m = pcomp()
      avg = round(mean(as.numeric(m$PComp), na.rm = TRUE), digits = 1)
      devtablePC = data.frame(c(avg, rep(NA, nrow(m) - 1)),
                              round(m$PComp - avg, digits = 1))
      names(devtablePC) <- c("Average Value","(Measured)-(Average)")
datatable(devtablePC, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(devtablePC),color="#000",backgroundColor = "white")
})
output$PCtable3 = renderDataTable({
      m = pcomp()
      normal = 75
      resulttablePC = data.frame(m$Cylinder,
                                 ifelse(m$PComp < normal, "Normal", "High Value!"))
      names(resulttablePC) <- c("Cylinder Number","Result")
datatable(resulttablePC, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(resulttablePC),color="#000",backgroundColor = "white")
})
# MAXIMUM PRESSURE
pmax= reactive({
r = enginedatatable()
MaxPress = c(r$MaxPress1[1],r$MaxPress2[1],r$MaxPress3[1],r$MaxPress4[1],r$MaxPress5[1],r$MaxPress6[1])
Cylinder = c(01:6)
table = data.frame(Cylinder,MaxPress)
table=subset(table, !is.na(table$MaxPress))
table
})
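  # Caution: this reactive is named pmax(), which shadows base::pmax() inside the
  # server scope; any later call intending the base parallel-maximum would need to
  # spell out base::pmax() explicitly.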
output$Pmax = renderPlotly({
m= pmax()
m=subset(m, !is.na(m$MaxPress))
x = enginedatatable()
avg = x$MaxPressAvg[1]
p <- plot_ly(m,
x =~Cylinder ,
y = ~MaxPress ,
type = "bar",
marker=list(color="#27E474"))%>%
add_trace(x=c(head(m$Cylinder,1),tail(m$Cylinder,1)),y=c(avg,avg),name="Average Line",type="scatter",mode="lines+markers",marker= list(color="#FE0707",size=8,opacity = 0),lines = list(color = "#085EA2"),showlegend = FALSE)
p=p%>%layout(title="Pmax", titlefont = s,xaxis=list(title="Cylinder No", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Maximum Pressure(bar)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF")
p
})
output$MPtable1 = renderDataTable({
      m = pmax()
      normal = 110
      avg = round(mean(as.numeric(m$MaxPress), na.rm = TRUE), digits = 1)
      m$mean_value <- NA
      m$mean_value[1] <- avg
      m$diff <- round(m$MaxPress - avg, digits = 1)
      m$result <- ifelse(m$MaxPress < normal, "Normal", "High Value")
names(m)<-c("Cylinder Number","Measured Value (bar)","Average Value","(Measured)-(Average)","Result")
datatable(m, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(m),color="#000",backgroundColor = "white")
})
output$MPtable2 = renderDataTable({
      m = pmax()
      avg = round(mean(as.numeric(m$MaxPress), na.rm = TRUE), digits = 1)
      devtableMP = data.frame(c(avg, rep(NA, nrow(m) - 1)),
                              round(m$MaxPress - avg, digits = 1))
      names(devtableMP) <- c("Average Value","(Measured)-(Average)")
datatable(devtableMP, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(devtableMP),color="#000",backgroundColor = "white")
})
output$MPtable3 = renderDataTable({
      m = pmax()
      normal = 110
      resulttableMP = data.frame(m$Cylinder,
                                 ifelse(m$MaxPress < normal, "Normal", "High Value!"))
      names(resulttableMP) <- c("Cylinder Number","Result")
datatable(resulttableMP, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(resulttableMP),color="#000",backgroundColor = "white")
})
#TC SPEED VS ENGINE SPEED------------------------------------------------------------------------------------------------------------------------
output$graphTCEn = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$engine.speed
y=STdata$TCspeed
testx = seq(from=min(STdata$engine.speed,na.rm = T), to=max(STdata$engine.speed,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = n1*testx*testx + n2*testx + k
    p <- plot_ly()
for(i in 1 :input$monthno){
      p=p%>%add_trace(x = as.numeric(mydata$RPM[i]), y = as.numeric(mydata$TCrpm[i]), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"), name = "Shoptrial data", showlegend = TRUE)
    p = p%>%add_trace(x=testx,y = testy, type='scatter' , mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0),name = "reg",showlegend = F)
p <-p%>%layout(title="T/C Speed Vs Engine Speed", titlefont = s, xaxis = list(title="Engine Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected T/C Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
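  # Pattern used by all the "X Vs Y" performance charts below: fit a quadratic
  # through the shop-trial points with lm(y ~ poly(x, 2, raw = TRUE)), evaluate it
  # on a 30-point grid to draw the reference spline, then overlay the last
  # `monthno` service measurements as individual markers.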
output$graphTCEn1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
c = input$monthno
x= STdata$engine.speed
y=STdata$TCspeed
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
TCspeed = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$RPM[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
TCspeed[i] = mydata$TCrpm[i]
deviation[i] = round((TCspeed[i]-testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
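  # The deviation charts report (measured - predicted) / predicted * 100 against
  # the shop-trial quadratic. A compact helper capturing the repeated fit/predict
  # step (a sketch under the same poly(..., 2, raw = TRUE) model; the name
  # st_predict is ours, and the outputs below keep their inline coefficients):
  st_predict <- function(x, y, newx) {
    # fit the shop-trial quadratic and return its predictions at the measured points
    fit <- lm(y ~ poly(x, 2, raw = TRUE), data = data.frame(x = x, y = y))
    as.numeric(predict(fit, newdata = data.frame(x = newx)))
  }
  # Example: st_predict(STdata$engine.speed, STdata$TCspeed, mydata$RPM) would give
  # the reference T/C speeds at the measured engine speeds.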
output$shoptrialTCEn = renderDataTable({
STdata = shoptrialdata()
x= STdata$engine.speed
y=STdata$TCspeed
table = data.frame(x,y)
names(table)<-c("Engine Speed","T/C Speed")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicTCEn = renderDataTable({
mydata = enginedatatable()
dates = mydata$Date
historicx = mydata$RPM
historicy = round(mydata$TCrpm,1)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Engine Speed","T/C Speed")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
# Load Diagram------------------
output$graph_Load_diagram = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
    p = plot_ly()
for(i in 1:input$monthno){
p = p%>%add_trace(x = as.numeric(mydata$RPM[i]), y = as.numeric(mydata$Engine.Load[i]), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = as.numeric(STdata$ld_rpm), y = as.numeric(STdata$ld_load1), type='scatter', mode = "lines+markers",line=list(shape="spline",smoothing = 0.5,color = "#640017") ,marker=list(opacity=0),name = "reg",showlegend = F)
p = p%>%add_trace(x = as.numeric(STdata$ld_rpm2),y = as.numeric(STdata$ld_load2), type='scatter', mode = "lines+markers",line=list(shape="spline",color = "#640017") ,marker=list(opacity=0),name = "reg",showlegend = F)
p = p%>%add_trace(x = as.numeric(STdata$ld_rpm3),y = as.numeric(STdata$ld_load3), type='scatter', mode = "lines+markers",line=list(shape="spline",smoothing = 1.3,color = "#640017") ,marker=list(opacity=0),name = "reg",showlegend = F)
p =p%>%layout(title="Load Diagram", titlefont = s, xaxis = list(title="Engine Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF",range = seq(70,135,by=5),linecolor='#636363',
linewidth=2),yaxis=list(title="Engine Load (%)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9",range = seq(40,110,by=10),linecolor='#636363',
linewidth=2),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
})
output$historical_Daigram = renderDataTable({
mydata = enginedatatable()
dates = mydata$Date
historicx = mydata$RPM
historicy = round(mydata$Engine.Load,0)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Engine RPM","Engine Load")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
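  # The load diagram draws three shop-trial boundary curves (ld_load1..ld_load3
  # against their rpm columns) as splines and plots each month's operating point
  # against that envelope.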
#Torque rich index....................................
output$graph_TorqueRichIndex_diagram = renderPlotly({
mydata = enginedatatable()
    daterange = c(as.Date(as.character(mydata$Date[1])),as.Date(as.character(mydata$Date[nrow(mydata)])))   # span all plotted rows, not a fixed fourth month
referenceline = 1.00
Cautionline = 1.10
alarmline = 1.20
Cautionline1 = 0.82
alarmline1 = 0.80
p = plot_ly( x = as.Date(as.character(mydata$Date[1])), y = as.numeric(mydata$TorqueRichIndex[1]), type='scatter',
mode = "markers",marker=list(size=12),name = mydata$Date[1], showlegend = TRUE)
for(i in 2 : input$monthno){
p = p%>%add_trace(x = as.Date(as.character(mydata$Date[i])), y = as.numeric(mydata$TorqueRichIndex[i]), type='scatter',
mode = "markers",marker=list(size=12),name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = daterange, y = c(referenceline,referenceline), type='scatter',
mode="lines+markers",marker= list(color=" #4B0082",size=2),name = "Reference Line", showlegend =TRUE)
p =p%>% add_trace(x = daterange, y = c(Cautionline, Cautionline), type='scatter',
mode="lines+markers",marker= list(color="#7FFF00",size=2),name = "Caution Line", showlegend =TRUE)
p = p%>%add_trace(x = daterange, y = c(alarmline, alarmline), type='scatter',
mode="lines+markers",marker= list(color="#DC143C",size=2),name = "Alarm Line", showlegend =TRUE)
p = p%>%add_trace(x = daterange, y = c(Cautionline1, Cautionline1), type='scatter',
mode="lines+markers",marker= list(color="#7FFF00",size=2),name = "Caution Line", showlegend =FALSE)
p = p%>%add_trace(x = daterange, y = c(alarmline1, alarmline1), type='scatter',
mode="lines+markers",marker= list(color="#DC143C",size=2),name = "Alarm Line", showlegend =FALSE)
p =p%>%layout(title="Trend of Torque Rich", titlefont = s, xaxis = list(title="Date", titlefont = s, tickfont = s,gridcolor = "#FFFFFF",linecolor='#636363',
linewidth=2),yaxis=list(title="Torque Rich", titlefont = s, tickfont = s,gridcolor = "#E5E7E9",range = seq(0,1.5,by=0.2),linecolor='#636363',
linewidth=2),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
})
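  # Torque rich trend: each month's TorqueRichIndex is plotted against a reference
  # line at 1.00, caution lines at 1.10 / 0.82 and alarm lines at 1.20 / 0.80.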
output$Torquerich_Daigram = renderDataTable({
mydata = enginedatatable()
dates = mydata$Date
historicx =round( mydata$TorqueRichIndex,4)
historicy = mydata$Condition
    result = ifelse(historicx >= 0.9, "NORMAL",
                    ifelse(historicx >= 0.8, "Light Condition", "Very Light Condition"))
table = data.frame(dates,historicx,historicy,result)
names(table) = c("Date Of Measurement","Torque Rich Index","Condition","Result")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
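  # Classification used above: an index of at least 0.9 is NORMAL, 0.8 to 0.9 is a
  # light condition, and anything below 0.8 is a very light condition.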
#PUMP MARK VS ENGINE SPEED.....................................
output$graphPMEn = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$engine.speed
y=STdata$pumpmark
testx = seq(from=min(STdata$engine.speed,na.rm = T), to=max(STdata$engine.speed,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 : input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$RPM[i])), y = as.numeric(as.character(mydata$PumpMarkAvg[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p =p%>% add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p= p%>%add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0),name = "reg",showlegend = F)
p <-p%>%layout(title="Pump Mark Vs Engine Speed", titlefont = s, xaxis = list(title="Engine Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected Pump Mark", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphPMEn1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$engine.speed
y=STdata$pumpmark
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
Pmark = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$RPM[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
Pmark[i] = mydata$PumpMarkAvg[i]
deviation[i] = round((Pmark[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialPMEn = renderDataTable({
STdata = shoptrialdata()
x= STdata$engine.speed
y=STdata$pumpmark
table = data.frame(x,y)
names(table)<-c("Engine Speed","Pump Mark")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicPMEn = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
names = c("Latest Data","Others1","Others2","Others3")
historicx = mydata$RPM
historicy = round(mydata$PumpMarkAvg,1)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Engine Speed","Pump Mark")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#Pscav VS T/C SPEED
output$graphPsTC = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$TCspeed
y=STdata$pscav
testx = seq(from=min(STdata$TCspeed,na.rm = T), to=max(STdata$TCspeed,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = testx^2*n1+testx*n2+k
    p <- plot_ly()
for(i in 1 : input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$TCrpm[i])), y = as.numeric(as.character(mydata$Pscav[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p= p%>% add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="Pscav Vs T/C Speed", titlefont = s, xaxis = list(title="Corrected T/C Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected Pscav (MPa.abs)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphPsTC1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$TCspeed
y= STdata$pscav
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pscav = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$TCrpm[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pscav[i] = mydata$Pscav[i]
deviation[i] = round(( pscav[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialPsTC = renderDataTable({
STdata = shoptrialdata()
x= STdata$TCspeed
y=STdata$pscav
table = data.frame(x,y)
names(table)<-c("T/C Speed","Pscav(MPa.abs)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicPsTC = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
    historicx = round(mydata$TCrpm,1)   # measured T/C speed, matching the "T/C Speed" column header
historicy = round(mydata$Pscav,2)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","T/C Speed","Pscav (MPa.abs)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#Press. drop at A/C VS Pscav
output$graphPdPs = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pressdrop
testx = seq(from=min(STdata$pscav,na.rm = T), to=max(STdata$pscav,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 : input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$Pscav[i])), y = as.numeric(as.character(mydata$PressdropAC[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p=p%>% add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="Press Drop at A/C Vs Pscav", titlefont = s, xaxis = list(title="Corrected Pscav (MPa.abs)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Press Drop at A/C (kPa)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphPdPs1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pressdrop
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pd = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$Pscav[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pd[i] = mydata$PressdropAC[i]
deviation[i] = round(( pd[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialPdPs = renderDataTable({
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pressdrop
table = data.frame(x,y)
names(table)<-c("Pscav(MPa.abs)","Pr. Drop at A/C")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicPdPs = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
historicx = mydata$Pscav
historicy = round(mydata$PressdropAC,1)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Pscav (MPa.abs)","Pr. Drop at A/C")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#Pcomp VS Pscav
output$graphPcPs = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pcomp
testx = seq(from=min(STdata$pscav,na.rm = T), to=max(STdata$pscav,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 : input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$Pscav[i])), y = as.numeric(as.character(mydata$PressCompAvg[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
    p= p%>% add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="Pcomp Vs Pscav", titlefont = s, xaxis = list(title="Corrected Pscav (MPa.abs)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected Pcomp (bar)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphPcPs1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pcomp
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pc = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$Pscav[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pc[i] = mydata$PressCompAvg[i]
deviation[i] = round(( pc[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialPcPs = renderDataTable({
STdata = shoptrialdata()
x= STdata$pscav
y=STdata$pcomp
table = data.frame(x,y)
names(table)<-c("Pscav(MPa.abs)","Pcomp (bar)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicPcPs = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
historicx = mydata$Pscav
historicy = round(mydata$PressCompAvg,0)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Pscav (MPa.abs)","Pcomp (bar)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#Texh VS Engine Load
output$graphETEL = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$Texh
testx = seq(from=min(STdata$Load,na.rm = T), to=max(STdata$Load,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 :input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$Engine.Load[i])), y = as.numeric(as.character(mydata$ExTempAvg[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p=p%>%add_trace(x=testx,y = testy,type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="Texh Vs Load", titlefont = s, xaxis = list(title="Engine Load(%)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected Texh Cyl. Out. (deg.C)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphETEL1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$Texh
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pc = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$Engine.Load[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pc[i] = mydata$ExTempAvg[i]
deviation[i] = round(( pc[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialETEL = renderDataTable({
STdata = shoptrialdata()
x= STdata$Load
y=STdata$Texh
table = data.frame(x,y)
names(table)<-c("Engine Load (%)","Texh (deg.C)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicETEL = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
historicx = mydata$Engine.Load
historicy = round(mydata$ExTempAvg,0)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Engine Load (%)","Texh (deg.C)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#PMAX-PCOMP VS PUMP MARK
output$graphPM = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$pumpmark
y=STdata$pmax.pcomp
testx = seq(from=min(STdata$pumpmark,na.rm = T), to=max(STdata$pumpmark,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 :input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$PumpMarkAvg[i])), y = as.numeric(as.character(mydata$MaxPressAvg[i]-mydata$PressCompAvg[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p = p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p=p%>%add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="(PMAX-PCOMP) Vs Pump Mark", titlefont = s, xaxis = list(title="Pump Mark(measured)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Pmax-Pcomp(bar)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),
showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphPM1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$pumpmark
y= STdata$pmax.pcomp
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pc = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$PumpMarkAvg[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pc[i] = mydata$MaxPressAvg[i]-mydata$PressCompAvg[i]
deviation[i] = round(( pc[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialPM = renderDataTable({
STdata = shoptrialdata()
x= STdata$pumpmark
y=STdata$pmax.pcomp
table = data.frame(x,y)
names(table)<-c("Pump Mark","Pmax - Pcomp")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicPM = renderDataTable({
mydata=enginedatatable()
dates = mydata$Date
historicx = round(mydata$PumpMarkAvg,1)
historicy = round(mydata$MaxPressAvg-mydata$PressCompAvg,2)
table = data.frame(dates,historicx,historicy)
names(table) = c("Date of Measurement","Pump Mark","Pmax - Pcomp")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#SFOC VS ENGINE LOAD
output$graphSFOC = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$SFOC
testx = seq(from=min(STdata$Load,na.rm = T), to=max(STdata$Load,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 :input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$Engine.Load[i])), y = as.numeric(as.character(mydata$SFOC[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p=p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p=p%>%add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="Fuel Oil Consumption Rate Vs Engine Load", titlefont = s,xaxis=list(title="Engine Load (%)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Fuel Oil Consumption(g/kWhr)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphSFOC1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$SFOC
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pc = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$Engine.Load[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pc[i] = mydata$SFOC[i]
deviation[i] = round(( pc[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialSFOC = renderDataTable({
STdata = shoptrialdata()
x= STdata$Load
y=STdata$SFOC
table = data.frame(x,y)
names(table)<-c("Engine Load","SFOC(g/kW-hr)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicSFOC = renderDataTable({
mydata=enginedatatable()
    dates = mydata$Date   # already parsed as Date in enginedatatable()
historicx = round(mydata$Engine.Load,2)
historicy = round(mydata$SFOC,0)
table = data.frame(dates,historicx,historicy)
names(table)<-c("Date of Measurement","Engine Load","SFOC(g/kW-hr)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
#T/C SPEED VS ENGINE LOAD---------------------------------------------------------------------------------------------------------
output$graphTCspeed = renderPlotly({
mydata=enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$TCspeed
testx = seq(from=min(STdata$Load,na.rm = T), to=max(STdata$Load,na.rm=T), length.out= 30)
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
testy = round(testx^2*n1+testx*n2+k,2)
    p <- plot_ly()
for(i in 1 : input$monthno){
p=p%>%add_trace(x = as.numeric(as.character(mydata$Engine.Load[i])), y = as.numeric(as.character(mydata$TCrpm[i])), mode = "markers",marker=list(size=12),
name = mydata$Date[i], showlegend = TRUE)
}
p=p%>%add_trace(x = x, y = y, mode = "markers",marker=list(size=8,color = "#640017"),
name = "Shoptrial data", showlegend = TRUE)
p=p%>%add_trace(x=testx,y = testy, type="scatter" ,mode = "lines+markers", line=list(shape="spline",color = "#640017"),marker=list(opacity=0), name = "reg",showlegend = F)
p <-p%>%layout(title="T/C Speed Vs Engine Load", titlefont = s,xaxis=list(title="Engine Load (%)", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="Corrected T/C Speed (rpm)", titlefont = s, tickfont = s,gridcolor = "#E5E7E9"),showlegend=T,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$graphTCspeed1 = renderPlotly({
mydata = enginedatatable()
STdata = shoptrialdata()
x= STdata$Load
y=STdata$TCspeed
reg = lm(y ~ poly(x, 2, raw=TRUE))
n1=as.numeric(reg$coefficients[3])
n2=as.numeric(reg$coefficients[2])
k=as.numeric(reg$coefficients[1])
c = input$monthno
tdate = rep(0,c)
testx = rep(0,c)
testy = rep(0,c)
pc = rep(0,c)
deviation = rep(0,c)
for(i in 1 : input$monthno){
tdate[i] = mydata$Date[i]
testx[i] = mydata$Engine.Load[i]
testy[i] = n1*testx[i]*testx[i] + n2*testx[i] + k
pc[i] = mydata$TCrpm[i]
deviation[i] = round(( pc[i] - testy[i])*100/ testy[i],2)
}
df = data.frame(tdate,deviation)
p <- plot_ly(df,x = as.Date(tdate) , y = deviation, type='scatter' ,
mode = "lines+markers",marker=list(size=12), showlegend = TRUE)
p <-p%>%layout(title="Deviation %", titlefont = s, xaxis = list(title="Month", titlefont = s, tickfont = s,gridcolor = "#FFFFFF"),yaxis=list(title="% deviation", titlefont = s,ticksuffix = "%", tickfont = s,gridcolor = "#E5E7E9"),
showlegend=F,plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF",legend=l)
p
})
output$shoptrialTCspeed = renderDataTable({
STdata = shoptrialdata()
x= STdata$Load
y=STdata$TCspeed
table = data.frame(x,y)
names(table)<-c("Engine Load","T/C Speed (rpm)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$historicTCspeed = renderDataTable({
validate(
need(try(enginedatatable()),"Please Wait or Select the vessel")
)
mydata=enginedatatable()
dates = mydata$Date
historicx = round(mydata$Engine.Load,2)
historicy = round(mydata$TCrpm,0)
table = data.frame(dates,historicx,historicy)
names(table)<-c("Date of Measurement","Engine Load","SFOC(g/kW-hr)")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
output$mainparti = renderDataTable({
validate(
need(try(enginedatatable()),"Please Wait or Select the vessel")
)
mydata=enginedatatable()
title = c("Ship Name","Main Engine Type")
parti = c(as.character(mydata$Ship.Name[1]),as.character(mydata$Main.Engine.Type[1]))
table = data.frame(title,parti)
names(table)<-c("Title","Particulars ")
datatable(table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(table),color="#000",backgroundColor = "white")
})
result_table=reactive({
    STD = shoptrialdata()   # shop-trial reference curves for the selected vessel
mydata=enginedatatable()
fit2aa <- lm(STD$pmax.pcomp ~ poly(STD$pumpmark, 2, raw=TRUE))
n1=as.numeric(fit2aa$coefficients[3])
n2=as.numeric(fit2aa$coefficients[2])
k=as.numeric(fit2aa$coefficients[1])
m = as.numeric(as.character(mydata$PumpMarkAvg[1]))
pmaxpm = round(m^2*n1+m*n2+k,2)
fit2a <- lm(STD$SFOC ~ poly(STD$Load, 2, raw=TRUE))
n1=as.numeric(fit2a$coefficients[3])
n2=as.numeric(fit2a$coefficients[2])
k=as.numeric(fit2a$coefficients[1])
m = as.numeric(as.character(mydata$Engine.Load[1]))
SFOC = round(m^2*n1+m*n2+k,2)
fit2b <- lm(STD$TCspeed ~ poly(STD$Load, 2, raw=TRUE))
n1=as.numeric(fit2b$coefficients[3])
n2=as.numeric(fit2b$coefficients[2])
k=as.numeric(fit2b$coefficients[1])
m = as.numeric(as.character(mydata$Engine.Load[1]))
TCspeed = round(m^2*n1+m*n2+k,2)
fit2c <- lm(STD$Texh ~ poly(STD$Load, 2, raw=TRUE))
n1=as.numeric(fit2c$coefficients[3])
n2=as.numeric(fit2c$coefficients[2])
k=as.numeric(fit2c$coefficients[1])
m = as.numeric(as.character(mydata$Engine.Load[1]))
Texh = round(m^2*n1+m*n2+k,2)
fit2d <- lm(STD$pcomp ~ poly(STD$pscav, 2, raw=TRUE))
n1=as.numeric(fit2d$coefficients[3])
n2=as.numeric(fit2d$coefficients[2])
k=as.numeric(fit2d$coefficients[1])
m = as.numeric(as.character(mydata$Pscav[1]))
pcs = round(m^2*n1+m*n2+k,2)
fit2e <- lm(STD$pressdrop ~ poly(STD$pscav, 2, raw=TRUE))
n1=as.numeric(fit2e$coefficients[3])
n2=as.numeric(fit2e$coefficients[2])
k=as.numeric(fit2e$coefficients[1])
m = as.numeric(as.character(mydata$Pscav[1]))
pdpscav = round(m^2*n1+m*n2+k,2)
fit2f <- lm(STD$pscav ~ poly(STD$TCspeed, 2, raw=TRUE))
n1=as.numeric(fit2f$coefficients[3])
n2=as.numeric(fit2f$coefficients[2])
k=as.numeric(fit2f$coefficients[1])
m = as.numeric(as.character(mydata$TCrpm[1]))
pscavspeed = round(m^2*n1+m*n2+k,2)
fit2g <- lm(STD$pumpmark ~ poly(STD$engine.speed, 2, raw=TRUE))
n1=as.numeric(fit2g$coefficients[3])
n2=as.numeric(fit2g$coefficients[2])
k=as.numeric(fit2g$coefficients[1])
m = as.numeric(as.character(mydata$RPM[1]))
pmenspeed = round(m^2*n1+m*n2+k,2)
fit2h <- lm(STD$TCspeed ~ poly(STD$engine.speed, 2, raw=TRUE))
n1=as.numeric(fit2h$coefficients[3])
n2=as.numeric(fit2h$coefficients[2])
k=as.numeric(fit2h$coefficients[1])
m = as.numeric(as.character(mydata$RPM[1]))
TCenspeed = round(m^2*n1+m*n2+k,2)
cc = c(TCenspeed,pmenspeed,pscavspeed,pdpscav,pcs,Texh,TCspeed,SFOC,pmaxpm,3,3,35,3)
cc
})
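  # result_table() evaluates each shop-trial quadratic at the latest measured
  # operating point to produce the "Standard Value" column of the summary table.
  # The trailing constants c(3, 3, 35, 3) appear to be fixed allowable deviations
  # for the Pmax / Pcomp / Texh / pump-mark cylinder-balance checks (our reading of
  # the code; they are not documented in the source).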
output$STD = renderDataTable({
mydata=enginedatatable()
m_pmax = pmax()
mean_pmax = round(mean(as.numeric(m_pmax$MaxPress ),na.rm = TRUE),digits = 1)
max_dev_pmax = round(max(abs(mean_pmax-m_pmax$MaxPress),na.rm = TRUE),digits = 1)
m_pcomp = pcomp()
mean_pcomp = round(mean(as.numeric(m_pcomp$PComp ),na.rm = TRUE),digits = 1)
max_dev_pcomp = round(max(abs(mean_pcomp-m_pcomp$PComp),na.rm = TRUE),digits = 1)
m_temp=exhaustdata()
mean_temp = round(mean(as.numeric(m_temp$Exhaust.Temp),na.rm = TRUE),digits = 1)
max_dev_temp = round(max(abs(mean_temp-m_temp$Exhaust.Temp),na.rm = TRUE),digits = 1)
m_pumpmark =pumpmark()
mean_pumpmark = round(mean(as.numeric(m_pumpmark$Pump.Mark),na.rm = TRUE),digits = 1)
    max_dev_pumpmark = round(max(abs(mean_pumpmark-m_pumpmark$Pump.Mark),na.rm = TRUE),digits = 1)
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
if(x$PM == "N"){
a = "Engine Performance"
b = "Comparison of Each Cylinder"
aa = c(a,a,a,a,a,a,a,b,b,b)
bb = c("T/C Speed Vs Engine Speed","Pscav Vs T/C Speed","Press. drop at A/C Vs Pscav","Pcomp vs Pscav","Texh Vs Engine Load","T/C Speed Vs Engine Load","SFOC Vs Engine Load","Pmax Deviation","Pcomp Deviation","Texh Deviation")
cc1 = result_table()
cc = c(cc1[1],cc1[3],cc1[4],cc1[5],cc1[6],cc1[7],cc1[8],cc1[10],cc1[11],cc1[12])
dd = c(as.numeric(mydata$TCrpm[1]),as.numeric(mydata$Pscav[1]),as.numeric(mydata$PressdropAC[1]),as.numeric(mydata$PressCompAvg[1]),as.numeric(mydata$ExTempAvg[1]),as.numeric(mydata$TCrpm[1]),as.numeric(mydata$SFOC[1]),max_dev_pmax,max_dev_pcomp,max_dev_temp)
dd = round(dd,2)
summary_table = data.frame(aa,bb,cc,dd)
names(summary_table)<-c("Title","Kind of Graph","Standard Value","Analysis Result")
summary_table$Status <- ifelse(abs(cc-dd)<0.25*cc, "Normal", ifelse(cc>dd,"Lowvalue","High Value"))
summary_table$Status[7] <- ifelse(dd[7]<cc[7], "Normal", "High Value")
summary_table$Status[8] <- ifelse(dd[8]<cc[8], "Normal", "High Value")
summary_table$Status[9] <- ifelse(dd[9]<cc[9], "Normal", "High Value")
summary_table$Status[10] <- ifelse(dd[10]<cc[10], "Normal", "High Value")
datatable(summary_table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(summary_table),color="#000",backgroundColor = "white")
}
else{
a = "Engine Performance"
b = "Comparison of Each Cylinder"
aa = c(a,a,a,a,a,a,a,a,a,b,b,b,b)
bb = c("T/C Speed Vs Engine Speed","Pump Mark Vs Engine Speed","Pscav Vs T/C Speed","Press. drop at A/C Vs Pscav","Pcomp vs Pscav","Texh Vs Engine Load","T/C Speed Vs Engine Load","SFOC Vs Engine Load","(Pmax-Pcomp) Vs Pump Mark","Pmax Deviation","Pcomp Deviation","Texh Deviation","Pump Mark Deviation")
cc = result_table()
dd = c(as.numeric(mydata$TCrpm[1]),as.numeric(mydata$PumpMarkAvg[1]),as.numeric(mydata$Pscav[1]),as.numeric(mydata$PressdropAC[1]),as.numeric(mydata$PressCompAvg[1]),as.numeric(mydata$ExTempAvg[1]),as.numeric(mydata$TCrpm[1]),as.numeric(mydata$SFOC[1]),as.numeric(mydata$MaxPressAvg[1])-as.numeric(mydata$PressCompAvg[1]),max_dev_pmax,max_dev_pcomp,max_dev_temp,max_dev_pumpmark)
dd = round(dd,2)
summary_table = data.frame(aa,bb,cc,dd)
names(summary_table)<-c("Title","Kind of Graph","Standard Value","Analysis Result")
summary_table$Status <- ifelse(abs(cc-dd)<0.25*cc, "Normal", ifelse(cc>dd,"Lowvalue","High Value"))
summary_table$Status[10] <- ifelse(dd[10]<cc[10], "Normal", "High Value")
summary_table$Status[11] <- ifelse(dd[11]<cc[11], "Normal", "High Value")
summary_table$Status[12] <- ifelse(dd[12]<cc[12], "Normal", "High Value")
summary_table$Status[13] <- ifelse(dd[13]<cc[13], "Normal", "High Value")
datatable(summary_table, options = list(searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(summary_table),color="#000",backgroundColor = "white")
}
})
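  # Status rule: a point is "Normal" when |standard - actual| is within 25% of the
  # standard value, otherwise flagged low or high; the deviation-limit rows (the
  # cylinder-balance checks, plus SFOC in the no-pump-mark branch) are instead
  # flagged High Value whenever the measured value reaches the limit.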
output$spider_chart = renderPlot({
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
table = result_table()
mydata=enginedatatable()
if(x$PM == "Y"){
dev_1 = (as.numeric(mydata$SFOC[1])-table[8])*100/table[8]
dev_2 = (as.numeric(mydata$PumpMarkAvg[1])-table[2])*100/table[2]
dev_3 = (as.numeric(mydata$Pscav[1])-table[3])*100/table[3]
dev_4 = (as.numeric(mydata$PressCompAvg[1])-table[5])*100/table[5]
dev_5 = (as.numeric(mydata$TCrpm[1])-table[1])*100/table[1]
dev_6 = (as.numeric(mydata$TCrpm[1])-table[7])*100/table[7]
dev_7 = (as.numeric(mydata$ExTempAvg[1])-table[6])*100/table[6]
m= matrix(NA,2,7 )
colnames(m)=c("SFOC Vs BHP","P.M' Vs EngSpeed","PScav' Vs T/CSpeed'","PComp' Vs PScav'","T/CSpeed' Vs EngSpeed ","T/CSpeed' Vs BHP","ExhTemp' Vs BHP")
rownames(m)=c("std","actual")
m[1,]=c(rep(0,7))
m[2,]=c(dev_1,dev_2,dev_3,dev_4,dev_5,dev_6,dev_7)
m = as.data.frame(m)
m=rbind(rep(20,7) , rep(-20,7),m)
par(bg='grey90')
colors_border=c( rgb(0.2,0.5,0.5,0.9), rgb(0.8,0.2,0.5,0.9) )
colors_in=c( rgb(0.2,0.5,0.5,0.4), rgb(0.8,0.2,0.5,0.4) )
radarchart( m , axistype=1 ,
#custom polygon
pcol=colors_border , pfcol=colors_in , plwd=4 , plty=1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(-20,20,10), cglwd=0.8,
#custom labels
vlcex=0.8
)
}
else{
dev_1 = (as.numeric(mydata$SFOC[1])-table[8])*100/table[8]
dev_3 = (as.numeric(mydata$Pscav[1])-table[3])*100/table[3]
dev_4 = (as.numeric(mydata$PressCompAvg[1])-table[5])*100/table[5]
dev_5 = (as.numeric(mydata$TCrpm[1])-table[1])*100/table[1]
dev_6 = (as.numeric(mydata$TCrpm[1])-table[7])*100/table[7]
dev_7 = (as.numeric(mydata$ExTempAvg[1])-table[6])*100/table[6]
m= matrix(NA,2,6 )
colnames(m)=c("SFOC Vs BHP","PScav' Vs T/CSpeed'","PComp' Vs PScav'","T/CSpeed' Vs EngSpeed ","T/CSpeed' Vs BHP","ExhTemp' Vs BHP")
rownames(m)=c("std","actual")
m[1,]=c(rep(0,6))
m[2,]=c(dev_1,dev_3,dev_4,dev_5,dev_6,dev_7)
m = as.data.frame(m)
m=rbind(rep(20,6) , rep(-20,6),m)
par(bg='grey90')
colors_border=c( rgb(0.2,0.5,0.5,0.9), rgb(0.8,0.2,0.5,0.9) )
colors_in=c( rgb(0.2,0.5,0.5,0.4), rgb(0.8,0.2,0.5,0.4) )
radarchart( m , axistype=1 ,
#custom polygon
pcol=colors_border , pfcol=colors_in , plwd=4 , plty=1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(-20,20,10), cglwd=0.8,
#custom labels
vlcex=0.8
)
}
})
output$enginechart <- renderUI({
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
if(x$PM =="Y"){
selectInput("FLEET1","Select the Graph",choices = c("Load Diagram & Torque Rich "=1,"T/C Speed Vs Engine Speed"=2,
"Pscav Vs T/C Speed"=4,"Press. Drop at A/C Vs Pscav"=5,
"Pcomp Vs Pscav"=6,"Texh Vs Engine Load"=7,"T/C Speed Vs Engine Load"=8,"SFOC Vs Engine Load"=9,
"Pump Mark Vs Engine Speed"=3,"Pmax-Pcomp Vs Pump mark"=10),selected = 1,width="25%")
}
else{
selectInput("FLEET1","Select the Graph",choices = c("Load Diagram & Torque Rich "=1,"T/C Speed Vs Engine Speed"=2,
"Pscav Vs T/C Speed"=3,"Press. Drop at A/C Vs Pscav"=4,
"Pcomp Vs Pscav"=5,"Texh Vs Engine Load"=6,"T/C Speed Vs Engine Load"=7,"SFOC Vs Engine Load"=8),
selected = 1,width="25%")
}
})
output$Chart = renderUI({
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
if(x$PM =="Y"){
i=input$FLEET1
if(i==1){
tagList(column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graph_Load_diagram"))),
column(width=6,br(),box(width=NULL,solidHeader = T,title = "Test Data",status = "info",dataTableOutput("historical_Daigram")))
) }
else if(i==2){tagList(
column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCEn")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicTCEn"))),
column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCEn1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialTCEn")))
)}
else if(i==3){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPMEn")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPMEn"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPMEn1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPMEn")))
)}
else if(i==4){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPsTC")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPsTC"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPsTC1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPsTC")))
)}
else if(i==5){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPdPs")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPdPs"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPdPs1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPdPs")))
)}
else if(i==6){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPcPs")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPcPs"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPcPs1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPcPs")))
)}
else if(i==7){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphETEL")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicETEL"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphETEL1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialETEL")))
)}
else if(i==8){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCspeed")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicTCspeed"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCspeed1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialTCspeed")))
)}
else if(i==9){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphSFOC")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicSFOC"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphSFOC1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialSFOC")))
)}
else if(i==10){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPM")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPM"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPM1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPM")))
)}
else{return()}
}
else {
i=input$FLEET1
if(i==1){
tagList(column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graph_Load_diagram"))),
column(width=6,br(),box(width=NULL,solidHeader = T,title = "Test Data",status = "info",dataTableOutput("historical_Daigram")))
) }
else if(i==2){tagList(
column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCEn")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicTCEn"))),
column(width=6,br(),box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCEn1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialTCEn")))
)}
else if(i==3){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPsTC")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPsTC"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPsTC1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPsTC")))
)}
else if(i==4){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPdPs")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPdPs"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPdPs1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPdPs")))
)}
else if(i==5){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPcPs")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicPcPs"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphPcPs1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialPcPs")))
)}
else if(i==6){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphETEL")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicETEL"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphETEL1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialETEL")))
)}
else if(i==7){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCspeed")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicTCspeed"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphTCspeed1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialTCspeed")))
)}
else if(i==8){tagList(
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphSFOC")),box(width = NULL,solidHeader = TRUE,title = "Test Data",status = "info",dataTableOutput("historicSFOC"))),
column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("graphSFOC1")),box(width=NULL,solidHeader = TRUE,title = "Shop Trial Data",status = "info",dataTableOutput("shoptrialSFOC")))
)}
else{return()}
}
})
#.................................For power curve.........................................................................................
powerplotdata <- reactive({
r = shoptrialdata()
Engine.Load = r$Load
SFOC = r$SFOC
PSCAV = r$pscav
PMAX = r$pmax
PCOMP = r$pcomp
Engine.RPM = r$engine.speed
TC.RPM = r$TCspeed
Date1 = r$Date
m = data.frame(Engine.Load,PSCAV,SFOC,PMAX,TC.RPM,PCOMP,Engine.RPM)
m
})
powerplotdata1 <- reactive({
r = enginedatatable()
Engine.Load = r$Engine.Load
SFOC = r$SFOC
PSCAV = r$Pscav
PMAX = r$MaxPressAvg
PCOMP = r$PressCompAvg
Engine.RPM = r$RPM
TC.RPM = r$TCrpm
Date1 = r$Date
m = data.frame(Engine.Load,PSCAV,SFOC,PMAX,TC.RPM,PCOMP,Engine.RPM)
m
})
powerplotdata11 <- reactive({
r = enginedatatable()
Date = r$Date
m = data.frame(Date)
m
})
output$ooopowerplot <- renderPlotly({
pp = data.frame(powerplotdata())
p <- plot_ly(pp, x = ~Engine.Load, y = ~PSCAV,type ="scatter", mode= "lines+markers",height = 200)
q <- plot_ly(pp, x = ~Engine.Load, y = ~SFOC,type ="scatter", mode= "lines+markers",height = 200)
r <- plot_ly(pp, x = ~Engine.Load, y = ~PMAX,type ="scatter", mode= "lines+markers",height = 200)
s <- plot_ly(pp, x = ~Engine.Load, y = ~TC.RPM,type ="scatter", mode= "lines+markers",height = 200)
t <- plot_ly(pp, x = ~Engine.Load, y = ~PCOMP,type ="scatter", mode= "lines+markers",height = 200)
u <- plot_ly(pp, x = ~Engine.Load, y = ~Engine.RPM,type ="scatter", mode= "lines+markers",height = 200)
subplot(p,q,r,s,t,u, nrows = 6, shareX = TRUE,titleY = TRUE) %>% layout(yaxis = list(domain = c(0, 0.16)),
yaxis2 = list(domain = c(0.16, 0.32)))
})
output$powerplot <- renderPlotly({
r = powerplotdata()
b = powerplotdata1()
c = powerplotdata11()
mydata = enginedatatable()
pt <- r%>%
tidyr::gather(variable,value, -Engine.Load) %>%
transform(id = as.integer(factor(variable)))
p=plot_ly(pt,x = ~Engine.Load, y = ~value , color = ~variable, colors = "Dark2", type ="scatter", mode= "lines+markers",line=list(shape="spline"), width = 800, height = 800,
yaxis = ~paste0("y", id), legendgroup = ~ variable ,showlegend = FALSE)
clr = c("blue","red","#884EA0","green","#ccb400","#800000","#CD5C5C","#FFA07A","teal","#E6B0AA","#082336","#F4D03F")
for(i in 1:input$monthno){
bt1 <- b[i,]%>%
tidyr::gather(variable, value, -Engine.Load) %>%
transform(id = as.integer(factor(variable)))
p = p%>%add_markers(x = bt1$Engine.Load, y = bt1$value, color = bt1$variable,name = c$Date[i],text=c$Date[i], type ="scatter",mode = "markers",marker=list(size=8,color=clr[i]),
yaxis = paste0("y", bt1$id),showlegend = F)
}
p = p%>%subplot(nrows = 6, shareX = TRUE,titleY = TRUE ) %>%
layout(autosize = F,yaxis =list(title = "TC RPM"),yaxis6 =list(title = "SFOC"),yaxis2 =list(title = "Engine RPM"),yaxis3 =list(title = "PCOMP"),yaxis4 =list(title = "PMAX"),yaxis5 =list(title = "PSCAV"))
p
})
output$datetext <- renderText({
mydata = enginedatatable()
Date = mydata$Date
Date1 = as.data.frame(Date)
clr = c("blue","red","#884EA0","green","#ccb400","#800000","#CD5C5C","#FFA07A","teal","#E6B0AA","#082336","#F4D03F")
HTML(sprintf("<li style='color:%s;font-size:20px'> <text style='color:#000000;font-size:12px'> %s <br/> </text></li>",clr[1:input$monthno],t(Date1[1:input$monthno,1])))
})
#..............................................cylinder comparison.....................................................................#
output$cylpara <- renderUI({
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
if(x$PM == "Y"){
selectInput("cylcomp","Select the Parameter",
choices = c("Maximum Pressure"=1,"Compressor Pressure"=2,"M/E Cyl. Exhaust Temperature"=3,"Pump Mark"=4),
selected = 1,width ="25%")
}
else{
selectInput("cylcomp","Select the Parameter",
choices = c("Maximum Pressure"=1,"Compressor Pressure"=2,"M/E Cyl. Exhaust Temperature"=3),
selected = 1,width ="25%")
}
})
output$cylchart <- renderUI({
x = VESSELDETAILS
x = subset(x,Vessel == input$engineVessel)
if(x$PM == "Y"){
i = input$cylcomp
if(i == 1){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("Pmax"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("MPtable1"))))
}
else if(i == 2){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("PComp"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("PCtable1"))))
}
else if(i == 3){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("exhausttemp"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("ETtable1"))))
}
else if(i == 4){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("pumpmark"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("PMtable1"))))
}
else {return(NULL)}
}
else if(x$PM == "N"){
i = input$cylcomp
if(i == 1){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("Pmax"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("MPtable1"))))
}
else if(i == 2){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("PComp"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("PCtable1"))))
}
else if(i == 3){
box(width=NULL,solidHeader = TRUE,status = "info",column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",plotlyOutput("exhausttemp"))),column(width=6,box(width=NULL,solidHeader = TRUE,status = "info",dataTableOutput("ETtable1"))))
}
else {return(NULL)}
}
}
)
|
a36f7547b572365f8332bd94a07e4a4689f1eb51
|
8100a4e0a8a8bf539b87a28bc3c978cfc9fa0016
|
/man/send_push_notification.Rd
|
100d65d6290a6d0cee81264d2ce2cf7dd62cf863
|
[] |
no_license
|
epijim/notifyme
|
d7ce38f7a21bf0ecf165b1720b97d6434d70db9f
|
5247f87242a1beda9fbdab5d457aa6f70459b2d7
|
refs/heads/master
| 2020-08-03T09:21:54.876117
| 2016-11-26T17:00:14
| 2016-11-26T17:00:14
| 73,549,456
| 1
| 0
| null | 2016-11-12T11:54:12
| 2016-11-12T11:44:42
|
R
|
UTF-8
|
R
| false
| true
| 1,330
|
rd
|
send_push_notification.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/send_push_notification.R
\name{send_push_notification}
\alias{send_push_notification}
\title{Send push notification to devices}
\usage{
send_push_notification(title = "Your R session says:",
message = paste0("Message sent: ", Sys.time()), api_token = NULL,
user_key = NULL, priority = "medium", file = "~/r_keychain.rds")
}
\arguments{
\item{title}{Title of the push notification. Defaults to a message from R.}
\item{message}{Message body. The default just reports the time the message was sent.}
\item{api_token}{API token - create your own in a few minutes from pushover.net dashboard.}
\item{user_key}{This is the key that identifies you. It's on the pushover.net dashboard.}
\item{priority}{'low' means no beep/vibrate, 'medium' means beep/vibrate, 'high' means require response on device.}
\item{file}{Optional - location of keychain if using.}
}
\description{
This function will send a push notification to your device via the
Pushover API. You must make an account with that service (pushover.net)
and get an API token and user key.
}
\section{Bugs}{
Code repo: \url{https://github.com/epijim/notifyme}
}
\examples{
\dontrun{send_push_notification(user_key = "xxxxxx", api_token = "xxxxx")}
}
\keyword{Hue}
\keyword{R}
\keyword{notify}
\keyword{pushover}
|
fc4842e2aefdef6a6ed4eb8968d830a8c3725da3
|
a1c90df487eecd4ce56fdd2bc092e3a23f260b97
|
/plots.R
|
7688fe8fde9072aec06717d1bab52d3a69b30c26
|
[] |
no_license
|
mawassw/selection_analysis
|
89b76886ba5b6d3fb2a5e406c74293e4ac449e3e
|
530f91be77eea621aab6c51585d73e3e7a68110e
|
refs/heads/main
| 2023-04-28T17:38:50.104881
| 2021-05-05T19:44:05
| 2021-05-05T19:44:05
| 364,682,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,257
|
r
|
plots.R
|
#plot n of individuals per year
library(ggplot2)
#charlevoix
nind_char <- data.frame(table(ped_charlevoix$yob,useNA = "no"))
colnames(nind_char) <- c("yob", "N")
nind_char$yob <-as.numeric(as.character(nind_char$yob))
tiff("nind_char.tiff", units="in", width = 5, height = 5, res=300)
ggplot(nind_char, aes(x=yob, y=N))+
geom_line()+xlab("Year of birth")+ylab("Number of individuals")+
annotate("pointrange", x=1841, xmin=1837, xmax=1871, y=0, color = "cyan3", size =1, alpha=0.5)+
annotate("text", x=1841, y=10, label = "Immigration event", size = 2)+
scale_y_continuous(breaks = c(seq(0,600,by=100)))+
theme_classic()
dev.off()
#saguenay
nind_sag <- data.frame(table(ped_saguenay$yob,useNA = "no"))
colnames(nind_sag) <- c("yob", "N")
nind_sag$yob <-as.numeric(as.character(nind_sag$yob))
tiff("nind_sag.tiff", units="in", width = 5, height = 5, res=300)
ggplot(nind_sag, aes(x=yob, y=N))+
geom_line()+xlab("Year of birth")+ylab("Number of individuals")+
annotate("pointrange", x=1841, xmin=1837, xmax=1871, y=0, color = "cyan3", size =1, alpha=0.5)+
annotate("text", x=1841, y=10, label = "Immigration event", size = 2)+
scale_y_continuous(breaks = c(seq(0,9000,by=1000)))+
theme_classic()
dev.off()
|
f0e0c6d948e0477b42f69808e9e17bbc86adeab3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/etm/examples/etmprep.Rd.R
|
b9a336239cb5e1834b56f7979eb531c1ece78fe4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
r
|
etmprep.Rd.R
|
library(etm)
### Name: etmprep
### Title: Data transformation function for using etm
### Aliases: etmprep
### Keywords: datagen manip
### ** Examples
### creation of fake data in the wild format, following an illness-death model
## transition times
tdisease <- c(3, 4, 3, 6, 8, 9)
tdeath <- c(6, 9, 8, 6, 8, 9)
## transition status
stat.disease <- c(1, 1, 1, 0, 0, 0)
stat.death <- c(1, 1, 1, 1, 1, 0)
## a covariate that we want to keep in the new data
cova <- rbinom(6, 1, 0.5)
dat <- data.frame(tdisease, tdeath,
stat.disease, stat.death,
cova)
## Possible transitions
tra <- matrix(FALSE, 3, 3)
tra[1, 2:3] <- TRUE
tra[2, 3] <- TRUE
## data preparation
newdat <- etmprep(c(NA, "tdisease", "tdeath"),
c(NA, "stat.disease", "stat.death"),
data = dat, tra = tra, cens.name = "cens")
|
aee31ba3c8925c5b32820c4a92141aaacfeefa92
|
d24d004fb078dabde79de5277c7e34a70841765a
|
/man/computeLandmarks.Rd
|
d04cdaadfd2c3f6834d09d7862c39d7b6b972f16
|
[] |
no_license
|
dbrg77/scABC
|
0269a75d288032d1472db4c0f2ea94487c527949
|
d419e17293b0aa7974e17503d7eb1354127bf18a
|
refs/heads/master
| 2020-03-21T04:56:20.418533
| 2018-04-17T21:51:42
| 2018-04-17T21:51:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
computeLandmarks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster.R
\name{computeLandmarks}
\alias{computeLandmarks}
\title{compute landmarks}
\usage{
computeLandmarks(ForeGround, BackGround, nCluster = 2, lambda = 0.1,
nTop = 2000)
}
\arguments{
\item{ForeGround}{matrix or data frame of Foreground values}
\item{BackGround}{matrix or data frame of BackGround values}
\item{nCluster}{number of clusters (default = 2)}
\item{lambda}{weighting parameter (default = 0.1)}
\item{nTop}{number of top clusters}
}
\description{
compute landmarks
}
|
b6999260615cbdcc2501c1989251c5df1a6b71fd
|
069e8335129d029970406740b440c70cc355f619
|
/Case_Study_12/Case_Study/combined-data.R
|
58e5604c5a2018ac0b2440e5e3cfdf4eb5c1efc7
|
[] |
no_license
|
jborjon/M335_Borjon_Joseph
|
fc692cebd4cbe583b1286427e7ae273c5f473018
|
f414cc72ecfb600a78917f00cbede297f19ce424
|
refs/heads/master
| 2023-01-19T11:15:59.694455
| 2023-01-04T20:23:29
| 2023-01-04T20:23:29
| 196,254,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
combined-data.R
|
## @knitr combine_data
library(tidyverse)
# Get the 2-digit state FIPS code as text
get_state_fips <- function(state_fips) {
  if (state_fips < 10) {
    paste0("0", state_fips)
  } else {
    as.character(state_fips)
  }
}
# Get the 3-digit county FIPS code as text
get_county_fips <- function(county_fips) {
  if (county_fips < 10) {
    paste0("00", county_fips)
  } else if (county_fips < 100) {
    paste0("0", county_fips)
  } else {
    as.character(county_fips)
  }
}
# Vectorize the functions above
get_state_fips <- Vectorize(get_state_fips)
get_county_fips <- Vectorize(get_county_fips)
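# e.g. get_state_fips(4) returns "04"; get_county_fips(7) returns "007"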
# Read the RDS suicide file
suicide_data_file <- file.path(
"C:", "Users", "joebo", "Documents", "Math335", "M335_Borjon_Joseph",
"data", "Semester_Project", "us-suicide-data.Rds"
)
suicide_data <- read_rds(suicide_data_file)
# Read the subset file of suicides between 10 and 14 years old
suicide_data_file <- file.path(
"C:", "Users", "joebo", "Documents", "Math335", "M335_Borjon_Joseph",
"data", "Semester_Project", "all-suicides-10-14.Rds"
)
all_suicides_10_14 <- read_rds(suicide_data_file)
# County data
us_counties <- all_suicides_10_14 %>%
select(1:4) %>%
mutate(
statefipschar = get_state_fips(StateFIPS),
countyfipschar = get_county_fips(CountyFIPS),
county_5_digit_fips = paste(statefipschar, countyfipschar, sep = "")
)
# Socioeconomic data
counties_socioeco <- us_counties %>%
left_join(education_covariate, by = c("StateFIPS" = "fips", "CountyFIPS" = "countyid"))
# Suicide data for all races between ages 10 and 19
counties_suicide_all <- suicide_data %>%
filter(
AgeStart >= 10,
AgeEnd <= 19,
Race == "All",
Sex == "Both",
is.na(HispanicOrigin),
!is.na(U_A_Rate),
!is.na(U_C_Rate)
) %>%
group_by(StateFIPS, CountyFIPS) %>%
summarise(
TotalDeaths = sum(Deaths),
AvgCrudeRate = mean(U_C_Rate),
AvgAdjustedRate = mean(U_A_Rate)
) %>%
ungroup() %>%
left_join(us_counties, by = c("StateFIPS", "CountyFIPS"))
# Suicide data for everyone 20 and up
counties_suicide_20_up <- suicide_data %>%
filter(
AgeStart >= 20,
Race == "All",
Sex == "Both",
is.na(HispanicOrigin),
!is.na(U_A_Rate),
!is.na(U_C_Rate)
) %>%
group_by(StateFIPS, CountyFIPS) %>%
summarise(
TotalDeaths = sum(Deaths),
AvgCrudeRate = mean(U_C_Rate),
AvgAdjustedRate = mean(U_A_Rate)
) %>%
ungroup() %>%
left_join(us_counties, by = c("StateFIPS", "CountyFIPS"))
# Education test results per county
counties_ed_results <- education_results %>%
left_join(us_counties, by = c("fips" = "StateFIPS", "countyid" = "CountyFIPS"))
|
892667c4dd289934deea26f93187c9aca11b7584
|
29ea4d870adef9fbae1bc542d3e23d51165241aa
|
/plot4.R
|
e694482adbd5868f90d12d8b3ecc70db5dcd4c6d
|
[] |
no_license
|
PeterVarga73/ExData_Plotting1
|
84e95059ed342ed49aa96c019e41aea275542978
|
183c0052c5f8d798a04c910434bea78a2ec180d6
|
refs/heads/master
| 2020-04-07T12:49:40.173933
| 2018-11-20T16:52:49
| 2018-11-20T16:52:49
| 158,382,747
| 0
| 0
| null | 2018-11-20T11:59:55
| 2018-11-20T11:59:54
| null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
plot4.R
|
library(readr)
library(lubridate)
library(dplyr)
file <- "household_power_consumption.txt"
power <- read_csv2( file = file )
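# Note: read_csv2() uses ';' as the field separator and ',' as the decimal mark, so '.' acts as a
# grouping mark here; a value such as "234.840" parses as 234840, which is presumably why Voltage
# is rescaled by 1000 in the plot below.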
power$Date <- dmy(power$Date)
power <- power %>% filter( Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
Sys.setlocale("LC_TIME", "English")
datetime<-strptime(paste(power$Date,power$Time),format = "%Y-%m-%d %H:%M:%S")
par(mfrow=c(2,2))
plot(datetime,power$Global_active_power,type = "l",xlab = "", ylab ="Global Active Power (kilowatts)" )
plot(datetime,power$Voltage/1000,type = "l", ylab ="Voltage")
plot(datetime,power$Sub_metering_1,type="n",ylab = "Energy sub metering",xlab="")
lines(datetime,power$Sub_metering_1)
lines(datetime,power$Sub_metering_2,col="red")
lines(datetime,power$Sub_metering_3,col="blue")
legend("topright",cex=0.5,pt.cex = 1.5,box.lty=0,bg="transparent",lty=1,lwd=4,col=c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(datetime,power$Global_reactive_power,type="l",ylab = "Global Reactive Power")
dev.copy(png,"plot4.png")
dev.off()
|
a321284ffff188c8319bca8dee5f520ae7a8e8d4
|
6eb6be10dfb00975aa041b19b47ef2511808096d
|
/Practical Machine Learning Course Project_Activities of Fitbits/Practical Machine Learning Course Project_Activities of Fitbits.R
|
45200e06e65b9138811b6b1c5ff54745668aec61
|
[] |
no_license
|
yashika-sindhu/datasciencecoursera
|
5e72af030f83d7ba90433da32af1bc2940b50b54
|
971ceb4526935374250fa646d8722b7e94bb0ed6
|
refs/heads/master
| 2022-01-22T04:59:26.645366
| 2019-07-22T09:35:49
| 2019-07-22T09:35:49
| 116,703,715
| 0
| 1
| null | 2018-01-09T05:04:23
| 2018-01-08T16:57:01
| null |
UTF-8
|
R
| false
| false
| 2,320
|
r
|
Practical Machine Learning Course Project_Activities of Fitbits.R
|
download.file(url="https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv",destfile = "Desktop/pml-training.csv")
download.file(url="https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv",destfile = "Desktop/pml-testing.csv")
training<-read.csv("Desktop/pml-training.csv")
testing<-read.csv("Desktop/pml-testing.csv")
library(caret)
set.seed(1000)
intrain<-createDataPartition(y=training$classe,p=0.7,list=FALSE)
validation<-training[-intrain,]
training<-training[intrain,]
#Identify and remove factor & numeric variables with >95% of values NA
k<-sapply(training,is.factor)
training_factors<-training[,k]
#37 factors in total; the genuine factors are c("user_name","cvtd_timestamp","new_window","classe")
#But the first 3 of these will not affect the classe variable, so they are removed from the data
#The remaining 33 have a very high number of NA values and are not useful, so they are removed too
#Also, the first 7 variables are details about the participants and timestamps, so they are removed as well
p_na<-names(training)[1:7]
p_na<-(append(p_na,names(training_factors)[1:36]))
l<-array()
for(i in 1:dim(training)[2]){l[i]<-sum(is.na(training[,i]))}
p_na<-append(p_na,names(training)[l>0.95*dim(training)[1]])
training_prep<-training[,!(names(training) %in% p_na)]
validation_prep<-validation[,!(names(validation) %in% p_na)]
testing_prep<-testing[,!(names(testing) %in% p_na)]
#fitting multiple models on training_prep and checking their accuracy on validation_prep
#using k-fold cross validation method
set.seed(1000)
fit1<-train(classe~.,method="treebag",data=training_prep,trControl=trainControl(method="cv"))
confusionMatrix(predict(fit1,validation_prep),validation_prep$classe)
# Accuracy is 0.9856
set.seed(1000)
fit2<-train(classe~.,method="gbm",data=training_prep,trControl=trainControl(method="cv"),verbose=FALSE)
confusionMatrix(predict(fit2,validation_prep),validation_prep$classe)
# Accuracy is 0.9611
set.seed(1000)
fit3<-train(classe~.,method="rf",data=training_prep,trControl=trainControl(method="cv"))
confusionMatrix(predict(fit3,validation_prep),validation_prep$classe)
# Accuracy is 0.9925
#Since accuracy of fit3 is the best, we will take fit3 as the final model
final_testing_results<-predict(fit3,testing_prep)
final_testing_results
#[1] B A B A A E D B A A B C B A E E A B B B
#Levels: A B C D E
|
1151a3761a55e2b9713886f6ba51ac27d88fbcf8
|
a37a3e54565806ee4c9d1f925ce87cd06de5f254
|
/analysis/descriptive/diagnoses.R
|
098881e8a92b4ba43c6aaea4e959029ce2ca2332
|
[
"MIT"
] |
permissive
|
opensafely/comparative-ve-research
|
cdec66752a31294016a9d9b857bea359fbc8abfd
|
cf3118545d18a5777dc43d153a6261bd68de56bd
|
refs/heads/main
| 2023-08-23T10:47:30.506829
| 2022-05-06T11:36:00
| 2022-05-06T11:36:00
| 367,404,609
| 1
| 1
|
MIT
| 2022-05-06T11:36:01
| 2021-05-14T15:27:50
|
HTML
|
UTF-8
|
R
| false
| false
| 11,698
|
r
|
diagnoses.R
|
# # # # # # # # # # # # # # # # # # # # #
# This script looks at emergency attendance diagnoses
# An ad-hoc script which is NOT part of the main analysis
# # # # # # # # # # # # # # # # # # # # #
# Preliminaries ----
## Import libraries ----
library('tidyverse')
library('here')
library('glue')
library('survival')
## Import custom user functions from lib
source(here::here("analysis", "lib", "utility_functions.R"))
source(here::here("analysis", "lib", "redaction_functions.R"))
source(here::here("analysis", "lib", "survival_functions.R"))
args <- commandArgs(trailingOnly=TRUE)
if(length(args)==0){
# use for interactive testing
removeobs <- FALSE
} else {
removeobs <- TRUE
}
## import global vars ----
gbl_vars <- jsonlite::fromJSON(
txt="./analysis/global-variables.json"
)
#list2env(gbl_vars, globalenv())
## create output directory ----
fs::dir_create(here("output", "descriptive", "diagnoses"))
## load A&E diagnosis column names
lookup <- read_rds(here("analysis", "lib", "diagnosis_groups_lookup.rds")) %>%
mutate(
diagnosis_col_names = paste0("emergency_", group, "_date"),
diagnosis_short = group,
diagnosis_long = ECDS_GroupCustom,
) %>%
add_row(
diagnosis_short="unknown",
diagnosis_long="(Unknown)"
)
diagnoses <- set_names(lookup$diagnosis_short, lookup$diagnosis_long)
## Import processed data ----
data_cohort <- read_rds(here("output", "data", "data_cohort.rds"))
data_diagnoses <- read_rds(here("output", "data", "data_diagnoses.rds")) %>%
filter(patient_id %in% data_cohort$patient_id)
rm(data_cohort)
data_diagnoses <- data_diagnoses %>%
mutate(
censor_date = pmin(vax1_date - 1 + (7*14), end_date, dereg_date, death_date, covid_vax_any_2_date, na.rm=TRUE),
tte_emergency = tte(vax1_date-1, emergency_date, censor_date, na.censor=FALSE),
ind_emergency = censor_indicator(emergency_date, censor_date),
vax1_week = lubridate::floor_date(vax1_date, unit="week", week_start=1),
vax1_month = format(vax1_date, "%b-%y"),
all=""
)
diag_freq <-
data_diagnoses %>%
group_by(vax1_type_descr) %>%
summarise(
across(
matches("emergency_(.)+_date$"),
list(
day1 = ~sum(!is.na(.x) & tte_emergency <=1),
day2 = ~sum(!is.na(.x) & tte_emergency <=2),
day3 = ~sum(!is.na(.x) & tte_emergency <=3),
day4 = ~sum(!is.na(.x) & tte_emergency <=4),
day5 = ~sum(!is.na(.x) & tte_emergency <=5),
day6 = ~sum(!is.na(.x) & tte_emergency <=6),
day7 = ~sum(!is.na(.x) & tte_emergency <=7),
day8 = ~sum(!is.na(.x) & tte_emergency <=8),
day14 = ~sum(!is.na(.x) & tte_emergency <=14)
)
)
) %>%
pivot_longer(
cols=-vax1_type_descr,
names_pattern="emergency_(.+)_date_day(\\d+)",
names_to= c("diagnosis", "day"),
values_to="n"
) %>% mutate(
diagnosis_short = factor(diagnosis, levels=diagnoses),
diagnosis_long = fct_recode(diagnosis_short, !!!diagnoses)
)
vax_freq <-
data_diagnoses %>%
count(vax1_type_descr, name="n_vax") %>%
add_count(wt=n_vax, name="n_total") %>%
mutate(
pct_vax=n_vax/n_total
)
freqs <- diag_freq %>%
left_join(vax_freq, by="vax1_type_descr") %>%
mutate(
n=if_else(between(n,1,5), 3L, n), # disclosure control: small counts 1-5 are rounded to 3
pct=n/n_vax,
day=as.numeric(day)
)
## plot diagnosis frequencies ----
#
# get_freqs <- function(day){
#
# data_wide <- data_diagnoses %>%
# filter(tte_emergency<=day & ind_emergency==1) %>%
# mutate(
# emergency_diagnosis_list=str_split(emergency_diagnosis, "; "),
# dummy_val=1L
# ) %>%
# unnest_longer(col="emergency_diagnosis_list") %>%
# pivot_wider(
# id_cols=-emergency_diagnosis_list,
# names_from=emergency_diagnosis_list,
# names_prefix = "diag_",
# values_from=dummy_val,
# values_fill=0L
# )
#
#
# diag_freq <-
# data_wide %>%
# group_by(vax1_type_descr) %>%
# select(starts_with("diag_")) %>%
# summarise(
# across(
# starts_with("diag_"),
# .fns=list(n=sum, pct=mean),
# .names="{.col}.{.fn}",
# na.rm=TRUE
# )
# ) %>%
# pivot_longer(
# cols=starts_with("diag_"),
# names_prefix="diag_",
# names_to=c("diagnosis", ".value"),
# names_sep="\\."
# ) %>%
# mutate(
# diagnosis_short = factor(diagnosis, levels=diagnoses),
# diagnosis_long = fct_recode(diagnosis_short, !!!diagnoses)
# ) %>%
# arrange(vax1_type_descr, diagnosis_long)
#
# vax_freq <-
# data_wide %>%
# count(vax1_type_descr, name="n_vax") %>%
# add_count(wt=n_vax, name="n_total") %>%
# mutate(
# pct_vax=n_vax/n_total
# )
#
#
# freq <- diag_freq %>%
# left_join(vax_freq, by="vax1_type_descr") %>%
# mutate(day = day)
#
# freq
# }
#
#
# freqs <- bind_rows(
# get_freqs(1),
# get_freqs(2),
# get_freqs(3),
# get_freqs(4),
# get_freqs(5),
# get_freqs(6),
# get_freqs(7),
# get_freqs(8),
# get_freqs(14)
# )
plot_freq <- function(day){
dayy <- day
freqs_day <- freqs %>%
filter(day==dayy)
plot_freqs <-
freqs_day %>%
mutate(
day_name = glue("Proportion of attendance diagnoses\nafter {dayy} days"),
n=if_else(vax1_type_descr==first(vax1_type_descr), n, -n),
pct=if_else(vax1_type_descr==first(vax1_type_descr), pct, -pct),
vax1_type_descr = paste0(vax1_type_descr, " (N = ", n_vax, ")")
) %>%
ggplot()+
geom_bar(aes(x=pct, y=fct_rev(diagnosis_long), fill=vax1_type_descr), width=freqs_day$pct_vax, stat = "identity")+
geom_vline(aes(xintercept=0), colour = "black")+
scale_fill_brewer(type="qual", palette="Set1")+
scale_y_discrete(position = "right")+
scale_x_continuous(labels = abs)+
labs(
y=NULL,
x="Proportion",
fill=NULL,
title = glue("Post-vaccination emergency attendances after {dayy} days"),
subtitle= "There may be multiple diagnoses per attendance"
)+
theme_minimal()+
theme(
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
#panel.grid.minor.x = element_blank(),
axis.line.x.bottom = element_line(),
plot.title.position = "plot",
legend.position = "bottom"
)
plot_freqs
}
ggsave(plot_freq(1), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq1.png"))
ggsave(plot_freq(2), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq2.png"))
ggsave(plot_freq(3), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq3.png"))
ggsave(plot_freq(4), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq4.png"))
ggsave(plot_freq(5), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq5.png"))
ggsave(plot_freq(6), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq6.png"))
ggsave(plot_freq(7), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq7.png"))
ggsave(plot_freq(8), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq8.png"))
ggsave(plot_freq(14), filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_freq14.png"))
## plot diagnosis-specific survival-curves ----
ceiling_any <- function(x, to=1){
# round to nearest 100 millionth to avoid floating point errors
ceiling(plyr::round_any(x/to, 1/100000000))*to
}
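# e.g. ceiling_any(0.123, to = 0.05) returns 0.15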
survobj <- function(.data, diagnosis, group, threshold){
dat <- .data %>%
mutate(
event_date = .[[glue("emergency_{diagnosis}_date")]],
.time = tte(vax1_date-1, event_date, censor_date, na.censor=FALSE),
.indicator = censor_indicator(event_date, censor_date),
)
unique_times <- unique(c(dat[[".time"]]))
dat_surv <- dat %>%
group_by(across(all_of(c("vax1_type_descr", group)))) %>%
transmute(
.time, .indicator
)
dat_surv1 <- dat_surv %>%
nest() %>%
mutate(
n_events = map_int(data, ~sum(.x$.indicator, na.rm=TRUE)),
surv_obj = map(data, ~{
survfit(Surv(.time, .indicator) ~ 1, data = .x, conf.type="log-log")
}),
surv_obj_tidy = map(surv_obj, ~tidy_surv(.x, addtimezero = TRUE)),
) %>%
select("vax1_type_descr", all_of(group), n_events, surv_obj_tidy) %>%
unnest(surv_obj_tidy)
dat_surv_rounded <- dat_surv1 %>%
mutate(
surv = ceiling_any(surv, 1/floor(max(n.risk, na.rm=TRUE)/(threshold+1))),
surv.ll = ceiling_any(surv.ll, 1/floor(max(n.risk, na.rm=TRUE)/(threshold+1))),
surv.ul = ceiling_any(surv.ul, 1/floor(max(n.risk, na.rm=TRUE)/(threshold+1))),
)
dat_surv_rounded
}
surv_list <- vector("list", length(diagnoses))
names(surv_list) <- diagnoses
for(diagnosis in names(surv_list)){
surv_list[[diagnosis]] <-
survobj(data_diagnoses, diagnosis, "vax1_month", 0) %>%
mutate(diagnosis = diagnosis)
}
surv_long_month <- bind_rows(surv_list) %>%
mutate(
diagnosis_short = factor(diagnosis, levels=diagnoses),
diagnosis_long = fct_recode(diagnosis, !!!diagnoses),
diagnosis_wrap = fct_relabel(diagnosis_long, ~str_wrap(., 15)),
)
surv_plot_month <-
surv_long_month %>%
filter(time <= 14) %>%
ggplot(aes(group=vax1_type_descr, colour=vax1_type_descr, fill=vax1_type_descr)) +
geom_step(aes(x=time, y=surv))+
geom_rect(aes(xmin=time, xmax=leadtime, ymin=surv.ll, ymax=surv.ul), alpha=0.1, colour="transparent")+
facet_grid(rows=vars(diagnosis_wrap), cols=vars(vax1_month))+
scale_color_brewer(type="qual", palette="Set1", na.value="grey")+
scale_fill_brewer(type="qual", palette="Set1", guide="none", na.value="grey")+
scale_y_continuous(expand = expansion(mult=c(0,0.01)))+
coord_cartesian(xlim=c(0, NA))+
labs(
x="Days since vaccination",
y="1 - emergency attendance rate",
colour=NULL,
fill=NULL,
title=NULL
)+
theme_minimal(base_size=9)+
theme(
legend.position = "bottom",
axis.line.x = element_line(colour = "black"),
panel.grid.minor.x = element_blank(),
strip.text.y = element_text(angle = 0)
)
ggsave(
surv_plot_month,
filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_surv_by_month.png"),
units="cm", width=15, height=30
)
surv_list <- vector("list", length(diagnoses))
names(surv_list) <- diagnoses
for(diagnosis in names(surv_list)){
surv_list[[diagnosis]] <-
survobj(data_diagnoses, diagnosis, "all", 0) %>%
mutate(diagnosis = diagnosis)
}
surv_long <- bind_rows(surv_list) %>%
mutate(
diagnosis_short = factor(diagnosis, levels=diagnoses),
diagnosis_long = fct_recode(diagnosis, !!!diagnoses),
diagnosis_wrap = fct_relabel(diagnosis_long, ~str_wrap(., 15)),
)
surv_plot <-
surv_long %>%
filter(time <= 14) %>%
ggplot(aes(group=vax1_type_descr, colour=vax1_type_descr, fill=vax1_type_descr)) +
geom_step(aes(x=time, y=surv))+
geom_rect(aes(xmin=time, xmax=leadtime, ymin=surv.ll, ymax=surv.ul), alpha=0.1, colour="transparent")+
facet_wrap(vars(diagnosis_wrap))+
scale_color_brewer(type="qual", palette="Set1", na.value="grey")+
scale_fill_brewer(type="qual", palette="Set1", guide="none", na.value="grey")+
scale_y_continuous(expand = expansion(mult=c(0,0.01)))+
coord_cartesian(xlim=c(0, NA))+
labs(
x="Days since vaccination",
y="1 - emergency attendance rate",
colour=NULL,
fill=NULL,
title=NULL
)+
theme_minimal(base_size=9)+
theme(
legend.position = "bottom",
axis.line.x = element_line(colour = "black"),
panel.grid.minor.x = element_blank(),
strip.text.y = element_text(angle = 0)
)
ggsave(
surv_plot,
filename=here("output", "descriptive", "diagnoses", "plot_diagnosis_surv.png"),
units="cm", width=15, height=15
)
|
95c6b6d7a3c6d9cf626022981fd3d18f4726e328
|
91216dfb0263e2d1e5e9cf585b2e27a720cce932
|
/data_wrangling.R
|
03da04efcb2f62e595aff5d86e60ad8b1f2b4982
|
[] |
no_license
|
kylechanpols/euro2020
|
a71d1c97e21e6d228a4008bf513d206adba40f3a
|
6f3d5509a3eb4518a24eedf66737e816809edf41
|
refs/heads/main
| 2023-05-31T07:58:28.803249
| 2021-06-12T01:07:39
| 2021-06-12T01:07:39
| 376,157,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
data_wrangling.R
|
source("flattenmatrix.R")
group_A <- flatten(read_excel("base_data.xlsx",sheet = "Group A Matrix"))
group_B <- flatten(read_excel("base_data.xlsx",sheet = "Group B Matrix"))
group_C <- flatten(read_excel("base_data.xlsx",sheet = "Group C Matrix"))
group_D <- flatten(read_excel("base_data.xlsx",sheet = "Group D Matrix"))
group_E <- flatten(read_excel("base_data.xlsx",sheet = "Group E Matrix"))
group_F <- flatten(read_excel("base_data.xlsx",sheet = "Group F Matrix"))
group_G <- flatten(read_excel("base_data.xlsx",sheet = "Group G Matrix"))
group_H <- flatten(read_excel("base_data.xlsx",sheet = "Group H Matrix"))
group_I <- flatten(read_excel("base_data.xlsx",sheet = "Group I Matrix"))
group_J <- flatten(read_excel("base_data.xlsx",sheet = "Group J Matrix"))
qualifying <- rbind(group_A,group_B,group_C,group_D,group_E,group_F,group_G,group_H,group_I,group_J)
qualifying$mtype <- "Qualifying Group Stage"
##############################################################################
#add back playoffs:
playoffs <- read_excel("playoffs.xlsx")
qualifying <- rbind(qualifying, playoffs)
write.csv(qualifying, "qualifiers.csv")
|
902d1cf4323484864cda7801dae00c960ba013c0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/murphydiagram/examples/datasets.Rd.R
|
fd64804b4ed0894c49aab3392d56207e0e2e7448
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
datasets.Rd.R
|
library(murphydiagram)
### Name: Data sets
### Title: Data sets with forecasts and realizations
### Aliases: inflation_mean recession_probability
### Keywords: datasets
### ** Examples
## Not run:
##D
##D # Load inflation forecasts
##D data(inflation_mean)
##D
##D # Make numeric time axis
##D tm <- as.numeric(substr(inflation_mean$dt, 1, 4)) +
##D 0.25*(as.numeric(substr(inflation_mean$dt, 6, 6))-1)
##D
##D # Plot
##D matplot(x = tm, y = inflation_mean[,2:4], type = "l", bty = "n",
##D xlab = "Time", ylab= "Inflation (percent)", col = 3:1)
##D legend("topright", legend = c("SPF", "Michigan", "Actual"), fill = 3:1, bty = "n")
##D
## End(Not run)
|
b7411933c7edab5339f14c05b0f844a468c76663
|
c8f22bfb6a8c79c7c50cf968f48087485f311f8a
|
/man/Affy2_Distance_Final.Rd
|
2684185b71d93b84eff05727d7cc393eea5e456e
|
[] |
no_license
|
VilainLab/IntramiRExploreR
|
04d0578c966a8d61f1b89b28fa26e56f4d17d3dc
|
f1da54cd6ff87934ad02175abc5199e89a44b7a6
|
refs/heads/master
| 2021-10-26T22:00:16.154053
| 2021-10-11T18:05:47
| 2021-10-11T18:05:47
| 154,200,941
| 0
| 0
| null | 2018-10-22T19:10:54
| 2018-10-22T19:10:54
| null |
UTF-8
|
R
| false
| true
| 902
|
rd
|
Affy2_Distance_Final.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{Affy2_Distance_Final}
\alias{Affy2_Distance_Final}
\title{Targets for the microRNA analyzed from the Affy2 platform using Distance.}
\format{
A data frame with 73374 rows and 8 variables:
\describe{
\item{miRNA}{miRNA name, miRNA symbol}
\item{GeneSymbol}{Gene name, in Gene Symbol}
\item{FBGN}{Gene name, in FlybaseID}
\item{CGID}{Gene name, in CGID}
\item{Score}{Computed Score, in float}
\item{GeneFunction}{Gene Functions, from Flybase}
\item{experiments}{Experiments, from ArrayExpress}
\item{TargetDatabases}{Target Database Name, from TargetDatabases}
}
}
\usage{
Affy2_Distance_Final
}
\description{
A precomputed dataset containing the targets, scores and other
attributes of 83 intragenic microRNAs using Distance Correlation
for the Affymetrix 2 platform.
}
\keyword{datasets}
|
0468da88e122327e94f2f5dd4237e9d98d6c996f
|
59f0d2448d94f32c13b8aa72e81ccb1aa8b48fd9
|
/man/theta2theta.Rd
|
02b2e922114ea57132ddd8d64c8e88474d0b6f7d
|
[] |
no_license
|
cran/rtmpt
|
d1c90567519639ff96add93284062e1c6ba2483c
|
99c3a35b42077a6aae0ccbce1b6d0448bfc503c1
|
refs/heads/master
| 2022-04-27T06:46:33.199440
| 2022-04-10T08:40:02
| 2022-04-10T08:40:02
| 213,435,147
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,949
|
rd
|
theta2theta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_params.R
\name{theta2theta}
\alias{theta2theta}
\alias{set_thetas_equal}
\title{Set process probabilities equal}
\usage{
theta2theta(model, names, keep_consts = FALSE)
set_thetas_equal(model, names, keep_consts = FALSE)
}
\arguments{
\item{model}{A list of the class \code{rtmpt_model}.}
\item{names}{Character vector giving the names of the processes for which the process probabilities should be equal. If
\code{length(names) = 1} then the corresponding process probability will be estimated (i.e., it will be set to NA)}
\item{keep_consts}{Can be one of the following
\itemize{
\item logical value: \code{FALSE} (default) means none of the constants for \code{names} in the \code{model} will be kept; The probability of
the reference process (i.e., first of \code{names} in alphabetical order) will be set to \code{NA} (i.e., will be estimated) and the others
will be set to the name of the reference process (i.e., will be set to equal the reference process probability). \code{TRUE} means
the constant of the reference process probability (if specified) is used for all other processes.
\item numeric value: index for \code{names}. If 1, the constant of the first process in \code{names} (in original order defined by the user) is
used for all other probabilities of the processes in \code{names}. If 2, the constant of the second process is used. And so on.
}}
}
\value{
A list of the class \code{rtmpt_model}.
}
\description{
Setting multiple process probabilities (thetas) equal. One of the process probabilities will be estimated and
the other named process(es) will be set to equal the former. The equality can be removed by only using one name of a process.
}
\note{
If you use \code{theta2theta()} and \code{tau2tau()} with the same process names you might just change the EQN or MDL file accordingly
by using the same process name for all processes which should have equal process times and probabilities.
}
\examples{
####################################################################################
# Detect-Guess variant of the Two-High Threshold model.
# The encoding and motor execution times are assumed to be equal for each category.
# The process probabilities for both detection processes ("do" and "dn") will be
# set equal.
####################################################################################
mdl_2HTM <- "
# targets
do+(1-do)*g
(1-do)*(1-g)
# lures
(1-dn)*g
dn+(1-dn)*(1-g)
# do: detect old; dn: detect new; g: guess
"
model <- to_rtmpt_model(mdl_file = mdl_2HTM)
## make do = dn
new_model <- theta2theta(model = model, names = c("do", "dn"))
new_model
## make do = dn
new_model <- set_thetas_equal(model = model, names = c("do", "dn"))
new_model
}
\seealso{
\code{\link{delta2delta}}, \code{\link{theta2const}}, \code{\link{tau2zero}} and \code{\link{tau2tau}}
}
\author{
Raphael Hartmann
}
|
0988838e3cc728b93fe268b50bdf9e92becd6d28
|
f421c6f0472a53fadb9435741dfe2bcfce740ec4
|
/man/SKAT.Rd
|
da91e9fd34b1d96f15c218a6a5fc500dcf64b6c6
|
[] |
no_license
|
EpiSlim/kernelPSI
|
7e9cf5a412213b69c02a0105a13f08ccc84f6809
|
3ecd7dd295d1de7d95fdb3089741608ebbc3991d
|
refs/heads/master
| 2023-08-25T15:01:26.877765
| 2020-02-03T15:12:48
| 2020-02-03T15:12:48
| 187,524,830
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,572
|
rd
|
SKAT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skat.R
\name{SKAT}
\alias{SKAT}
\title{implements the sequence kernel association test for GWAS data}
\usage{
SKAT(Y, K, sigma = 1)
}
\arguments{
\item{Y}{response vector}
\item{K}{list of kernel similarity matrices. The sum kernel is used in
the quadratic form.}
\item{sigma}{standard deviation of the response Y}
}
\value{
\eqn{p}-value of the SKAT test
}
\description{
The SKAT test is a quadratic test of association between a
phenotype of interest and a genomic region. One of the main
benefits of the SKAT test is the incorporation of nonlinear
effects through the use of a kernel similarity matrix in the
quadratic form. For instance, the identical-by-state (IBS) kernel
which computes the number of identical alleles between two samples
can be used.
}
\details{
The null hypothesis in the SKAT test is the absence of effects
of the SNPs within the region of interest and the outcome. Under the null,
the distribution of the test statistic is a weighted sum of chi-square
distributions whose quantiles are computed using the davies formula.
}
\examples{
n <- 30
p <- 20
K <- replicate(5, matrix(rnorm(n*p), nrow = n, ncol = p), simplify = FALSE)
K <- sapply(K, function(X) return(X \%*\% t(X) / dim(X)[2]), simplify = FALSE)
Y <- rnorm(n)
SKAT(Y, K)
}
\references{
Wu, M. C., Lee, S., Cai, T., Li, Y., Boehnke, M., & Lin, X.
(2011). Rare-variant association testing for sequencing data with the
sequence kernel association test. American Journal of Human Genetics,
89(1), 82–93.
}
|
84b08c2a393e803e264ddcefe8fc93b898d8c7d7
|
31f68385a76fdfbe5407bf060fdc329deef5eb77
|
/R/execby_utils.R
|
862f160afa41509d016289682bdcd9406ebfa25a
|
[] |
no_license
|
akzaidi/dplyrXdf
|
8ec3a4cbe363041cea742ea71f8d8840b90cc013
|
2bcc34285939426ba13f816763a7ed268bd99ca3
|
refs/heads/master
| 2021-01-18T19:57:31.623037
| 2017-08-15T07:02:31
| 2017-08-15T07:02:31
| 100,542,426
| 0
| 0
| null | 2017-08-16T23:52:23
| 2017-08-16T23:52:23
| null |
UTF-8
|
R
| false
| false
| 679
|
r
|
execby_utils.R
|
execByCheck <- function(execLst)
{
ok <- sapply(execLst, function(x) x$status[[1]] == "OK")
if(!all(ok))
{
errs <- sapply(execLst[!ok], function(x) x$status[[2]])
stop("bad result from rxExecBy: ", errs[1])
}
}
execByResult <- function(.data, ...)
{
cc <- rxGetComputeContext()
on.exit(rxSetComputeContext(cc))
# rxExecBy fails in local CC with relative path for HDFS data
if(!inherits(cc, "RxDistributedHpa") && in_hdfs(.data) && substr(.data@file, 1, 1) != "/")
.data <- modifyXdf(file=normalizeHdfsPath(.data@file))
execLst <- rxExecBy(.data, ...)
execByCheck(execLst)
lapply(execLst, "[[", "result")
}
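# Hypothetical usage sketch (data set and UDF names are illustrative, not from this repo):
# results <- execByResult(airlineXdf, keys = c("DayOfWeek"),
#                         func = function(keys, data) rxSummary(~ArrDelay, data))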
|
45ed2cf7965918a5005b1fcf813f0eded8ff07f0
|
c482564f06ddb420ab70a0c004ad4e42c8142376
|
/merge_training_batches.R
|
606816178a1f6ed31fcc4b42b875e2d485d6328b
|
[] |
no_license
|
BadSeby/RNASeqDrug
|
3c3f1ecccdafb3f651a534cc48d569711161fd1c
|
f86cfd7c7fc7f9c5ea28010c0cb11405d14d1b44
|
refs/heads/master
| 2020-04-16T03:22:27.575519
| 2018-05-04T20:05:06
| 2018-05-04T20:05:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,667
|
r
|
merge_training_batches.R
|
args <- commandArgs(trailingOnly=TRUE)
## for interactive testing
#args <- c("../results/training_results", "../data/training_ccle_gdsc.RData", "../data/auc_recomputed_drug_association.RData")
if(!require("PharmacoGx")){biocLite("PharmacoGx");library(PharmacoGx)}
if(!require("Biobase")){biocLite("Biobase");library(Biobase)}
stat <- "r.squared & cindex"
path.training.results <- file.path(as.character(args[1]))
pvalues.files <- list.files(file.path(path.training.results))
path.training.data <- as.character(args[2])
load(path.training.data)
GeneList <- colnames(ccle.genes.fpkm)
drugs <- colnames(ccle.drug.sensitivity)
pvalues = list()
best.isoforms = list()
statistics = list()
length(pvalues) <- length(GeneList)
length(best.isoforms) <-length(GeneList)
length(statistics) <-length(GeneList)
###
if(stat == "r.squared & cindex"){
pvalues <- list("r.squared"=pvalues, "cindex"=pvalues)
statistics <- list("r.squared"=statistics, "cindex"=statistics)
}
for ( i in 1: length(pvalues.files))
{
load(file.path(path.training.results, pvalues.files[i]))
Index <- unlist(strsplit(pvalues.files[i],"[.,_]"))
for(j in Index[1]:Index[2])
{
t <- j-as.numeric(Index[1])+1
for(k in 1:length(drugs)) #24 ccle #15 ccle & gdsc
{
pvalues[["r.squared"]][[j]][[k]] <- both.drug.association.adj.r.squared.pvalues[["r.squared"]][[t]][[k]]
pvalues[["cindex"]][[j]][[k]] <- both.drug.association.adj.r.squared.pvalues[["cindex"]][[t]][[k]]
statistics[["r.squared"]][[j]][[k]] <- both.drug.association.statistics[["r.squared"]][[t]][[k]]
statistics[["cindex"]][[j]][[k]] <- both.drug.association.statistics[["cindex"]][[t]][[k]]
names(pvalues[["r.squared"]][[j]])[k] <- drugs[k]
names(pvalues[["cindex"]][[j]])[k] <- drugs[k]
names(statistics[["r.squared"]][[j]])[k] <- drugs[k]
names(statistics[["cindex"]][[j]])[k] <- drugs[k]
}
best.isoforms[[j]] <- both.drug.association.best.isoforms[[t]]
names(best.isoforms)[j] <-names(both.drug.association.best.isoforms)[t]
names(pvalues[["r.squared"]])[j] <- names(both.drug.association.adj.r.squared.pvalues[["r.squared"]])[t]
names(pvalues[["cindex"]])[j] <- names(both.drug.association.adj.r.squared.pvalues[["cindex"]])[t]
names(statistics[["r.squared"]])[j] <- names(both.drug.association.statistics[["r.squared"]])[t]
names(statistics[["cindex"]])[j] <- names(both.drug.association.statistics[["cindex"]])[t]
}
}
drug.association <- pvalues
drug.association.statistics <- statistics
drug.association.best.isoforms <- best.isoforms
save(drug.association,drug.association.statistics, drug.association.best.isoforms, file=as.character(args[3]))
|
39a5c9e103b474233812604ed4425c2d4557e4fc
|
90fa2c6fb2fd30abe674893845eddaadb15bb90a
|
/1essay.R
|
346ba605bff7660dade5933a6a483c1e9b3d7e1a
|
[] |
no_license
|
EstherPeev/Git-P1
|
473596c5ce2fbc591d2d3bf5fbd700cada31e4fd
|
3a58782a19c4ce5c375f8e810b4ef1f7d0606fe2
|
refs/heads/master
| 2023-01-20T01:50:49.691045
| 2020-11-21T14:41:03
| 2020-11-21T14:41:03
| 314,832,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67
|
r
|
1essay.R
|
library(ggplot2)
library(magrittr) # provides %>%
plot(cars)
cars %>%
  ggplot(aes(x=speed, y=dist)) +
  geom_point()
|
e5d752b3e8732c0777dee3181bb6462ab0e62ae7
|
dcfb9c659d06de87b66bc3f39dda1ae70b6489e1
|
/cachematrix.R
|
9e34a1e5fb4b4d2778482f8da04d5eff8e561b86
|
[] |
no_license
|
vijayathota/ProgrammingAssignment2
|
357d001c88613f3fea32f636239b6c3c90c2912f
|
b0c2170cbd067966fc81e7ef2f988e42a2b70001
|
refs/heads/master
| 2021-01-13T07:24:41.515479
| 2016-10-20T07:23:47
| 2016-10-20T07:23:47
| 71,323,102
| 0
| 0
| null | 2016-10-19T05:46:05
| 2016-10-19T05:46:04
| null |
UTF-8
|
R
| false
| false
| 1,463
|
r
|
cachematrix.R
|
## makeCacheMatrix takes a matrix as input and caches values in variables.
## Here the "inversemat" variable holds the inverse matrix and serves as the cache.
## This function has 4 sub-functions:
###  set() sets the matrix to be inverted
###  get() returns the matrix
###  setinversematrix() sets the inverse matrix
###  getinversematrix() returns the inverse matrix
makeCacheMatrix <- function(x = matrix()) {
inversemat <- NULL
set <- function(y)
{
x <<- y
inversemat <<- NULL
}
get <- function() x
setinversematrix <- function(inmat) inversemat <<- inmat
getinversematrix <- function() inversemat
list(set = set, get = get, setinversematrix = setinversematrix , getinversematrix = getinversematrix)
}
## cacheSolve takes the object returned by makeCacheMatrix() as input.
## It checks whether an inverse matrix is already in the cache; if so, it simply returns it.
## If the cache is empty, it takes the matrix from makeCacheMatrix(), computes the inverse
## with solve(), and saves the result in the cache.
cacheSolve <- function(x, ...) {
mat <- x$getinversematrix()
if ( !is.null(mat) )
{
print("Inverse Matrix is taken from Cache")
return(mat)
}
mat <- x$get()
inversemat <- solve(mat)
x$setinversematrix(inversemat)
inversemat
}
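## Example usage (illustrative):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse and caches it
## cacheSolve(cm)   # prints "Inverse Matrix is taken from Cache" and returns the cached value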
|
05c15eca4f95e49295f20d3cc1187f3a97a4ae92
|
485dfaceb316da13355a69717a7d4a443db1b44c
|
/cachematrix.R
|
c830392b327c8e949b4c2149e857ea845864e635
|
[] |
no_license
|
hanutm/ProgrammingAssignment2
|
9135cc0153ba3766127c1e7c5eb8fdbe72e04ddc
|
4d818b3395e933c4afd501b2823b1ce458016266
|
refs/heads/master
| 2021-01-18T20:26:12.753754
| 2019-08-22T12:51:57
| 2019-08-22T12:51:57
| 86,968,329
| 0
| 0
| null | 2017-04-02T08:29:59
| 2017-04-02T06:37:53
|
R
|
UTF-8
|
R
| false
| false
| 1,397
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(a = matrix()){
  i <- NULL # i carries the cached inverse of the matrix
set <- function(b) { # setter function for changing the matrix
a <<- b
i <<- NULL
}
get <- function() a # getter function for reading value of current matrix
setinv <- function(inv) i <<- inv # setter function for setting the output of function (here inverse)
getinv <- function() i # function for getting the output of the function
list(set = set,
get = get,
setinverse = setinv,
getinverse = getinv)
}
cacheSolve <- function(a, ...) { #function for actual computation
i <- a$getinverse()
if(!(is.null(i))) { #checks if inverse exists or is NULL in case of new matrix
message("getting saved data")
return(i)
}
data <- a$get() #reads the current matrix into local object (variable)
inv <- solve(data) #solves the matrix
a$setinverse(inv) #caches the new output (inverse value)
inv
}
|
fc20d00c571f42eb0ab97d3062cd5be86d40405e
|
6ae140d984207c5601ae807886c1ba514f4c663e
|
/Plot3.R
|
502cd6bd31585e4aaf48439f3c83d51b765d4121
|
[] |
no_license
|
dsuwal/ExData_Plotting1
|
01daba8a5ef1b9598aba882de9710d746873210c
|
7de3cf4d746405a7aa3de5e5a2332d17c9ec10ec
|
refs/heads/master
| 2020-04-04T13:07:37.166564
| 2016-05-07T20:57:38
| 2016-05-07T20:57:38
| 58,283,534
| 0
| 0
| null | 2016-05-07T20:38:13
| 2016-05-07T20:38:12
| null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
Plot3.R
|
#Set working directory
setwd("C:/WorkSpace/DataScience/4.ExploratoryDataAnalysis/Project/Assignment1")
#Reading and subsetting data
PCDataFile <- "../Data/household_power_consumption.txt"
PowerConsumData <- read.table(PCDataFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
SubsetPowerConsumData <- PowerConsumData[PowerConsumData$Date %in% c("1/2/2007","2/2/2007") ,]
DateTime <- strptime(paste(SubsetPowerConsumData$Date, SubsetPowerConsumData$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
GlobalActivePower <- as.numeric(SubsetPowerConsumData$Global_active_power)
#Plotting graph
png("plot3.png", width=480, height=480)
plot(DateTime, as.numeric(SubsetPowerConsumData$Sub_metering_1), type = "l", ylab = "Energy Submetering", xlab = "")
lines(DateTime, as.numeric(SubsetPowerConsumData$Sub_metering_2), type = "l", col = "red")
lines(DateTime, as.numeric(SubsetPowerConsumData$Sub_metering_3), type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
#Closing graph device
dev.off()
|
8e84642fb6322d79e0dd9889d100421501e2ec88
|
c42e4faa9a5a546fc8fbe7cc367383a14c2a7d8a
|
/part3/R 러닝/ggplot.R
|
1dafb9626a4d3f201cef395ccef2447a41091161
|
[] |
no_license
|
wisdom009/R
|
4be82b65a9d1b79085cf40ced11173421f405548
|
202dce45c6faac6bd4563f96a40a09b09ae416ed
|
refs/heads/master
| 2020-06-03T19:03:28.896231
| 2019-06-17T08:30:42
| 2019-06-17T08:30:42
| 189,162,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,609
|
r
|
ggplot.R
|
#ggplot
install.packages("ggplot2")
library(ggplot2)
setwd("d:/workspace/R_date/part3/date")
kor = read.table("학생별국어성적_new.txt",header=T,sep=",")
kor
ggplot(kor,aes(x=이름,y=점수)) + geom_point()
ggplot(mapping = aes(x=이름, y=점수),data=kor) + geom_point() # what you map in aes() is the heart of ggplot
ggplot(kor,aes(x=이름,y=점수)) + geom_bar(stat = "identity") # stat 원래는 각 종목별 근데 이건 각각
ggplot(kor,aes(x=이름,y=점수)) + geom_bar(stat = "identity", fill="blue", color="red")
ggplot(kor,aes(x=이름,y=점수)) + geom_bar(stat = "identity", fill="blue", color="red") +
theme(axis.text.x =element_text(angle = 45, hjust = 1, vjust = 1, color = "black", size = 10))
# axis.text.x: tilt the x-axis labels 45 degrees and set their color (black) and size (10), etc.
score=read.csv("학생별과목별성적_국영수_new.csv", header=T)
score
#
library(dplyr)
sort=arrange(score,이름,과목)
sort
sort2 = sort %>%
group_by(이름) %>%
  mutate(누적합계=cumsum(점수)) # within each 이름 group, take the cumulative sum of 점수
sort2
sort3 = sort2 %>%
group_by(이름) %>%
  mutate(label=cumsum(점수)-0.5 * 점수) # place each label at the midpoint of its bar segment
sort3
# 1
sort4 = sort %>%
group_by(이름) %>%
mutate(누적합계=cumsum(점수)) %>%
mutate(누적합계=cumsum(점수)- 0.5 *점수)
# 2
sort5 = sort %>%
group_by(이름) %>%
mutate(누적합계=cumsum(점수)) %>%
mutate(누적합계=cumsum(점수), label=cumsum(점수)-0.5*점수)
sort5
ggplot(sort5, aes(x=이름,y=점수,fill=과목)) +
geom_bar(stat="identity") +
geom_text(aes(y=label,label=paste(점수,'점')), color="black", size=4) +
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust = 1,colour = "black",size = 9))
score=read.csv("학생별전체성적_new.txt",header=T,sep=",")
score
score_e = score[,c("이름","영어")] # keep only the 이름 and 영어 columns
score_e
ggplot(score,aes(x=영어,y=reorder(이름,영어))) +
geom_point(size=6)+
theme_bw()+
theme(panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.major.y = element_line(color = "red",linetype = "dashed"))
ggplot(score,aes(x=영어,y=reorder(이름,영어))) +
  geom_segment(aes(yend=이름),xend=0,color="blue") + # geom_segment draws the connecting line
geom_point(size=6, color="green")+
theme_bw()+
theme(panel.grid.major.y = element_blank())
#----------------------------------------------------------------------------------------
#
install.packages("gridExtra")
library(gridExtra)
mtcars
str(mtcars) # compact overview of the data structure
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point()
ggplot(mtcars,aes(x=hp, y=disp)) +
geom_point()
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(color="red") +
geom_line()
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(colour = 'blue'))
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(color = factor(am)))
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(size=7)
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(color = factor(am), size=wt))
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(shape = factor(am), size=wt))
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(color = factor(am), size=wt)) +
scale_color_manual(values = c("red","green"))
par(oma=c(12,1,12,1))
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(aes(color = factor(am), size=wt)) +
scale_color_manual(values = c("red","green")) +
labs(x="마력",y="연비(mile/gallon)")
three = read.csv("학생별과목별성적_3기_3명.csv")
three
ss = arrange(three, 이름 ,과목)
ss
ggplot(three, aes(x= 과목, y=점수, color=이름, group=이름))+
geom_line()
ggplot(three, aes(x= 과목, y=점수, color=이름, group=이름))+
geom_line() +
geom_point(size=3)
ggplot(ss, aes(x=과목, y=점수, color = 이름, group=이름, fill=이름)) + geom_line() +geom_point(size=6, shape=22)
dis =read.csv("1군전염병발병현황_년도별.csv",stringsAsFactors=F)
dis
str(dis)
ggplot(dis, aes(x=년도별, y=장티푸스, group=1))+
geom_line()
ggplot(dis, aes(x=년도별, y=장티푸스, group=1))+
geom_area(color="red",fill="cyan",alpha=0.4)
ggplot(dis, aes(x=년도별, y=장티푸스, group=1))+
geom_area(color="red",fill="cyan",alpha=0.4) +
geom_line(color="blue")
# -----------------------------------------------------------------
# Anscombe's Quartet: descriptive statistics
anscombe
an = anscombe
an
an1 = anscombe &>&
select(an, x1, x2, x3, x4) %>%
summarise_each(list(mean), x1, x2, x3, x4)
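# A minimal sketch extending the check to the y columns (assumes dplyr is
# loaded, as above): their means are nearly identical too, which is the
# point of Anscombe's quartet.
an %>%
  select(y1, y2, y3, y4) %>%
  summarise_all(mean)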
|
4e874d631b9cfc41ca618f46af3eef0c6e2a5b9f
|
c118908b1c8bad0914e38e43f1148b58364accc2
|
/man/BchronCalibrate.Rd
|
70e9975e64c950709addc914f128359bee12db3d
|
[] |
no_license
|
andrewcparnell/Bchron
|
baf98d6642a328ba3c83e8fcf2e04b6c0af86974
|
faa14f54444e7ec417e0e389596014a1c7645349
|
refs/heads/master
| 2023-06-27T02:01:46.417288
| 2023-06-08T11:17:34
| 2023-06-08T11:17:34
| 40,361,984
| 30
| 12
| null | 2022-04-05T20:46:28
| 2015-08-07T13:33:16
|
R
|
UTF-8
|
R
| false
| true
| 4,319
|
rd
|
BchronCalibrate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BchronCalibrate.R
\name{BchronCalibrate}
\alias{BchronCalibrate}
\title{Fast radiocarbon calibration}
\usage{
BchronCalibrate(
ages,
ageSds,
calCurves = rep("intcal20", length(ages)),
ids = NULL,
positions = NULL,
pathToCalCurves = system.file("data", package = "Bchron"),
allowOutside = FALSE,
eps = 1e-05,
dfs = rep(100, length(ages))
)
}
\arguments{
\item{ages}{A vector of ages provided in years before 1950.}
\item{ageSds}{A vector of 1-sigma values for the ages given above}
\item{calCurves}{A vector of values containing either \code{intcal20}, \code{shcal20}, \code{marine20}, or \code{normal} (older calibration curves such as intcal13 are also supported). Should be the same length as the number of ages supplied. Non-standard calibration curves can be used provided they are supplied in the same format as those previously mentioned and are placed in the same directory. Normal indicates a normally-distributed (non-14C) age.}
\item{ids}{ID names for each age}
\item{positions}{Position values (e.g. depths) for each age. In the case of layers of non-zero thickness, this should be the middle value of the slice}
\item{pathToCalCurves}{File path to where the calibration curves are located. Defaults to the system directory where the 3 standard calibration curves are stored.}
\item{allowOutside}{Whether to allow calibrations to run outside the range of the calibration curve. By default this is turned off as calibrations outside of the range of the calibration curve can cause severe issues with probability ranges of calibrated dates}
\item{eps}{Cut-off point for density calculation. A value of eps>0 removes ages from the output which have negligible probability density}
\item{dfs}{Degrees-of-freedom values for the t-distribution associated with the calibration calculation. A large value indicates Gaussian distributions assumed for the 14C ages}
}
\value{
A list of lists where each element corresponds to a single age. Each element contains:
\item{ages}{The original age supplied}
\item{ageSds}{The original age standard deviation supplied}
\item{positions}{The position of the age (usually the depth)}
\item{calCurves}{The calibration curve used for that age}
\item{ageGrid}{A grid of age values over which the density was created}
\item{densities}{A vector of probability values indicating the probability value for each element in \code{ageGrid}}
\item{ageLab}{The label given to the age variable}
\item{positionLab}{The label given to the position variable}
}
\description{
A fast function for calibrating large numbers of radiocarbon dates involving multiple calibration curves
}
\details{
This function provides a direct numerical integration strategy for computing calibrated radiocarbon ages. The steps for each 14C age are approximately as follows:
1) Create a grid of ages covering the range of the calibration curve
2) Calculate the probability of each age according to the 14C age, the standard deviation supplied and the calibration curve
3) Normalise the probabilities so that they sum to 1
4) Remove any probabilities that are less than the value given for eps
Multiple calibration curves can be specified so that each 14C age can have a different curve. For ages that are not 14C, use the 'normal' calibration curve which treats the ages as normally distributed with given standard deviation
}
\examples{
# Calibrate a single age
ages1 <- BchronCalibrate(
ages = 11553,
ageSds = 230,
calCurves = "intcal20",
ids = "Date-1"
)
summary(ages1)
plot(ages1)
# Or plot with Calibration curve
plot(ages1, includeCal = TRUE)
# Calibrate multiple ages with different calibration curves
ages2 <- BchronCalibrate(
ages = c(3445, 11553, 7456),
ageSds = c(50, 230, 110),
calCurves = c("intcal20", "intcal20", "shcal20")
)
summary(ages2)
plot(ages2)
# Calibrate multiple ages with multiple calibration curves and including depth
ages3 <- BchronCalibrate(
ages = c(3445, 11553),
ageSds = c(50, 230),
positions = c(100, 150),
calCurves = c("intcal20", "normal")
)
summary(ages3)
plot(ages3, withPositions = TRUE)
}
\seealso{
\code{\link{Bchronology}}, \code{\link{BchronRSL}}, \code{\link{BchronDensity}}, \code{\link{BchronDensityFast}}, \code{\link{createCalCurve}}
}
|
aa033cb3621ebc47d3d29fabb99c98f153548d71
|
15aeb42c2d5db72049ca805ecb9568fb09a94788
|
/demonPhylo.R
|
7b0c9822fc676ae7dbd1a7d8998f8f45ca9f4d06
|
[] |
no_license
|
jesusNPL/ManageTRY
|
05a817f7a54f156ede259eb5b3f1d882eae2a4ba
|
57beb73a94dcfa28d9c92184e7b792c84bf6811c
|
refs/heads/master
| 2021-05-05T06:24:34.644334
| 2020-12-17T22:22:15
| 2020-12-17T22:22:15
| 118,797,877
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
demonPhylo.R
|
### Arguments
# Spp = vector of species names, e.g., c("Elymus nutans", "Festuca sinensis", "Kobresia setschwanensis")
# scenarios = "S2" or "S3". Scenario S3 is recommended by Jin and Qian.
# If S2 is selected, a sample of trees will be returned, so you first need to supply the number of replicates, e.g., r = 100
# saveTaxonomy = Logical, TRUE or FALSE
# r = number of replicates, only if S2 is selected
# output.tree = Logical, TRUE or FALSE
demonPhyloPlants <- function(Spp, scenarios, saveTaxonomy, r, output.tree){
if ( ! ("ape" %in% installed.packages())) {install.packages("ape", dependencies = T)}
if ( ! ("remotes" %in% installed.packages())) {install.packages("remotes", dependencies = T)}
if ( ! ("V.PhyloMaker" %in% installed.packages())) {remotes::install_github("jinyizju/V.PhyloMaker")}
require(V.PhyloMaker)
source("https://raw.githubusercontent.com/jesusNPL/ManageTRY/master/demonCheckScinames.R")
taxonomy <- check_TPLScinames(Spp, saveTaxonomy = saveTaxonomy)
data <- data.frame(species = taxonomy$TPLSciname,
genus = taxonomy$TPLGenus,
family = taxonomy$TPLFamily)
if(scenarios == "S3"){
YourPhylo <- phylo.maker(data, scenarios = "S3", output.tree = output.tree)
} else if(scenarios == "S2"){
YourPhylo <- phylo.maker(data, scenarios = "S2", output.tree = output.tree, r = r)
}
return(YourPhylo)
}
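# Usage sketch (hypothetical species vector; needs internet access for the
# sourced helper and for installing V.PhyloMaker):
# myPhylo <- demonPhyloPlants(Spp = c("Elymus nutans", "Festuca sinensis"),
#                             scenarios = "S3", saveTaxonomy = FALSE,
#                             output.tree = TRUE)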
|
a08c3b71fac67a7cf04bce2cdeb7824855e18a07
|
06cedafb169a5abbabd90c25e1cd265066dab6d4
|
/scripts/variant_sets/encode_overlap_scripts/sumOverlapPerTF.R
|
d25327a8deee86d94b55f42166f23dfcdc46086a
|
[] |
no_license
|
LappalainenLab/TF-eQTL_preprint
|
0027a5d03accf1e3ec84cd4539429bf5035f338f
|
5c9e30ab7c628e098a72d19f8e66066464390aa1
|
refs/heads/master
| 2023-06-20T01:07:50.398202
| 2021-07-16T16:56:59
| 2021-07-16T16:56:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
sumOverlapPerTF.R
|
#!/usr/bin/Rscript
##
## calcPeakPerTF.R
##
## Given a Peak overlap file, sum the number of
## overlaps per TF and DNase
##
setwd("~/data/gtex/v8/encode_overlap")
#args = commandArgs(trailingOnly=TRUE)
in_file = "ENCODE_TF_overlap.GTEx_Analysis_2017-06-05_v8_WholeGenomeSeq_838Indiv_Analysis_Freeze_MAF001_GTonly.txt.gz"
out_file = "ENCODE_TF_overlap_by_TF.GTEx_Analysis_2017-06-05_v8_WholeGenomeSeq_838Indiv_Analysis_Freeze_MAF001_GTonly.txt"
## read in TFBS data
TFBS_header = readLines(in_file, n = 1)
## read the full overlap table; read.table handles the gzipped file directly
## (assumes the file carries its own header row)
TFBS_counts = read.table(in_file, header = TRUE, comment.char = '')
#TFBS_counts = read.table("ENCODE_GRCh38/TfDNase_split/TfDNase_split.aa")
#TFBS_header = read.table("ENCODE_GRCh38/WGS_Peak_overlap_TfDNase.header",
# header=TRUE,comment.char='')
#names(TFBS_counts) <- names(TFBS_header)
TFs = sort(unique( sapply( names(TFBS_counts)[-c(seq(1,3))] ,
function(str) {strsplit(str,'[_]')[[1]][1]} ) ))
TF_counts = as.data.frame( sapply(TFs, function(tf) {
tf_cols = grep(paste('^',tf,'_',sep=''),names(TFBS_counts))
apply(TFBS_counts[tf_cols], 1, function(row) {
sum(row)
})
}) )
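## A vectorized equivalent of the per-row sum above (a sketch, assuming the
## same TFBS_counts layout):
## TF_counts = as.data.frame(sapply(TFs, function(tf) {
##   rowSums(TFBS_counts[grep(paste0('^', tf, '_'), names(TFBS_counts))])
## }))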
out_TFBS_counts = cbind(TFBS_counts[c(seq(1,3))],
TF_counts)
write.table(out_TFBS_counts,file=out_file,
col.names=TRUE,row.names=FALSE,
quote=FALSE,sep='\t')
|
9089c4cebd121e1060cf09d33a14eb75554ceca5
|
ec9096db7e6f33846d86b1af9a5d833a1cfa482d
|
/man/get_credentials.Rd
|
2aa2ecf55abfaa614402f95e39ff5bd57426e908
|
[] |
no_license
|
mjdhasan/alpacaR
|
7107a0b5b6954842bc2a1cbef7aea04f7a391648
|
938b5313b5ee17b47ea0bf1b126b8b13de281ad2
|
refs/heads/master
| 2022-04-08T08:54:54.352304
| 2020-03-10T14:38:59
| 2020-03-10T14:38:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 227
|
rd
|
get_credentials.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{get_credentials}
\alias{get_credentials}
\title{get Alpaca API tokens}
\usage{
get_credentials()
}
\description{
get Alpaca API tokens
}
|
8a02b2fd6885745d86f37b72c6ac3d75d9ac31f8
|
128e6f4584bc00fe6ec2cf0bee1c6f92ad03b427
|
/2_R_Programming/Week1_R_Programming/course 2 week 1.R
|
20e7fcf6793647655971073ed0425cc742dfafad
|
[] |
no_license
|
JoeWadford/Data-Science-Coursera
|
75ddf7e52ecab4266a9a040cd24cd8e7a9f16ac3
|
370f8b6c1d758c3e51c172f9efdfe22292b58bf4
|
refs/heads/master
| 2020-05-05T05:59:57.593496
| 2019-04-14T00:43:51
| 2019-04-14T00:43:51
| 179,771,638
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,564
|
r
|
course 2 week 1.R
|
### Coursera Data Science, Course 2 week 1
### R Programming
## EXPLICIT COERCION
x <- 0:6
class(x)
# integer
as.numeric(x)
# [1] 0 1 2 3 4 5 6
as.logical(x)
# [1] FALSE TRUE TRUE TRUE TRUE TRUE TRUE
as.character(x)
# [1] "0" "1" "2" "3" "4" "5" "6"
## LISTS
x <- list(1, "a", TRUE, 1 + 4i)
x
## MATRICES
m <- matrix(1:6, nrow = 2, ncol = 3)
m
dim(m)
attributes(m)
m1 <- 1:10
m1
dim(m1) <- c(2, 5)
m1
# cbind-ing and rbind-ing
x <- 1:3
y <- 10:12
cbind(x, y)
rbind(x, y)
## FACTORS
x <- factor(c("yes", "yes", "no", "yes", "no"))
x
table(x)
unclass(x)
attr(x,"levels")
## DATA FRAMES
xdf <- data.frame(foo = 1:4, bar = c(T, T, F, F))
xdf
nrow(xdf)
ncol(xdf)
## NAMES
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
x <- list(a =1, b =2, c=3)
x
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m
## TEXTUAL DATA FORMATS
y <- data.frame(a=1, b="a")
dput(y)
dput(y, file = "y.R")
new.y <- dget("y.R")
new.y
# Dumping R Objects
x <- "foo"
y <- data.frame(a=1, b="a")
dump(c("x", "y"), file = "data.R")
rm(x,y)
source("data.R")
y
x
## CONNECTIONS: INTERFACES TO THE OUTSIDE WORLD
str(file)
# function (description = "", open = "", blocking = TRUE,
#           encoding = getOption("encoding"))
# Reading Lines of a Text File
con <- url("http://www.jhsph.edu", "r")
x <- readLines(con)
head(x)
## SUBSETTING BASICS
x <- c("a", "b", "c", "c", "d", "a")
x[1]
# [1] "a"
x[2]
# [1] "b"
x[1:4]
# [1] "a" "b" "c" "c"
x[x > "a"]
# [1] "b" "c" "c" "d"
u <- x > "a"
u
# [1] FALSE TRUE TRUE TRUE TRUE FALSE
x[u]
# [1] "b" "c" "c" "d"
## SUBSETTING LISTS
x <- list(foo = 1:4, bar = 0.6)
x[1]
# $foo
# [1] 1 2 3 4
x[[1]]
# [1] 1 2 3 4
x$bar
# [1] 0.6
x[["bar"]]
# [1] 0.6
x["bar"]
# $bar
# [1] 0.6
x1 <- list(foo = 1:4, bar = 0.6, baz = "hello")
x1[c(1,3)]
# $foo
# [1] 1 2 3 4
# $baz
# [1] "hello"
x2 <- list(foo = 1:4, bar = 0.6, baz = "hello")
name <- "foo"
x[[name]]
# [1] 1 2 3 4
x$name
# NULL
x$foo
# [1] 1 2 3 4
x3 <- list(a = list(10, 12, 14), b = c(3.14, 2.81))
x3[[c(1,3)]]
#[1] 14
x3[[1]][[3]]
# [1] 14
x3[[c(2, 1)]]
# [1] 3.14
## SUBSETTING A MATRIX
mx <- matrix(1:6, 2, 3)
mx[1,2]
# [1] 3
mx[2,1]
# [1] 2
mx[1,]
# [1] 1 3 5
mx[,2]
# [1] 3 4
mx[1,2, drop = FALSE]
# [,1]
# [1,] 3
mx[1, ,drop = FALSE]
# [,1] [,2] [,3]
# [1,] 1 3 5
## PARTIAL MATCHING
px <- list(aardvark = 1:5)
px$a
# [1] 1 2 3 4 5
px[["a"]]
# NULL, because a does not equal aardvark exactly
px[["a", exact = FALSE]]
# [1] 1 2 3 4 5
## REMOVING NA Values
nax <- c(1, 2, NA, 4, NA, 5)
bad <- is.na(nax)
nax[!bad]
#[1] 1 2 4 5
nay <- c("a", "b", NA, "d", NA, "f")
good <- complete.cases(nax, nay)
good
# TRUE TRUE FALSE TRUE FALSE TRUE
nax[good]
# [1] 1 2 4 5
nay[good]
# [1] "a" "b" "d" "f"
airquality[1:6,]
# Ozone Solar.R Wind Temp Month Day
# 1 41 190 7.4 67 5 1
# 2 36 118 8.0 72 5 2
# 3 12 149 12.6 74 5 3
# 4 18 313 11.5 62 5 4
# 5 NA NA 14.3 56 5 5
# 6 28 NA 14.9 66 5 6
goodaq <- complete.cases(airquality)
airquality[goodaq,][1:6,]
# Ozone Solar.R Wind Temp Month Day
# 1 41 190 7.4 67 5 1
# 2 36 118 8.0 72 5 2
# 3 12 149 12.6 74 5 3
# 4 18 313 11.5 62 5 4
# 7 23 299 8.6 65 5 7
# 8 19 99 13.8 59 5 8
## VECTORIZED OPERATIONS
vox <- 1:4; voy <- 6:9
vox + voy
# [1] 7 9 11 13
vox > 2
# [1] FALSE FALSE TRUE TRUE
vox >= 2
#[1] FALSE TRUE TRUE TRUE
voy == 8
# [1] FALSE FALSE TRUE FALSE
vox * voy
# [1] 6 14 24 36
round(vox / voy, 2)
# [1] 0.17 0.29 0.38 0.44
# Vectorized Matrix Operations
vmox <- matrix(1:4, 2, 2); vmoy <- matrix(rep(10, 4), 2, 2)
vmox
# [,1] [,2]
# [1,] 1 3
# [2,] 2 4
vmoy
# [,1] [,2]
# [1,] 10 10
# [2,] 10 10
vmox * vmoy
# [,1] [,2]
# [1,] 10 30
# [2,] 20 40
round(vmox/vmoy, 2)
# [,1] [,2]
# [1,] 0.1 0.3
# [2,] 0.2 0.4
vmox %*% vmoy # True matrix multiplication
# [,1] [,2]
# [1,] 40 40
# [2,] 60 60
### WEEK 1 QUIZ
# Problems involving code are included for my practice purposes only. Please do not use this to cheat!
## PROBLEM 4. What is the class of x4?
x4 <- 4
class(x4)
# numeric
## PROBLEM 5.
x5 <- c(4, "a", TRUE)
class(x5)
# Character
## PROBLEM 6. What is produced by the expression rbind(x, y)?
x6 <- c(1,3,5); y6 <- c(3,2,10)
rbind(x6, y6)
# a matrix with two rows and 3 columns
# [,1] [,2] [,3]
# x6 1 3 5
# y6 3 2 10
## PROBLEM 8. What does x8[[2]] give me?
x8 <- list(2, "a", "b", TRUE)
x8[[2]]
# [1] "a"
# a character vector containing the letter "a"
# a character vector of length 1
## PROBLEM 9. What is produced by the expression x9 + y9?
x9 <- 1:4; y9 <- 2:3
z9 <- x9 + y9
class(z9)
# [1] 3 5 5 7
# an integer vector with the values 3, 5, 5, and 7
## PROBLEM 10. What R code causes a vector with all its elements greater than 10 to be equal to 4?
x10 <- c(17, 14, 4, 5, 13, 12, 10)
# x10[x10 > 10] <- 4
# x10[x10 >= 11] <- 4
## PROBLEM 11. What are the column names?
quiz1 <- read.csv("hw1_data.csv")
colnames(quiz1)
# [1] "Ozone" "Solar.R" "Wind" "Temp" "Month" "Day"
## PROBLEM 12. Extract the first two rows of the data frame and print them to the console.
head(quiz1, 2)
# Ozone Solar.R Wind Temp Month Day
# 1 41 190 7.4 67 5 1
# 2 36 118 8.0 72 5 2
## PROBLEM 13. How many observations are in this data frame?
nrow(quiz1)
# 153
## PROBLEM 14. Extract the last 2 rows of the data frame and print them to the console.
tail(quiz1, 2)
# Ozone Solar.R Wind Temp Month Day
# 152 18 131 8.0 76 9 29
# 153 20 223 11.5 68 9 30
## PROBLEM 15. What is the value of the Ozone in the 47th row?
quiz1[47, "Ozone"]
# [1] 21
## PROBLEM 16. How many missing values are in the Ozone column of this data frame?
sum(is.na(quiz1$Ozone))
# [1] 37
## PROBLEM 17. What is the mean of the Ozone column in this dataset? Exclude missing values
mean(quiz1$Ozone, na.rm = TRUE)
# [1] 42.12931
## PROBLEM 18.
mean(quiz1$Solar.R[quiz1$Ozone > 31 & quiz1$Temp > 90], na.rm = TRUE)
# [1] 212.8
## PROBLEM 19. What is the mean of Temp when the month is equal to 6?
mean(quiz1$Temp[quiz1$Month==6], na.rm=T)
#[1] 79.1
## PROBLEM 20. What was the maximum ozone value in the month of May?
max(quiz1$Ozone[quiz1$Month==5], na.rm=T)
#[1] 115
|
6cd7e61abbe2fe213f15de6bb424b731f4d393f4
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.networking/man/apigateway_get_rest_api.Rd
|
50b7e84efbbb6c73498a5b41dba817be01ed7c6c
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,129
|
rd
|
apigateway_get_rest_api.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigateway_operations.R
\name{apigateway_get_rest_api}
\alias{apigateway_get_rest_api}
\title{Lists the RestApi resource in the collection}
\usage{
apigateway_get_rest_api(restApiId)
}
\arguments{
\item{restApiId}{[required] [Required] The string identifier of the associated RestApi.}
}
\value{
A list with the following syntax:\preformatted{list(
id = "string",
name = "string",
description = "string",
createdDate = as.POSIXct(
"2015-01-01"
),
version = "string",
warnings = list(
"string"
),
binaryMediaTypes = list(
"string"
),
minimumCompressionSize = 123,
apiKeySource = "HEADER"|"AUTHORIZER",
endpointConfiguration = list(
types = list(
"REGIONAL"|"EDGE"|"PRIVATE"
),
vpcEndpointIds = list(
"string"
)
),
policy = "string",
tags = list(
"string"
),
disableExecuteApiEndpoint = TRUE|FALSE
)
}
}
\description{
Lists the RestApi resource in the collection.
}
\section{Request syntax}{
\preformatted{svc$get_rest_api(
restApiId = "string"
)
}
}
\keyword{internal}
|
9f22b105d9f2b7df4303c0853ec8308615909f9a
|
ac655728cfed40aacb3686b9a3fd2c26f8facdc0
|
/scripts/alternative_first_exons/differential_sitecount_analysis.tars.R
|
16b489194daddc51bdd0dc33ca4cfd6c87dbf32f
|
[] |
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
8ba092245e934eff8c5dd6eab3d265a35ccfca06
|
f1a6550aa3d703b4bb494066be1b647dfedcb51c
|
refs/heads/master
| 2020-09-20T12:29:01.164008
| 2020-08-07T07:49:46
| 2020-08-07T07:49:46
| 224,476,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,127
|
r
|
differential_sitecount_analysis.tars.R
|
# 2015-07-06
# Some bug with DHS peaks (need to filter low values)
# for now, let's focus on Tars
setwd("~/projects/tissue-specificity/")
library(dplyr)
library(ggplot2)
library(reshape2)
library(PMA)
source("scripts/functions/LoadSitecounts.R")
source("scripts/functions/LoadArrayRnaSeq.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/ReadListToVector.R")
source("scripts/functions/DifferentialSitecountsFunctions.R")
# Load --------------------------------------------------------------------
# N.dir <- "/home/yeung/projects/tissue-specificity/data/sitecounts/motevo_tars"
N.dir <- "/home/yeung/projects/tissue-specificity/data/sitecounts/motevo_tars_nokidneypeak"
suffix <- "filtered.100000.noN.tars.nokid.mat"
N <- LoadSitecountsEncodeAll(maindir = N.dir, tissues = c("Liver", "Kidney", "Cere", "Lung", "Heart", "Mus"),
suffix = suffix, with.ensemblid = FALSE, rename.tissues = FALSE) # merged by gene
dat.long <- LoadArrayRnaSeq()
load(file = "Robjs/dat.rhyth.relamp.pvalmin1e-5.pvalmax0.05.relampmax.0.1.meancutoff6.Robj", verbose = T)
N <- N %>%
group_by(gene, tissue) %>%
mutate(motevo.value.norm = motevo.value / sum(motevo.value))
dhs.tiss <- unique(N$tissue)
tfs <- GetTFs()
# Init exprs gene ---------------------------------------------------------
X.exprs <- dcast(data = subset(dat.rhyth.relamp, gene %in% tfs & tissue %in% dhs.tiss), formula = gene ~ tissue, value.var = "int.rnaseq")
rownames(X.exprs) <- X.exprs$gene
X.exprs$gene <- NULL
# Only tars ---------------------------------------------------------------
jgene <- "Tars"
subset(N, gene == jgene & motif == "RORA.p2")
subset(N, gene == jgene & motif == "HNF1A.p2")
subset(N, gene == jgene & motif == "HIC1.p2")
subset(N, gene == jgene & motif == "AHR_ARNT_ARNT2.p2")
N.sub <- subset(N, gene == jgene)
X.motif <- dcast(data = N.sub, formula = motif ~ tissue, value.var = "motevo.value")
rownames(X.motif) <- X.motif$motif
X.motif$motif <- NULL
# replace NA with 0
X.motif[is.na(X.motif)] <- 0
# center stuff
jscale <- FALSE
jcenter <- TRUE
X.exprs.scaled <- ScaleRemoveInfs(X.exprs)
X.motif.scaled <- ScaleRemoveInfs(X.motif)
p.motif <- prcomp(X.motif.scaled, center = TRUE, scale. = FALSE)
biplot(p.motif, main = paste(jgene, "Motif PCA"), cex = c(0.5, 1.6), pch = 20)
# Rotate by LIVER vector --------------------------------------------------
V <- p.motif$rotation[, c(1, 2)]
U <- p.motif$x[, c(1, 2)]
liver.vec <- V["Liver", ]
liver.vec.norm <- liver.vec / sqrt(sum(liver.vec ^ 2))
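# Project the PCA loadings (V) and scores (U) onto the unit-length Liver direction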
V.livproj <- V %*% liver.vec.norm
U.livproj <- U %*% liver.vec.norm
U.livproj <- U.livproj[order(U.livproj, decreasing = TRUE), ]
U.livproj <- U.livproj[which(abs(U.livproj) > 1)]
par(mar=c(10.1, 4.1, 4.1, 2.1))
barplot(U.livproj, names.arg = names(U.livproj), las = 2)
# Do CCA with penalties ---------------------------------------------------
jscale <- FALSE
jcenter <- TRUE
X.exprs.scaled <- ScaleRemoveInfs(X.exprs)
X.motif.scaled <- ScaleRemoveInfs(X.motif)
X.motif.exprs <- MatchColumns(X.motif.scaled, X.exprs.scaled)
perm.out <- CCA.permute(t(X.motif.exprs$X.motif), t(X.motif.exprs$X.exprs), typex="standard", typez="standard", standardize = F)
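# CCA.permute picks the sparsity penalties by permutation; the best values are reused below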
penaltyx <- perm.out$bestpenaltyx
penaltyz <- perm.out$bestpenaltyz
# penaltyx <- 1
# penaltyz <- 1
cca.out <- CCA(t(X.motif.exprs$X.motif), t(X.motif.exprs$X.exprs), typex="standard", typez="standard", K=2, penaltyx=penaltyx, penaltyz=penaltyz, standardize = F)
rownames(cca.out$u) <- rownames(X.motif.exprs$X.motif)
rownames(cca.out$v) <- rownames(X.motif.exprs$X.exprs)
# visualize with biplots
Xu.motif <- t(X.motif.exprs$X.motif) %*% cca.out$u
Xv.exprs <- t(X.motif.exprs$X.exprs) %*% cca.out$v
cor(Xu.motif, Xv.exprs)
biplot(cca.out$u[, 1:2], Xu.motif[, 1:2])
biplot(cca.out$v[, 1:2], Xv.exprs[, 1:2])
# Visualize component 1 by bar --------------------------------------------
par(mar=c(10.1, 4.1, 4.1, 2.1))
u.sorted <- cca.out$u[order(cca.out$u[, 1]), 1]
u.sorted <- u.sorted[which(abs(u.sorted) > 0.05)]
barplot(u.sorted, names.arg = names(u.sorted), las = 2)
|
688c0ba10ab87b30f169b9f98b976160d3d2da6e
|
f4fffe026383f8f681c8b2ef2e7b2ec0f8143688
|
/man/ggUnivServer.Rd
|
8e8d1adadc5525c60b2afae6226ba1c73a2d93a3
|
[] |
no_license
|
DavisVaughan/romic
|
436da67b077937d1c13af9701d39a00f083e6694
|
f3470f5cd42b6ee8322db3f10a1c254766a5dc3e
|
refs/heads/master
| 2023-05-08T05:44:49.460728
| 2021-05-18T13:10:03
| 2021-05-18T13:10:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 679
|
rd
|
ggUnivServer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module_gguniv.R
\name{ggUnivServer}
\alias{ggUnivServer}
\title{ggUnivariate Server}
\usage{
ggUnivServer(id, tomic, plot_table, return_brushed_points = FALSE)
}
\arguments{
\item{id}{An ID string that corresponds with the ID used to call the module's
UI function.}
\item{tomic}{Either a \code{tidy_omic} or \code{triple_omic} object}
\item{plot_table}{table containing the data to be plotted}
\item{return_brushed_points}{Return values selected on the plot}
}
\value{
a tomic_table if return_brushed_points is TRUE, and 0 otherwise.
}
\description{
Server components for the ggUnivariate module
}
|
78758c03994de7f0f0a225876af044a401aeed1f
|
5ed28449dc1bbb70cb2892d01bfe15bdc0cd2292
|
/tests/testthat/test_S3.R
|
184ab90c4038f4c92f51f31ad8d342b361135a6b
|
[] |
no_license
|
MatthewHeun/matsbyname
|
2693334d7c02f425dc50b695d40e08353d41d34a
|
fa060dbfab43d5e0dc65f793a5ef654f72781805
|
refs/heads/master
| 2023-08-03T10:09:57.674953
| 2023-05-23T18:10:03
| 2023-05-23T18:10:03
| 80,359,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,123
|
r
|
test_S3.R
|
###########################################################
# context("Creating a mat_byname")
###########################################################
# test_that("mat_byname works as expected", {
# expect_error(mat_byname(NULL), "'data' must be of a vector type, was 'NULL'")
# expect_true(is.na(mat_byname(NA)))
#   expect_true(inherits(mat_byname(matrix(1:2)), c("mat_byname","matrix")))
# mbn <- mat_byname(c("a", "b"), nrow = 2, ncol = 1)
# expect_equal(mbn[1,1], "a")
# expect_equal(mbn[2,1], "b")
# expect_true(is.mat_byname(mbn))
# expect_false(is.mat_byname(matrix(1:2)))
# expect_true(is.mat_byname(as.mat_byname(matrix(1:2))))
# })
###########################################################
# context("Adding mat_bynames")
###########################################################
# test_that("adding two mat_bynames with '+' works as expected", {
# # one <- as.mat_byname(1)
# # two <- as.mat_byname(2)
# # expect_equal(one + two, 3)
#
# m1 <- matrix(c(1:4),
# nrow = 2, ncol = 2, byrow = TRUE,
# dimnames = list(c("r1", "r2"), c("c1", "c2"))) %>%
# setrowtype("row") %>% setcoltype("col")
# m2 <- matrix(c(1:4),
# nrow = 2, ncol = 2, byrow = TRUE,
# dimnames = list(c("r2", "r1"), c("c2", "c1"))) %>%
# setrowtype("row") %>% setcoltype("col")
# # Nonsensical, as row and column names are not respected
# expect_equal(m1 + m2,
# matrix(c(2, 4,
# 6, 8),
# nrow = 2, ncol = 2, byrow = TRUE,
# dimnames = list(c("r1", "r2"), c("c1", "c2"))) %>%
# setrowtype("row") %>% setcoltype("col"))
# mbn1 <- as.mat_byname(m1)
# mbn2 <- as.mat_byname(m2)
# expected_mbn <- matrix(5, nrow = 2, ncol = 2,
# dimnames = list(c("r1", "r2"), c("c1", "c2"))) %>%
# setrowtype("row") %>% setcoltype("col")
# expect_equal(sum_byname(m1, m2), expected_mbn)
# # expect_equal(mbn1 + mbn2, expected_mbn)
# # expect_error(mbn1 + m2, "When adding mat_bynames with")
# })
|
c2a6df9fbb45843944868575e248b29e9de96650
|
2e6247ef617fd7af386729e4fcbe542b6cd492db
|
/FinalTrees.R
|
5aed5a9cf4c132dcdd4007dc210bce645cf845fc
|
[] |
no_license
|
eswar3/Decision_Trees
|
d07ef404c4159af53fe35e7e0c31560772a67832
|
b355881548e1f7a2b2593c24f4f690deacfabdba
|
refs/heads/master
| 2020-03-16T06:52:38.426822
| 2018-05-08T06:29:53
| 2018-05-08T06:29:53
| 132,564,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
FinalTrees.R
|
rm(list=ls(all=TRUE))
par(mfrow=c(1,1))
setwd("F:/AA_Clases/DecisionTrees/CSE 7405c (2)/CSE 7405c/CSE 7405c (2)/CSE 7405c/Day_03_DT_20150612")
source("05largeData.R")
write.csv(train, "train.csv")
library(C50)
dtC50= C5.0(loan ~ .,
data = train[],
rules=TRUE)
summary(dtC50)
C5imp(dtC50, pct=TRUE)
a=table(train$loan,
predict(dtC50,
newdata=train,
type="class"))
rcTrain=(a[2,2])/(a[2,1]+a[2,2])*100
a=table(test$loan, predict(dtC50, newdata=test, type="class"))
rcTest=(a[2,2])/(a[2,1]+a[2,2])*100
rm(a)
#based on c5 importance
dtC50= C5.0(loan ~ inc+infoReq+edu+family+cc+usage+online,
data = train,
rules=TRUE)
summary(dtC50)
#Smote model
dtC50= C5.0(loan ~ .,
data = trainS,
rules=TRUE)
summary(dtC50)
C5imp(dtC50, pct=TRUE)
a=table(trainS$loan,
predict(dtC50,
newdata=trainS,
type="class"))
rcTrain=(a[2,2])/(a[2,1]+a[2,2])*100
a=table(test$loan, predict(dtC50, newdata=test, type="class"))
rcTest=(a[2,2])/(a[2,1]+a[2,2])*100
#Experiment for best results
#mortgage equalfreq
#ccavg equalfreq
#ccavg equalfreq, 5
#All 10 bins
#All 10 bins equal width
a=table(eval$loan, predict(dtC50,
newdata=eval,
type="class"))
rcEval=(a[2,2])/(a[2,1]+a[2,2])*100
cat("Recall in Training", rcTrain, '\n',
"Recall in Testing", rcTest, '\n',
"Recall in Evaluation", rcEval)
#Test by increasing the number of bins in inc and ccavg to 10
#Test by changing the bin to euqalwidth in inc and ccavg
rm(a,rcEval,rcTest,rcTrain)
|
bb17f6d7559e6ceb12e2b15dcacdcb5945a7eecf
|
7560aee57c74473d020cccb48175ba8009135e06
|
/R/HImeanind.R
|
eac979f6d6f5b7155c656f820990147eca788f18
|
[] |
no_license
|
cran/Laterality
|
fae0a5a1f161eb17c7a69418c7579bca0d6554b1
|
735a0df83f44ba3fecedff41328037324bfa5e8c
|
refs/heads/master
| 2022-06-22T13:11:32.480000
| 2022-06-07T03:00:02
| 2022-06-07T03:00:02
| 17,680,309
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
r
|
HImeanind.R
|
HImeanind <- function (data, catch="Food", hand="Hand", indiv = "Indiv", RightHand = "R", LeftHand = "L"
, col = 2:((length(levels(data[[indiv]])))+1), ylab = "Mean handedness index"
, main="Hand preference regarding to the individuals", legend.text = FALSE, beside = TRUE
, ylim = c(-1,1), names.arg=levels(data[[indiv]]), legendlocation=FALSE, standarderror=TRUE
, cex=1, pt.cex=2, pch=15, horiz=FALSE, savetable = FALSE, file = "HImeanPerIndiv.csv")
{
for (i in 1:nlevels(data[[catch]])) {
seldata<- data[data[[catch]]==levels(data[[catch]])[i],]
Tab<- table(seldata[[indiv]], seldata[[hand]])
NewTab<-as.data.frame.matrix(Tab)
ifelse (is.null(NewTab[[RightHand]]) == TRUE, HITab<-(-NewTab[[LeftHand]])/NewTab[[LeftHand]], ifelse (is.null(NewTab[[LeftHand]]) == TRUE, HITab<-NewTab[[RightHand]]/NewTab[[RightHand]], HITab<-(NewTab[[RightHand]]-NewTab[[LeftHand]])/(NewTab[[RightHand]]+NewTab[[LeftHand]]))) #Handedness index
if("HImperIndiv" %in% ls() == FALSE) {HImperIndiv<-c()} else {}
HImperIndiv<-cbind(HImperIndiv,HITab)
}
colnames(HImperIndiv)<-levels(data[[catch]])
rownames(HImperIndiv)<-levels(data[[indiv]])
HImperIndiv
HImeanPerIndiv<-rowMeans(HImperIndiv, na.rm=TRUE) #mean HI
graph<-as.matrix(HImeanPerIndiv)
graphHImean<-barplot(graph, beside = beside, ylab=ylab, main=main, legend.text = legend.text, col=col, ylim=ylim, names.arg=names.arg)
#Standard error bars
if (standarderror == TRUE) {
standarddeviations<-apply(HImperIndiv,1,sd,na.rm=TRUE)
standarderror <- standarddeviations/sqrt(nrow(HImperIndiv))
arrows(graphHImean, HImeanPerIndiv + standarderror, graphHImean, HImeanPerIndiv - standarderror, angle = 90, code=3, length=0.1)
} else {
}
#Legend
if (legendlocation == TRUE) {
message("Click where you want to place the legend")
legendplace <- locator(1)
legend(legendplace$x,legendplace$y,as.vector(levels(data[[indiv]])),col=col,bty="n",pch=pch, cex=cex, pt.cex=pt.cex, horiz=horiz)
} else {
}
HImeanIndiv<-as.data.frame(HImeanPerIndiv)
if (savetable == "csv") {write.csv(HImeanPerIndiv, file = file)} else{}
if (savetable == "csv2") {write.csv2(HImeanPerIndiv, file = file)} else {}
HImeanIndiv
}
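# Usage sketch (hypothetical data frame 'lat_df' with factor columns Food,
# Hand and Indiv, hands coded "R"/"L"):
# HImeanind(lat_df, catch = "Food", hand = "Hand", indiv = "Indiv")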
|
6820dd74aff256be569d393c466e2cb7b4bc894b
|
095a3c16a98071e940cca4eea0960e4719302407
|
/R/cBernEx.R
|
4782df7dd90426b25715a11c5ff096700e8ad678
|
[] |
no_license
|
cran/CorBin
|
ac101e7a5f1738777ff55034b6eac9825a28c042
|
1cf63d450f12cff000e49474c825d227e00fab70
|
refs/heads/master
| 2021-07-09T14:58:50.554737
| 2020-11-14T08:20:02
| 2020-11-14T08:20:02
| 216,161,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,482
|
r
|
cBernEx.R
|
#' @title Generate binary data with exchangeable correlated structure
#' @description
#' Equivalent to cBern(n, p, rho, type="exchange")
#'
#' @param n number of observations
#' @param p the vector of marginal probabilities with dimension m
#' @param rho a non-negative value indicating the shared correlation coefficient
#' @return an n*m matrix of binary data, where m = length(p)
#' @examples
#' X <- cBernEx(10, rep(0.5,3), 0.5)
#' @export
#'
#'
cBernEx <- function(n, p, rho){
#Generate Correlated Bernoulli Distribution
if(!is.atomic(p) || typeof(p)!='double') {
warning("Invalid input of p")
return(NaN)
}
if(sum((p<=0) | (p>=1))!=0) {
warning("Invalid input of p")
return(NaN)
}
m<-length(p)
minP<-min(p)
maxP<-max(p)
rhoLimit<-sqrt((minP/(1-minP))/(maxP/(1-maxP)))
rhoLimit1 <- floor(rhoLimit*10000)/10000
if((rho<0) || (rho>rhoLimit)){
message(paste('The non-negative Prentice constraint for rho is [',0,',',rhoLimit1,']', sep=''))
warning('rho is out-of-range\n')
return(NaN)
}
Pc<-sqrt(minP*maxP)/(sqrt(minP*maxP)+sqrt((1-minP)*(1-maxP)))
Pa<-sqrt(rho*p*(1-p)/(Pc*(1-Pc)))
Pb<-(p-Pa*Pc)/(1-Pa)
if(rho==rhoLimit){
Pb[which.max(p)] <- 1
Pb[which.min(p)] <- 0
}
X<-replicate(n, {
U<-rbinom(m, 1, Pa)
Y<-rbinom(m, 1, Pb)
Z<-rbinom(1, 1, Pc)
(1-U)*Y+U*Z
})
X<-t(X)
return(X)
}
# cBernEx(10,rep(0.5,5),0.5)
#cBernEx(10,c(0.3,0.5,0.7),0.4285)
|
c5fb013c53e8c18f0cdacd71e6c0b6bf3d708145
|
f0c85baaaf0b9d2d2c725327c759fdb9ff58aad1
|
/05b_Host_State_Variables_-_Economic_conditions.R
|
84a3c9cef7659723d250540dd44983c3d0b8c587
|
[] |
no_license
|
hrdii/post_conflict_refugee_returns
|
4ead6d7ea2997cd8b88230fec76b8e4e9c3e0a9d
|
757cbde10d49890901fad28db4f590dc6ae9635d
|
refs/heads/main
| 2023-09-05T23:32:58.289994
| 2021-11-04T07:56:18
| 2021-11-04T07:56:18
| 424,493,243
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,158
|
r
|
05b_Host_State_Variables_-_Economic_conditions.R
|
##########----------##########----------##########----------##########----------
##########---------- HEADER
##### meta-information
## Author: Hardika Dayalani(dayalani@rand.org)
## Creation: 2020-02-09 for Post-Conflict Refugee Returns Project
## Description: Adds Host state variables to conflict cases
## Economic conditions
## Distribution of refugees at the end of conflict living across host
## countries with different income levels. Based on GNI per capita and income
## classification defined by World Bank.
##### environment set up
rm(list = ls())
## Load Libraries
library(data.table)
## Load Functions
source(file = "00a_Custom_Functions.R")
## Load Conflict Cases
load("../Intermediate/Conflict case with refugee origin and host state information.RData")
## Load WB-UNHCR Country Name Lookup Dataframe
load("../Intermediate/WB-UNHCR Country Name Lookup.RData")
## Historical GNI Classification Thresholds
load("../Intermediate/GNI Per Capita.RData")
## Load Refugee Information
load("../Intermediate/Refugee Population.RData")
host_countries <- names(refugee_df)
host_countries <- setdiff(host_countries, c("year", "source", "total"))
inc_df <- setDT(inc_df)
## Replace Country names using Lookup Dataframe
inc_df[lookup_df, on=.(country = wb_names), country := i.unhcr_names ]
rm(lookup_df)
## Reshape inc_df
inc_df <- melt(inc_df, id.vars = c("country"),
measure.vars = as.character(1989:2018))
names(inc_df) <- c("country", "year", "inc_group")
inc_df$year <- as.numeric(as.character(inc_df$year))
##Imputing specific missing values
inc_df$inc_group[grepl("Serbia", inc_df$country) & is.na(inc_df$inc_group)] <- "LM"
##### Calculate % refugee
## Write a function to calculate % refugees
PropInc <- function(cas, inc_group, r_df = refugee_df, i_df = inc_df){
## Source Country
s <- gsub('(.*) ([0-9]{4})','\\1',cas)
## Year0
y <- as.numeric(gsub('(.*) ([0-9]{4})','\\2',cas))
## Subset Refugee Data to Year0
temp_df <- r_df[year == y, ]
## Subset to host that are above the threshold
hosts <- SubsetHosts(country = s, df = temp_df)
## Subset income classification to year0
i_df <- i_df[year == y, c("country", "inc_group")]
## Add Income classification for all hosts
hosts <- merge(hosts, i_df, by.x=c("country"), by.y=c("country"), all.x = TRUE)
hosts$inc_group <- hosts$inc_group %in% inc_group
## Calculate the proportion of source country refugees that live in host countries in a given income group
hosts$inc_group <- hosts$inc_group * hosts$pop / sum(hosts$pop, na.rm = TRUE)
## Return proportion
return(sum(hosts$inc_group, na.rm = TRUE))
}
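## Usage sketch (hypothetical case label; cases follow "<country> <year0>"):
## PropInc("Afghanistan 2001", inc_group = "L")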
## Calculate % refugee living in low income countries
agg_df$pinc_low <- sapply(agg_df$case, FUN = PropInc, inc_group = "L")
## Calculate % refugee living in middle income countries
agg_df$pinc_middle <- sapply(agg_df$case, FUN = PropInc, inc_group = c("LM", "UM"))
## Calculate % refugee living in high income PropInc
agg_df$pinc_high <- sapply(agg_df$case, FUN = PropInc, inc_group = "H")
## Calculate % refugee living in countries with recorded income
agg_df$pinc <- rowSums(agg_df[, c("pinc_low",
"pinc_middle",
"pinc_high")], na.rm = TRUE)
## Calculate % refugee living in countries without recorded income
agg_df$pinc <- pmax({1 - agg_df$pinc}, 0)
## In some cases the proportions of refugees living in host countries with
## recorded income data do not add up to one. The only explanation is that
## refugees are living in countries that are war-torn themselves. These
## countries are unlikely to be high-income, so the residual proportion is
## added to the low-income variable.
agg_df$pinc_low <- agg_df$pinc_low + agg_df$pinc
## Dropping the pinc variable
agg_df <- agg_df[, !{names(agg_df) %in% "pinc"}]
summary(agg_df$pinc_low)
summary(agg_df$pinc_middle)
summary(agg_df$pinc_high)
## Save File
save(agg_df,
file = "../Intermediate/Conflict case with refugee origin and host state information.RData")
print("05b")
|
887b3efd945a8bcac5cf2f4100e9bcb2f97f0870
|
46805a3bbf291e220caa99ec7ce274321b08f736
|
/cachematrix.R
|
fa92145d62a8c427a994b06281b4f41420cc8f63
|
[] |
no_license
|
Carmena1/ProgrammingAssignment2
|
2fb921e85f5f58082f447435e9f144c6ac3b075c
|
99fb5957eab5644c7fcfebdd1f9ef964379ddd66
|
refs/heads/master
| 2022-04-18T02:49:17.390724
| 2020-04-19T12:10:18
| 2020-04-19T12:10:18
| 256,985,644
| 0
| 0
| null | 2020-04-19T11:48:16
| 2020-04-19T11:48:15
| null |
UTF-8
|
R
| false
| false
| 858
|
r
|
cachematrix.R
|
## We need to write two functions that will cache the inverse of a matrix x
## we will use the example that it was provided
## makeCacheMatrix - creates a matrix obj where the cache is the inverse of the input
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set<-function(y){
x<<-y
inv<<-NULL}
get<-function(){x}
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() {inv}
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve - computes the inverse and returns it, if the inverse was already computed,
##then the function should return the inverse from the cache
cacheSolve <- function(x, ...) {
inv<- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)}
m<- x$get()
inv <- solve(m,...)
x$setInverse(inv)
inv
}
|
693c8844feece62c908ea87a5d82e11e055136a3
|
f11c255013ef0b6c0d6b2a5256409df78c63a8d5
|
/Tutorial2.R
|
c8d71f94b6773680d5119f276b08b18c28b1d318
|
[] |
no_license
|
JennyBloom/Titanic---Machine-Learning-from-Disaster
|
2893fa7cd7f64164a7aedfea112b8fa59c4248fa
|
41e412ce63c4d2ba709aa86e371dcb2913f5d6aa
|
refs/heads/master
| 2020-03-25T22:35:09.323700
| 2018-08-10T03:16:56
| 2018-08-10T03:16:56
| 144,230,748
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,106
|
r
|
Tutorial2.R
|
# Jenny Bloom 12/27/2017
# Tutorial 2: From http://trevorstephens.com/kaggle-titanic-tutorial/r-part-2-the-gender-class-model/
# Gender-Class Model: Women and Children First
#Set Working Directory, Import and View Datasets
setwd("~/Desktop/Titanic") #Set Working Directory
train <- read.csv("~/Desktop/Titanic/train.csv") #Import Training Dataframe
View(train)
test <- read.csv("~/Desktop/Titanic/test.csv") #Import Test Dataframes
View(test)
# Obtain a summary of sex in the training dataset
summary(train$Sex) #314 passengers are female, 577 passengers are male
# Determine row-wise proportion of each sex that survived, as separate groups.
# Give proportions in the 1st dimension which stands for the rows (using “2” instead would give you column proportions)
# NOTE: prop.table command by default takes each entry in the table and divides by the total number of passengers
# Prop.table expresses table entries as fraction of marginal table, where 1 is an index to generate margin for rows
prop.table(table(train$Sex, train$Survived), 1) #Females survived: 74.2%; males survived: 18.9%
test$Survived <- 0 #Create Survived column and assign all values 0 for 'everyone does not survive/ everyone dies'
test$Survived[test$Sex == 'female'] <- 1 #Assign a value of 1 ('survived') to passengers whose Sex is female
# Build prediction submission for Kaggle.com - all males perish, females saved
submit <- data.frame(PassengerID = test$PassengerId, Survived = test$Survived) #Build the submission dataframe of PassengerID and predicted Survived
write.csv(submit, file = 'allmalesperish.csv', row.names = FALSE) #Write submit dataframe to csv for importing to Kaggle.com
# Look into Age as a Predictor of Survival
summary(train$Age) #NAs assumed to be mean age
train$Child <- 0 #Create column 'child' and fill with zero values
train$Child[train$Age < 18] <- 1 #Populate column Child with 1 values where age of passenger (row) is less than 18
# Use Aggregate: Splits the data into subsets, computes summary statistics for each, and returns the result in a convenient form
# Survived: Target variable
# Child & Sex: Subset variables
# Dataframe: train.csv
# Function: Sum, applied to subsets
aggregate(Survived ~ Child + Sex, data = train, FUN = sum)
# The command above subsets the whole dataframe over the different possible combinations of the age and gender variables
# and applies the sum function to the Survived vector for each of these subsets.
# As our target variable is coded as a 1 for survived, and 0 for not, the result of summing is the number of survivors.
# Find the total number of people in each subset:
aggregate(Survived ~ Child + Sex, data = train, FUN = length)
# This provides the total for each group of passengers.
# Find the proportion of each group that survived.
# Create a function that takes the subset vector as input and applies both sum and length functions
# and divides to provide a proportion
aggregate(Survived ~ Child + Sex, data = train, FUN = function(x) {sum(x)/length(x)})
# These data show that most females survive and very few males survive.
# Apply more variables such as class and ticket price.
# Build new columns
train$Fare2 <- '30+'
train$Fare2[train$Fare < 30 & train$Fare >= 20] <- '20-30'
train$Fare2[train$Fare < 20 & train$Fare >= 10] <- '10-20'
train$Fare2[train$Fare < 10] <- '<10'
# Check ticket price and class alongside survival rate
aggregate(Survived ~ Fare2 + Pclass + Sex, data = train, FUN = function(x) {sum(x)/length(x)})
# These data show males survived poorly regardless of class, and females in 3rd class who paid +$20 for tickets survived poorly.
# Check prediction on test dataset based on training data insights on class and ticket cost plus sex.
test$Survived <- 0
test$Survived[test$Sex == 'female'] <- 1
test$Survived[test$Sex == 'female' & test$Pclass == 3 & test$Fare >= 20] <- 0
# Create new csv for Kaggle.com submission highlighting women and children survivability
submit <- data.frame(PassengerID = test$PassengerId, Survived = test$Survived)
write.csv(submit, file = 'womenchildren.csv', row.names = FALSE)
|
c7b5ddb0f5d4d9c2b3b840e795adbae0fa35c5aa
|
51cb0c55f7eaeee3023aa8b39b8ce68f5c0a97f7
|
/man/MaxProRunOrder.Rd
|
233558555cbf64a44f7ff71bb5d7edc9b27f338a
|
[] |
no_license
|
cran/MaxPro
|
b87b72a213959a310bab8d4f8eecfeb516564010
|
f45921ddf771ad2987f11e69a271c728829511c3
|
refs/heads/master
| 2021-06-04T05:24:35.390672
| 2018-09-28T03:40:03
| 2018-09-28T03:40:03
| 29,903,205
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,090
|
rd
|
MaxProRunOrder.Rd
|
\name{MaxProRunOrder}
\alias{MaxProRunOrder}
\title{
Find the Optimal Sequential Order to Run a Given Experimental Design
}
\description{
Given a computer experimental design matrix, this function searches for an optimal run (row) order based on the maximum projection (MaxPro) criterion. This optimal order enables the given design to be run in a sequential manner: when terminated at any step, the previous design points form a nearly optimal subset based on the MaxPro criterion.
}
\usage{
MaxProRunOrder(Design,p_nom=0,initial_row=1)
}
\arguments{
\item{Design}{
The design matrix, where each row is an experimental run and each column is a factor. The rightmost p_nom columns correspond to the p_nom nominal factors, and the columns on the left are for continuous factors and discrete numeric factors. The ordinal factors, if any, should be pre-converted into discrete numeric factors through the scoring method (see, e.g., Wu and Hamada 2009, Section 14.10). All columns of the continuous and discrete numeric factors should be standardized into the unit range of [0,1].
}
\item{p_nom}{
Optional, default is 0. The number of nominal factors
}
\item{initial_row}{
Optional, default is 1. The vector specifying the row number of each design point in the given design matrix that should be run at first or have already been run.
}
}
\details{
This function utilizes a greedy search algorithm to find the optimal row order to run the given experimental design based on the MaxPro criterion.
}
\value{
The value returned from the function is a list containing the following components:
\item{Design}{The design matrix in optimal run (row) order. The run sequence ID is added as the first column}
\item{measure}{The MaxPro criterion measure of the given design}
\item{time_rec}{Time to complete the search}
}
\references{
Joseph, V. R., Gul, E., and Ba, S. (2015) "Maximum Projection Designs for Computer Experiments," \emph{Biometrika}, 102, 371-380.
Joseph, V. R. (2016) "Rejoinder," \emph{Quality Engineering}, 28, 42-44.
Joseph, V. R., Gul, E., and Ba, S. (2018) "Designing Computer Experiments with Multiple Types of Factors: The MaxPro Approach," \emph{Journal of Quality Technology}, to appear.
Wu, C. F. J., and Hamada, M. (2009), \emph{Experiments: Planning, Analysis, and Parameter Design Optimization, 2nd Edition}, New York: Wiley.
}
\author{
Shan Ba <shanbatr@gmail.com> and V. Roshan Joseph <roshan@isye.gatech.edu>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{MaxProLHD}}, \code{\link{MaxProQQ}}, \code{\link{MaxProAugment}}
}
\examples{
D0=MaxProLHD(25,2)$Design
#Assume the first two rows of the design have already been executed
#Find the optimal run orders
D=MaxProRunOrder(D0,p_nom=0,initial_row=c(1,2))$Design
plot(D[,2],D[,3],xlim=c(0,1),ylim=c(0,1),type="n",
xlab=expression(x[1]),ylab=expression(x[2]),cex.lab=1.5)
text(D[,2],D[,3],labels=D[,1],col='red')
}
\keyword{ Design of Experiments }
\keyword{ Computer Experiments }
|
a74ae28ded39678af71e349d602022f243c5de8e
|
3e5c7e1e5f36cf6a4004e9e38ecf64e676ae36d5
|
/nla2012aggregate.R
|
44bbe0de44321a9e1a9a5ace6450f3365c2b9cd6
|
[] |
no_license
|
MFEh2o/TPchloro
|
3e63144bb32ffb28b2bb5a1cb9d4698aeeff14c8
|
1abf0618f932543aac56c318208a93394c7ec74b
|
refs/heads/main
| 2023-04-10T22:38:57.159479
| 2022-03-31T01:46:12
| 2022-03-31T01:46:12
| 413,883,323
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,453
|
r
|
nla2012aggregate.R
|
setwd("~/Downloads/")
## csv's downloaded from https://www.epa.gov/national-aquatic-resource-surveys/data-national-aquatic-resource-surveys
## 2022-03-29
wc=read.csv("nla2012_waterchem_wide.csv",header=TRUE)
si=read.csv("nla2012_wide_siteinfo_08232016.csv",header=TRUE)
kv=read.csv("nla12_keyvariables_data.csv",header=TRUE)
chla=read.csv("nla2012_chla_wide.csv",header=TRUE)
range(wc$PTL_RESULT)
range(kv$PTL_RESULT)
unique(wc$PTL_UNITS)
range(wc$DOC_RESULT)
unique(wc$DOC_UNITS)
range(si$AREA_HA)
range(kv$CHLX_RESULT,na.rm=TRUE)
unique(kv$CHLX_UNITS)
range(chla$CHLX_RESULT,na.rm=TRUE)
unique(chla$CHLX_UNITS)
sites=unique(kv$SITE_ID)
nla=data.frame(SITE_ID=sites,chla=NA,DOC=NA,TP=NA,depth=NA,area=NA)
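# For each site, average the key variables across all matching UIDs/visits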
for(i in 1:nrow(nla)){
curKV=kv[kv$SITE_ID%in%nla$SITE_ID[i],]
curWC=wc[wc$UID%in%curKV$UID,]
curSI=si[si$UID%in%curKV$UID,]
nla$chla[i]=mean(curKV$CHLX_RESULT,na.rm=TRUE)
nla$DOC[i]=mean(curWC$DOC_RESULT,na.rm=TRUE)
nla$TP[i]=mean(curKV$PTL_RESULT,na.rm=TRUE)
nla$depth[i]=mean(curKV$INDEX_SITE_DEPTH,na.rm=TRUE)
nla$area[i]=mean(curSI$AREA_HA,na.rm=TRUE)*1e4
}
write.csv(nla,"nla2012aggregated_2022-03-29.csv",row.names=FALSE)
###### also generating file with data from NLA 2007 and NSRA 2013-14 used for distributions of forcings for NLA model comparison
NLAiso=read.csv("nla2007_isotopes_wide.csv",header=TRUE)
NLAiso$RT=NLAiso$RT*365 # days
NLAiso=NLAiso[NLAiso$RT>0,] # remove 5 with RT=0
NLAforcingDistributions=data.frame(origin=rep("nla2007_isotopes",nrow(NLAiso)),quantity=rep("RT_days",nrow(NLAiso)),value=NLAiso$RT)
EPAstream=read.csv("nrsa1314_widechem_04232019.csv",header=TRUE)
EPAstream=EPAstream[EPAstream$PTL_RESULT>0,] # remove 5 measures that equal 0
NLAforcingDistributions=rbind(NLAforcingDistributions,data.frame(origin=rep("nrsa1314_chem",nrow(EPAstream)),
quantity=rep("PTL_mgPm3",nrow(EPAstream)),
value=EPAstream$PTL_RESULT))
NLAforcingDistributions=rbind(NLAforcingDistributions,data.frame(origin=rep("nrsa1314_chem",nrow(EPAstream)),
quantity=rep("DOC_gCm3",nrow(EPAstream)),
value=EPAstream$DOC_RESULT))
write.csv(NLAforcingDistributions,"NLAfforcingDistributions.csv",row.names=FALSE)
|
74930f9cc133665b42cff1f09e0c96e1682531f6
|
d5a34c7f1e6cedbd3287d287f5cd0041c0dad26a
|
/man/make_recacher.Rd
|
043535cbeddf522477d886bd930ba656721257a8
|
[] |
no_license
|
arendsee/rmonad
|
9da470e324a2e9923f4d6874f76b55c9b067b7ae
|
58370cf26dc9ec18a9d34c0cc2a8facc43f77a88
|
refs/heads/master
| 2021-06-01T18:26:45.713627
| 2020-12-14T21:41:46
| 2020-12-14T21:41:46
| 95,687,512
| 76
| 4
| null | 2018-01-01T23:06:45
| 2017-06-28T16:01:55
|
R
|
UTF-8
|
R
| false
| true
| 1,199
|
rd
|
make_recacher.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cache.R
\name{make_recacher}
\alias{make_recacher}
\title{Make a function that takes an Rmonad and recaches it}
\usage{
make_recacher(cacher, preserve = TRUE)
}
\arguments{
\item{cacher}{A function of a data value}
\item{preserve}{logical Should the cached value be preserved across bind operations?}
}
\value{
A function that swaps the cache function of an Rmonad
}
\description{
Make a function that takes an Rmonad and recaches it
}
\examples{
\dontrun{
recacher <- make_recacher(make_local_cacher())
m <- iris \%>>\% summary \%>\% recacher
# load the data from a local file
.single_value(m)
recacher <- make_recacher(memory_cache)
m <- iris \%>>\% summary \%>\% recacher
# load the data from memory
.single_value(m)
}
add1 <- function(x) x+1
add2 <- function(x) x+2
add3 <- function(x) x+3
cc <- make_recacher(make_local_cacher())
3 \%>>\% add1 \%>\% cc \%>>\% add2 \%>>\% add3 -> m
m
}
\seealso{
Other cache:
\code{\link{clear_cache}()},
\code{\link{fail_cache}()},
\code{\link{make_cacher}()},
\code{\link{memory_cache}()},
\code{\link{no_cache}()},
\code{\link{void_cache}()}
}
\concept{cache}
|
23ed07ddcb05bd65b2243551951a9f64ff805a8e
|
e4c30ee61142928b4b5c703d77731f9c432ef7cb
|
/R/binomial.R
|
88c1c6e9ab82564fbccb436e5275305ee1f3cc13
|
[] |
no_license
|
spockoyno/assurance
|
13847056cb503363069b5b2f10e17cf69d0f9c5e
|
912d7c0ad311eecae134b85db8927f5915c01cf0
|
refs/heads/master
| 2020-06-21T07:25:18.669805
| 2015-03-31T09:56:03
| 2015-03-31T09:56:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,410
|
r
|
binomial.R
|
#' @include assurance.R
NULL
setClass("binomialInitialData",
representation(grp1.count="numeric",
grp2.count="numeric",
size="twoWayStudySize"),
validity=function(object) {
grp1.count <- object@grp1.count
grp2.count <- object@grp2.count
size <- object@size
if (!(is.wholenumber(grp1.count) &&
is.positive(grp1.count))) {
"grp1.count must be a positive whole number"
} else if (!(is.wholenumber(grp2.count) &&
is.positive(grp2.count))) {
"grp2.count must be a positive whole number"
} else if (grp1.count > size@grp1) {
"grp1.count must be <= grp1.size"
} else if (grp2.count > size@grp2) {
"grp2.count must be <= grp2.size"
} else {
TRUE
}
})
setClass("binomialProbabilities",
representation(grp1.prob="numeric",
grp2.prob="numeric"),
validity=function(object) {
p1 <- object@grp1.prob
p2 <- object@grp2.prob
if (length(p1) != length(p2)) {
"grp1.prob and grp2.prob must have same lengths"
} else if (!is.probability(p1)) {
"grp1.prob must be in [0, 1]"
} else if (!is.probability(p2)) {
"grp2.prob must be in [0, 1]"
} else {
TRUE
}
})
setMethod("length",
signature(x="binomialProbabilities"),
function(x) {
length(x@grp1.prob)
})
#' @export new.binomial
new.binomial <- function(x1, m1, x2, m2) {
new("binomialInitialData",
grp1.count=x1,
grp2.count=x2,
size=study.size(grp1.size=m1,
grp2.size=m2))
}
beta.sample <- function(nsim, event.count, group.size) {
# sample from a beta distribution (the posterior under
# Jeffreys' prior)
#
# Args:
# nsim : number of samples
# event.count : count of events observed
# group.size : in a group of given size
rbeta(nsim, event.count + 0.5, group.size - event.count + 0.5)
}
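# Illustrative sketch (added; not part of the original source): with 12 events
# in a group of 40, draws come from Beta(12.5, 28.5), the Jeffreys posterior.
# draws <- beta.sample(1000, event.count = 12, group.size = 40)
# mean(draws) # close to the posterior mean (12 + 0.5) / (40 + 1) ~ 0.30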
setMethod("samplePosterior",
signature(earlyStudy="binomialInitialData", nsim="numeric"),
function(earlyStudy, nsim) {
# Samples the event probabilities in two groups
#
# Args:
# x : data from the initial study
# nsim : the number of simulations to do
#
#
# Returns:
# A binomialProbabilities object giving the event probabilities
# in each group.
grp1.prob <- beta.sample(nsim, earlyStudy@grp1.count,
earlyStudy@size@grp1)
grp2.prob <- beta.sample(nsim, earlyStudy@grp2.count,
earlyStudy@size@grp2)
new("binomialProbabilities",
grp1.prob=grp1.prob, grp2.prob=grp2.prob)
})
setMethod("treatmentEffect",
signature(posteriorSample="binomialProbabilities"),
function(posteriorSample) {
posteriorSample@grp1.prob - posteriorSample@grp2.prob
})
setClass("binomialStudy",
contains="twoArm",
representation(direction="integer",
testFunction="function"),
validity=function(object) {
dirn <- object@direction
if (length(dirn) != 1L) {
"direction must be a scalar"
} else if (!(dirn %in% c(-1L, 1L))) {
"direction must be in {-1, 1}"
} else {
TRUE
}
})
##' make a binomial study
##' @export new.binomialStudy
new.binomialStudy <- function(size,
endpoint=c("cure", "mortality"),
method=c("normal", "chisq", "wald",
"bl", "ac"),
significance=as.numeric(NA),
hurdle=as.numeric(NA),
margin=as.numeric(NA)) {
## first find our endpoint
endpoint <- match.arg(endpoint)
direction <- switch(endpoint,
cure=1L,
mortality=-1L,
stop("unknown 'endpoint'"))
## and what we're doing
method <- match.arg(method)
## here's the test function
testFunction <- switch(method,
normal=.gaussianTest,
chisq=.chiTest,
wald=.ni.wald,
bl=.ni.bl,
ac=.ni.ac,
stop("unknown test method"))
## check that the arguments are sane
if (method %in% c("normal", "chisq")) {
if (!missing(margin)) {
stop("you have specified 'margin' for an equivalence study")
}
assert(is.numeric(hurdle) ||
(is.positive.scalar(significance) &&
(significance < 1)))
} else {
if (!missing(hurdle)) {
stop("you have specified 'hurdle' for a noninferiority study")
}
if (missing(margin)) {
stop("you need to specify 'margin' for a noninferiority study")
}
if (missing(significance)) {
stop("you need to specific 'significance' for a noninferiority study")
}
hurdle <- margin
assert(is.numeric(hurdle))
assert(is.positive.scalar(significance) && (significance < 1))
}
new("binomialStudy",
size=size,
significance=significance,
hurdle=hurdle,
direction=direction,
testFunction=testFunction)
}
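## Hedged end-to-end sketch (illustrative; not from the source). It assumes the
## helpers defined elsewhere in this package (study.size, check.significance,
## check.hurdle, assert) behave as their names suggest.
# early <- new.binomial(x1 = 30, m1 = 100, x2 = 20, m2 = 100)
# post <- samplePosterior(early, nsim = 1000)
# later <- new.binomialStudy(size = study.size(grp1.size = 300, grp2.size = 300),
#                            endpoint = "cure", method = "normal",
#                            significance = 0.05)
# lsamp <- sampleLater(post, later)
# mean(testLater(lsamp)) # estimated assurance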
setClass("binomialLater",
representation(grp1.count="numeric",
grp2.count="numeric",
study.defn="binomialStudy"))
setMethod("sampleLater",
signature(posteriorSample="binomialProbabilities",
laterStudy="twoArm"),
function(posteriorSample, laterStudy) {
n1 <- laterStudy@size@grp1
n2 <- laterStudy@size@grp2
nsim <- length(posteriorSample)
grp1.count <- rbinom(nsim, n1, posteriorSample@grp1.prob)
grp2.count <- rbinom(nsim, n2, posteriorSample@grp2.prob)
new("binomialLater",
grp1.count=grp1.count,
grp2.count=grp2.count,
study.defn=laterStudy)
})
.chiTest <- function(event.1, size.1, event.2, size.2, alpha, hurdle,
flip.direction) {
d <- ifelse(flip.direction * event.1 > flip.direction * event.2,
1, 0)
p.1 <- event.1 / size.1
p.2 <- event.2 / size.2
check.significance(
alpha,
{
chi2 <- qchisq(alpha, 1, lower.tail=FALSE)
n <- size.1 + size.2
stat <- (n * (event.1 * (size.2 - event.2) -
(size.1 - event.1) * event.2)^2)/(event.1 + event.2)/(n - event.1 - event.2)/size.1/size.2 * d
stat > chi2
}) & check.hurdle(flip.direction * (p.1 - p.2), hurdle)
}
.gaussianTest <- function(event.1, size.1, event.2, size.2, alpha, hurdle,
flip.direction) {
p.1 <- event.1 / size.1
p.2 <- event.2 / size.2
effect <- flip.direction * (p.1 - p.2)
check.significance(
alpha,
{
se <- sqrt(p.1 * (1 - p.1) / size.1 + p.2 * (1 - p.2) / size.2)
effect > se * qnorm(alpha / 2, lower.tail=FALSE)
}) & check.hurdle(effect, hurdle)
}
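## Worked check (illustrative; not from the source), assuming
## check.significance() and check.hurdle() behave as their names suggest:
## with the inputs below, effect = 0.20 and
## se = sqrt(0.6*0.4/100 + 0.4*0.6/100) ~ 0.069, so 0.20 > 1.96 * 0.069 -> TRUE.
# .gaussianTest(event.1 = 60, size.1 = 100, event.2 = 40, size.2 = 100,
#               alpha = 0.05, hurdle = 0, flip.direction = 1)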
## backend code for non-inferiority tests
#' @name internal
.ni.test <- function(effect, se, alpha, margin, direction) {
scale <- qnorm(alpha / 2, lower.tail=FALSE)
lower.limit <- effect - scale * se
upper.limit <- effect + scale * se
if (direction > 0) {
## cure: we want p.1 > p.2
lower.limit > margin
} else {
## mortality: we want p.1 < p.2
upper.limit < margin
}
}
# Wald noninferiority test
#' @name internal
.ni.wald <- function(event.1, size.1, event.2, size.2, ...) {
p.1 <- event.1 / size.1
p.2 <- event.2 / size.2
se <- sqrt(p.1 * (1 - p.1) / size.1 + p.2 * (1 - p.2) / size.2)
if (any(se==0)) {
stop("Wald test: se estimate is zero, use a different CI method")
}
.ni.test(p.1 - p.2, se, ...)
}
# Agresti & Caffo noninferiority test
#' @name internal
.ni.ac <- function(event.1, size.1, event.2, size.2, ...) {
p.1 <- (event.1 + 1) / (size.1 + 2)
p.2 <- (event.2 + 1) / (size.2 + 2)
se <- sqrt(p.1 * (1 - p.1) / size.1 + p.2 * (1 - p.2) / size.2)
.ni.test(p.1 - p.2, se, ...)
}
# Brown & Lis Jeffreys method
#' @name internal
.ni.bl <- function(event.1, size.1, event.2, size.2, ...) {
p.1 <- (event.1 + 0.5) / (size.1 + 1)
p.2 <- (event.2 + 0.5) / (size.2 + 1)
se <- sqrt(p.1 * (1 - p.1) / size.1 + p.2 * (1 - p.2) / size.2)
.ni.test(p.1 - p.2, se, ...)
}
setMethod("testLater",
signature(laterSample="binomialLater"),
function(laterSample) {
study.def <- laterSample@study.defn
alpha <- study.def@significance
sizing <- study.def@size
study.def@testFunction(laterSample@grp1.count,
sizing@grp1,
laterSample@grp2.count,
sizing@grp2,
alpha,
study.def@hurdle,
study.def@direction)
})
|
450a0b42e604086adc05bf6fc9c8cdf1fbbe50cf
|
d1f2fb9dd640805b7fd0a087ff627e393a359012
|
/Code/Figure5_cQ_Slope.R
|
c0a5b9ff06b9bdd7c571e831ac6e552aee4e6e76
|
[] |
no_license
|
LinneaRock/MendotaWatershed_SalinityRegimes
|
0e9db184dfa2ac191f998b326c1398fea982dc61
|
34194da7b3245657ccb4717645c3a39f4abb8fbb
|
refs/heads/main
| 2023-04-10T00:45:52.947143
| 2023-03-17T21:04:44
| 2023-03-17T21:04:44
| 378,938,614
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,529
|
r
|
Figure5_cQ_Slope.R
|
#script to create figures with relevant cQ slope information
################################################
######## ######## PLOT ######## ######## #####
all_individual_events$season = factor(all_individual_events$season,
levels = c("Jan-Mar", "Apr-Jun", "Jul-Sep", "Oct-Dec"))
all_individual_events$trib = factor(all_individual_events$trib, levels = c('SH','PB','YR-I','SMC','DC','YR-O'))
ggplot() +
labs(x = "Stormflow cQ Slope", y = "") +
annotate("rect", xmin = -0.05, xmax = 0.05, ymin = 0, ymax = Inf, alpha = 0.2, color = "grey") +
annotate("text", label = 'chemostatic', x = 0.02, y = 2.2, size = 2.5, angle = 90,color = "grey50") +
geom_jitter(all_individual_events, mapping = aes(slope_SpC, trib, fill = season),
width = 0, height = 0.2, size = 2.5, shape = 21, alpha = 0.8) +
scale_fill_manual(labels = c("Jan-Mar", "Apr-Jun", "Jul-Sep","Oct-Dec"),
values = palette_OkabeIto[1:4]) +
#values = c("#1DACE8", "#1C366B", "#F24D29", "#E5C4A1")) +
scale_y_discrete(limits=rev) + # flip y axis order for continuity with other plots
expand_limits(y = 7) +
theme_minimal() +
theme(legend.title = element_blank()) +
geom_point(all_full %>% filter(season == "Annual"), mapping = aes(slope_SpC, trib), shape = "|", size = 6) +
geom_point(all_baseflow %>% filter(season == "Annual"), mapping = aes(slope_SpC, trib), shape = "|", size = 6, color = "#6394a6") +
geom_point(all_bulkstorm %>% filter(season == "Annual"), mapping = aes(slope_SpC, trib), shape = "|", size = 6, color = "#801129") +
geom_curve(aes(x = -0.14, y = 6.6, xend = -0.20, yend = 6.4), curvature = 0.5, arrow = arrow(length = unit(0.03, "npc")), col = "#801129") +
geom_curve(aes(x = -0.29, y = 6.6, xend = -0.24, yend = 6.4), curvature = -0.5, arrow = arrow(length = unit(0.03, "npc"))) +
geom_curve(aes(x = 0.2, y = 6.6, xend = 0.07, yend = 6.4), curvature = 0.5, arrow = arrow(length = unit(0.03, "npc")), color = "#6394a6") +
annotate('text', label = 'stormflow', x = -0.13, y = 6.6, hjust = 0, size = 2.5, col = "#801129") +
annotate('text', label = 'all', x = -0.33, y = 6.6, hjust = 0, size = 2.5) +
annotate('text', label = 'baseflow', x = 0.21, y = 6.6, hjust = 0, size = 2.5, col = "#6394a6") +
coord_equal(ratio = 1/10)
ggsave("Figures/F5_individualSlopes.png", height = 3.25, width = 6.25, units = "in", dpi = 500)
|
91109e451af828791c025a33937cef6b083a3495
|
9ac197fd83e1dee2ac8d5fc209d8df701866241a
|
/man/daqFinish.Rd
|
ac01bcf4b0f89a27d42b0fb2731bbe4498c1c590
|
[
"MIT"
] |
permissive
|
tunelipt/daq
|
745503f8bc8666480538c872aaeed70d0aa9d017
|
e4e509a4eb47f1357b86a98711fe3ac1fa9cc769
|
refs/heads/master
| 2020-07-25T14:05:08.127153
| 2020-06-11T13:03:12
| 2020-06-11T13:03:12
| 208,316,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 257
|
rd
|
daqFinish.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daq.R
\name{daqFinish}
\alias{daqFinish}
\title{Finish data acquisition.}
\usage{
daqFinish(dev, ...)
}
\arguments{
\item{dev}{Device.}

\item{...}{Further arguments.}
}
\description{
Finish data acquisition.
}
|
ce81f6457f589e86a38a2618e55b88f90cacc240
|
67a6256dc7f82be3b8e688daf202dc5045c18011
|
/man/bbox_to_sf.Rd
|
4bb7cd1a2f02b119e3e8cd359e82141aa8eb35e3
|
[
"MIT"
] |
permissive
|
jhollist/elevatr
|
4f987460fb0bcc91c8dcd71e3bcdf172ef45fa5e
|
284966714a96100b42b09b94027e32c27c9b19f5
|
refs/heads/main
| 2023-08-31T23:58:54.739878
| 2023-08-17T19:01:03
| 2023-08-17T19:01:03
| 65,325,400
| 178
| 22
|
NOASSERTION
| 2023-09-07T18:19:46
| 2016-08-09T20:19:18
|
R
|
UTF-8
|
R
| false
| true
| 381
|
rd
|
bbox_to_sf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal.R
\name{bbox_to_sf}
\alias{bbox_to_sf}
\title{Convert an sf bbox to an sf polygon (assumes a geographic projection)}
\usage{
bbox_to_sf(bbox, prj = 4326)
}
\arguments{
\item{bbox}{an sf bbox object}

\item{prj}{CRS of the bbox; defaults to 4326 (EPSG:4326)}
}
\description{
Converts an sf bbox to an sf polygon, assuming a geographic projection.
}
\keyword{internal}
|
2e139675f277e45055f59331a89c76ff02c5c04f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/dlmap/R/dltest.R
|
8544968b6567d0665ef5685178eb74264dcfd484
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,537
|
r
|
dltest.R
|
`dltest` <-
function(input, algorithm, chrSet, prevLoc=NULL, ...)
{
dfMerged <- input$dfMerged
n.chrSet <- length(chrSet)
null.ll <- vector(length=n.chrSet)
null.forma <- list()
random.forma <- list()
results <- list()
results$converge <- TRUE
formula <- list()
if (algorithm=="asreml") {
dfMrk <- input$dfMrk
envModel <- input$envModel
n.perm <- input$nperm
n.chr <- length(input$map)
nphe <- input$nphe
idname <- input$idname
formula <- envModel
npop <- ngen(input)
# Create permutation matrices
perm.test <- matrix(nrow=n.perm+1, ncol=n.chrSet)
maxp <- vector(length=n.perm+1)
perm.mat <- matrix(nrow=npop, ncol=n.perm+1)
perm.mat[,1] <- c(1:npop)
if (n.perm>0)
for (kk in 2:(n.perm+1))
perm.mat[,kk] <- sample(npop)
# Fit full model, with all chromosomes having separate VCs
formula$fixed <- paste(as.character(envModel$fixed)[2], "~",as.character(envModel$fixed[3]), sep="")
# Include fixed effects for all markers which have already been mapped
if (length(prevLoc)>0)
formula$fixed <- paste(formula$fixed, "+", paste(prevLoc, collapse="+"), sep="")
formula$fixed <- as.formula(formula$fixed)
nmrkchr <- vector(length=n.chr)
for (i in 1:n.chr) nmrkchr[i] <- length(grep(paste("C", i, "M", sep=""), colnames(dfMrk)))
############################################
# Random effects for all markers on a chromosome, excluding those
# which enter the model as fixed effects
## Set up new dfMerged based on grouped random effects ##
############################################
if (min(nmrkchr) > nrow(dfMrk)) {
dfm1 <- dfMerged[,c(1:nphe, match(prevLoc, names(dfMerged)))]
index <- list()
mat <- list()
ncolm <- vector(length=n.chr)
for (ii in 1:n.chr) {
index[[ii]] <- 1+setdiff(grep(paste("C", ii, "M", sep=""), names(dfMrk)[2:ncol(dfMrk)]), match(prevLoc, names(dfMrk)[2:ncol(dfMrk)]))
mat[[ii]] <- as.matrix(dfMrk[, index[[ii]]])
mat[[ii]] <- mroot(mat[[ii]] %*% t(mat[[ii]]))
ncolm[ii] <- ncol(mat[[ii]])
}
dfm2 <- do.call("cbind", mat)
dfm2 <- as.data.frame(dfm2)
dfm2 <- cbind(dfMrk[,1], dfm2)
names(dfm2)[1] <- colnames(dfMrk)[1]
dfMerged2 <- merge(dfm1, dfm2, by=names(dfm2)[1], all.x=TRUE, sort=FALSE)
colnames(dfMerged2)[(ncol(dfm1)+1):ncol(dfMerged2)] <- paste("var", 1:(ncol(dfMerged2)-ncol(dfm1)), sep="")
cumind <- c(0, cumsum(ncolm))
for (ii in 1:n.chr)
formula$group[[paste("g_", ii, "chr", sep="")]] <- ncol(dfm1) + (cumind[ii]+1):cumind[ii+1]
} else {
for (ii in 1:n.chr)
formula$group[[paste("g_", ii, "chr", sep="")]] <- nphe+setdiff(grep(paste("C", ii, "M", sep=""), names(dfMerged)[(nphe+1):ncol(dfMerged)]), match(prevLoc, names(dfMerged)[(nphe+1):ncol(dfMerged)]))
dfMerged2 <- dfMerged
}
############################################
# Random effects for each chromosome in selected subset
# Markers are modelled as independent and same variance within chromosomes
chrnam <- paste("idv(grp(g_", chrSet, "chr))", sep="")
formula$random <- paste("~", paste(chrnam, collapse="+"))
if (length(envModel$random)>0)
formula$random <- paste(formula$random, "+", as.character(envModel$random[2]), sep="")
formula$random <- as.formula(formula$random)
formula$dump.model <- TRUE
formula$data <- "dfMerged2"
formula$control <- envModel$control
formula$eqorder <- 3
formula <- c(formula, ...)
formula <- formula[!duplicated(formula)]
formula <- formula[!sapply(formula, is.null)]
full <- do.call("asreml", formula)
if (n.chrSet==1)
{
form.null <- formula
form.null$random <- envModel$random
form.null <- form.null[!sapply(form.null, is.null)]
null.forma[[1]] <- do.call("asreml", form.null)
null.forma[[1]]$control$eqorder <- 3
}
if (n.chrSet>1)
for (cc in 1:n.chrSet)
{
# fit model leaving out each chromosome to test VC
chrnam <- paste("idv(grp(g_", setdiff(chrSet, chrSet[cc]), "chr))", sep="")
rndf <- paste("~", paste(chrnam, collapse="+"), sep="")
if (!is.null(envModel$random))
rndf <- paste(rndf, "+", as.character(envModel$random[2]), sep="")
rndf <- as.formula(rndf)
form.null <- formula
form.null$random <- rndf
null.forma[[cc]] <- do.call("asreml", form.null)
null.forma[[cc]]$control$eqorder <- 3
}
# Vector of observed test statistics from LRTs
if (n.perm==0)
{
run <- asreml(model=full)
full.ll <- run$loglik
if (run$converge==FALSE) results$converge <- FALSE
for (cc in 1:n.chrSet)
{
run <- asreml(model=null.forma[[cc]])
null.ll[cc] <- run$loglik
if (run$converge==FALSE) results$converge <- FALSE
}
perm.test[1,] <- 2*(full.ll-null.ll)
results$obs <- perm.test[1,]
results$raw.pval <- sapply(perm.test[1,], pvfx)
if (input$multtest=="bon")
results$adj.pval <- sapply(results$raw.pval, function(x) return(min(x*n.chrSet,1))) else {
pval <- as.matrix(rbind(c(1:length(results$raw.pval)), results$raw.pval))
pval <- as.matrix(pval[,order(pval[2,])])
pval[2,] <- sapply(pval[2,]*(n.chrSet:1), function(x) return(min(x,1)))
results$adj.pval <- pval[2, order(pval[1,])] }
results$thresh <- qchibar(input$alpha/n.chrSet)
}
if (n.perm>0)
{
namesrnd <- setdiff(names(dfMrk)[2:ncol(dfMrk)], prevLoc)
for (ii in 1:(n.perm+1))
{
if (ncol(dfMrk) > 20*nrow(dfMrk)) {
### now instead of merging the final matrix on, reconstruct the grouped
### marker matrix under the current permutation, then merge it on
dfmp <- cbind(dfMrk[,1], dfm2[perm.mat[,ii],])
names(dfmp)[1] <- names(dfMrk)[1]
df3 <- merge(dfm1, dfmp, by=names(dfMrk)[1], all.x=TRUE, sort=FALSE)
} else {
df2 <- cbind(dfMrk[,1],dfm2[perm.mat[,ii],which(names(dfMrk)%in%namesrnd)])
names(df2)[1] <- names(dfMrk)[1]
df4 <- dfMerged[, match(c(idname, setdiff(names(dfMerged), names(df2))), names(dfMerged))]
df3 <- merge(df4, df2, by=idname, all.x=TRUE, sort=FALSE)
}
df3 <- df3[,match(names(dfMerged2), names(df3))]
full <- update(full, data=df3)
# replace data in model for random marker effects
# index <- match(namesrnd, names(full$data))
# index <- index[!is.na(index)]
# index2 <- match(names(full$data)[index], names(df3))
# full$data[,index] <- df3[, index2]
# run full model
run <- asreml(model=full)
full.ll <- run$loglik
if (run$converge==FALSE)
results$converge <- FALSE
for (cc in 1:n.chrSet)
{
# replace data for random marker effects for each of the null models
# index <- match(namesrnd, names(null.forma[[cc]]$data))
# index <- index[!is.na(index)]
# index2 <- match(names(null.forma[[cc]]$data)[index], names(df3))
null.forma[[cc]] <- update(null.forma[[cc]], data=df3)
# null.forma[[cc]]$data[, index] <- df3[, index2]
run <- asreml(model=null.forma[[cc]])
if (run$converge==FALSE) results$converge <- FALSE
null.ll[cc] <- run$loglik
}
perm.test[ii,] <- 2*(full.ll-null.ll)
} # end of loops over permutations
# For each permutation, store the maximum (over chromosomes) LRT
maxp <- apply(perm.test, 1, max)
# Permutation threshold is the (1-alpha) percentile of max values
results$thresh <- sort(maxp[2:(n.perm+1)])[floor((1-input$alpha)*n.perm)]
results$raw.pval <- sapply(perm.test[1,], pvfx)
results$adj.pval <- sapply(perm.test[1,], function(x) sum(x<=maxp[2:(n.perm+1)])/n.perm)
results$obs <- perm.test[1,]
results$perm.ts <- perm.test
} # end of check whether n.perm>0
} # end of algorithm==asreml
if (algorithm=="lme") {
fixed <- input$envModel$fixed
f.mrk <- vector()
chrRE <- vector()
LRTStats <- vector()
# Construct vector of already mapped markers (f.mrk)
formula$fixed <- paste(as.character(fixed)[2], as.character(fixed)[1], as.character(fixed)[3], sep="")
# Include fixed effects for all markers which have already been mapped
if (length(prevLoc)>0)
formula$fixed <- paste(formula$fixed, "+", paste(prevLoc, collapse="+"))
formula$fixgrp <- paste(formula$fixed, "|grp1", sep="")
formula$fixed <- as.formula(formula$fixed)
formula$fixgrp <- as.formula(formula$fixgrp)
gd <- groupedData(formula$fixgrp, data=dfMerged)
# Random effects for all markers on a chromosome excluding those which
# enter the model as fixed effects
for (ii in 1:length(input$map))
chrRE[ii] <- paste("pdIdent(~", paste(setdiff(names(dfMerged)[grep(paste("C", ii, "M", sep=""), names(dfMerged))], prevLoc), collapse="+"), "-1)", sep="")
formula$random <- paste("pdBlocked(list(", paste(chrRE[chrSet], collapse=","), "))", sep="")
if (length(chrSet)==1)
formula$random <- chrRE[chrSet]
full <- lme(fixed=formula$fixed, random=eval(parse(text=formula$random)), data=gd, control=lmeControl(maxIter=input$maxit), na.action=na.omit)
full.ll <- full$logLik
# If there is only one chromosome in the subset, compare a full model to the model
# with no random effects
if (n.chrSet==1)
null.forma[[1]] <- lme(fixed=formula$fixed, data=gd, control=lmeControl(maxIter=input$maxit), na.action=na.omit)
# Otherwise, compare the full model to leave-one-VC-out models, removing
# each chromosome effect one at a time
if (n.chrSet>1)
for (cc in 1:n.chrSet)
{
random.forma[[cc]] <- paste("pdBlocked(list(", paste(chrRE[setdiff(chrSet, chrSet[cc])], collapse=","), "))", sep="")
if (n.chrSet==2)
random.forma[[cc]] <- chrRE[setdiff(chrSet, chrSet[cc])]
# Fit the null model, where we omit the specified chromosome random effect
null.forma[[cc]] <- lme(fixed=formula$fixed, random=eval(parse(text=random.forma[[cc]])), data=gd, control=lmeControl(maxIter=input$maxit), na.action=na.omit)
null.ll[cc] <- null.forma[[cc]]$logLik
}
LRTStats <- 2*(full.ll-null.ll)
results$obs <- LRTStats
results$raw.pval <- sapply(LRTStats, pvfx)
# depends on multtest value
if (input$multtest=="bon")
results$adj.pval <- sapply(results$raw.pval*n.chrSet, function(x) return(min(x, 1))) else {
pval <- as.matrix(rbind(1:length(results$raw.pval), results$raw.pval))
pval <- as.matrix(pval[,order(pval[2,])])
pval[2,] <- sapply(pval[2,]*(n.chrSet:1), function(x) return(min(x,1)))
results$adj.pval <- pval[2,order(pval[1,])] }
results$thresh <- qchibar(input$alpha/n.chrSet)
}
return(results)
}
|
43f2a323627f1caae039513388361fe9fad65463
|
612559e34b7a0495eaa7bca2ac1624d641bf782d
|
/spearmans_rho/snp_env_association_spearmans3_parallelise.R
|
c6ee4bd1e446f6ef24764a1d07ffa98391d996c1
|
[] |
no_license
|
CoAdapTree/GEA
|
749e76dda60f27451121e7873e6a5e9f6a12315f
|
1118b2b590fa24d2c9878dd9d87c71151e2c3373
|
refs/heads/master
| 2020-09-06T04:50:35.218076
| 2020-05-29T19:32:41
| 2020-05-29T19:32:41
| 220,328,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,660
|
r
|
snp_env_association_spearmans3_parallelise.R
|
### PSingh MARCH 2020 ####
### pooja.singh09@gmail.com ###
### CoAdapTree #####
### this script takes the unstitched scaffold and position ###
###### SNP ENV Association without correcting for population structure ####
###### : This script can be parallelised, so please set numCores!!!
#needed libraries
library(dplyr)
library(tidyr)
library(stringr)
library(foreach)
library(doParallel)
library(data.table)
library(doSNOW)
# set number of cores to use
numCores=10
cl <- makeCluster(numCores)
registerDoSNOW(cl)
# set iterations (here, the number of environments)
iterations <- 19
# set progress bar
pb <- txtProgressBar(min = 1, max = iterations, style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress=progress)
# read in AF table from GATK pipeline
data1 <- read.table("/data/projects/pool_seq/pangenome/JP_pangenome/JP_pooled/snpsANDindels/03_maf-p05_RD-recalculated/JP_pooled-varscan_all_bedfiles_SNP_maf_RD-recalculated.txt", header=T, sep="\t")
### select FREQ columns, order the populations, and convert FREQ from percent to decimal
data2 <- data1 %>% select(contains(".FREQ"))
colhead <- sub(".FREQ", "", colnames(data2))
colnames(data2) <- colhead
data3 <- data2[ , order(colnames(data2))]
data4 <- data3 %>% mutate_each(funs(as.numeric(gsub("%", "", ., fixed = TRUE))/100))
### prepare row header for final SNP table
header1 <- data1 %>% select(contains("unstitched_locus"))
header <- as.data.frame(str_replace(header1$unstitched_locus, ">", ""))
colnames(header) <- c("ID")
### paste header and FREQs and finalise
data5 <- cbind(header, data4)
snps <- data5[2:41]
rownames(snps) <- data5$ID
#### read in env data and order header the same as the SNPfile #####
env_var <- read.table("jp_std_env-19variables.txt", header=T)
env_var1 <- env_var[c(6:24)]
rownames(env_var1) <- env_var$our_id
env_var2 <- t(env_var1)
env_var3 <- env_var2[ , order(colnames(env_var2))]
###### Parallelise correlation loop through each SNP versus ENV and output file ####
scafpos <- as.matrix(rownames(snps))
envname <- as.matrix(rownames(env_var3))
args <- commandArgs(trailingOnly = TRUE)
start1 <- 1
end1 <- nrow(snps)
end2 <- nrow(env_var3)
# Create a class which holds multiple results for each loop iteration.
# Each iteration populates four properties: $result1 through $result4
multiResultClass <- function(result1=NULL,result2=NULL,result3=NULL,result4=NULL)
{
me <- list(result1 = result1,result2 = result2, result3 = result3, result4 = result4)
## Set the name for the class
class(me) <- append(class(me),"multiResultClass")
return(me)
}
# set counter
count <- 0
# set sys time
system.time(
# loop through envs and parallelise
output <- foreach (j = 1:end2, .options.snow=opts, .packages="foreach", .combine=rbind) %dopar% {
# loop through SNPs
foreach (i = 1:end1, .combine=rbind) %do% {
count <- count + 1
result <- multiResultClass()
result$result1 <- scafpos[i,1]
result$result2 <- envname[j,1]
result$result3 <- cor.test(as.numeric(snps[i,]), as.numeric(env_var3[j,]), method = "spearman", exact=FALSE, use = "pairwise.complete.obs")$estimate
result$result4 <- cor.test(as.numeric(snps[i,]), as.numeric(env_var3[j,]), method = "spearman", exact=FALSE, use = "pairwise.complete.obs")$p.value
setTxtProgressBar(pb, i)
return(result)
}
}
)
output1 <- data.table(output)
colnames(output1) <- c("snp", "env", "spearmansrho", "pvalue")
outname_p <- paste("snp_env_spearmans_rho_parallel",".txt",sep = "")
fwrite(output1, outname_p, sep="\t", col.names = T, row.names = F, quote = F)
## close progress bar and clean up cluster
close(pb)
stopCluster(cl)
|
956ca6bb141f0c04d53fd03f7e3147a7f439fc06
|
9cf933abd57da6dd2ed5d293644a5fa9683d0923
|
/DGE/DGE_Analysis.R
|
f7a9732b2d7a207b3d96b773f620e62ae50b3b8b
|
[] |
no_license
|
konopkalab/Mef2c_Het
|
49a33f1851fd67416dfb14fcf00bf21264d89306
|
c6da4ad65371dce6e29f75ec42184600f09781a7
|
refs/heads/master
| 2020-08-09T05:01:04.287240
| 2020-04-30T13:12:31
| 2020-04-30T13:12:31
| 214,003,792
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,924
|
r
|
DGE_Analysis.R
|
# Load libraries
suppressPackageStartupMessages(library(pheatmap))
suppressPackageStartupMessages(library(DESeq2))
suppressPackageStartupMessages(library(sva))
suppressPackageStartupMessages(library(biomaRt))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(ggjoy))
suppressPackageStartupMessages(library(ggrepel))
suppressPackageStartupMessages(library(ggpubr))
suppressPackageStartupMessages(library(cowplot))
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(edgeR))
suppressPackageStartupMessages(library(preprocessCore))
####################################
# Load Inputs and filter for NeuN+ #
####################################
RPKM=read.table("RPKM_EXON.txt",sep="\t",header=T)
COUNT=read.table("COUNT_EXON.txt",sep="\t",header=T)
first=apply(RPKM, 1, function(x) (all(x[1:4] >= 0.5) | all(x[5:8] >= 0.5)))
df <- COUNT[first,]
##########################
# normalization #
##########################
dat <- log2(cpm(df)+1)
##########################
# Quantile normalization #
##########################
p <- normalize.quantiles(as.matrix(dat))
rownames(p) <- rownames(dat)
colnames(p) <- colnames(dat)
#write.table(p, "OUTPUTS_GENEBODY_NEUN/NeuN_Primates_GeneBody_CPM.txt",sep="\t",quote=F)
pd=data.frame(row.names = colnames(p), Treatment=c(rep("HET",4),rep("WT",4)))
#########################
# PCA #
#########################
pdf("PCA.pdf",width=5,height=5,useDingbats=FALSE)
pca.Sample<-prcomp(t(p))
PCi<-data.frame(pca.Sample$x,Treatment=pd$Treatment)
eig <- (pca.Sample$sdev)^2
variance <- eig*100/sum(eig)
ggscatter(PCi, x = "PC1", y = "PC2",color = "Treatment",palette=c("steelblue","darkgrey","green"), shape = 21, size = 3)+
xlab(paste("PC1 (",round(variance[1],1),"% )"))+
ylab(paste("PC2 (",round(variance[2],1),"% )"))+
theme_classic()
dev.off()
########################
#     HET vs WT      #
########################
TRAITSfilt <- droplevels(pd)
Data=t(p)
output <- data.frame(matrix(nrow=ncol(Data), ncol=3, dimnames = list(colnames(Data), c("Estimate", "Pval", "Warning"))))
output[,] <- NA
for (i in 1:ncol(Data)) {
Model=tryCatch(lm(as.formula(paste("Data[,i] ~ ", paste(colnames(TRAITSfilt),collapse = " + "))), data = TRAITSfilt),warning = function(w) w)
if (i %% 1000 == 0) {cat(paste("Done on gene ",i,"\n",sep=""))}
if(typeof(Model) == "list"){
coefs = data.frame(coef(summary(Model)))
# the coefficient row is named after the factor level, e.g. "TreatmentWT"
trt_row = grep("^Treatment", rownames(coefs), value = TRUE)[1]
t_value = coefs[trt_row, "t.value"]
output[i,"Pval"] = 2 * (1 - pnorm(abs(t_value)))
output[i,"Estimate"]= -1 * coefs[trt_row, "Estimate"]
} else {
output[i,"Warning"] = as.character(Model)
output[i, "Estimate"] = 0
output[i,"Pval"] = 1
}
}
DGE <- output
DGE$Warning <- NULL
DGE$FDR <- p.adjust(DGE$Pval,"BH")
write.table(DGE, "LM_EXON.txt",sep="\t",quote=F)
xlsx::write.xlsx(DGE, file="LM_EXON.xlsx",sheetName = "EXON DEG",row.names=TRUE, showNA=FALSE)
sign <- DGE[DGE$FDR < 0.05,]
mat <- p[rownames(p)%in% rownames(sign),]
colnames(mat) <- c("Het_1","Het_2","Het_3","Het_4","Wt_1","Wt_2","Wt_3","Wt_4")
anno <- data.frame(row.names = colnames(mat), Treatment=c(rep("HET",4),rep("WT",4)))
Treatment <- c("red", "black")
names(Treatment) <- c("HET", "WT")
anno_colors <- list(Treatment = Treatment)
pdf("Heatmap.pdf",width=4,height=6)
pheatmap(mat,scale="row",show_rownames = F,annotation=anno,annotation_colors = anno_colors)
dev.off()
# Convert to human
human = useMart("ensembl", dataset = "hsapiens_gene_ensembl")
mouse = useMart("ensembl", dataset = "mmusculus_gene_ensembl")
MGI = getLDS(attributes = c("mgi_symbol"), filters = "mgi_symbol", values = rownames(sign) ,mart = mouse, attributesL = c("hgnc_symbol"), martL = human, uniqueRows=T)
signComb <- merge(sign,MGI,by.x="row.names",by.y="MGI.symbol",all=F)
df <- signComb %>%
mutate(Direction = case_when(Estimate > 0 ~ "Upreg", Estimate < 0 ~ "Downreg")) %>%
dplyr::select(HGNC.symbol,Direction) %>%
dplyr::rename(Gene = HGNC.symbol)
tmp <- data.frame(Gene = df$Gene, Direction = rep("All",nrow(df)))
df <- rbind(df,tmp)
write.table(df,"DEGs_For_Enrichment.txt",sep="\t",quote=F,row.names=F)
# Volcano plot
tab <- read.table("LM_EXON.txt")
df <- tab %>%
rownames_to_column("Names") %>%
mutate(Threshold = case_when(FDR < 0.05 ~ "TRUE", FDR > 0.05 ~ "FALSE")) %>%
mutate(Direction = case_when(Estimate > 0 ~ "UpReg", Estimate < 0 ~ "DownReg")) %>%
mutate(LOG = -log10(FDR), ABS = abs(Estimate))
df$LOG[!is.finite(df$LOG)] <- 12
top_labelled <- tbl_df(df) %>%
group_by(Direction) %>%
top_n(n = 10, wt = LOG)
pdf("Vulcano_Plot.pdf",width=6,height=6,useDingbats=FALSE)
ggscatter(df,
x = "Estimate",
y = "LOG",
color = "Threshold",
palette=c("grey","red"),
size = 1,
alpha=0.3,
shape=19)+
xlab("log2(Fold Change)")+
ylab("FDR")+
geom_vline(xintercept = 0, colour = "grey",linetype="dotted",size=1,alpha=0.5) +
geom_hline(yintercept = 1.3, colour = "grey",linetype="dotted",size=1,alpha=0.5) +
geom_text_repel(data = top_labelled,
mapping = aes(label = Names),
size = 5,
box.padding = unit(0.4, "lines"),
point.padding = unit(0.4, "lines"))+
theme(legend.position="none")+
ylim(0,20)+
xlim(-2,2)
dev.off()
|
0dd130fe68c0c2be435dee2fa77231f0d35dafe5
|
6951cfcfbcad0034696c6abe9a4ecf51aa0f3a4b
|
/man/reorder_columns.Rd
|
00769affad81801e4950a2be4692ea0be9bc9f2c
|
[] |
no_license
|
renozao/pkgmaker
|
df3d4acac47ffbd4798e1d97a31e311bf35693c8
|
2934a52d383adba1d1c00553b9319b865f49d15b
|
refs/heads/master
| 2023-05-10T16:40:30.977394
| 2023-05-03T07:02:51
| 2023-05-03T07:17:17
| 12,726,403
| 8
| 3
| null | 2023-02-14T10:26:07
| 2013-09-10T10:07:35
|
R
|
UTF-8
|
R
| false
| true
| 1,101
|
rd
|
reorder_columns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{reorder_columns}
\alias{reorder_columns}
\title{Reordering Columns}
\usage{
reorder_columns(x, target, decreasing = FALSE)
}
\arguments{
\item{x}{an object with columns, such as a \code{matrix} or a \code{data.frame},
or from a class that support subsetting via \code{x[, i, drop = FALSE]} and has a method \code{colnames}.}
\item{target}{a character or named numeric vector that specifies the preferred column order.
If a numeric vector, then its names are assumed to correspond to columns,
and its values determine the target order -- according to argument \code{decreasing}.}
\item{decreasing}{logical that indicates in which direction a numeric target vector should
be ordered.}
}
\value{
an object of the same type and dimension
}
\description{
Reorders columns according to a preferred target order
}
\details{
Column names will be reordered so that their order matches the one in \code{target}.
Any column that does not appear in \code{target} will be put after those that are
listed in \code{target}.
}
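% Illustrative example (an added sketch, not generated from the source roxygen):
\examples{
df <- data.frame(b = 1, c = 2, a = 3)
# columns named in target come first, in target order; the rest follow
reorder_columns(df, target = c("a", "b"))
}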
|
a4d3c1a278aa8e77b2e4be62474c885ff1721fcb
|
b503054596cc98acdc6239ecf1ebf8f2986b4336
|
/additional files/LR.R
|
521e9a9ee3fc33f815c4dd8c7a0d124be84df15a
|
[] |
no_license
|
ajayso/Regression
|
224883020b0869c7ac93d4e164d242fd9a21e0ac
|
c38b85e7f85403d3eea96ed6f4a4d8529a3c0922
|
refs/heads/master
| 2020-03-18T04:06:22.065524
| 2018-05-21T12:58:17
| 2018-05-21T12:58:17
| 134,269,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
LR.R
|
library(forecast)

# Read the data and fit a linear regression of USD on all other columns
mydata = read.csv("c:/start/ML-Regression-Analysis/gd2.csv", stringsAsFactors = FALSE, header=TRUE, sep=",")
fit = lm(USD~., mydata)
plot(fit) # standard lm diagnostic plots (residuals, Q-Q, leverage)
|
3b190103704efdc38340eaf486545e2b1f196ea3
|
dbd3045fabdcc6305658802a59eb0455dc418cd5
|
/R/update_timediaps.R
|
bc1c60dcbac85dfb23006ec21b27efbd1f680f00
|
[
"MIT"
] |
permissive
|
aljrico/hs_balancedata
|
5d239fe7ee63c1e8beb7a471655b4b0f10318527
|
02aea4ba6bdcc81f0d0b17518e2206f1719158d0
|
refs/heads/master
| 2020-07-21T09:02:11.433048
| 2019-12-03T16:07:57
| 2019-12-03T16:07:57
| 206,807,289
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
update_timediaps.R
|
#' @export
update_timediaps <- function(spreadsheet_name = '(HS) timediaps', game_folder = 'homestreet'){
hs.balancedata::gs_credentials()
spreadsheet_name %>%
googlesheets::gs_title() %>%
googlesheets::gs_read(ws = 'timediaps_prod') %>%
data.table::data.table() %>%
data.table::fwrite(paste0('~/', game_folder, '/Assets/data/source/csv/timediaps_prod.csv'))
}
|
6ade8735f16402a16fa6c0169cd2fe18e4bd5681
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rfast/R/logical.R
|
268010a2f62d90d86da92f641ed42ba5b064bdec
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
logical.R
|
#[export]
rowTrueFalse <- function(x) {
x <- .Call(Rfast_row_true_false,x)
rownames(x) <- c("FALSE","TRUE")
x
}
#[export]
colTrueFalse <- function(x) {
x <- .Call(Rfast_col_true_false,x)
rownames(x) <- c("FALSE","TRUE")
x
}
#[export]
colTrue <- function(x) {
.Call(Rfast_col_true,x)
}
#[export]
rowTrue <- function(x) {
.Call(Rfast_row_true,x)
}
#[export]
rowFalse <- function(x) {
.Call(Rfast_row_false,x)
}
#[export]
colFalse <- function(x) {
.Call(Rfast_col_false,x)
}
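#[illustrative usage sketch; not part of the package source]
# x <- matrix(c(TRUE, FALSE, TRUE, TRUE, FALSE, FALSE), nrow = 2)
# colTrue(x) # number of TRUE entries per column
# rowTrueFalse(x) # counts of FALSE/TRUE, with result rows named "FALSE" and "TRUE"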
|
e3323ad6f126248aa1cec592eddd3e58a140170e
|
b4dad97c1fc3a9a1b51c806f99e6b8ce1105ae32
|
/Code/kmeans_econ.R
|
2c2c36bf5bd9b8529f1d4f716376f91218c717bc
|
[] |
no_license
|
ndifilippo/mgsc410
|
46d331d9d4acf9743e69fafcd2e5d163e7b5a067
|
93756607aefd77a64db571a67016ea303dab16c5
|
refs/heads/master
| 2020-05-15T10:24:55.240849
| 2019-05-05T22:38:13
| 2019-05-05T22:38:13
| 182,191,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,493
|
r
|
kmeans_econ.R
|
library(tidyverse)
library(factoextra)
library(cluster)
library(VIM)
# install.packages("devtools")
library(devtools)
# install dev version of ggmap
# devtools::install_github("dkahle/ggmap")
library(ggmap)
library(knitr)
library(kableExtra)
# tinytex::install_tinytex() # one-time setup; not needed on every run
#> Loading required package: ggplot2
#> Google Maps API Terms of Service: http://developers.google.com/maps/terms.
#> Please cite ggmap if you use it: see citation("ggmap") for details.
# save api key
register_google(key = "AIzaSyCgDlbIH6DyxUJ5b4VcfVP-7LY5-cbjHsc")
setwd("C:/git/mgsc410/datasets")
# clean data
data <- read.csv("dma_and_msa.csv", stringsAsFactors = F)
# delete non states: west, northeast etc
data <- filter(data, !(str_detect(str_to_lower(data$location),"all consumer")))
# drop variables that are not needed
data <- select(data, -c(dma, name, dma_name, region))
data <- filter(data, date_id == 2009)
temp <- data[, 4:49]
data.percents <- temp[, 1:44] / data$income_before_taxes
data_kmeans <- data.percents[, 9:40]
# Kmeans
kmeans4 <- kmeans(data_kmeans, centers = 4, nstart = 25)
# cluster to data frame
data$cluster <- (kmeans4$cluster)
data.percents$cluster <- kmeans4$cluster
data.percents$location <- data$location
# USA MAP
p <- get_map(location = c(lon = mean(as.numeric(data$longitude)) - 5, lat = mean(as.numeric(data$latitude))), zoom = 4,
maptype = "roadmap", scale = 2)
p
ggmap(p) + geom_point(data = data,
aes(x = as.numeric(data$longitude ),
y = as.numeric(data$latitude)),
fill = data$cluster, alpha =0.4,
size = (data$income_before_taxes)/10000 ,
shape = 21, color = data$cluster) +
labs(y="Latitude", x = "Longitude",
title = "Clustering DMAs by 2009's Consumer Expenditure Survey") +
theme(plot.title = element_text(size = 18), axis.title = element_text(size = 14), legend.position="top")
options(digits=2)
table <- cbind( data.percents[c(9,45:46)], data$income_before_taxes)
print(table[order( table$cluster, table$`data$income_before_taxes` ),])
table <- table[order( table$cluster, table$`data$income_before_taxes` ),]
colnames(table) <- c("Avg expenditure / Income", "Cluster", "Location", "Income before Taxes")
table$`Income before Taxes` <- paste('$',formatC(table$`Income before Taxes`, big.mark=',', format = 'f', digits = 2))
table
|
125994fd0024acd79dfe1e29face62ae4d593d54
|
ef572bd2b0515892d1f59a073b8bf99f81d6a734
|
/data-raw/COP23/COP23OPU_Data_Pack_generation_script.R
|
efe943a1bfaea05b5e475dc3cf992442b0b0bc41
|
[
"CC0-1.0"
] |
permissive
|
pepfar-datim/datapackr
|
5bc604caa1ae001b6c04e1d934c0c613c59df1e6
|
9275632673e45948db6846513a53c1436cfc0e47
|
refs/heads/master
| 2023-08-30T23:26:48.454382
| 2023-08-11T13:01:57
| 2023-08-11T13:01:57
| 170,350,211
| 9
| 7
|
CC0-1.0
| 2023-09-11T21:53:24
| 2019-02-12T16:19:47
|
R
|
UTF-8
|
R
| false
| false
| 2,123
|
r
|
COP23OPU_Data_Pack_generation_script.R
|
#
# This script is to be used when producing COP23 OPU Tools — i.e., PSNUxIM tools
# — in cases where an OPU requires ONLY a target shift IM. Unlike the regular
# Data Pack generation process where a country generates a PSNUxIM tool via the
# Self-Service App based on data from an existing Data Pack, this code generates
# a PSNUxIM tool based on data pulled directly from DATIM.
#
# If a country needs more than a target shift among IMs — i.e., top-level target
# changes — DO NOT use this process. Instead, send back their latest Data Pack
# representing the most updated understanding of their targets as in DATIM.
#
library(datapackr)
# Point to DATIM login secrets ####
secrets <- Sys.getenv("SECRETS_FOLDER") %>% paste0(., "datim.json")
datimutils::loginToDATIM(secrets)
output_folder <- Sys.getenv("OUTPUT_FOLDER") %>% paste0(., "Documents/COP23 OPUs/")
# For Generating Individual Data Packs ####
pick <- datapackr::cop_datapack_countries %>%
dplyr::filter(datapack_name %in% c("Eswatini"))
# test valid org units against cached ####
valid_OrgUnits <- getDataPackOrgUnits(use_cache = FALSE) %>%
dplyr::filter(country_uid %in% unlist(pick$country_uids))
#TODO: Make it possible to pull and compare for a single (or list) of countries
valid_OrgUnits_package <- datapackr::valid_OrgUnits %>%
dplyr::filter(country_uid %in% unlist(pick$country_uids))
compare_diffs <- valid_OrgUnits_package %>%
dplyr::full_join(valid_OrgUnits, by = "uid") %>%
dplyr::filter(is.na(name.x) | is.na(name.y))
if (NROW(compare_diffs) > 0) {
stop("Valid org units are not up to date! Please update valid org units.")
}
waldo::compare(valid_OrgUnits_package, valid_OrgUnits)
# Execution ####
for (i in seq_along(pick$datapack_name)) {
print(paste0(i, " of ", NROW(pick), ": ", pick[[i, 1]]))
d <- packTool(tool = "OPU Data Pack",
datapack_name = pick$datapack_name[i],
country_uids = unlist(pick$country_uids[i]),
template_path = NULL,
cop_year = 2023,
output_folder = output_folder,
results_archive = FALSE)
}
|