blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ba3f8fb86789ed676971f492f887cf6236cb0ed7
|
eb9c68c0cdf0ba0a008e7526d7827f7b92f54f7f
|
/Scripts/Rscripts/experimental.R
|
1a5832ba6d32298b2acf2eb5c899f3a58e49afc3
|
[] |
no_license
|
PaavoReinikka/hello-sandbox
|
68d77316c80fbb3acea7bda664ecf39d05fe561e
|
d08644422aadf1173e4d449a7f3cb126efe8c592
|
refs/heads/master
| 2023-07-09T18:20:13.579335
| 2023-07-04T14:07:51
| 2023-07-04T14:07:51
| 128,568,237
| 0
| 0
| null | 2018-04-07T21:10:45
| 2018-04-07T21:02:46
| null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
experimental.R
|
# Sanity checks on the variance of a 3-component normal mixture.
# k: draws per mixture component (also reused as the posterior sample
# size further below in this file).
k <- 100000
n <- 3
# FC: total separation between the two outer component means (+/- FC/2).
FC <- 1.5
# 90th percentile of the standard normal.
q_st <- qnorm(0.9,0,1)
q_st
# we want q = FC/2  =>  scale factor sigma = FC/(2*q_st)
sigma <- FC/(2*q_st)
#sigma <- 1
# Equal-weight mixture of N(0), N(-FC/2) and N(+FC/2), common sd sigma.
r <- c(rnorm(k,0,sigma),rnorm(k,-FC/2,sigma),rnorm(k,FC/2,sigma))
r <- sample(r,10000,TRUE)
# Empirical variance vs. three candidate closed-form expressions
# (exploratory: checking which formula matches; n = 3 components here).
var(r)
sigma^2 + FC^2/(2*n)
sigma^2 + FC^2*(n-1)/n^2
sigma^2 + FC^2*(n-2)/n^2
hist(r)
# Variance of a two-point distribution {mA w.p. pA, mB w.p. pB}:
# E[X^2] - E[X]^2 should equal pA*pB*(mA-mB)^2 when pA + pB = 1.
pA=0.3
pB=0.7
mA=3
mB=-2
pA*mA^2 + pB*mB^2 - (pA*mA + pB*mB)^2
pA*pB*(mA-mB)^2
#############################################################################
library(extraDistr)
library('comprehenr')   # no longer needed below, kept loaded as in the original
library(ggplot2)        # the plots at the end use ggplot(); it was never loaded

# Compare a frequentist estimate (difference of means + 95% normal CI)
# against posterior-draw ("Bayes") estimates of a two-group mean
# difference, across a grid of true effect sizes.
# NOTE: `k` (posterior sample size) comes from the top of this file.
n <- 20
sig1 <- 1
sig2 <- 2
effect_range <- seq(0,5,0.25)
# One row per effect size; 8 summary columns are stored
# (the original preallocated 6, which the first rbind silently overrode).
results <- data.frame(matrix(ncol = 8, nrow = 0))
for(effect in effect_range) {
  MLeffects <- c()
  MLhighs <- c()
  MLlows <- c()
  Bayeseffects <- c()
  Bayeshighs <- c()
  Bayeslows <- c()
  MLerrors <- c()
  Berrors <- c()
  for(i in 1:10) {
    # Data: two samples of size n whose means differ by `effect`.
    r1 <- rnorm(n, 0, sig1)
    r2 <- rnorm(n, effect, sig2)
    # ML estimate of the effect and its 95% normal-approximation CI.
    m1 <- mean(r1)
    m2 <- mean(r2)
    std <- sqrt(var(r1)/n + var(r2)/n)
    ML_effect <- m2 - m1
    ML_high <- ML_effect + 1.96*std
    ML_low <- ML_effect - 1.96*std
    # Posterior draws of each group's sd-of-the-mean. sqrt() must wrap
    # the whole expression: the original `rinvchisq(...)/n %>% sqrt()`
    # divided by sqrt(n) instead, because %any% operators bind tighter
    # than `/` (and magrittr was never loaded).
    sigma1 <- sqrt(rinvchisq(k, n-1, sqrt(var(r1)))/n)
    sigma2 <- sqrt(rinvchisq(k, n-1, sqrt(var(r2)))/n)
    # rnorm() is vectorised over `sd`, so no element-wise comprehension
    # is needed. (The original also drew mu2 with sigma1 — copy-paste bug.)
    mu1 <- rnorm(length(sigma1), m1, sigma1)
    mu2 <- rnorm(length(sigma2), m2, sigma2)
    posterior_effect <- mu2 - mu1
    Bayes_effect <- mean(posterior_effect)
    tmp <- quantile(posterior_effect, probs = c(0.025, 0.975))
    Bayes_high <- tmp[2]
    Bayes_low <- tmp[1]
    MLeffects <- c(MLeffects, ML_effect)
    MLhighs <- c(MLhighs, ML_high)
    MLlows <- c(MLlows, ML_low)
    Bayeseffects <- c(Bayeseffects, Bayes_effect)
    Bayeshighs <- c(Bayeshighs, Bayes_high)
    Bayeslows <- c(Bayeslows, Bayes_low)
    MLerrors <- c(MLerrors, abs(ML_effect-effect))
    # BUG FIX: the original appended to MLerrors here, so Berrors ended
    # up holding ML errors plus a single Bayes error.
    Berrors <- c(Berrors, abs(Bayes_effect-effect))
  }
  # Average the 10 replications into one summary row per effect size.
  results <- rbind(results,
                   c(mean(MLeffects),
                     mean(MLhighs),
                     mean(MLlows),
                     mean(MLerrors),
                     mean(Bayeseffects),
                     mean(Bayeshighs),
                     mean(Bayeslows),
                     mean(Berrors)))
}
results <- cbind(results, effect_range)
colnames(results) <- c('MLest','MLhigh','MLlow','MLerror','Best','Bhigh','Blow','Berror','true_effect')
# Estimates and interval bounds vs. the true effect (red = ML, blue = Bayes).
ggplot(results, aes(x=true_effect)) +
  geom_line(aes(y = true_effect)) +
  geom_line(aes(y = MLest), color='red') +
  geom_line(aes(y = MLhigh), lty='dashed', color='red') +
  geom_line(aes(y = MLlow), lty='dashed', color='red') +
  geom_line(aes(y = Best), color='blue') +
  geom_line(aes(y = Bhigh), lty='dashed', color='blue') +
  geom_line(aes(y = Blow), lty='dashed', color='blue')
# Mean absolute error of each estimator. Constants like color/lty are
# settings, not data mappings, so they belong OUTSIDE aes(); inside
# aes() they create a spurious legend keyed on the literal strings.
ggplot(results, aes(x=true_effect)) +
  geom_line(aes(y = MLerror), color='red') +
  geom_line(aes(y = Berror), color='blue', lty='dashed')
##########################################
# Scratch visualisations. These reuse whatever r1, r2, ML_effect,
# posterior_effect, etc. were left over from the LAST iteration of the
# simulation loop above — single-run diagnostics, not averages.
hist(r2-r1)
abline(v=ML_effect, col='red', lwd=3)
abline(v=ML_high, col='black', lwd=3)
abline(v=ML_low, col='black', lwd=3)
hist(posterior_effect)
abline(v=Bayes_effect, col='blue', lwd=3)
# 0.159 / 0.841 quantiles approximate a +/- 1 sd central credible band.
abline(v=quantile(posterior_effect,probs = c(0.159, 1-0.159)), lwd=3)
# Overlay both estimators on the raw paired differences.
hist(r2-r1)
abline(v=ML_effect, col='red', lwd=3)
abline(v=Bayes_effect, col='blue', lwd=3, lty=3)
abline(v=c(ML_high,ML_low), col='yellow', lwd=2)
abline(v=quantile(posterior_effect,probs = c(0.159, 1-0.159)), col='green', lwd=2, lty=2)
|
597dcc409b3c589282c149ed3c4306f41b239c44
|
e828d0bcc8367858d328a50b353f638129d7df8c
|
/plot1.R
|
80225cfd36ce800390730bdf598a048fb5666947
|
[] |
no_license
|
a-diamant/expl_data_analysis
|
c25702de7750f7f28804c2e73a160c023cbb1026
|
d3c92bf83e664a846a1aaa4e4586b1a3b3a9edbc
|
refs/heads/master
| 2023-07-21T12:24:59.038671
| 2019-09-26T11:08:41
| 2019-09-26T11:08:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
plot1.R
|
## Exploratory data analysis, week 1, plot 1:
## histogram of Global Active Power on 2007-02-01 and 2007-02-02.
library(data.table)
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "power_consumption.zip")
unzip("power_consumption.zip")
list.files()
power_consumption <- fread("household_power_consumption.txt", sep = ";", header = TRUE)
power_consumption$Date <- as.Date(power_consumption$Date, format="%d/%m/%Y")
# Subset the two days of interest, THEN inspect: the original called
# head(power_consumption1) before power_consumption1 existed.
power_consumption1 <- power_consumption[(power_consumption$Date=="2007-02-01") |
                                          (power_consumption$Date=="2007-02-02"),]
head(power_consumption1)
# "?" placeholders in the raw file coerce to NA here (with a warning).
power_consumption1$Global_active_power <- as.numeric(power_consumption1$Global_active_power)
# Plot 1: Global active power. `axis` is not a hist()/plot() argument —
# `axes = FALSE` is the way to suppress the axes for manual redrawing.
hist(power_consumption1$Global_active_power,
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)",
     col = "red", xlim = c(0, 6), axes = FALSE)
# The 0..6 tick marks match the x-range (kilowatts), so they belong on
# side 1; the original put them on side 2, mislabelling the count axis.
axis(side = 1, at = seq(0, 6, by = 2), labels = seq(0, 6, by = 2))
axis(side = 2)  # default count axis
# Save plot to png.
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
d4ed64591181e3c4ad103cc11d43a09241da172f
|
330cbad4fae68f7721083c4c171337b1d746a4cc
|
/turistus_skc.R
|
961259edba8b582749e068c195e9d843fb1964cf
|
[] |
no_license
|
auksesi/rastodarbas
|
3c440cff617a8b3bff3a4a436b75fd4d43416392
|
f4213c2ba594ac0e88e8ed3ea4952b032cee7c4b
|
refs/heads/master
| 2020-06-06T06:44:29.529647
| 2019-06-19T06:05:05
| 2019-06-19T06:05:05
| 192,668,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,395
|
r
|
turistus_skc.R
|
# Tourist counts in Lithuania from Statistics Lithuania (LSD) via SDMX.
getwd()
# NOTE(review): machine-specific working directory — adjust per machine;
# setwd() in scripts is generally discouraged.
setwd("/Users/Aukse/Desktop/turizmas")
# Install-if-missing loader pattern (note: library() is normally
# preferred over require() for mandatory dependencies).
if(!require(eurostat)) install.packages("eurostat") ; require(eurostat)
if(!require(tidyverse)) install.packages("tidyverse") ; require(tidyverse)
if(!require(rsdmx)) install.packages("rsdmx") ; require(rsdmx)
if(!require(openxlsx)) install.packages("openxlsx") ; require(openxlsx)
if(!require(dplyr)) install.packages("dplyr") ; require(dplyr)
if(!require(ggplot2)) install.packages("ggplot2") ; require(ggplot2)
if(!require(tidyr)) install.packages("tidyr") ; require(tidyr)
# Download and save the data file (metadata export left disabled).
#url <- "https://osp-rs.stat.gov.lt/rest_xml/dataflow/"
#meta <- as.data.frame(meta)
#write.xlsx(meta, "LSD_meta.xlsx")
################################
# Download the data from LSD (Statistics Lithuania) and harmonise it.
# S8R500_M4091505_1 - number of domestic tourists
# Totals (2004 - 2017)
VIETINIAI <- readSDMX(providerId ="LSD",
resource = "data",
flowRef = "S8R500_M4091505_1",
dsd= TRUE)
VIETINIAI <- as.data.frame(VIETINIAI)
# String comparison on the period column; keeps periods after 2006.
VIETINIAI <- VIETINIAI %>% filter(LAIKOTARPIS > "2006-01-01")
# ATVYKE = arriving (inbound) tourists.
ATVYKE <- readSDMX(providerId ="LSD",
resource = "data",
flowRef = "S8R701_M4091301_1",
dsd= TRUE)
ATVYKE <- as.data.frame(ATVYKE)
ATVYKE <- ATVYKE %>% filter(LAIKOTARPIS > "2006-01-01")
# ISVYKE = departing (outbound) tourists.
ISVYKE <- readSDMX(providerId ="LSD",
resource = "data",
flowRef = "S8R709_M4091101_1",
dsd= TRUE)
ISVYKE <- as.data.frame(ISVYKE)
ISVYKE <- ISVYKE %>% filter(LAIKOTARPIS > "2006-01-01")
#____________
# Build one combined table of tourist counts in Lithuania.
# NOTE(review): positional select/rename assumes all three frames share
# the same column layout — confirm against the SDMX schema.
dt<- data.frame(VIETINIAI, ISVYKE, ATVYKE)
dt <- dt %>% select(-c(1,4,5,6,8,9,10,12))
names(dt)[2]<-paste("Vietiniai")
names(dt)[3]<-paste("Isvyke")
names(dt)[4]<-paste("Atvyke")
#-----------------
# Plot the combined table (title/labels intentionally in Lithuanian).
# NOTE(review): `value` and `Turistų_tipai` are not columns of dt; every
# layer overrides y and col, so these globals appear never to be
# evaluated — confirm the plot actually builds.
ggplot(dt, aes(LAIKOTARPIS, y = value, color = Turistų_tipai)) +
geom_point(aes(y = Vietiniai, col = "Vietiniai")) +
geom_point(aes(y = Atvyke, col = "Atvyke"))+
geom_point(aes(y = Isvyke, col = "Isvyke"))+
labs(title = "Išvykusių, atvykusių ir vietinių turistų skaičius Lietuvoje 2007m-2017m.",
subtitle = "Lietuvos statistikos departamentas",
x="Metai", y = "Turistų skaičius")
|
ee42b88f3647880b3e680de31fd6eb0a9ae97c91
|
772f302825f9618adbf9d7a045e8395016da538e
|
/svsocket_example.R
|
044161e28f0880bc6145a4711168bf9fd679e19f
|
[] |
no_license
|
smlab-niser/flaps
|
b89b78811ab14b35535ee5ccf1962b8f4dd61c09
|
ff0278fd177d40bff3aab66776880ef373709b68
|
refs/heads/master
| 2023-01-03T01:24:04.134557
| 2020-10-21T08:21:14
| 2020-10-21T08:21:14
| 267,589,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
svsocket_example.R
|
library(utils)
# NOT RUN {
# Query a finger daemon and return its response.
#
# Args:
#   user:  user name to look up (must be a character string)
#   host:  host running the finger daemon (default "localhost")
#   port:  TCP port of the daemon (default 79, the finger port)
#   print: if TRUE, also cat() the response to the console
# Returns (invisibly): the accumulated response text.
finger <- function(user, host = "localhost", port = 79, print = TRUE)
{
    if (!is.character(user))
        stop("user name must be a string")
    # Finger request is the user name terminated by CRLF. paste0 avoids
    # the stray space that paste(user, "\r\n") inserted before the CRLF.
    user <- paste0(user, "\r\n")
    socket <- make.socket(host, port)
    # on.exit guarantees the socket is closed exactly once on every exit
    # path; the original ALSO called close.socket() explicitly after the
    # read loop, closing the socket twice.
    on.exit(close.socket(socket))
    write.socket(socket, user)
    output <- character(0)
    # Read until the daemon closes the connection (empty read).
    repeat{
        ss <- read.socket(socket)
        if (ss == "") break
        output <- paste(output, ss)
    }
    if (print) cat(output)
    invisible(output)
}
# }
# NOT RUN {
finger("root") ## only works if your site provides a finger daemon
# }
|
dce596d17cd91b94c4c3766980c1e9f34e8ca1fd
|
6ffb2e5fad7a7414eacfbfb642e8ef579ee510bf
|
/R/functions.R
|
cd92e5cdf6d9c50eee6194633df82b4a97fd783f
|
[
"Apache-2.0"
] |
permissive
|
kproductivity/santander-customer-satisfaction
|
e6bd07507577e424ee3429f56a9061fbe81747e6
|
96b0892f878f8455dd127fe362e3686a6bdd5e04
|
refs/heads/master
| 2021-01-10T08:31:21.200230
| 2016-03-28T13:47:04
| 2016-03-28T13:47:04
| 53,279,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 708
|
r
|
functions.R
|
# Assess performance of an H2O classification model.
#
# Args:
#   myModel: H2O model to evaluate. BUG FIX: the original ignored this
#            argument and always scored the global `model.nb`.
#   newdata: H2O frame to score on; defaults to the global `new.hex`
#            (kept as default for backward compatibility).
# Side effects: prints the confusion matrix and draws the ROC curve.
# Returns: the AUC of the model on `newdata`.
assessPerf <- function(myModel, newdata = new.hex){
  perf <- h2o.performance(model = myModel, newdata = newdata)
  # print() is needed: a non-last expression inside a function is not
  # auto-printed, so the original silently discarded this.
  print(h2o.confusionMatrix(perf))
  # NOTE(review): `typ = "b"` partially matches `type`, which is already
  # supplied — confirm h2o's plot method accepts both. Kept as original.
  plot(perf, type = "roc", col = "blue", typ = "b")
  perf@metrics$AUC
}
# Generate submission file: score the Kaggle test set with `myModel`
# and write submission.csv (ID, TARGET) to the working directory.
genSubmission <- function(myModel){
# myModel - H2o model
# NOTE(review): hard-coded local path, and this relies on a global
# `model.pca` (PCA transform fitted elsewhere in the project) — both
# must exist before calling; confirm against the training script.
testPath <- "~/Projects/santander-customer-satisfaction/data/test.csv"
test.hex <- h2o.uploadFile(path = testPath, header = TRUE)
# Project the test set through the same PCA used at training time.
test.pca <- h2o.predict(model.pca, test.hex)
submit <- as.data.frame(test.hex$ID)
submit$TARGET <- as.data.frame(h2o.predict(myModel, newdata=test.pca))$predict
write.csv(submit, "submission.csv", row.names = FALSE, quote = FALSE)
}
|
109547b56ca6e2058cfaa6ea166a16fe0971d704
|
6b733d7f4cd3c0360cce9e30d1a518bbd2bac7d6
|
/man/Tab9.1.Rd
|
405b34a6e26f2f036ba980527f4f0b96e09cf360
|
[] |
no_license
|
pbreheny/hdrm
|
52c5758b2be1ee813db776b96048f506e9387d02
|
a1f2ffb7f831b382847db81ad0085c9e86a3af07
|
refs/heads/master
| 2023-04-30T05:25:04.338437
| 2023-04-20T16:54:58
| 2023-04-20T16:54:58
| 60,282,907
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,072
|
rd
|
Tab9.1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boot-sim.R
\name{Tab9.1}
\alias{Tab9.1}
\title{Reproduce Table 9.1}
\usage{
Tab9.1(
N = 100,
B = 100,
n = 100,
p = 100,
a = 10,
b = 2,
rho = 0.5,
noise = "autoregressive",
rho.noise = 0.8,
seed = 1,
...
)
}
\arguments{
\item{N}{Number of simulated realizations}
\item{B}{Number of bootstrap replications (for each data set)}
\item{n}{Sample size}
\item{p}{Number of features}
\item{a}{Number of causal ('A') variables}
\item{b}{Number of correlated ('B') variables}
\item{rho}{Correlation between 'A' and 'B' variables}
\item{noise}{Correlation structure between features ('exchangeable' | 'autoregressive')}
\item{rho.noise}{Correlation parameter for noise variables}
\item{seed}{Random number seed for reproducibility}
\item{...}{Further arguments to \code{\link{genDataABN}}}
}
\description{
Reproduces Table 9.1 from the book. If you specify any options, your results may look different.
}
\examples{
Tab9.1(N=5) # Increase N for more reliable results
}
|
65728d27c37de0bfdc1998d25640e0a29b8cd266
|
69c9338bedd17f91288a39a267dd638fe891daba
|
/exemplos/kmeans/GeneralizedKMeans.R
|
065d86d3cd671374ae3bdfb448aa9fd7166bfe08
|
[] |
no_license
|
victormmp/rec-pad
|
1925e49fe6435a25b85dac1b02482abb7a53bdb5
|
fabd9a6c044bbabb55ac8c72e587c29db53a72d0
|
refs/heads/master
| 2021-04-09T13:12:37.083123
| 2018-06-22T03:01:15
| 2018-06-22T03:01:15
| 125,459,459
| 0
| 0
| null | 2018-04-07T03:16:30
| 2018-03-16T03:38:40
|
TeX
|
UTF-8
|
R
| false
| false
| 3,232
|
r
|
GeneralizedKMeans.R
|
# Script preamble.
# NOTE(review): rm(list=ls()) wipes the user's entire workspace and
# cat("\014") clears the RStudio console — both are side effects that
# are generally discouraged in shared scripts.
rm(list=ls())
cat("\014")
# library("tictoc")
library("rgl")
library("plot3D")
cat("===== Starting Routine=====\n\n")
cat(">> Creating functions\n")
# Lloyd's k-means: partition the rows of X into k clusters.
#
# Args:
#   X:     numeric matrix, N observations (rows) x n features (columns)
#   k:     number of clusters
#   maxit: number of assignment/update iterations to run (no early stop,
#          matching the original behavior)
# Returns: list(Mc = k x n matrix of cluster centers,
#               Clustx = N x 1 matrix of cluster labels in 1..k)
kMeans <- function (X, k, maxit) {
  N <- dim(X)[1]
  n <- dim(X)[2]
  Clustx <- matrix(nrow = N, ncol = 1)
  # Initialize centers with k distinct rows of X. drop = FALSE keeps a
  # matrix even when k == 1 (the original collapsed to a vector).
  Mc <- X[sample(N, k), , drop = FALSE]
  for (it in seq_len(maxit)) {
    # Assignment step: nearest center by squared Euclidean distance.
    for (i in seq_len(N)) {
      xrep <- matrix(X[i, ], nrow = k, ncol = n, byrow = TRUE)
      Clustx[i] <- which.min(rowSums((Mc - xrep)^2))
    }
    # Update step: move each center to the mean of its members.
    for (j in seq_len(k)) {
      xj <- which(Clustx == j)
      # drop = FALSE keeps a 1-row matrix for singleton clusters
      # (colMeans errors on a bare vector, as the original did);
      # an empty cluster keeps its previous center instead of crashing.
      if (length(xj) > 0) {
        Mc[j, ] <- colMeans(X[xj, , drop = FALSE])
      }
    }
  }
  list(Mc = Mc, Clustx = Clustx)
}
# Multivariate normal density at column vector x, with mean m,
# covariance matrix K, and dimension n. Returns a 1x1 matrix (the
# quadratic form is a matrix product), same as the original.
pdfnvar <- function(x, m, K, n) {
  norm_const <- 1 / sqrt(((2 * pi)^n) * det(K))
  dev <- x - m
  quad_form <- t(dev) %*% solve(K) %*% dev
  norm_const * exp(-0.5 * quad_form)
}
cat(">> Creating samples\n")
nSamples <- 1000
# Number of clusters
k <- 3
# Max iterations for k-means
maxit <- 100
# Each matrix folds 1000 draws into 500 rows x 2 cols, giving an
# isotropic 2-D blob centred at (mean, mean).
xg1 <- matrix(rnorm(nSamples,mean=2,sd=1),ncol=2)
xg2 <- matrix(rnorm(nSamples,mean=4,sd=0.5),ncol=2)
xg3 <- matrix(rnorm(nSamples,mean=0,sd=0.6),ncol=2)
# Full sample: 1500 points from 3 overlapping clusters.
X <- rbind(xg1,xg2,xg3)
plotLim <- c(-10,10)
# plot(xg1[,1],xg1[,2],xlim=plotLim,ylim=plotLim)
# par(new=T)
# plot(xg2[,1],xg2[,2],xlim=plotLim,ylim=plotLim)
# par(new=T)
# plot(xg3[,1],xg3[,2],xlim=plotLim,ylim=plotLim)
cat(">> Calculatint centers\n")
# Run k-means and unpack centers / cluster labels.
McList <- kMeans(X,k,maxit)
Mc <- McList$Mc
Clustx <- McList$Clustx
# xCluster1[[i]] holds the row indices of X assigned to cluster i.
xCluster1 <- list()
for(i in seq(k)) {
ici <- which(Clustx == i)
xCluster1[[i]] <- ici
}
# ixg1 <- which(Clustx == 1)
# ixg2 <- which(Clustx == 2)
# ixg3 <- which(Clustx == 3)
print(Mc)
# plot(X[ixg1,1],X[ixg1,2],xlim=plotLim,ylim=plotLim,col=2)
# par(new=T)
# plot(X[ixg2,1],X[ixg2,2],xlim=plotLim,ylim=plotLim,col=3)
# par(new=T)
# plot(X[ixg3,1],X[ixg3,2],xlim=plotLim,ylim=plotLim,col=4)
# par(new=T)
# plot(Mc[1,1],Mc[1,2],xlim=plotLim,ylim=plotLim,col=1,pch=15)
# par(new=T)
# plot(Mc[2,1],Mc[2,2],xlim=plotLim,ylim=plotLim,col=4, pch=15)
# Point-shape reference:
# http://www.sthda.com/english/wiki/r-plot-pch-symbols-the-different-point-shapes-available-in-r
###########
cat(">> Generating 3D plot\n")
# Evaluate each cluster's fitted Gaussian on a regular grid and plot the
# resulting mixture density as a 3-D surface.
seqx1x2 <- seq(-10, 10, 0.1)
lseq <- length(seqx1x2)
# Per-cluster ML estimates: member mean vector and covariance matrix.
m1 <- colMeans(X[xCluster1[[1]], ])
K1 <- cov(X[xCluster1[[1]], ])
m2 <- colMeans(X[xCluster1[[2]], ])
K2 <- cov(X[xCluster1[[2]], ])
m3 <- colMeans(X[xCluster1[[3]], ])
K3 <- cov(X[xCluster1[[3]], ])
M1 <- matrix(nrow = lseq, ncol = lseq)
M2 <- matrix(nrow = lseq, ncol = lseq)
M3 <- matrix(nrow = lseq, ncol = lseq)
for (i in seq_len(lseq)) {
  for (j in seq_len(lseq)) {
    # Grid point as a column vector, as pdfnvar expects.
    x1x2 <- matrix(c(seqx1x2[i], seqx1x2[j]), ncol = 1)
    M1[i, j] <- pdfnvar(x1x2, m1, K1, 2)
    M2[i, j] <- pdfnvar(x1x2, m2, K2, 2)
    M3[i, j] <- pdfnvar(x1x2, m3, K3, 2)
  }
}
# Mixture weights = cluster membership proportions (sum to 1).
pi1 <- length(xCluster1[[1]]) / (length(Clustx))
pi2 <- length(xCluster1[[2]]) / (length(Clustx))
pi3 <- length(xCluster1[[3]]) / (length(Clustx))
# BUG FIX: the original computed `pi2*M1 + pi2*M2 + pi3*M3`, weighting
# cluster 1 by pi2 and leaving pi1 unused.
M12 <- pi1*M1 + pi2*M2 + pi3*M3
# persp3d(seqx1x2,seqx1x2,M1,col='red')
# persp3d(seqx1x2,seqx1x2,M2,col='blue',add=TRUE)
persp3d(seqx1x2, seqx1x2, M12, col = 'green', add = FALSE)
cat("\n===== Routine Finished=====\n")
|
6b2e654b14148c86b43875b6874f5bcf7f6e6428
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NHLData/examples/Sch9495.Rd.R
|
df046b3d2260214c843f1c3754d0cacb58f0d0d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
Sch9495.Rd.R
|
# Extracted example for the NHLData package's Sch9495 dataset
# (1994-1995 NHL season results).
library(NHLData)
### Name: Sch9495
### Title: 1994-1995 Season Results
### Aliases: Sch9495
### Keywords: datasets
### ** Examples
# Load the dataset into the workspace.
data(Sch9495)
## This command will show the results for the first game of the season.
Sch9495[1,]
|
1027663223c57e7c7c048d4396f6308d4efa723a
|
a2e3e68f410a313e0539524c97d806daa802e143
|
/11_19_2020/script.R
|
4260bb2499027aaef885f693bc0ebe4683cc5ff3
|
[] |
no_license
|
kmdono02/Stats_R_Teaching
|
7d0887bb41d3b5c6392d849411c88958225e3329
|
2de52240eb132a6d9dd208cb9ff1f600b0cefaaf
|
refs/heads/main
| 2023-05-29T18:09:54.524290
| 2021-06-08T13:57:33
| 2021-06-08T13:57:33
| 305,139,979
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,606
|
r
|
script.R
|
library(tidyverse)
library(readr)
# Base R plotting (very brief): the plot() function.
# Option 1: dataset with only 2 variables, can just specify dataset as argument
plot(cars) # cars is example dataset included in R for tutorial purposes
# Option 2: specify X and Y variables separately for plot.
# Missing values in the CSV are coded as ".", "", or "NA".
ibis_data <- read_csv("../Data/Cross-sec_full.csv", na = c(".","","NA"))
# Plotting scatterplot of AOSI total score and MSEL cognitive score at 6 months
plot(x=ibis_data$`V06 aosi,total_score_1_18`,
y=ibis_data$`V06 mullen,composite_standard_score`)
# Note that this is a base R function, so you have to use standard notation unlike
# the tidyverse functions from before. That is, we need to specify the variables
# as separate vectors in the data frame using the data$x notation.
# You can adjust traits of the plot like title, x and y axis labels, limits, etc.
# plot() is sometimes useful (see later sessions), but ggplot is a much
# better option in general, as you will see.
# ggplot2 Plotting
# ggplot2 is a package for R which is included in the tidyverse package
# loading tidyverse loads all of its packages (dplyr, ggplot2, etc.)
# ggplot works by considering your plot as a set of layers plot on top of one another
# plus a setting used to tweak aspects of these layers
# consider the same plot as the above example.
# First, we initialize a ggplot using the ggplot() option
# Here you can specify global settings for the plot, that is, things you want to
# carry through for all of the plot layers
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))
# We specify the dataset for the whole plot, as well as the x and y axis variables
# for the whole plot (mapping argument). Note that for settings in the plot that
# equal variables in the data, you must wrap them around the aes() function so that
# R knows these objects comes from the data referenced before and don't stand alone.
# Note also that as one before, since the variable names include spaces and commas,
# we must wrap their names around single quotes `` so ggplot knows they are a single
# variable name
# now we have our canvas to paint our plot onto. Let's add a layer of points to
# create a scatterplot. This is done using the geom_point() function. We add the
# layer using the + key
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))+
geom_point()
# within the geom_point() function call, we can tweak settings for the points/layer
# such as their size, shape, and color using different arguments
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))+
geom_point(size=5, color="blue", shape=2)
# See https://www.datanovia.com/en/blog/ggplot-point-shapes-best-tips/
# for the different shape types available
# We can also have this settings depend on variables in the data. This is done
# using the mapping with aes() call we used before, though this changes the settings
# for this layer only. Thus the settings specified inside the ggplot() call create
# default settings for the whole plot which can be altered for specific layers later on
# We color the points based on diagnosis
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))+
geom_point(mapping=aes(color=SSM_ASD_v24))
# Let's add another layer, by adding a trend line to illustrate the association
# between the variables
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))+
geom_point(mapping=aes(color=SSM_ASD_v24))+
geom_smooth()
# The shaded region represent error for the trend line estimate
# This is nice, but it would useful to separate trend lines per group
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`))+
geom_point(mapping=aes(color=SSM_ASD_v24))+
geom_smooth(mapping=aes(color=SSM_ASD_v24))
# To do so and then also color by group, we need to specify this within the layer
# Note that we have specified the same setting, color=group, for both layers
# Thus, we may want to just add this to the default settings in the ggplot so we
# are not writing this line over and over
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth()
# Much cleaner.
# Suppose we want to add the overall trend line for the whole sample back in
# We can do this by adding in a second geom_smooth layer to paint on top of the
# current plot
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth()+
geom_smooth(color="blue")
# We have to "turn off" the color=group setting in the ggplot() call in this layer.
# We can do this by specifying a static color like "blue" for the layer.
# If you don't like the error bars, add se=FALSE for the layers of interest
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")
# An additional layer we can add to the plot are facets. These are just panels
# which divide your plots up based on a variable of interest.
# There are two ways you can facet. One is using the facet_grip() function.
# This lets you directly specify the row and column variable using what is called
# formula notation: y ~ x
# You will see this again during the regression section.
# Let's only facet by gender
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(Gender~.)
# Or columns
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)
# Or row and column variable
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(Study_Site~Gender)
# There is also facet_wrap(), which I'll leave to use to explore.
# It uses the same y ~ x notation; see what you can create!
# To finalize this plot (we'll remove the Site faceting), we can edit the various
# text in the plot: title, subtitle, axis labels, tick marks, limits, etc.
# Various functions are used depending on what you want to edit
# labs() lets you edit the title, subtitle, footer (caption), legend titles
# among others
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")
# xlab() and ylab() let you edit the x and y axis
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")+
xlab("6 month AOSI Total Score")+
ylab("6 month MSEL Composite Std. Score")
# For any text you can specify \n to force a new line
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")
# To edit axis limits there are various functions you can use, including
# xlim(), ylim(), scale_x_continuous(), scale_y_continuous(),
# scale_x_discrete(), scale_y_discrete()
# The last 2 functions give you a lot of options for editing the limits, tick
# marks, etc.
# The discrete functions are used if your variable is discrete (a character or factor)
# The last set of functions we discuss are the theme() related functions
# These let you fine tune the asthetics of the plot, specifically
# font size and styles, sizes of various elements, etc.
# We'll go over how to increase the size of all of your plot text which is
# important if you want to export your plot to an image
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme(text = element_text(size=20))
# text refers to all text in the gplot, with element_text() being a function used
# to specify the traits you wish to change (size, font style, font type such as
# bold or italicize, color, etc.).
# You can also specify certain text you wish to edit instead of all text by using
# that text's name. For example below, we rotate the x axis labels. This is
# useful when you have long labels that overlap one another (if the axis variable
# is a group label for example)
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme(axis.text.x = element_text(angle=90, hjust=1))
# angle specified the amount of rotation and hjust adds some horizontal justification
# to the text
# We can also combine these two changes
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color=" 24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme(text = element_text(size=20),
axis.text.x = element_text(angle=90, hjust=1))
# Lastly, there are some theme templates that you can specify that greatly alter
# the look of your plot. I often use the theme_bw() template, though see
# https://ggplot2.tidyverse.org/reference/ggtheme.html for more examples
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color="24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# Note that you should specify the template call first, then add the theme()
# call to include your additional edits
# To close out this script, let's consider a different plot
# Let's do a boxplot of MSEL by diagnosis
# Instead of the geom_point() layer, we will call the geom_boxplot() layer
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color="24 month ASD diagnosis")+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20),
axis.text.x = element_text(angle=90, hjust=1))
# We see that color just colors the outline of the boxplot; we need to use fill
# instead to fill in the plots with color
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
fill=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
fill="24 month ASD diagnosis")+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# We can make this plot cleaner by formatting the ASD group labels
# We could do this in the data itself, though we may just want to edit the way
# the labels are displayed in the plot rather than changing the whole dataset
# This can be done using the scale_x_discrete() function
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
fill=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
fill="24 month ASD diagnosis")+
scale_x_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# You can see we adjust the group labels using the label= argument, with a
# vector inside of the form "old"="new", with groups separated by commas
# Note that this DOES NOT change the color labels as these are not part of the x
# axis. You can do this using the scale_fill_discrete() function
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
fill=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
fill="24 month ASD diagnosis")+
scale_x_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
scale_fill_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# ggplot gives you a lot of flexibility for choosing the colors you would like used
# to represent the groups.
# The function used depends on if you are using a color call, fill call, etc.
# For fill calls, you can edit the colors using scale_fill_manual()
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
fill=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
fill="24 month ASD diagnosis")+
scale_x_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
scale_fill_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
scale_fill_manual(values=c("green", "red"))+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# You see that this overwrites the previous scale_fill_discrete() call and removes
# our labels!
# Luckily, we can also use the scale_fill_manual() call to change labels AND
# specify colors for the fill
ggplot(data=ibis_data, mapping=aes(x=SSM_ASD_v24,
y=`V06 mullen,composite_standard_score`,
fill=SSM_ASD_v24))+
geom_boxplot()+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
fill="24 month ASD diagnosis")+
scale_x_discrete(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"))+
scale_fill_manual(labels=c("NO_ASD" = "ASD Negative",
"YES_ASD" = "ASD Positive"),
values=c("green", "red"))+
xlab("24 month ASD diagnosis")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# So why would you ever use scale_fill_discrete() then? It uses the default colors
# which are usually quite nice. scale_fill_manual() requires you to specify your
# own colors, so if you just want to change the labels or other traits, but want
# to stick with the default colors, you can just use scale_fill_discrete()
# For continuous variables which are colored, you can use various functions
# to specify gradients for the color range.
# See http://www.sthda.com/english/wiki/ggplot2-colors-how-to-change-colors-automatically-and-manually
# For examples and details
# Finally, let's save our plot as an image on our computer
# First, note that all we have done so far is print the plots.
# We can also save the plot as an object an R using the same commands we have seen
# before
ex_plot <- ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color="24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
# and then can print it by calling the object
ex_plot
# to save a plot as an image, the best way for a ggplot is using the function
# ggsave()
# The first two arguments are the plot of interest, and then the file path and
# name you'd like to save the plot to
ggsave(ex_plot, filename = "plots/scatterplot_ex.jpg")
# We can also save as a png by changing the extension in the filename
ggsave(ex_plot, filename = "plots/scatterplot_ex.png")
# To edit the size of the saved plot, we have some options. I like to use the
# scale= argument to expand or shrink the plot
ggsave(ex_plot, filename = "plots/scatterplot_ex.png",
limitsize = FALSE, scale=2.5)
# Note that R will annoyingly limit the size of the plot you can save by default,
# requiring to add in the limitsize = FALSE argument call for "big" plots. I
# just add this argument in for all saved plots as it doesn't do any harm (unless
# your plot is huge and takes forever to save which will likely never happen)
# Note that you can omit the first argument if you print the plot before hand, as
# the function automatically takes the last printed plot as the first agrument
# something else is specified there
ex_plot
ggsave(filename = "plots/scatterplot_ex.png",
limitsize = FALSE, scale=2.5)
# Or alternatively
ggplot(data=ibis_data, mapping=aes(x=`V06 aosi,total_score_1_18`,
y=`V06 mullen,composite_standard_score`,
color=SSM_ASD_v24))+
geom_point()+
geom_smooth(se=FALSE)+
geom_smooth(color="blue")+
facet_grid(~Gender)+
labs(title="My title", subtitle = "My subtitle", caption="My footer",
color="24 month ASD diagnosis")+
xlab("6 month AOSI\nTotal Score")+
ylab("6 month MSEL\nComposite Std. Score")+
theme_bw()+
theme(text = element_text(size=20))
ggsave(filename = "plots/scatterplot_ex.png",
limitsize = FALSE, scale=2.5)
# You now should have all the tools to understand the gist of the NBA data plots
# code included in the RMD file which corresponds to the HTML slides.
# Have fun playing around with your data visualization! We will cover much more
# visualization examples with ggplot once we start discussing specific analyses
# in later sessions! Next session we discuss R Markdown (that RMD file is an
# example of this)!
|
4ca3cdda09ffffbb2e4e97b91a4bb85823fdc7e1
|
409490d9da29446f5fb1672eab7e774731554785
|
/R/list.findi.R
|
60fd1e081d49ba05313c612405948b6c08fc67e5
|
[
"MIT"
] |
permissive
|
timelyportfolio/rlist
|
8004c472fb6835182773d4458c9d604cb03795a3
|
d3299cec59c36f9295493feea3d53d21278a8a2a
|
refs/heads/master
| 2020-11-30T23:33:33.408653
| 2014-08-07T16:28:24
| 2014-08-07T16:28:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
list.findi.R
|
#' Find the indices of a number of members in a list that
#' meet given condition
#'
#' Returns the indices of up to \code{n} members of \code{.data} for which
#' \code{cond} evaluates to \code{TRUE}. The condition is captured unevaluated
#' (non-standard evaluation) and evaluated within each list member, so member
#' fields such as \code{type} or \code{score} can be referenced directly.
#'
#' @param .data \code{list}
#' @param cond A logical lambda expression, evaluated inside each list member
#' @param n The maximal number of members to find out
#' @param envir The environment to evaluate mapping function
#' @return An integer vector of at most \code{n} indices of the members
#'   satisfying \code{cond}
#' @name list.findi
#' @export
#' @examples
#' \dontrun{
#' x <- list(p1 = list(type="A",score=list(c1=10,c2=8)),
#'        p2 = list(type="B",score=list(c1=9,c2=9)),
#'        p3 = list(type="B",score=list(c1=9,c2=7)))
#' list.findi(x,type=="B")
#' list.findi(x,min(score$c1,score$c2) >= 8)
#' list.findi(x,min(score$c1,score$c2) <= 8,2)
#' }
list.findi <- function(.data,cond,n=1L,envir = parent.frame()) {
  # substitute() captures `cond` unevaluated so the internal worker can
  # evaluate it inside each list member (non-standard evaluation)
  list.findi.internal(.data,substitute(cond),n,envir)
}
|
7147e4ab1a0ec14fcd6e212d64ffe2687e81a7ff
|
cc146174e877dbd443cdd63ba03ff7d13e7adc71
|
/R/var_theo.R
|
efe23337d46f417a4d523f10b085e42191fe0914
|
[] |
no_license
|
ccombesGG4/AmoRosoDistrib
|
ef5621b143d2132b6162e24f8a093bb4795b6c0b
|
b1e0fad34359ba986480aa7f0d078ff4d7ddc960
|
refs/heads/main
| 2023-08-24T04:35:49.499948
| 2021-10-04T08:52:57
| 2021-10-04T08:52:57
| 359,819,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,440
|
r
|
var_theo.R
|
#############################################################################
# Copyright (c) 2021 Hon Keung Tony Ng and Catherine COMBES
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#
#############################################################################
#' Theoretical variance of the four-parameter generalized Gamma distribution
#'
#' Computes the variance implied by a fitted parameter vector, which allows
#' verifying results for parameters estimated with the mimic function.
#'
#' @param theta Numeric vector of the 4 parameters: scale (a), shape (l),
#'   power (c) and location (mu). The location does not affect the variance.
#' @return The theoretical variance implied by \code{theta}.
#' @export
var_theo <- function(theta){
  scale_a <- theta[1]
  shape_l <- theta[2]
  power_c <- theta[3]
  # theta[4] is the location parameter mu; a pure shift, so it drops out
  g_shape <- gamma(shape_l)
  g_first <- gamma((power_c * shape_l + 1) / power_c)  # first raw moment term
  g_second <- gamma((power_c * shape_l + 2) / power_c) # second raw moment term
  scale_a^2 * (g_second * g_shape - g_first^2) / g_shape^2
}
|
59f65a5048a001c44d66b0ac4c802de3a22cc3e3
|
dd0d26163c4a0498de5b25e4ee57c4ce70b2676d
|
/man/setupConsoleMonitor.Rd
|
08b428bc7afec1958e682137d57c53675c1e24fd
|
[] |
no_license
|
jakobbossek/ecr
|
a1f97be9b4cb3b2538becebb38c9a5085b8464c9
|
f9954f5b1374cc70776f8b7e780f906e57ca50b7
|
refs/heads/master
| 2020-04-04T07:26:32.216427
| 2017-06-06T11:05:27
| 2017-06-06T11:05:27
| 17,904,690
| 13
| 5
| null | 2016-09-27T10:30:10
| 2014-03-19T13:15:56
|
R
|
UTF-8
|
R
| false
| true
| 807
|
rd
|
setupConsoleMonitor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monitor.console.R
\name{setupConsoleMonitor}
\alias{setupConsoleMonitor}
\title{Simple stdout monitoring function.}
\usage{
setupConsoleMonitor(show.info.stepsize = 5L, num.format = "\%g")
}
\arguments{
\item{show.info.stepsize}{[\code{integer(1)}]\cr
Adjust the stepsize of iterations with informative messages.}
\item{num.format}{[\code{character(1)}]\cr
Number format for output of numeric parameters. See the details section of
the manual for \code{\link[base]{sprintf}} for details.
Default is \dQuote{\%g}.}
}
\value{
[\code{ecr_monitor}]
}
\description{
This is the default monitoring function used by ecr. It simply outputs
the iteration as well as minimal, mean and maximal target values from the current
population.
}
|
0906202c549b89b588db851f191ff68d7836ae60
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/mixR/R/normalEM2.R
|
d91d8ad966941d877caa3a2bdaf4cd6827375688
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,569
|
r
|
normalEM2.R
|
## normal mixture models (grouped data)
## Fits a normal mixture model to grouped (binned) data by EM.
## NOTE(review): the exact column layout of `x` is defined by the helpers
## initz(), norm_ev_g() and norm_uv_g(), which are not visible here; column 3
## is demonstrably the bin counts (used for the BIC sample size below).
## Arguments:
##   x            grouped data (column 3 = bin counts)
##   ncomp        number of components; required when pi/mu/sd are not all given
##   pi, mu, sd   optional starting values (mixing proportions, means, sds)
##   ev           TRUE = equal-variance components, FALSE = unequal variances
##   mstep.method accepted but not used in this body (review: possibly vestigial)
##   init.method  passed through to initz() for data-driven starting values
##   tol          EM convergence tolerance
##   max_iter     maximum number of EM iterations
## Returns an object of class "mixfitEM".
normalEM2 <- function(x, ncomp = NULL, pi = NULL, mu = NULL, sd = NULL, ev,
                      mstep.method, init.method, tol = 1e-6, max_iter = 500) {
  # check if initial values are missing
  if(is.null(pi) & is.null(mu) & is.null(sd)) {
    # none supplied: initialize all three from the data, which requires ncomp
    if(is.null(ncomp)) stop("provide 'ncomp' or all of 'pi', 'mu' and 'sd'.")
    init <- initz(x, ncomp = ncomp, init.method)
    pi <- init$pi
    mu <- init$mu
    sd <- init$sd
    ncomp <- length(pi)
  } else if (is.null(pi) | is.null(mu) | is.null(sd)) {
    # some (but not all) supplied: infer ncomp from whichever is present,
    # then fill in only the missing vectors from the data-driven initializer
    if(!is.null(pi)) ncomp <- length(pi)
    if(!is.null(mu)) ncomp <- length(mu)
    if(!is.null(sd)) ncomp <- length(sd)
    init <- initz(x, ncomp = ncomp, init.method)
    if(is.null(pi)) pi <- init$pi
    if(is.null(mu)) mu <- init$mu
    if(is.null(sd)) sd <- init$sd
  }
  # all three starting vectors must describe the same number of components
  if(length(pi) != length(mu) || length(pi) != length(sd) ) {
    stop("the length of 'pi', 'mu' and 'sd' should be the same.")
  }
  count <- x[, 3]  # bin counts; sum(count) is the sample size used in BIC
  # run the EM loop in the appropriate helper: equal- vs unequal-variance
  if(ev) {
    fit <- norm_ev_g(x, pi, mu, sd, max_iter, tol)
  } else {
    fit <- norm_uv_g(x, pi, mu, sd, max_iter, tol)
  }
  # unpack the positional result returned by the helper
  pi_new <- fit[[1]]
  mu_new <- fit[[2]]
  sd_new <- fit[[3]]
  loglik <- fit[[4]]
  iter <- fit[[5]]
  comp.prob <- fit[[6]]
  # free-parameter count: equal variance -> ncomp means + (ncomp-1) weights
  # + 1 common sd = 2*ncomp; otherwise + ncomp sds = 3*ncomp - 1
  aic <- -2* loglik + 2 * ifelse(ev, 2 * ncomp, 3 * ncomp - 1)
  bic <- -2* loglik + log(sum(count)) * ifelse(ev, 2 * ncomp, 3 * ncomp - 1)
  res <- list(pi = pi_new, mu = mu_new, sd = sd_new, iter = iter, loglik = loglik,
              aic = aic, bic = bic, data = x, comp.prob = comp.prob, family = "normal")
  class(res) <- "mixfitEM"
  return(res)
}
|
ca866cd3d37a9b7142578c46ce82484ef4898b91
|
dc41b39f5ba677804e02cc8c6985fb28e2fd4ad6
|
/data-raw/internal.R
|
dcd4bb45958abb4710b274fb854e1e1bfafecc6e
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
CamBullen/shinywqg
|
0a5738dbe5ff37a0f95c709d62e0397f7abb266d
|
e56f984f97984ff7b1e555c2dac404af0847363c
|
refs/heads/master
| 2023-04-01T15:00:21.735563
| 2021-05-03T20:05:20
| 2021-05-03T20:05:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,593
|
r
|
internal.R
|
# data-raw script: builds the package's internal data objects from the
# BC Data Catalogue (bcdata) and the wqbc package, then saves them as
# internal package data (sysdata.rda) via usethis::use_data().
library(readr)
library(dplyr)
library(stringr)
library(magrittr)
library(wqbc)
# BC Data Catalogue record id for the water-quality limits table
hash_limits <- "85d3990a-ec0a-4436-8ebd-150de3ba0747"
limits <- bcdata::bcdc_get_data(record = hash_limits)
# record ids for the copper acute/chronic lookup tables
hash_cu_acute <- "23ada5c3-67a6-4703-9369-c8d690b092e1"
hash_cu_chronic <- "a35c7d13-76dd-4c23-aab8-7b32b0310e2f"
#limits <- readr::read_csv("https://raw.githubusercontent.com/bcgov/wqg_data/master/all_wqgs.csv")
## modify limits to be what databc should be
## switch off code to read from databc therefore uses internals.
## once working then update databc with new limits and lookups.
# limits$Units[limits$Variable == "Copper" & limits$Component == "Dissolved"] <- "ug/L"
# limits$Direction[limits$Variable == "Copper" & limits$Component == "Dissolved"] <- "Upper Limit"
# limits$Limit[limits$Variable == "Copper" &
#                limits$Use == "Aquatic Life - Freshwater" &
#                limits$Media == "Water" &
#                limits$Type == "Short-term acute"] <- hash_acute
# limits$Limit[limits$Variable == "Copper" &
#                limits$Use == "Aquatic Life - Freshwater" &
#                limits$Media == "Water" &
#                limits$Type == "Long-term chronic"] <- hash_chronic
# limits$LimitNotes[limits$Variable == "Copper" & limits$Component == "Dissolved"] <- NA
internal_tables <- list(limits)
#### need to fix hash to be correct - missing last 2 digits
#names(internal_tables) <- "85d3990a-ec0a-4436-8ebd-150de3ba07"
names(internal_tables) <- hash_limits
# fetch each copper lookup table and store it under its record id
lookup_hash <- c(hash_cu_chronic, hash_cu_acute)
for (file in lookup_hash) {
  lookup <- bcdata::bcdc_get_data(record = file)
  internal_tables[[paste0(file)]] <- lookup
}
# EMS code mappings shipped with wqbc, renamed for consistency
codes <- wqbc::codes
codes <- codes %>% dplyr::rename(EMS_Code = Code)
# help text shown in the app when a guideline value is missing
missing_help <- "There are two reasons why guideline values may be missing:
1. A condition was not met;
2. There is no available equation for that variable/use/term combination."
# zero-row templates used by the app as empty defaults
empty_raw <- limits[0, ]
empty_evaluate <- limits %>%
  mutate(ConditionPass = NA, Guideline = NA)
empty_evaluate <- empty_evaluate[0, ]
empty_report <- empty_evaluate[c("Variable", "Use", "Media", "PredictedEffectLevel",
                                 "Type", "Statistic", "Guideline", "Reference",
                                 "Reference Link", "Overview Report Link",
                                 "Technical Document Link")]
empty_report <- empty_report %>% rename(`Effect Level` = PredictedEffectLevel)
# persist everything as internal package data
usethis::use_data(limits, internal_tables, codes, empty_raw, empty_report,
                  empty_evaluate, missing_help, internal = TRUE, overwrite = TRUE)
|
7253e137990245adc010eea6a315a75282d36eda
|
73259f4c8fdda2f4312915dfeb47003cb3047a17
|
/2_analyse_plot_WEMs.R
|
25b174a027f8095bca85ed04152d243662bd837e
|
[] |
no_license
|
MilanvanL/debating_evil
|
de1a3f81afb9d35773e1b062a91bfd1d9bbe3730
|
642e6d07cb7089eee1d1f3789e0ca736690089a8
|
refs/heads/master
| 2020-05-23T13:57:18.293743
| 2019-07-02T13:38:23
| 2019-07-02T13:38:23
| 186,790,366
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,833
|
r
|
2_analyse_plot_WEMs.R
|
###################
# This R-script is used for the analysis of parliamentary data in:
# Debating Evil: Using Word Embeddings to Analyse Parliamentary Debates on War Criminals in the Netherlands
# Authors: Ralf Futselaar and Milan van Lange
# NIOD Institute for War, Holocaust, and Genocide Studies
# 2019
# Email: m.van.lange@niod.knaw.nl
###################
# Recommended version of R (Windows): R version 3.3.3 (2017-03-06) -- "Another Canoe"
# Install Packages --------------------------------------------------------
install.packages('devtools')
library(devtools)
install.packages('magrittr')
devtools::install_github("bmschmidt/wordVectors") # See also: https://github.com/bmschmidt/wordVectors
# Load Packages -----------------------------------------------------------
library(wordVectors)
library(magrittr)
# Set WD and load files --------------------------------------------------------------
# Set working directory
setwd("YOUR/PATH/HERE")
# Read binary vector files (.bin) from your working directory
vec4555 <- read.binary.vectors("vecs_1945-55.bin")
vec6575 <- read.binary.vectors("vecs_1965-75.bin")
# Define Word Groups ------------------------------------------------------
# War criminal
OM <- c("oorlogmisdadiger", "oorlogsmisdadigers")
# Victims
SL <- c("slachtoffer", "slachtoffers")
# Treason/traitor
TR <- c("landverrader", "landverraders", "verrader", "verraders", "landverraad")
# Life imprisonment
LI <- c("levenslang","levenslange","vrijheidsstraf","gevangenisstraffen","gevangenisstraf", "opsluiting", "hechtenis")
# Death penalty
DP <- c("doodstraf", "doodstraffen")
# Calculate cosine similarities for treason and victim -------------------------------------------
# Define war criminal discourse (1945-1955) by creating list of 250 nn's
misd_words_4555 = vec4555 %>% nearest_to(vec4555[[OM]],250) %>% names
sample(misd_words_4555,5) # Have a look at the nn's
misds_4555 = vec4555[rownames(vec4555) %in% misd_words_4555,]
# Compare vector space of war cirminal-discourse with that of victim and traitor words
Similarity_victim_vector = misds_4555 %>% cosineSimilarity(vec4555[[SL]])
Similarity_treason_vector = misds_4555 %>% cosineSimilarity(vec4555[[TR]])
# Do the same for 1965-1975
misd_words_6575 = vec6575 %>% nearest_to(vec6575[[OM]],250) %>% names
sample(misd_words_6575,5)
misds_6575 = vec6575[rownames(vec6575) %in% misd_words_6575,]
SL_score_6575 = misds_6575 %>% cosineSimilarity(vec6575[[SL]])
TR_score_6575 = misds_6575 %>% cosineSimilarity(vec6575[[TR]])
# Plot Figure 1: Top 250 war criminal related words 1945-1955 (grey) and 1965-1975 (black) plotted by their cosine similarity to victim (x) and traitor (y) words. -------------------------------------------------------------------
# Set working directory to store output as PDF-file
setwd("YOUR/PATH/HERE")
# Create PDF-file, open connection
pdf('vic_trait_1.pdf', width=12, height=12)
# Make the plot
par(mfrow=c(1,1), cex=0.8)
plot(Similarity_victim_vector,Similarity_treason_vector,type='n',
xlim=c(0.1,0.9), ylim=c(0.1,1.0),
xlab='CosineSimilarity victim words - war criminal words',
ylab='CosineSimilarity treason words - war criminal words',
main="Closeness of 250 war criminal related words in vector space (1945-1955 & 1965-1975)",
cex.main=1.5, cex.lab=1.5
)
# Plot word positions
points(Similarity_victim_vector,Similarity_treason_vector, pch=17, col = 'grey')
points(SL_score_6575,TR_score_6575, pch=16, col = 'black')
# Plot ablines
abline(h=mean(Similarity_treason_vector), v=mean(Similarity_victim_vector), col='grey')
abline(h=mean(TR_score_6575), v=mean(SL_score_6575), col='black')
# Add legend
legend('topleft', legend=c("1945-1955", "1965-1975"),
col=c('grey', 'black'), pch=c(17,16), lwd=8, cex=2)
# Close connection
dev.off()
# Calculate cosine similarities for death penalty and life imprisonment -------------------------------------------
# Compare vector space of war cirminal-discourse with that of LI and DP words
Similarity_LI_vector = misds_4555 %>% cosineSimilarity(vec4555[[LI]])
Similarity_DP_vector = misds_4555 %>% cosineSimilarity(vec4555[[DP]])
# Do the same for 1965-1975
LI_score_6575 = misds_6575 %>% cosineSimilarity(vec6575[[LI]])
DP_score_6575 = misds_6575 %>% cosineSimilarity(vec6575[[DP]])
# Plot Figure 2: Top 250 war criminal related words 1945-1955 (grey) and 1965-1975 (black) plotted by their cosine similarity to life imprisonment (x) and death sentence words (y). --------------------------------
# Set working directory to store output as PDF-file
setwd("YOUR/PATH/HERE")
# Create PDF-file, open connection
pdf('dp_li_1.pdf', width=12, height=12)
# Make the plot
par(mfrow=c(1,1), cex=0.8)
plot(Similarity_LI_vector,Similarity_DP_vector,type='n',
xlim=c(0.1,0.9), ylim=c(0.1,1.0),
xlab='CosineSimilarity life imprisonment words - war criminal words',
ylab='CosineSimilarity death penalty words - war criminal words',
main="Closeness of 250 war criminal related words in vector space (1945-1955 & 1965-1975)",
cex.main=1.5, cex.lab=1.5
)
# Plot word positions
points(Similarity_LI_vector,Similarity_DP_vector, pch=17, col = 'grey')
points(LI_score_6575,DP_score_6575, pch=16, col = 'black')
# Plot ablines
abline(h=mean(Similarity_DP_vector), v=mean(Similarity_LI_vector), col='grey')
abline(h=mean(DP_score_6575), v=mean(LI_score_6575), col='black')
# Add legend
legend('topleft', legend=c("1945-1955", "1965-1975"),
col=c('grey', 'black'), pch=c(17,16), lwd=8, cex=2)
# Close connection
dev.off()
# End of script
###################
|
b2aaa66e0031c48a59d0fb36da26ead464e09ec5
|
5150cf610a34c6c5be9b598277db1834d8fb16b4
|
/R/calc_cumfledge.R
|
9916eec21c60a3b5661f4179eb74dba5839615fb
|
[] |
no_license
|
SPI-Birds/pipelines
|
f3ab78668e526a47bd298b0f7f4127e274a4dfd0
|
cb4bd41bc26d991fa54e520bb15b54333696b4cb
|
refs/heads/master
| 2023-08-16T18:15:29.835023
| 2023-08-09T09:51:56
| 2023-08-09T09:51:56
| 153,275,927
| 0
| 3
| null | 2022-12-04T14:48:00
| 2018-10-16T11:42:17
|
R
|
UTF-8
|
R
| false
| false
| 1,626
|
r
|
calc_cumfledge.R
|
#' Calculate cumulative number of fledgings
#'
#' For a given nest (one element per nest, in order), determine the cumulative
#' number of fledglings in all nests before this. This is used to calculate
#' ClutchType_calc. The function also includes functionality to report how
#' many nests before the current one had an unmeasured (NA) number of
#' fledglings. This is neccesary in populations where the number of fledglings
#' is not measured consistently.
#' @param x Column NumberFledged in the Brood_data table
#' @param na.rm Logical. If TRUE, returns cumulative number of fledglings
#'   before each nest, where NA is assumed to be 0. If FALSE, returns the
#'   cumulative count of nests before the current one that were unmeasured
#'   (NA); a value greater than 0 means at least one earlier nest was
#'   unmeasured.
#'
#' @return A numeric vector the same length as `x`; the first element is
#'   always 0 (nothing precedes the first nest).
#' @export
#'
#' @examples
#' #Assuming NA is 0
#' #Return vector of cumulative fledgling counts.
#' calc_cumfledge(x = c(1, 3, NA, 4), na.rm = TRUE)
#'
#' #Do not assume NA is 0.
#' #Return the cumulative count of unmeasured (NA) records before
#' #the current record.
#' calc_cumfledge(x = c(1, 3, NA, 4), na.rm = FALSE)
calc_cumfledge <- function(x, na.rm = TRUE){
  if(na.rm){
    # Treat unmeasured records as 0 fledglings; otherwise cumsum would
    # propagate NA to every later nest. Downstream we only need to know
    # whether at least 1 successful nest occurred before the current nest.
    x[is.na(x)] <- 0
    nrs <- cumsum(x)
  } else {
    # Count how many records so far were unmeasured (NA)
    x <- is.na(x)
    nrs <- cumsum(x)
  }
  if(length(x) == 1){
    # A single nest has nothing before it
    return(0)
  } else {
    # Shift by one so each element reports the total *before* that nest
    return(c(0, nrs[1:(length(nrs) - 1)]))
  }
}
|
2fa8fdebc8b3e1244849f07bac2d64cbaa9a01a3
|
89f79d108b334d9a503a70f18b7746232e4dd03e
|
/plot6.R
|
f68dc9582746340661b67f8b12f724340c56072d
|
[] |
no_license
|
Gradon/exploredata
|
793b66ec37ef7b8d1523970c9f517c748668e650
|
8cc6185e7781178bdcc24df3ad87e22c37854eda
|
refs/heads/master
| 2023-01-06T20:23:43.531406
| 2020-10-25T13:55:56
| 2020-10-25T13:55:56
| 307,050,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,604
|
r
|
plot6.R
|
# question: compare emissions from motor vehicles in Baltimore City with those in Los Angeles.
# which city has seen greater changes over time in motor vehicle emissions?
# national emissions data
NEIdata <- readRDS("summarySCC_PM25.rds")
# classification code mappings (loaded for completeness; not used below since
# the motor-vehicle subset is taken by `type` rather than SCC code)
SCCmap <- readRDS("Source_Classification_Code.rds")
# subset to Baltimore City emissions only (fips 24510)
# NOTE(review): fips is compared numerically here but as a string for LA below;
# this only works if the comparison coerces consistently -- confirm fips type
baltemissions <- subset(NEIdata, fips == 24510)
# on-road and non-road sources are motor vehicles, can just subset by type instead of SCC code
baltmotors <- subset(baltemissions, type == "ON-ROAD" | type == "NON-ROAD")
# total Baltimore City motor vehicle data for each year
totalbaltmotors <- with(baltmotors, tapply(Emissions, year, sum))
# subset to LA County emissions only ("06037" quoted to keep the leading zero)
laemissions <- subset(NEIdata, fips == "06037")
# on-road and non-road sources are motor vehicles, can just subset by type instead of SCC code
lamotors <- subset(laemissions, type == "ON-ROAD" | type == "NON-ROAD")
# total LA County motor vehicle data for each year
totallamotors <- with(lamotors, tapply(Emissions, year, sum))
# open file device for plotting
png(filename = "plot6.png", width = 480, height = 480)
# plot vehicle emissions; ylim spans both series so both cities fit on one axis
# NOTE(review): the title mentions only Baltimore City although both cities are drawn
plot(names(totalbaltmotors), totalbaltmotors, xlab = "Year", ylab = "PM2.5", pch = 20, col = "red", main = "Motor Vehicle-Related PM2.5 Emissions in Baltimore City by Year", ylim = range(totalbaltmotors, totallamotors))
points(names(totallamotors), totallamotors, pch = 18, col = "blue")
legend("topright", c("Baltimore City", "LA County"), lty = 1, col = c("red", "blue"))
# close png device
dev.off()
|
62f0f21e14455eef23112c9cf7b64d9c9c26a9f1
|
df6d4fd40c7a47127d608d397a0aaa67cfe92bf1
|
/Multiple imputation.R
|
0a0f7caf1b2cdcefa5bd00d1aab8912a034eca8e
|
[] |
no_license
|
Econometrics-in-r/Multiple-imputation-of-missing-data
|
abaaa27688cc61fa730fe7c725edc48c16a4d4b2
|
e61a1019a17190a8c3549bc16ff0b15ed4e58fe3
|
refs/heads/master
| 2022-08-30T23:07:03.646882
| 2020-05-27T09:14:50
| 2020-05-27T09:14:50
| 266,992,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,397
|
r
|
Multiple imputation.R
|
# Multiple imputation of a variable with missing data using mice, followed by
# a visual comparison of observed vs imputed values. Model estimation and
# Rubin's-rules pooling happen in the section that follows.
rm(list = ls())  # NOTE(review): clearing the global workspace in a script is discouraged
library(stats4)
library(maxLik)
library(randtoolbox)
library(data.table)
library(mice)
# Read and store the data in a data frame (interactive file picker)
dataset <- read.csv(file.choose(), header = TRUE)
# Sample size
N <- nrow(dataset)
# Dependent variable (crash counts in this example); change the variable as required
DVar <- dataset$Crash
# Variable with missing data
MVar <- dataset$NLane
# Number of imputations
M <- 100
# Indices of the missing values (kept for reference/inspection)
Mindex <- which(is.na(MVar))
# Placeholder for the imputed variable: one column per imputation
matimpvar <- matrix(NA, nrow = N, ncol = M)
# Halton draws ------------------------------------------------------------
# Transform the named columns of draws1 to standard normals in place.
# Uses <<- because draws1 lives in the global environment.
preparedraws <- function() {
  d <- 1
  while (d < (length(normaldraws) + 1)) {
    draws1[, normaldraws[d]] <<- qnorm(draws1[, normaldraws[d]])
    d <- d + 1
  }
}
Ndraws <- 500     # set number of draws
dimensions <- 2   # define number of random parameters in the model
# generate draws (using Halton)
draws1 <- as.matrix(halton(Ndraws * N, dimensions))
# assign names to individual sets of draws - need one entry per dimension
colnames(draws1) <- c("HRbeta1", "HRbeta2")
# define whether any draws should be transformed to Normals, which is also
# needed for e.g. lognormals (leave empty if not)
normaldraws <- c("HRbeta1", "HRbeta2")
# preparing draws for estimation - this may take a while
preparedraws()
draws1 <- draws1[, 1:dimensions]
# prediction matrix (put 1 for those variables to be used in imputation)
# BUG FIX: mice() requires a square ncol x ncol predictor matrix; the original
# used nrow = NROW(dataset), which only works if the data happen to be square
prd <- matrix(0, ncol = NCOL(dataset), nrow = NCOL(dataset))
# NOTE(review): assumes variable 5 is NLane and variables 2 and 4 are its
# predictors -- confirm against the dataset's column order
prd[5, 2] <- 1
prd[5, 4] <- 1
# imputation ("norm" = Bayesian linear regression imputation)
imp <- mice(dataset, m = M, method = "norm", predictorMatrix = prd)
# collect the imputed variable from every completed dataset
for (m in 1:M) {
  impdata <- complete(imp, m)
  MVar2 <- impdata$NLane  # the variable with imputed values
  matimpvar[, m] <- MVar2
}
# average imputation per observation, plotted against the observed values
impvar <- rowMeans(matimpvar)
plot(impvar, MVar)
# plotting densities of imputed data vs observed parts of variable with missing data
par(font = 1, ps = 22, family = "serif", mar = c(7, 7, 4, 2))
# BUG FIX: MVar contains NAs by construction, and density() errors on NA
# input unless na.rm = TRUE is supplied
plot(density(MVar, na.rm = TRUE), col = "gray", lty = 1, lwd = 3,
     xlab = "Number of lanes (NL)", ylab = "Density", main = "", yaxt = 'n',
     ylim = c(0, 0.4))
lines(density(impvar), lwd = 3, col = "black", lty = 3)
Axis(side = 2, at = seq(0, 1, by = 0.05))
legend(4.5, 0.40, c("Density of observed variable", "Density of imputed variable"),
       lty = c(1, 3), lwd = c(3, 3), col = c("gray", "black"), bty = "n",
       y.intersp = 3)
# Estimation and pooling ---------------------------------------------------
# initial values: c(dispersion, intercept, slope)
init <- c(2, 0.1, 0.1)
# placeholders, one column per imputed dataset
EST <- matrix(NA, nrow = NROW(init), ncol = M)  # parameter estimates
SQR <- matrix(NA, nrow = NROW(init), ncol = M)  # estimate variances (squared SEs)
SQ  <- matrix(NA, nrow = NROW(init), ncol = M)  # squared deviations from pooled mean
# BUG FIX: Lik was assigned inside the loop below without ever being created,
# which raises "object 'Lik' not found" on the first iteration
Lik <- rep(NA_real_, M)                         # maximized log-likelihoods
# multiple imputation: fit the negative binomial model once per completed dataset
for (m in 1:M) {
  # Data preparation: design matrix with intercept, log(Length) and NLane
  impdata <- complete(imp, m)
  dataF <- as.matrix(data.frame(1, log(impdata$Length), impdata$NLane))
  # Log-likelihood function (negative binomial)
  LL <- function(params) {
    disp <- params[1]     # dispersion parameter
    Fbeta <- params[2:3]  # fixed parameters in the mu function
    # replicate the linear predictor across the Ndraws draws; dnbinom then
    # recycles DVar, so the log-likelihood is scaled by Ndraws (this does not
    # change the location of the maximum)
    offset <- rep.int(dataF %*% as.matrix(Fbeta, ncol = 1), Ndraws)
    mu <- exp(offset)
    loglik <- sum(dnbinom(DVar, size = disp, mu = mu, log = TRUE))
    return(loglik)
  }
  fit1 <- maxLik(LL, start = init, method = "BFGS")
  EST[, m] <- fit1$estimate
  SQR[, m] <- diag(solve(-fit1$hessian))  # variances from the inverse Hessian
  Lik[m] <- fit1$maximum
}
# Pooling estimates (Rubin's rules)
PEST <- rowMeans(EST)
# Within-imputation variance
WVAR <- rowMeans(SQR)
# Between-imputation variance
for (m in 1:M) { SQ[, m] <- (EST[, m] - PEST)^2 }
BVAR <- rowSums(SQ) / (M - 1)
# Total variance
TVAR <- WVAR + BVAR + BVAR / M
# Table of results
results <- data.frame(PEstimates = PEST, WSD = sqrt(WVAR), BSD = sqrt(BVAR),
                      TSD = sqrt(TVAR))
results
|
75f418af11ca7ae251b6bccb023f73f8a5d0b8d9
|
7a8c0602ea1ce52ef51a096f0ac70d456f179e0a
|
/R/man/training_step.Rd
|
045ce40df662057329645e8975e59914ee88eb5a
|
[
"MIT"
] |
permissive
|
dmalagarriga/interpret
|
37798891b20d5a5c2ead5535ca5049406b91dab6
|
961aba790f19798f03f496346d2c0ff037202050
|
refs/heads/master
| 2020-09-29T17:52:01.980013
| 2019-12-10T06:12:04
| 2019-12-10T06:12:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
rd
|
training_step.Rd
|
\name{training_step}
\alias{training_step}
\title{Training Step}
\description{
Takes one Training Step
}
\usage{
training_step(
ebm_training,
index_feature_combination,
learning_rate,
count_tree_splits_max,
count_instances_required_for_parent_split_min,
training_weights,
validation_weights
)
}
\arguments{
\item{ebm_training}{ebm training}
\item{index_feature_combination}{index feature combination}
\item{learning_rate}{learning rate}
\item{count_tree_splits_max}{count tree splits max}
\item{count_instances_required_for_parent_split_min}{count instances required for parent split min}
\item{training_weights}{training weights}
\item{validation_weights}{validation weights}
}
\value{
Returns the root mean squared error when the model being trained is for a regression problem, or the log loss for a classification problem
}
\examples{
training_ptr <- initialize_training_regression(
1L,
list(ebm_feature(2)),
list(ebm_feature_combination(1)),
c(0),
c(10, 10), c(0, 1), c(0, 0),
c(12), c(1), c(0),
0L)
validation_metric <- training_step(training_ptr, 0, 0.01, 2, 2, NULL, NULL)
}
|
a109a5f384e64899511af758daf80638a5cea7e1
|
023267839ada61c94515f24ae2b782d2b844194e
|
/lectures/lesson25_reviews/onlineReview_pro.R
|
7ce6753768c26c04e1c5c765caf19cd78f38b0a6
|
[] |
no_license
|
DarioBoh/ISDS3105_fall18
|
303399915687750d93e1d850d2fd07eb5870a2bd
|
23bc7e464091e1641efec084c4803c612edebf0f
|
refs/heads/master
| 2021-07-12T12:50:06.424984
| 2021-07-04T09:46:36
| 2021-07-04T09:46:36
| 145,449,465
| 0
| 25
| null | 2018-08-24T17:02:20
| 2018-08-20T17:28:20
| null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
onlineReview_pro.R
|
# Exercise script: online hotel reviews analysis (tasks to be completed)
library(tidyverse)
# Read the file `dataset.RDA` using readRDS
dt <- readRDS('onlineReviews_analysis/dataset.RDA')
# Adjust the datatypes as needed. Use mutate_at to apply the same function to multiple columns
# Write a function to plot a chart of rating frequencies for a single hotel. Each plot should be created with a title.
# Add an error message if the hotel name does not match any of the hotels in the dataset
# NOTE: `...` and `..` below are exercise placeholders to be filled in by the
# student; this skeleton is intentionally not runnable as-is
ratingsHist <- function( ... ) {
  if ( .. ) stop( )
}
# Use iteration to create a plot for 5 hotels using `ratingsHist()`. Save them into a folder called `figures`.
#' Researchers claim that over time the rating distribution changes from approximately "normal"
#' to a J-shaped distribution (very few 3s but a lot of extremely positive ratings, and some very negative ratings).
#' Pick a hotel with a large number of observations and show whether your data support this claim.
#' For instance, visualize how the distribution changes over time (years)
#' Other researchers argue that most recent reviews are more positive because of the pervasiveness of mobile phones.
#' Is the average score of reviews from mobile higher than for others?
#' Show the trend of the cumulative mean by each new review. Use `cumsum` to calculate the cumulative mean of the scores.
#' Use facetting to plot the score distributions for each attribute from Value to Service
#' Are the reviews with more helpful votes the ones that are more positive? Discretize the number of helpful
#' votes, and show the distributions for each level of the (discretized) number of helpful votes
|
57258ad2f35d3435580495b8f601cf3bb1301759
|
108d4834704fa9e2fcb610fc9fadd5203af666f6
|
/routesRandomiser.R
|
c4bbc36f7739a2c987fc1eca85dfdd4b8b6f1575
|
[] |
no_license
|
DanOlner/randomNetworkDistancer
|
f1c7066622b9a8809276999e2bf3078f1f04ac3d
|
e0a0604b9eee3a726727e4bb681983b88fda62a7
|
refs/heads/master
| 2021-01-23T00:14:53.071552
| 2015-07-06T08:39:28
| 2015-07-06T08:39:28
| 19,893,925
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,320
|
r
|
routesRandomiser.R
|
#Pick random points inside shapefile polygon
#Use Google distance matrix API to fetch network distance/time and addresses
#https://developers.google.com/maps/documentation/distancematrix/
#Store and save results as CSV
#Flow: sample random origin/destination pairs inside GB -> reproject to
#lat/long -> one API call per pair -> collect distance/time/addresses ->
#drop failures -> write a uniquely-named CSV.
library(rgdal)
library(rgeos)
library(httr)
library(jsonlite)
#Load properties file
#Nabbed from http://stackoverflow.com/questions/13681310/reading-configuration-from-text-file
# myProp <- read.table("config.txt", header=FALSE, sep="=", row.names=1, strip.white=TRUE, na.strings="NA", stringsAsFactors=FALSE)
# myProp <- setNames(myProp[,1],row.names(myProp))
# myProp["dateOfLastLimitBreach"]
#record of failed calls
#(incremented whenever the API responds but declines to return a route)
fail = 0
#Base URL, will knock query together below
testURL <- "http://maps.googleapis.com/maps/api/distancematrix/json"
#Use single shapefile of Great Britain
gbmerge <- readOGR(dsn="GB_merged", "GB_merged")
#Store the points
#Number of origin-destination pairs to create and fetch
#Note there's a pause of at least 0.2 seconds between each
#so getting >2000 of your daily google API allowance
#Might be around 7 minutes
#The csv won't be saved until the end of the process
#Job: add option to save regularly so's not to risk wasting API call limit
#DAILY ALLOWANCE IS 2500.
pairNum = 2000
#http://casoilresource.lawr.ucdavis.edu/drupal/book/export/html/644
#sp package has specific tool for spatial sampling within polygons. Hooray!
randomPointOrigins <- spsample(gbmerge, n=pairNum, type='random')
randomPointDestinations <- spsample(gbmerge, n=pairNum, type='random')
#In case you wanna see em
#plot(gbmerge); points(randomPointDestinations, col='red', pch=3, cex=0.5); points(randomPointOrigins, col='blue', pch=3, cex=0.5)
#convert to lat long in prep for google query
randomPointOrigins <- spTransform(randomPointOrigins, CRS("+init=epsg:4326"))
randomPointDestinations <- spTransform(randomPointDestinations, CRS("+init=epsg:4326"))
#Use dataframe, single row per origin-destination pair
randomPointOrigins <- data.frame(randomPointOrigins)
randomPointDestinations <- data.frame(randomPointDestinations)
#Distinguish x and y column names (for later CSV writing)
colnames(randomPointOrigins)[colnames(randomPointOrigins)=="x"] <- "origin_x"
colnames(randomPointOrigins)[colnames(randomPointOrigins)=="y"] <- "origin_y"
colnames(randomPointDestinations)[colnames(randomPointDestinations)=="x"] <- "dest_x"
colnames(randomPointDestinations)[colnames(randomPointDestinations)=="y"] <- "dest_y"
#Final set of origin-destination points
pointSet <- cbind(randomPointOrigins,randomPointDestinations)
#Create results matrix, one row per origin-destination pair
#Storing four results: distance and time of each route
#(Distance in metres, time in seconds)
#And also the strings for the address of origins and destinations
#NOTE(review): assigning address strings below coerces the whole matrix to
#character, so distance/time end up as character columns in the CSV.
results <- matrix(nrow=pairNum , ncol=4)
#Iterate over required pair numbers, get data from google
for(i in 1:pairNum) {
#set google distance matrix query
#Google does y,x. Reverse order
#See https://developers.google.com/maps/documentation/distancematrix/ for option info
qry <- paste("origins=", pointSet[i,2] , "," , pointSet[i,1] ,
"&destinations=" ,pointSet[i,4] , "," , pointSet[i,3] ,
"&sensor=FALSE",
# "&mode=bicycling",
"&avoid=ferries",#not going to any islands!
sep=""#no spaces
)
#Get the JSON
gimme <- GET(
testURL,
query = qry,
#If using in Leeds University, obv: comment this out if not, or if using Leeds Uni wifi
#Use this to see details of proxy connection: c(use_proxy("www-cache.leeds.ac.uk:3128", 8080), verbose())
c(use_proxy("www-cache.leeds.ac.uk:3128", 8080))
)
#http://blog.rstudio.org/2014/03/21/httr-0-3/
#Abort the whole run on an HTTP-level error (4xx/5xx).
stop_for_status(gimme)
store <- content(gimme)
#if result was OK, keep
# if(store$status=="OK") {
#NOTE(review): assumes the parsed response contains at least one row/element;
#a malformed body would error here rather than increment `fail`.
if(store$rows[[1]]$elements[[1]]$status=="OK") {
results[i,1] <- store$rows[[1]]$elements[[1]]$distance$value
results[i,2] <- store$rows[[1]]$elements[[1]]$duration$value
results[i,3] <- store$origin_addresses[[1]]
results[i,4] <- store$destination_addresses[[1]]
} else {
fail <- fail + 1
}
#pause between API calls. We're aiming for:
# "100 elements per 10 seconds."
# "2500 elements per 24 hour period."
#Two elements per call (origin and destination)
#Being conservative: 0.3 seconds should hit ~66 elements per 10 seconds
Sys.sleep(0.3)
print(paste("API call", i, "complete, status: ", store$rows[[1]]$elements[[1]]$status))
}#end for
#Append the coordinates used
readyForWriting <- cbind(data.frame(results),pointSet)
#Rename the default matrix columns X1..X4 to meaningful names.
colnames(readyForWriting)[colnames(readyForWriting)=="X1"] <- "distance"
colnames(readyForWriting)[colnames(readyForWriting)=="X2"] <- "time"
colnames(readyForWriting)[colnames(readyForWriting)=="X3"] <- "origin"
colnames(readyForWriting)[colnames(readyForWriting)=="X4"] <- "destination"
#Strip out failed calls
readyForWriting <- readyForWriting[ !is.na(readyForWriting$distance) ,]
#Write the final results file, unique name each time
filename = paste("GoogleDistanceMatrixRandomPathRresults_",date(),".csv",sep="")
#spaces with underscores
filename <- gsub(" ", "_", filename)
#colons with underscores
filename <- gsub(":", "_", filename)
write.csv(readyForWriting, filename)
print(paste(pairNum, "attempts, ", (pairNum - fail), "successful."))
print(paste("File written: ", filename))
|
ae832d4e040b0daba40a44f888e87ea3dbd77a86
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkCTreeFindAllByRowDataCustom.Rd
|
808cf682ee62df6e4200d71868fc9792197aa4b6
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 745
|
rd
|
gtkCTreeFindAllByRowDataCustom.Rd
|
\alias{gtkCTreeFindAllByRowDataCustom}
\name{gtkCTreeFindAllByRowDataCustom}
\title{gtkCTreeFindAllByRowDataCustom}
\description{
Find all nodes under \code{node} whose row data pointer fulfills
a custom criterion.
\strong{WARNING: \code{gtk_ctree_find_all_by_row_data_custom} is deprecated and should not be used in newly-written code.}
}
\usage{gtkCTreeFindAllByRowDataCustom(object, node, data = NULL, func)}
\arguments{
\item{\verb{object}}{The GtkCTree to search in.}
\item{\verb{node}}{The node where to start searching.}
\item{\verb{data}}{User data for the criterion function.}
\item{\verb{func}}{The criterion function.}
}
\value{[list] A list of all nodes found.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
c65c4f8d86565e73c61875e9a045f8ae9b763619
|
00daf46a1286c20caa103a95b111a815ea539d73
|
/R/manual_generics.R
|
4eeb6c14ce3ccc8855b5433b009493b9656a4508
|
[] |
no_license
|
duncantl/Rllvm
|
5e24ec5ef50641535895de4464252d6b8430e191
|
27ae840015619c03b2cc6713bde71367edb1486d
|
refs/heads/master
| 2023-01-10T15:12:40.759998
| 2023-01-02T18:05:26
| 2023-01-02T18:05:26
| 3,893,906
| 65
| 14
| null | 2017-03-09T07:59:25
| 2012-04-01T16:57:16
|
R
|
UTF-8
|
R
| false
| false
| 4,980
|
r
|
manual_generics.R
|
# Manually-declared S4 generics for the Rllvm API.
# Each call only registers the generic; the methods themselves are
# defined elsewhere in the package.
setGeneric("getParent", function(x, ...) standardGeneric("getParent"))
setGeneric("eraseFromParent", function(x, delete = TRUE, ...) standardGeneric("eraseFromParent"))
setGeneric("getElementTypes", function(x, ...) standardGeneric("getElementTypes"))
setGeneric("getNumElements", function(x, ...) standardGeneric("getNumElements"))
setGeneric("moveAfter", function(src, dest, ...) standardGeneric("moveAfter"))
setGeneric("getParameters", function(fun, addNames = TRUE, ...) standardGeneric("getParameters"))
setGeneric("getFields", function(obj, ...) standardGeneric("getFields"))
setGeneric("getReturnType", function(obj, ...) standardGeneric("getReturnType"))
setGeneric("isZeroValue", function(x, ...) standardGeneric("isZeroValue"))
setGeneric("getInstructions", function(x, debug = TRUE, ...) standardGeneric("getInstructions"))
setGeneric("setName", function(obj, name, ...) standardGeneric("setName"))
setGeneric("getName", function(obj, ...) standardGeneric("getName"))
setGeneric("clone", function(x, ...) standardGeneric("clone"))
setGeneric("getDataLayout", function(from, ...) standardGeneric("getDataLayout"))
setGeneric("setDataLayout", function(x, value, ...) standardGeneric("setDataLayout"))
setGeneric("llvmDump", function(x, ...) standardGeneric("llvmDump"))
setGeneric("getValue", function(x, ...) standardGeneric("getValue"))
setGeneric("onlyReadsMemory", function(x, ...) standardGeneric("onlyReadsMemory"))
setGeneric("getMetadata", function(obj, id, ...) standardGeneric("getMetadata"))
setGeneric("getPredecessors", function(x, notSelf = FALSE, ...) standardGeneric("getPredecessors"))
setGeneric("getSuccessors", function(x, ...) standardGeneric("getSuccessors"))
setGeneric("getSuccessor", function(x, ...) standardGeneric("getSuccessor"))
setGeneric("getPredecessor", function(x, ...) standardGeneric("getPredecessor"))
# Predicates for DI (debug info) nodes.  isBigEndian/isLittleEndian are
# also used for the data layout.
setGeneric("isPrivate", function(x, ...) standardGeneric("isPrivate"))
setGeneric("isProtected", function(x, ...) standardGeneric("isProtected"))
setGeneric("isPublic", function(x, ...) standardGeneric("isPublic"))
setGeneric("isForwardDecl", function(x, ...) standardGeneric("isForwardDecl"))
setGeneric("isAppleBlockExtension", function(x, ...) standardGeneric("isAppleBlockExtension"))
setGeneric("isVirtual", function(x, ...) standardGeneric("isVirtual"))
setGeneric("isArtificial", function(x, ...) standardGeneric("isArtificial"))
setGeneric("isObjectPointer", function(x, ...) standardGeneric("isObjectPointer"))
setGeneric("isObjcClassComplete", function(x, ...) standardGeneric("isObjcClassComplete"))
setGeneric("isVector", function(x, ...) standardGeneric("isVector"))
setGeneric("isBitField", function(x, ...) standardGeneric("isBitField"))
setGeneric("isStaticMember", function(x, ...) standardGeneric("isStaticMember"))
setGeneric("isLValueReference", function(x, ...) standardGeneric("isLValueReference"))
setGeneric("isRValueReference", function(x, ...) standardGeneric("isRValueReference"))
setGeneric("isTypePassByValue", function(x, ...) standardGeneric("isTypePassByValue"))
setGeneric("isTypePassByReference", function(x, ...) standardGeneric("isTypePassByReference"))
setGeneric("isBigEndian", function(x, ...) standardGeneric("isBigEndian"))
setGeneric("isLittleEndian", function(x, ...) standardGeneric("isLittleEndian"))
setGeneric("getElements", function(x, ...) standardGeneric("getElements"))
setGeneric("getEmissionKind", function(x, ...) standardGeneric("getEmissionKind"))
setGeneric("isOptimized", function(x, ...) standardGeneric("isOptimized"))
setGeneric("getLine", function(x, ...) standardGeneric("getLine"))
setGeneric("getNameTableKind", function(x, ...) standardGeneric("getNameTableKind"))
setGeneric("stripDebugInfo", function(x, ...) standardGeneric("stripDebugInfo"))
setGeneric("getTypes", function(x, ...) standardGeneric("getTypes"))
setGeneric("getFilename", function(x, ...) standardGeneric("getFilename"))
setGeneric("getDirectory", function(x, ...) standardGeneric("getDirectory"))
setGeneric("getType", function(obj, ...) standardGeneric("getType"))
# getExternalFunctions is declared in findCalledFunctions.R, not here.
setGeneric("hasError", function(x, ...) standardGeneric("hasError"))
setGeneric("addModule", function(engine, ...) standardGeneric("addModule"))
# Make base::lapply generic so LLVM container classes can provide methods;
# the default (ANY) method simply forwards to base::lapply.
setGeneric("lapply", function(X, FUN, ...) standardGeneric("lapply"))
setMethod("lapply", "ANY", function(X, FUN, ...) base::lapply(X, FUN, ...))
|
a597567061b7b1cb02ff286909c3d754c6698126
|
f48e25ade098aef7aa6f9fde4927bbf2b2092d14
|
/man/dasl.oakland_passengers_2016.Rd
|
f050bab8c6b608c18274ada3ffe78a9da6f8eb9c
|
[] |
no_license
|
sigbertklinke/mmstat.data
|
23fa7000d5a3f776daec8b96e54010d85515dc7d
|
90f698e09b4aac87329b0254db28d835014c5ecb
|
refs/heads/master
| 2020-08-18T04:29:05.613265
| 2019-10-17T11:44:57
| 2019-10-17T11:44:57
| 215,747,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 718
|
rd
|
dasl.oakland_passengers_2016.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.oakland_passengers_2016}
\alias{dasl.oakland_passengers_2016}
\title{Oakland passengers 2016}
\format{318 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/oakland-passengers-2016/?sf_paged=29}{Oakland passengers 2016}
}
\description{
The data give the number of passengers at Oakland (CA) airport month by month since 1997.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
\url{http://www.oaklandairport.com/news/statistics/passenger-history/}
}
\concept{Regression}
\concept{Time Series Analysis}
|
785ca976297f78fed599c6ee20c4dff6edc99c35
|
8f09774b992fd23052201130a1ce4db3f3e27a53
|
/tests/testthat/test_utils.R
|
df5e4833ef19d340c5669e316aa691a082c42f0d
|
[
"MIT"
] |
permissive
|
tkonopka/umap
|
77f3ff453181cdbe0844df1b4f9e499f23559121
|
96f077865243434ec94ad1581c7003a5f38545c7
|
refs/heads/master
| 2023-02-09T22:47:49.312192
| 2023-02-01T19:07:40
| 2023-02-01T19:07:40
| 129,778,978
| 133
| 21
|
NOASSERTION
| 2018-11-08T19:03:34
| 2018-04-16T17:12:03
|
R
|
UTF-8
|
R
| false
| false
| 373
|
r
|
test_utils.R
|
## tests for universal functions (umap_universal.R)
## ############################################################################
## Tests for the message.w.date() verbosity helper
## (The old section comment claimed these tested nearest-neighbor extraction,
## and the second test's description contradicted its expect_silent check.)
test_that("message is emitted when verbose is set", {
  expect_message(message.w.date("hello", TRUE))
})
test_that("no message is emitted by default when verbose is not set", {
  expect_silent(message.w.date("hello"))
})
|
90b3e6f2b68b4c40f6956cd22a85a9640d60b13f
|
a800dff7c2068108a2502e034d0625b247a87b46
|
/R/test-output-contains.R
|
4bc7ca391e6bad4245cd4090e90eaa9d34caf4b9
|
[] |
no_license
|
selcukfidan47/testwhat
|
9c4786e4654d404c54affd510a4735819a843b5c
|
a7c03fd6848881915fe6657d5c5c45db90392ce0
|
refs/heads/master
| 2021-01-16T22:36:14.340497
| 2016-02-15T11:00:07
| 2016-02-15T11:00:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,218
|
r
|
test-output-contains.R
|
#' Check whether the student printed something to the console
#'
#' Function checks whether the student's console contains the output one gets by
#' evaluating the character string provided in \code{expr} provided to expr.
#' This function needs refactoring, as all new lines etc are removed.
#'
#' @param expr The expression (as string) for which the output should be in the student's console output.
#' @param times How often the expression's output should occur in the student's console
#' @param console_output The string containing the output printed to the student's console.
#' The default is DM.console.output which is set on the DataCamp server (automagically).
#' This means you don't have to specify this argument when writing SCTs for DataCamp.com.
#' @param incorrect_msg feedback message in case the output did not contain the expression
#' @param env environment where the code in expr is executed.
#'
#' @examples
#' \dontrun{
#' # SCT to test whether student printed numbers 1 to 10
#' test_output_contains("for(i in 1:10) print(i)")
#' }
#'
#' @import datacampAPI
#' @import testthat
#' @export
test_output_contains <- function(expr, times = 1, console_output = get_student_output(), incorrect_msg = NULL, env = .GlobalEnv) {
  # No author-supplied feedback message: fall back to an auto-generated one.
  # (In practice incorrect_msg should always be provided -- the generated
  # text does not make a great feedback message.)
  if (is.null(incorrect_msg)) {
    incorrect_msg <- build_incorrect_output_msg(expr)
  }
  # Pass when the expression's output occurs at least `times` times in the
  # student's console output.
  occurrences <- output_contains(expr, console_output = console_output, env)
  test_what(expect_true(occurrences >= times), incorrect_msg)
}
# Count how many times the output of evaluating `expr` occurs in the
# student's console output, ignoring all whitespace on both sides.
#
# @param expr Expression (as a string) to evaluate and capture.
# @param console_output The console output to search in.
# @param env Environment in which `expr` is evaluated.
# @return Integer count of occurrences; 0L when `expr` fails to evaluate or
#   its output is not found.  (Previously the error path returned FALSE while
#   the other paths returned integers.)
output_contains = function(expr, console_output = get_student_output(), env = .GlobalEnv) {
  # Capture everything the expression prints.  Evaluation errors must reach
  # this try(): the old inner try() swallowed them, so a failing expression
  # produced an empty pattern that trivially "matched" any console output.
  correct_output = try(capture.output(eval(parse(text = expr), envir = env)), silent = TRUE)
  if (inherits(correct_output, "try-error")) {
    return(0L)
  }
  correct_output = paste(correct_output, collapse = '')
  # Whitespace-insensitive comparison: strip newlines/spaces on both sides.
  console_output = gsub("\n|[[:space:]]", "", console_output)
  correct_output = gsub("\n|[[:space:]]", "", correct_output)
  where.is.regex = gregexpr(pattern = correct_output, text = console_output, fixed = TRUE)
  if (any(where.is.regex[[1]] == (-1))) {
    return(0L)
  } else {
    return(length(where.is.regex[[1]]))
  }
}
|
b69be022e21cb39d0c97a2b9763c0e0ad592e6f2
|
4cb0878310a0891c07c21b6152b57257ffd5cdf4
|
/R Tips & Tricks - Summarizing and Visualizing Data.R
|
20fe723f081dc116709c82e19f0367a8964628d9
|
[] |
no_license
|
pw2/R-Tips-Tricks
|
75cc89a970b18c5a39e3964cfe8a0fb8a8513fc8
|
3ae303b4b296a18817ab7389eed966743283220b
|
refs/heads/master
| 2023-06-07T23:05:42.832942
| 2023-06-01T03:00:32
| 2023-06-01T03:00:32
| 254,485,479
| 7
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,736
|
r
|
R Tips & Tricks - Summarizing and Visualizing Data.R
|
### R Tips & Tricks: Summarizing and Visualizing Data ###
## Patrick Ward
## Simulates pre/post squat-strength data for two training groups, then
## demonstrates reshaping (wide/long) and summary statistics.
## load packages -----------------------------------------------
library(tidyverse) # for data manipulation and visualization
library(gridExtra) # for organizing plot grid
library(psych) # for summary statistics
theme_set(theme_bw()) # setting the background theme for plots
## Simulate Data -----------------------------------------------
# Two periodization groups: Traditional & Block
# 16 week training program
# Pre- and post-test squat performance
set.seed(454)
participant <- 1:20
group <- rep(c("traditional periodization", "block periodization"),
each = 20)
# NOTE(review): `participant` (length 20) is recycled across the 40 rows
# below, so each participant ID appears once per group -- confirm intended.
pre_squat <- c(round(rnorm(n = 20, mean = 130, sd = 15), 0),
round(rnorm(n = 20, mean = 127, sd = 16), 0))
post_squat <- c(round(rnorm(n = 20, mean = 136, sd = 10), 0),
round(rnorm(n = 20, mean = 147, sd = 6), 0))
dat <- data.frame(participant, group, pre_squat, post_squat)
dat %>% head()
### Data Manipulation ----------------------------------------------------
## Data is currently in wide format
# Turn data into long format (one row per participant x test time)
dat_long <- pivot_longer(data = dat,
cols = c("pre_squat", "post_squat"),
names_to = "test",
values_to = "performance")
dat_long %>% head()
# turn back into wide format
dat_wide <- pivot_wider(data = dat_long,
names_from = test,
values_from = performance)
dat_wide %>% head()
### Summary Statistics -------------------------------------------------
# Summary stats for all data using describe() from the psych package
describe(dat_long$performance)
# Summary stats by group using describeBy() from the psych package
describeBy(dat_long$performance, dat_long$group)
# Use the tidyverse package to compute summary statistics by group and test time.
# fct_relevel() takes the new level order directly through `...`; it has no
# `levels` argument (the old named vector only worked by falling into the dots).
dat_long %>%
  mutate(test = fct_relevel(test, "pre_squat", "post_squat")) %>%
  group_by(group, test) %>%
  summarize(N = n(),
            Mean = mean(performance),
            SD = sd(performance))
## Create a column for the difference from pre to post for each participant
dat$Diff <- with(dat, post_squat - pre_squat)
dat %>% head()
# Describe the difference for each group
dat %>%
group_by(group) %>%
summarize(N = n(),
Mean_Diff = mean(Diff),
SD_Diff = sd(Diff))
# could have also used describeBy() from the psych package
describeBy(dat$Diff, dat$group)
## Producing Quantiles
# Quantiles for the difference in performance (default quartiles)
quantile(dat$Diff)
# Quantiles with more probabilities returned (deciles)
quantile(dat$Diff,
probs = seq(from = 0,
to = 1,
by = 0.1))
# Quantiles for the difference by group (base-R split/apply via by())
by(dat$Diff, dat$group, quantile)
# Same thing but with more quantiles returned
by(dat$Diff, dat$group, quantile, probs = seq(from = 0, to = 1, by = 0.1))
#### Data Visualization ------------------------------------------------------
## Box Plots of Difference by Group
# The dashed red line at 0 marks "no change" from pre to post.
dat %>%
ggplot(aes(x = Diff, y = group)) +
geom_boxplot() +
geom_vline(aes(xintercept = 0),
color = "red",
size = 1.2,
linetype = "dashed") +
labs(x = "Post - Pre Difference in Squat",
y = "Training Group",
title = "Changes in Squat Strength from 16-week Program")
## Histogram of Differences by Group
# (actually a density plot; kept the original section label)
dat %>%
ggplot(aes(x = Diff, fill = group)) +
geom_density(alpha = 0.6) +
geom_vline(aes(xintercept = 0),
color = "red",
size = 1.2,
linetype = "dashed") +
labs(x = "Post - Pre Difference in Squat",
y = "",
title = "Changes in Squat Strength from 16-week Program")
## Violin Plot with Points
# stat_summary() overlays the group mean; geom_jitter() shows raw points.
dat %>%
ggplot(aes(x = group, y = Diff)) +
geom_violin() +
stat_summary(fun = mean,
geom = "point",
color = "black",
size = 4) +
geom_jitter(color = "grey",
size = 3) +
geom_hline(aes(yintercept = 0),
color = "red",
size = 1.2,
linetype = "dashed") +
labs(x = "Post - Pre Difference in Squat",
y = "",
title = "Changes in Squat Strength from 16-week Program")
## Joint Plot
# Scatter of pre vs post with marginal density plots along each axis.
# Build the main plot
main_plot <- dat %>%
ggplot(aes(x = pre_squat,
y = post_squat,
color = group)) +
geom_point(size = 3,
alpha = 0.8) +
theme(legend.position = "bottom")
# build margin plots (shared xlim keeps the margins aligned with each other)
x_axis_hist <- dat %>%
ggplot(aes(x = pre_squat, fill = group)) +
geom_density(alpha = 0.3) +
xlim(80, 170) +
theme(legend.position = "none")
y_axis_hist <- dat %>%
ggplot(aes(x = post_squat, fill = group)) +
geom_density(alpha = 0.3) +
xlim(80, 170) +
coord_flip() +
theme(legend.position = "none")
# build an empty plot to occupy space in the second column of the plot grid
empty_space <- ggplot() +
geom_point(aes(x = 1, y = 1), color = "white") +
theme(axis.ticks = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.text = element_blank(),
axis.title = element_blank())
# Arrange the plot grid: x-margin on top, main plot bottom-left,
# y-margin bottom-right, blank cell top-right.
grid.arrange(x_axis_hist,
empty_space,
main_plot,
y_axis_hist,
ncol = 2,
nrow = 2,
widths = c(4, 1),
heights = c(1, 4))
## Individual Participant Plots
# fct_relevel() takes the new level order directly through `...`; it has no
# `levels` argument (the old named vector only worked by falling into the dots).
dat_long %>%
  mutate(test = fct_relevel(test, "pre_squat", "post_squat")) %>%
  ggplot(aes(x = test, y = performance, color = as.factor(participant), group = as.factor(participant))) +
  geom_point(size = 4) +
  geom_line(size = 1.1) +
  facet_wrap(~group)
|
4f53c70cf93ab70028a17bfa5dae61e9098e7abd
|
5427315c68c26753b28e18432c118621b9b984fc
|
/POST_plattscaling.R
|
94f674f4d69e045e9531d7a6260a25896df40487
|
[] |
no_license
|
JohannesJacob/fairCreditScoring
|
dc58734b210a6e118628fc446b6710201ea655db
|
10ebe9878cc63ddb8aab69159e2a097f5c09116e
|
refs/heads/master
| 2020-12-13T02:16:32.566382
| 2020-06-22T10:53:29
| 2020-06-22T10:53:29
| 234,286,257
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 4,254
|
r
|
POST_plattscaling.R
|
# POSTPROCESSING PROFIT EVALUATION
# Platt-scales model scores separately per protected group (AGE 0/1), then
# evaluates AUC, EMP, profit and fairness metrics per model on the test set.
setwd("C:/Users/Johannes/OneDrive/Dokumente/Humboldt-Universitšt/Msc WI/1_4. Sem/Master Thesis II/")
rm(list = ls());gc()
set.seed(0)
options(scipen=999)
# libraries
library(EMP)
library(pROC)
# fairness metric helpers: statParDiff(), avgOddsDiff(), predParDiff()
source("fairCreditScoring/95_fairnessMetrics.R")
# read data
dtest_unscaled <- read.csv("3_wipResults/taiwan_orig_test.csv")
dtest_unscaled <- subset(dtest_unscaled, select = c(CREDIT_AMNT,AGE, TARGET))
dval_unscaled <- read.csv("3_wipResults/taiwan_orig_valid.csv")
dval_unscaled <- subset(dval_unscaled, select = c(AGE, TARGET))
dval_training_results <- read.csv("3_wipResults/taiwan_post_training_results_dval.csv")
dtest_training_results <- read.csv("3_wipResults/taiwan_post_training_results_dtest.csv")
# ---- PLATT SCALING PER GROUP ----
model.names <- c('glm', "svmLinear", "rf", "xgbTree", "nnet")
# Outer loop: one pass per protected-group value (AGE == 0 / AGE == 1).
for (i in c(0,1)){
dval_target <- dval_unscaled$TARGET[dval_unscaled$AGE==i]
dval_scores <- dval_training_results[dval_unscaled$AGE==i,]
dtest_scores <- dtest_training_results[dtest_unscaled$AGE==i,]
dtest_subset <- dtest_unscaled[dtest_unscaled$AGE==i,]
platt_scores <- NULL
for (m in model.names){
# train logistic model with Yval ~ Y^val --> model_val
dataframe_valid <- data.frame(x = dval_scores[, paste0(m, "_scores")], y = dval_target)
model_val <- glm(y~x,data = dataframe_valid,family = binomial)
# determine optimal cutoff
# (drop the y column so predict() only sees the predictor x)
dataframe_valid <- dataframe_valid[-2]
valid_scores <- predict(model_val, newdata = dataframe_valid, type = 'response')
EMP <- empCreditScoring(scores = valid_scores, classes = dval_target)
# Store the EMP-optimal cutoff as a global named cutoff.<model>
assign(paste0('cutoff.', m), quantile(valid_scores, EMP$EMPCfrac))
# use model_val to predict ytest
dataframe_test <- data.frame(x = dtest_scores[, paste0(m, "_scores")])
test_score <- predict(model_val, newdata = dataframe_test, type = 'response')
platt_scores <- cbind(platt_scores, test_score)
}
colnames(platt_scores) <- model.names
# Persist this group's calibrated scores as platt_scores_0 / platt_scores_1
assign(paste0("platt_scores_",i), cbind(platt_scores, dtest_subset))
}
# Recombine both protected groups into one test-set result frame.
platt_results <- rbind(platt_scores_0, platt_scores_1)
#---- TESTING ----
# Assess test results
test_results <- NULL
# WARNING(review): the loop variable `i` is reused by the inner row loop
# below; after that loop `i` holds a row index, so nothing after it may rely
# on `i` still being the model name.
for(i in model.names){
pred <- platt_results[, i]
cutoff <- get(paste0("cutoff.", i))
cutoff_label <- sapply(pred, function(x) ifelse(x>cutoff, 'Good', 'Bad'))
# Compute AUC
AUC <- as.numeric(roc(platt_results$TARGET, as.numeric(pred))$auc)
# Compute EMP
EMP <- empCreditScoring(scores = pred, classes = platt_results$TARGET)$EMPC
acceptedLoans <- length(pred[pred>cutoff])/length(pred)
# Compute Profit from Confusion Matrix (# means in comparison to base scenario = all get loan)
# (grown with c() inside the loop -- fine at this size, but preallocating
# a vector of nrow(platt_results) would be faster)
loanprofit <- NULL
for (i in 1:nrow(platt_results)){
class_label <- cutoff_label[i]
true_label <- platt_results$TARGET[i]
if (class_label == "Bad" & true_label == "Bad"){
#p = dtest_unscaled$CREDIT_AMNT[i]
p = 0
} else if (class_label == "Good" & true_label == "Bad"){
p = -platt_results$CREDIT_AMNT[i]
} else if (class_label == "Good" & true_label == "Good"){
p = platt_results$CREDIT_AMNT[i] * 0.2644
}else if (class_label == "Bad" & true_label == "Good"){
p = -platt_results$CREDIT_AMNT[i] * 0.2644
#p = 0
}
loanprofit <- c(loanprofit, p)
}
profit <- sum(loanprofit)
profitPerLoan <- profit/nrow(platt_results)
# fairness criteria average
statParityDiff <- statParDiff(sens.attr = platt_results$AGE, target.attr = cutoff_label)
averageOddsDiff <- avgOddsDiff(sens.attr = platt_results$AGE, target.attr = platt_results$TARGET, predicted.attr = cutoff_label)
predParityDiff <- predParDiff(sens.attr = platt_results$AGE, target.attr = platt_results$TARGET, predicted.attr = cutoff_label)
# NOTE(review): confusionMatrix() comes from caret, which is not loaded in
# this script -- presumably attached by the sourced fairness script; verify.
cm <- confusionMatrix(data = as.factor(cutoff_label), reference = platt_results$TARGET)
balAccuracy <- cm$byClass[['Balanced Accuracy']]
test_eval <- rbind(AUC, balAccuracy, EMP, acceptedLoans, profit, profitPerLoan, statParityDiff, averageOddsDiff, predParityDiff)
test_results <- cbind(test_results, test_eval)
}
# Print results
colnames(test_results) <- c(model.names); test_results
write.csv(test_results, "5_finalResults/POST_PlattScaling_Results.csv", row.names = T)
|
9f5376f1179fbccfbcd2e41bc1da9c4989fc3b0c
|
0005fc8b3163d8bd8fb5d80fa7db1ddc3cc50229
|
/man/plotSequenceEvents.Rd
|
705b67d110d849212cb134778d649135df51643c
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.base
|
2f54c0de826eb47d964131e1bda9ff9105c4ff24
|
0a8f37eec4ebee01e591fdade5ae60e97108270b
|
refs/heads/master
| 2022-08-11T18:04:09.478100
| 2022-06-09T19:52:31
| 2022-06-09T19:52:31
| 137,505,674
| 0
| 0
|
MIT
| 2022-06-11T01:30:18
| 2018-06-15T15:46:06
|
R
|
UTF-8
|
R
| false
| true
| 632
|
rd
|
plotSequenceEvents.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baseValidation.R
\name{plotSequenceEvents}
\alias{plotSequenceEvents}
\title{Plot Sequence Events}
\usage{
plotSequenceEvents(
timestamps,
sequences,
main = "Overlapping time sequences in hydraulic data",
language = "de"
)
}
\arguments{
\item{timestamps}{vector of timestamps}
\item{sequences}{data frame as returned by
\code{\link{getOverlappingTimeSequences}} with attribute "sequenceNumber"}
\item{main}{plot title}
\item{language}{"de" (German) or something else (English)}
}
\description{
Plot Sequence Events
}
|
44ed51f84b46cf38580c766b70d32ca25db98ddb
|
d04e8e91a28ebe98128152f06aa6a5a1104e03da
|
/scripts/helpful_snippets/self_sufficiency_2020.R
|
8b4b34123ef1742d56602398b754067d64eb80c6
|
[
"MIT"
] |
permissive
|
BPSTechServices/income-analysis
|
de0dd33ea3fda022b6b219734d419252ad219da0
|
991c3a2a316ae4d5f51e496bb8adf205646b9a86
|
refs/heads/main
| 2023-03-09T09:02:55.670310
| 2021-02-22T20:20:21
| 2021-02-22T20:20:21
| 340,525,301
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,945
|
r
|
self_sufficiency_2020.R
|
##### Load libraries, set working directory #####
# Builds the inputs for a self-sufficiency-standard analysis: FIPS codes,
# CPI inflation factors, the 2017 OR/WA self-sufficiency standard (filtered
# to the Portland MSA, inflated to 2018 dollars) and poverty thresholds.
pacman::p_load(tidyverse, data.table, srvyr, ipumsr)
options(
scipen = 999, # remove scientific notation
digits = 4, # set data precision for readability
stringsAsFactors = F, # string variables are brought in as characters
dplyr.width = Inf,
survey.replicates.mse = T,
datatable.fread.datatable = F # <- is this necessary?
)
# NOTE(review): clears the whole workspace -- fine for an interactive
# script, but do not source this from another session you care about.
rm(list=ls())
gc()
## Set working directory
# setwd("N:/work/district_planning/Economic Development/NICK/PUMS/IPUMS/sss_2020/")
# (requires RStudio: uses the active document's path)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
## Import FIPS codes
stfips <- rio::import("fips.xlsx", which = "state")
cntyfips <- rio::import("fips.xlsx", which = "county")
## Import CPI (Census Research Series)
cpi <- rio::import("cpi-u-rs_1950-current.xlsx") %>%
select(year, inflation_factor_2018)
## Import CPI (BLS)
# cpi <- rio::import("N:/work/district_planning/Economic Development/NICK/Resources/CPI-U-West_BLS.xlsx", which = "annual") %>%
# select(year, inflation_factor_2018)
### Import the 2017 self-sufficiency standard
sssor <- rio::import("OR2017_all_families.xlsx", which = "By Family") %>% select(`Family Type`, State, Year, County, `Annual Self-Sufficiency Wage`)
ssswa <- rio::import("WA2017_all_families.xlsx", which = "By Family") %>% select(`Family Type`, State, Year, County, `Annual Self-Sufficiency Wage`)
sss <- rbind(sssor, ssswa) %>%
rename(family_type = `Family Type`, sss_wage = `Annual Self-Sufficiency Wage`); rm(sssor, ssswa)
names(sss) <- tolower(names(sss))
# Filter for Portland MSA and join FIPS codes and adjust for inflation
sss <- sss %>%
filter((county %in% c("Clark County", "Skamania County") & state == "WA") |
(county %in% c("Clackamas County", "Columbia County", "Multnomah County", "Washington County", "Yamhill County") & state == "OR")) %>%
left_join(., stfips, by = "state") %>%
left_join(., cntyfips, by = "county") %>%
left_join(., cpi, by = "year") %>%
mutate(sss_wage_adj = sss_wage * inflation_factor_2018) %>%
select(-inflation_factor_2018)
## Import poverty thresholds and adjust for inflation
poverty <- rio::import("poverty_thresholds.xlsx", which = "all_years") %>%
left_join(., cpi, by = "year") %>%
mutate(poverty_threshold_adj = threshold * inflation_factor_2018)
## Variables to assist in coding
pdxpumas <- c(1314, 1301, 1305, 1303, 1302) # east county: 1316
# Which Group Quarters to include
gq_filter <- c("Households under 1970 definition", "Additional households under 1990 definition",
"Additional households under 2000 definition")
# Educational attainment
edu_lths <- c("Nursery school to grade 4", "Nursery school, preschool", "Kindergarten", "Grade 1, 2, 3, or 4", "Grade 1", "Grade 2", "Grade 3",
"Grade 4", "Grade 5, 6, 7, or 8", "Grade 5 or 6", "Grade 5", "Grade 6", "Grade 7 or 8", "Grade 7", "Grade 8", "Grade 9", "Grade 10",
"Grade 11", "Grade 12", "12th grade, no diploma", "No schooling completed")
edu_hs <- c("High school graduate or GED", "Regular high school diploma", "GED or alternative credential")
edu_sc <- c("Some college, but less than 1 year", "1 year of college", "1 or more years of college credit, no degree", "2 years of college",
"Associate's degree, type not specified", "Associate's degree, occupational program", "Associate's degree, academic program",
"3 years of college", "4 years of college")
edu_ba <- c("Bachelor's degree", "5+ years of college", "6 years of college (6+ in 1960-1970)", "7 years of college", "8+ years of college",
"Master's degree", "Professional degree beyond a bachelor's degree", "Doctoral degree")
#LANGUAGE and LANGUAGED
ee_slavic_lang = c('Rumanian', 'Albanian', 'Russian', 'Russian, Great Russian', 'Bielo-, White Russian', 'Ukrainian, Ruthenian, Little Russian', 'Ruthenian',
'Little Russian', 'Ukrainian', 'Czech', 'Bohemian', 'Moravian', 'Polish', 'Kashubian, Slovincian', 'Slovak', 'Serbo-Croatian, Yugoslavian, Slavonian',
'Croatian', 'Serbian', 'Dalmatian', 'Montenegrin', 'Slovene', 'Lithuanian', 'Lettish, Latvian', 'Other Balto-Slavic', 'Bulgarian',
'Lusatian, Sorbian, Wendish', 'Wendish', 'Macedonian', 'Slavic unknown', 'Armenian', 'Romany, Gypsy', 'Gypsy', 'Finnish', 'Magyar, Hungarian',
'Magyar', 'Hungarian', 'Uralic', 'Estonian, Ingrian, Livonian, Vepsian, Votic', 'Lapp, Inari, Kola, Lule, Pite, Ruija, Skolt, Ume', 'Other Uralic',
'Other Altaic', 'Chuvash', 'Karakalpak', 'Kazakh', 'Kirghiz', 'Karachay, Tatar, Balkar, Bashkir, Kumyk', 'Uzbek, Uighur', 'Azerbaijani', 'Turkmen',
'Yakut', 'Caucasian, Georgian, Avar', 'Georgian', 'Lappish', 'Estonian', 'Dalmatian, Montenegrin', 'Great Russian', 'Bosnian',
'Rumanian', 'Albanian', 'Russian', 'Ukrainian, Ruthenian, Little Russian', 'Czech', 'Polish', 'Slovak', 'Serbo-Croatian, Yugoslavian, Slavonian',
'Slovene', 'Lithuanian', 'Other Balto-Slavic', 'Slavic unknown', 'Armenian', 'Romany, Gypsy', 'Finnish', 'Magyar, Hungarian')
ee_slavic_bpl = c('Turkmenistan', 'Tadzhik', 'Kirghizia', 'Kazakhstan', 'Republic of Georgia', 'Azerbaijan', 'Armenia', 'Ukraine', 'Bessarabia',
'Moldavia', 'Byelorussia', 'Other USSR/Russia', 'Baltic States, ns', 'Lithuania', 'Latvia', 'Estonia', 'Eastern Europe, ns',
'Central Europe, ns', 'Kosovo', 'Slovenia', 'Carniola', 'Slovonia', 'Dalmatia', 'Bosnia', 'Serbia', 'Montenegro', 'Croatia',
'Yugoslavia', 'Transylvania', 'Romania', 'Russian Poland', 'West Prussia', 'Silesia', 'Prussian Poland', 'Posen', 'Pomerania',
'East Prussia', 'German Poland', 'Galicia', 'Austrian Poland', 'Poland', 'Hungary', 'Finland', 'Lapland, ns', 'Svalbard and Jan Meyen',
'Svalbard', 'Albania', 'Bulgaria', 'Czechoslovakia', 'Bohemia', 'Bohemia-Moravia', 'Slovakia', 'Czech Republic', 'East Berlin',
'East Germany', 'USSR, ns', 'Siberia', 'Uzbekistan')
ee_slavic_ancest = c('Armenian', 'Eastern European, nec', 'Central European, nec', 'Slavonian', 'Slav', 'Yugoslavian', 'Windish', 'Husel', 'Bioko',
'Lemko', 'Ruthenian (1990-2000)', 'Ruthenian (1980)', 'Ukrainian (1990-2000, ACS, PRCS)', 'Ukrainian (1980)', 'Uzbek',
'Tadzhik (1980, 2000)', 'Soviet Central Asia (1990-2000)', 'Tuvinian (1990-2000)', 'Crimean (1980)', 'Tartar (1980)',
'Tatar (1990-2000)', 'Soviet Union, nec', 'Yakut', 'Mesknetian (1990-2000)', 'Gagauz (1990-2000)', 'Chevash', 'Bashkir',
'Soviet Turkic (1990-2000)', 'Sorb/Wend', 'Slovene', 'Slovak', 'Montenegrin (1990-2000, 2012 ACS)',
'Bosnian (1990) Herzegovinian (2000, ACS, PRCS)', 'Serbian (1990-2000, ACS, PRCS)', 'Serbian (1980)', 'Muscovite', 'Russian',
'Wallachian', 'Moldavian', 'Bucovina', 'Bessarabian (1990-2000)', 'Bessarabian (1980)', 'Transylvanian', 'Rumanian (1980)',
'Romanian (1990-2000, ACS, PRCS)', 'Kashubian', 'Polish', 'Ossetian', 'North Caucasian Turkic (1990-2000)', 'North Caucasian',
'Macedonian', 'Lithuanian', 'Latvian', 'Magyar', 'Hungarian', 'Rom', 'Gruziia (1990-2000)',
'German from Russia (1990-2000); German Russian (ACS, PRCS)', 'Volga', 'Germans from Russia', 'Georgian', 'Voytak', 'Mordovian',
'Udmert', 'Finno Ugrian (1990-2000)', 'Livonian', 'Estonian', 'Moravian (1990-2000)', 'Bohemian (1990-2000, ACS, PRCS)', 'Bohemian',
'Czech', 'Czechoslovakian', 'Croatian', 'Turcoman (1980)', 'Kirghiz (1980)', 'Turkestani (1990-2000, 2012 ACS)', 'Cossack (1980)',
'Cossack (1990-2000)', 'Rusyn', 'Carpatho Rusyn', 'Carpathian', 'Bulgarian', 'Belorussian', 'Azerbaijani', 'Albanian', 'Silesian (1990-2000)',
'East German (1990-2000)', 'Finnish')
## Import the household and person replicate weights for variance estimation (person isn't actually needed)
## Two extracts are stacked (2013-2018 and 2019); join keys are coerced to
## numeric so they match the PUMS records below.
repwt <- rbind(
  read_ipums_micro(read_ipums_ddi("usa_00089.xml")), ## 2013-2018
  read_ipums_micro(read_ipums_ddi("usa_00117.xml")) ## 2019
) %>% select(-c(YEAR, CBSERIAL, STRATA, HHWT, CLUSTER, STATEFIP, GQ, PERWT)) %>%
  mutate_at(vars(SAMPLE, SERIAL, PERNUM), as.numeric)
## Import the PUMS microdata
# Read one IPUMS extract: convert labelled categoricals to factors, force the
# ID/income/cost columns to numeric, and keep the detailed occupation and
# industry codes as character strings.
read_pums <- function(filename){
  read_ipums_micro(read_ipums_ddi(filename)) %>%
    # filter(MET2013 == 38900) %>%
    mutate_at(vars(GQ:OCC2010, MIGRATE1:MOVEDIN), haven::as_factor) %>%
    mutate_at(vars(YEAR:STRATA, INCTOT:INCOTHER, PERWT, PERNUM, AGE, OWNCOST, RENTGRS), as.numeric) %>% ## TODO double check these
    mutate_at(vars(OCCSOC, INDNAICS), as.character)
}
pums <- rbind(read_pums("usa_00092.xml"), read_pums("usa_00116.xml")) ## 2013-18 and 2019
## Clean/recode the stacked PUMS extracts: inflation-adjust incomes, build
## person- and household-level flags, then attach the self-sufficiency
## standard and the replicate weights.
pums_cleaned <- pums %>%
  left_join(., cpi, by = c("YEAR" = "year")) %>%
  ## Corrections before recoding: AGE is shifted down by 1, IPUMS missing-value
  ## sentinels (9999999 etc.) become NA, and dollar amounts are re-expressed
  ## in 2018 dollars
  mutate(AGE = AGE - 1,
         INCTOT = if_else(INCTOT == 9999999, NA_real_, INCTOT * inflation_factor_2018),
         INCWAGE = if_else(INCWAGE == 999999, NA_real_, INCWAGE * inflation_factor_2018),
         INCBUS00 = if_else(INCBUS00 == 999999, NA_real_, INCBUS00 * inflation_factor_2018),
         INCSS = if_else(INCSS == 99999, NA_real_, INCSS * inflation_factor_2018),
         INCINVST = if_else(INCINVST == 999999, NA_real_, INCINVST * inflation_factor_2018),
         INCRETIR = if_else(INCRETIR == 999999, NA_real_, INCRETIR * inflation_factor_2018),
         INCSUPP = if_else(INCSUPP == 99999, NA_real_, INCSUPP * inflation_factor_2018),
         INCWELFR = if_else(INCWELFR == 99999, NA_real_, INCWELFR * inflation_factor_2018),
         INCOTHER = if_else(INCOTHER == 99999, NA_real_, INCOTHER * inflation_factor_2018),) %>%
  ## Pre-recoding for calculating household-level characteristics
  mutate(ssi_disability_flag = if_else(INCSS > 10000 & AGE < 62 & INCWAGE < 5000, TRUE, FALSE), # Flag disabled if they are under 62, receive SS and have minimal wages
         qualified_person_flag = if_else(AGE >= 19 & AGE <= 65 & ssi_disability_flag == FALSE, 1, 0), # Flag disabled, youth and retired folks to '0'
         # 1 = speaks English "very well" or only English; 0 = less than
         # "very well", under 14, or blank (used to flag LEP households below)
         lep_helper1 = case_when(AGE < 14 | SPEAKENG == "N/A (Blank)" ~ 0L,
                                 SPEAKENG %in% c("Does not speak English", "Yes, speaks well", "Yes, but not well") ~ 0L,
                                 T ~ 1L),
         # One digit per age bracket so composition can be summed per household
         per_type = case_when(AGE <= 2 ~ 1,
                              AGE > 2 & AGE <= 5 ~ 10,
                              AGE > 5 & AGE <= 12 ~ 100,
                              AGE > 12 & AGE <= 18 ~ 1000,
                              AGE > 18 ~ 10000),
         adult = if_else(AGE > 18, 1, 0),
         infant = if_else(AGE <= 2, 1, 0),
         preschooler = if_else(AGE > 2 & AGE <= 5, 1, 0),
         schoolager = if_else(AGE > 5 & AGE <= 12, 1, 0),
         teenager = if_else(AGE > 12 & AGE <= 18, 1, 0)) %>%
  ## Household-level rollups; SAMPLE + SERIAL identifies a household
  group_by(SAMPLE, SERIAL) %>%
  mutate(qualified_in_hh = sum(qualified_person_flag),
         lep_helper2 = sum(lep_helper1),
         lep_hh = if_else(lep_helper2 == 0, "LEP household", "Non-LEP household"),
         hh_size = max(PERNUM),
         hh_income = sum(INCTOT, na.rm = T),
         adults = sum(adult),
         infants = sum(infant),
         preschoolers = sum(preschooler),
         schoolagers = sum(schoolager),
         teenagers = sum(teenager),
         children = infants + preschoolers + schoolagers + teenagers) %>%
  ungroup() %>%
  ## Recoding begins
  mutate(unqualified_hh_flag = if_else(qualified_in_hh == 0 | !(GQ %in% gq_filter), TRUE, FALSE), # Flag unqualified households or those in GQs
         # family_type matches the SSS table's "a#i#p#s#t#" naming; households
         # with 4+ adults and children are collapsed to "a#c#"
         family_type = case_when(adults >= 4 & children == 0 ~ paste0("a",adults,"i0p0s0t0"), #,infants,"p",preschoolers,"s",schoolagers,"t",teenagers),
                                 adults >= 4 ~ paste0("a",adults,"c",children),
                                 T ~ paste0("a",adults,"i",infants,"p",preschoolers,"s",schoolagers,"t",teenagers)),
         unadj_housing_costs = case_when(OWNERSHP == "Owned or being bought (loan)" ~ OWNCOST,
                                         OWNERSHP == "Rented" ~ RENTGRS,
                                         T ~ 0),
         housing_costs_adj = unadj_housing_costs * inflation_factor_2018,
         RACHISP = case_when(HISPAN != "Not Hispanic" ~ "Yes", T ~ "No"),
         # Mutually exclusive race/ethnicity recode; Hispanic takes precedence
         race_rec = case_when(RACHISP == "Yes" ~ "Hispanic",
                              RACE == "White" ~ "White",
                              RACE == "Black/African American/Negro" ~ "Black",
                              RACE == "American Indian or Alaska Native" ~ "Native American",
                              RACE %in% c("Chinese", "Japanese", "Other Asian or Pacific Islander") & RACASIAN == "Yes" ~ "Asian",
                              RACE == "Other Asian or Pacific Islander" & RACPACIS == "Yes" ~ "Pacific Islander",
                              RACE == "Other race, nec" ~ "Another race",
                              RACE %in% c("Two major races", "Three or more major races") ~ "Multi-racial",
                              T ~ NA_character_),
         # Factors above 58 are outside U.S. posessions (foreign countries)
         birthplace_rec = case_when(as.numeric(BPL) > 58 & CITIZEN != "Born abroad of American parents" ~ "Foreign-born", T ~ "Not foreign-born"),
         # "Yes" if born in Eastern Europe, speaks an Eastern European
         # language, or reports a single Slavic ancestry (see lookup vectors)
         RACSLAVIC = case_when(BPLD %in% ee_slavic_bpl | BPL %in% ee_slavic_bpl ~ "Yes",
                               LANGUAGED %in% ee_slavic_lang | LANGUAGE %in% ee_slavic_lang ~ "Yes",
                               ANCESTR1D %in% ee_slavic_ancest & ANCESTR2D == "Not Reported" ~ "Yes",
                               T ~ "No"),
         educ_rec = case_when(EDUCD %in% edu_lths ~ "Less than high school",
                              EDUCD %in% edu_hs ~ "High school graduate or equivalent",
                              EDUCD %in% edu_sc ~ "Some college or Associate's",
                              EDUCD %in% edu_ba ~ "Bachelor's degree or higher",
                              T ~ NA_character_)) %>%
  group_by(SAMPLE, SERIAL) %>%
  mutate(qualified_hh_income = sum(INCTOT[qualified_person_flag == 1], na.rm = T)) %>% ## Sum total income only for qualified persons in the household
  ungroup() %>%
  ## Attach the county/family-type self-sufficiency wage and compare
  left_join(., sss, by = c("STATEFIP", "COUNTYFIP", "family_type")) %>%
  mutate(self_sufficient_flag = if_else(hh_income >= sss_wage_adj, "Self-sufficient", "Not self-sufficient")) %>% # Use qualified_hh_income if you must exclude folks
  mutate_at(vars(YEAR, PUMA, RACHISP, race_rec, birthplace_rec, lep_hh, RACSLAVIC, educ_rec, self_sufficient_flag), as.factor) %>%
  left_join(., repwt, by = c("SAMPLE", "SERIAL", "PERNUM")) %>%
  ## Pre-divided weights for 3-year pooled estimates
  mutate(HHWT_3 = HHWT / 3,
         PERWT_3 = PERWT / 3)
### Three-year replicate weight sample design for calculating standard errors
### (srvyr replicate-weight design built on householders in the Portland MSA)
h_repd <- pums_cleaned %>%
  select(-c(REPWTP:REPWTP80)) %>% # Remove person weights !! IMPORTANT !!
  filter(PERNUM == 1, # Select householder
         GQ %in% gq_filter, # Remove group quarters
         # YEAR %in% c(2016, 2017, 2018), # Subset 3 years
         MET2013 == 38900, # Keep only Portland MSA
         unqualified_hh_flag == FALSE, # Remove unqualified households
         !(is.na(self_sufficient_flag))) %>% # Remove NA values for whether they are self-sufficient
  mutate_at(vars(HHWT, REPWT:REPWT80), ~./3) %>% # Divide replicate weights and household weights by 3 since using 3-year sample
  as_survey_rep(weights = "HHWT",
                repweights = starts_with("REPWT"),
                combined_weights = T,
                type = "JK1",
                scale = 4/80,
                # NOTE(review): ncol() of a character string returns NULL, so
                # rscales is effectively unset here -- confirm this matches the
                # intended per-replicate scaling for the ACS JK1 design
                rscales = ncol('REPWT[0-9]+'))
## Quickly demonstrate self-sufficiency standard on race alone
## (weighted household counts with SE-derived CV and a 90% margin of error)
h_repd %>%
  filter(COUNTYFIP == 51 & STATEFIP == 41) %>% # Filter for Multnomah County
  group_by(self_sufficient_flag, race_rec) %>%
  summarize(hh = survey_total(), # weighted household count (also yields hh_se)
            n = unweighted(n())) %>% ungroup() %>%
  mutate(cv = hh_se / hh, # coefficient of variation (reliability)
         moe = hh_se * 1.645) %>% # 90% margin of error
  arrange(race_rec)
#### Define a few helper functions to analyze some data
## Filter a survey design to a geography (a quoted filter expression) and to
## the 3-year trailing window ending at `analysis_year`; tags the rows with an
## `analysis_year` column.
geo_year_filter <- function(design, analysis_year, geofilter_statement = "COUNTYFIP == 51 & STATEFIP == 41") {
  # Helpful: https://stackoverflow.com/questions/61692367/how-to-pass-a-filter-statement-as-a-function-parameter-in-dplyr-using-quosure
  # BUG FIX: the original used two back-to-back `if` blocks with no `else`, so
  # the value of the 2016 pipeline was discarded and the function returned
  # NULL for analysis_year == 2016. A single if/else chain returns the result.
  if (analysis_year == 2016) {
    window_years <- c(2014, 2015, 2016)
  } else if (analysis_year == 2018) {
    window_years <- c(2016, 2017, 2018)
  } else {
    stop("analysis_year must be 2016 or 2018", call. = FALSE)
  }
  yr_label <- analysis_year # plain local so data masking can't shadow it
  design %>%
    filter(
      YEAR %in% window_years,
      # Washington County would be `"COUNTYFIP == 67 & STATEFIP == 41"`
      eval(rlang::parse_expr(geofilter_statement))
    ) %>%
    mutate(analysis_year = yr_label)
}
## Summarize a (grouped) survey design into weighted household counts with a
## 90% margin of error, a coefficient of variation, and a reliability flag.
sss_summarize <- function(design) {
  design %>%
    summarize(hh = survey_total(na.rm = T),
              count = unweighted(n())) %>%
    mutate(hh_moe = hh_se * 1.645, # 90% margin of error
           hh_cv = hh_se / hh,
           # case_when() takes the first matching condition, so these bins now
           # cover the whole range. BUG FIX: the original left hh_cv == 0.2
           # unmatched (NA) and misspelled "Unreliable" as "Unreliabile".
           hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliable",
                                      hh_cv > 0.2 ~ "2. Use with caution",
                                      TRUE ~ "1. Use"))
}
## Determine the self-sufficiency standard for one demographic group: filter
## the design to a geography and 3-year window, summarize by the given
## variable, and keep only the rows where it equals `truevar`.
# svydesign  - srvyr replicate-weight design (e.g. h_repd)
# var        - quosure for the demographic column, e.g. quo(RACBLK)
# truevar    - value of `var` to keep, e.g. "Yes"
# group_name - label written into the output's `group` column
# ayear      - analysis year passed through to geo_year_filter()
# geofilter  - filter expression as a string, e.g. "COUNTYFIP == 51 & STATEFIP == 41"
get_sss <- function(svydesign, var, truevar, group_name, ayear, geofilter) {
  # var is the variable containing the demographic of interest
  # truevar is the string to test var against
  ayear <- ayear # no-op self-assignment (forces the argument); kept as-is
  svydesign %>%
    geo_year_filter(., analysis_year = ayear, geofilter_statement = geofilter) %>% # Filter to the requested geography and 3-year window
    mutate(group = group_name) %>% # Add the group name
    group_by(group, !!var, analysis_year, self_sufficient_flag) %>% # Group by the key variables
    sss_summarize(.) %>%
    filter(!!var == !!truevar) %>% # Return only records for the tested variable, i.e., racblk == "Yes"
    select(-!!var)
}
## Group 1 (g1) for TOTAL HOUSEHOLDS.
## This returns the same results as all groupings below; this is just an explicit
## example of what is happening under the hood.
g1_18 <- h_repd %>%
  filter(
    COUNTYFIP == 51, STATEFIP == 41, # Multnomah County
    # YEAR %in% c(2014, 2015, 2016),
    YEAR %in% c(2016, 2017, 2018), # 3-year trailing window ending 2018
  ) %>%
  group_by(self_sufficient_flag) %>%
  summarize(hh = survey_total(na.rm = T),
            count = unweighted(n())) %>%
  mutate(hh_moe = hh_se * 1.645, # 90% margin of error
         hh_cv = hh_se / hh,
         # Ordered case_when covers the whole range. BUG FIX: the original
         # left hh_cv == 0.2 unmatched (NA) and misspelled "Unreliable".
         hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliable",
                                    hh_cv > 0.2 ~ "2. Use with caution",
                                    TRUE ~ "1. Use"),
         group = "Total households",
         analysis_year = 2018)
## The difference for these race categories is that we are using race alone or in combination
## which double-counts some households but increases sample size
## Each gN_18 is one demographic cut of Multnomah County, analysis year 2018.
g2_18 <- get_sss(h_repd, quo(RACBLK), "Yes", "Black households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g3_18 <- get_sss(h_repd, quo(RACHISP), "Yes", "Hispanic households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g4_18 <- get_sss(h_repd, quo(RACAMIND), "Yes", "Native American households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g5_18 <- get_sss(h_repd, quo(RACASIAN), "Yes", "Asian households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g6_18 <- get_sss(h_repd, quo(RACPACIS), "Yes", "Pacific Islander households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g7_18 <- get_sss(h_repd, quo(RACOTHER), "Yes", "Another race households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g8_18 <- get_sss(h_repd, quo(RACSLAVIC), "Yes", "Slavic households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g9_18 <- get_sss(h_repd, quo(race_rec), "White", "White alone households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g10_18 <- get_sss(h_repd, quo(race_rec), "Multi-racial", "Multi-racial households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g11_18 <- get_sss(h_repd, quo(lep_hh), "LEP household", "LEP households", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g12_18 <- get_sss(h_repd, quo(educ_rec), "Less than high school", "Less than HS", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g13_18 <- get_sss(h_repd, quo(educ_rec), "High school graduate or equivalent", "HS diploma", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g14_18 <- get_sss(h_repd, quo(educ_rec), "Some college or Associate's", "Some college", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
g15_18 <- get_sss(h_repd, quo(educ_rec), "Bachelor's degree or higher", "Four-year degree", 2018, "COUNTYFIP == 51 & STATEFIP == 41")
# Bind all the groupings together
sss18 <- rbind(g1_18, g2_18, g3_18, g4_18, g5_18, g6_18, g7_18, g8_18, g10_18, g9_18, g11_18, g12_18, g13_18, g14_18, g15_18) ; sss18
# Display in tabular format: one row per group with the self-sufficient share
sss18 %>%
  select(group, self_sufficient_flag, hh) %>%
  pivot_wider(names_from = self_sufficient_flag, values_from = hh) %>%
  mutate(total_hh = (`Self-sufficient` + `Not self-sufficient`),
         share_self_sufficient = `Self-sufficient` / total_hh)
############ FOR LIZA ############
## For Liza - all family types by race
## NOTE: these are UNWEIGHTED sample counts for 2019 householders in the
## Portland MSA; results are copied straight to the clipboard (side effect).
pums_cleaned %>%
  filter(
    PERNUM == 1,
    GQ %in% gq_filter,
    YEAR == 2019,
    # YEAR %in% c(2017, 2018, 2019),
    MET2013 == 38900,
  ) %>%
  # mutate(HHWT = HHWT/3) %>%
  group_by(family_type, race_rec) %>%
  # summarize(hh = round(sum(HHWT))) %>% ungroup() %>%
  summarize(hh = n()) %>% ungroup() %>%
  pivot_wider(id_cols = family_type, values_from = hh, names_from = race_rec) %>%
  mutate(total = select(., c(everything(), -family_type)) %>% rowSums(na.rm = T)) %>%
  arrange(desc(total)) %>% clipr::write_clip()
## For Liza - top 10 family types by race
## WEIGHTED (HHWT) household counts for 2019; clipboard output again.
pums_cleaned %>%
  filter(
    PERNUM == 1,
    GQ %in% gq_filter,
    YEAR == 2019,
    # YEAR %in% c(2017, 2018, 2019),
    MET2013 == 38900,
  ) %>%
  # mutate(HHWT = HHWT/3) %>%
  group_by(family_type, race_rec) %>%
  summarize(hh = round(sum(HHWT))) %>% ungroup() %>%
  group_by(race_rec) %>% top_n(10, wt = hh) %>%
  arrange(race_rec, desc(hh)) %>% clipr::write_clip()
## Same top-10 table from the replicate-weight design, with SE-derived CV/MOE
h_repd %>%
  filter(YEAR %in% c(2016, 2017, 2018)) %>% # TODO Change to 2019 when repwt file is ready to download
  group_by(family_type, race_rec) %>%
  summarize(hh = survey_total(),
            n = unweighted(n())) %>% ungroup() %>%
  mutate(cv = hh_se / hh,
         moe = hh_se * 1.645) %>%
  group_by(race_rec) %>% top_n(10, wt = hh) %>%
  arrange(race_rec, desc(hh)) %>% clipr::write_clip()
############ ARCHIVE - might be useful later ############
#
#
# h %>%
# mutate(PUMA = as.factor(PUMA)) %>%
# # filter(MET2013 == 38900) %>%
# filter(
# # COUNTYFIP == 51, STATEFIP == 41,
# GQ %in% gq_filter,
# PUMA %in% pdxpumas | PUMA == 1316,
# # RACSLAVIC == "No",
# unqualified_hh_flag == FALSE,
# is.na(self_sufficient_flag) == FALSE,
# # YEAR %in% c(2014, 2015, 2016),
# YEAR %in% c(2016, 2017, 2018),
# ) %>%
# group_by(self_sufficient_flag, race_rec) %>%
# summarize(hh = survey_total(na.rm = T),
# count = unweighted(n())) %>%
# mutate(hh_moe = hh_se * 1.645,
# hh_cv = hh_se / hh,
# hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliabile",
# hh_cv <= 0.4 & hh_cv > 0.2 ~ "2. Use with caution",
# hh_cv < 0.2 ~ "1. Use")) %>%
# select(race_rec, self_sufficient_flag, hh) %>%
# pivot_wider(names_from = self_sufficient_flag, values_from = hh) %>%
# mutate(total_hh = (`Self-sufficient` + `Not self-sufficient`),
# share_self_sufficient = `Self-sufficient` / total_hh)
#
#
#
#
#
#
#
#
# g1_16 <- h %>%
# mutate(PUMA = as.factor(PUMA)) %>%
# filter(
# COUNTYFIP == 51, STATEFIP == 41,
# GQ %in% gq_filter,
# unqualified_hh_flag == FALSE,
# is.na(self_sufficient_flag) == FALSE,
# YEAR %in% c(2014, 2015, 2016),
# # YEAR %in% c(2016, 2017, 2018),
# ) %>%
# group_by(self_sufficient_flag) %>%
# summarize(hh = survey_total(na.rm = T),
# count = unweighted(n())) %>%
# mutate(hh_moe = hh_se * 1.645,
# hh_cv = hh_se / hh,
# hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliabile",
# hh_cv <= 0.4 & hh_cv > 0.2 ~ "2. Use with caution",
# hh_cv < 0.2 ~ "1. Use"),
# group = "Total households",
# analysis_year = 2016)
#
#
# g2_16 <- get_sss(h, quo(RACBLK), "Black households", 2016)
#
# g3_16 <- get_sss(h, quo(RACHISP), "Hispanic households", 2016)
#
# g4_16 <- get_sss(h, quo(RACAMIND), "Native American households", 2016)
#
# g5_16 <- get_sss(h, quo(RACASIAN), "Asian households", 2016)
#
# g6_16 <- get_sss(h, quo(RACPACIS), "Pacific Islander households", 2016)
#
# g7_16 <- get_sss(h, quo(RACOTHER), "Another race households", 2016)
#
# g8_16 <- get_sss(h, quo(RACSLAVIC), "Slavic households", 2016)
#
#
# g9_16 <- h %>%
# mutate(PUMA = as.factor(PUMA)) %>%
# filter(
# COUNTYFIP == 51, STATEFIP == 41,
# GQ %in% gq_filter,
# unqualified_hh_flag == FALSE,
# is.na(self_sufficient_flag) == FALSE,
# YEAR %in% c(2014, 2015, 2016),
# # YEAR %in% c(2016, 2017, 2018),
# race_rec == "White"
# ) %>%
# group_by(self_sufficient_flag) %>%
# summarize(hh = survey_total(na.rm = T),
# count = unweighted(n())) %>%
# mutate(hh_moe = hh_se * 1.645,
# hh_cv = hh_se / hh,
# hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliabile",
# hh_cv <= 0.4 & hh_cv > 0.2 ~ "2. Use with caution",
# hh_cv < 0.2 ~ "1. Use"),
# group = "White alone households",
# analysis_year = 2016)
#
# g10_16 <- h %>%
# mutate(PUMA = as.factor(PUMA)) %>%
# filter(
# COUNTYFIP == 51, STATEFIP == 41,
# GQ %in% gq_filter,
# unqualified_hh_flag == FALSE,
# is.na(self_sufficient_flag) == FALSE,
# YEAR %in% c(2014, 2015, 2016),
# # YEAR %in% c(2016, 2017, 2018),
# race_rec == "Multi-racial"
# ) %>%
# group_by(self_sufficient_flag) %>%
# summarize(hh = survey_total(na.rm = T),
# count = unweighted(n())) %>%
# mutate(hh_moe = hh_se * 1.645,
# hh_cv = hh_se / hh,
# hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliabile",
# hh_cv <= 0.4 & hh_cv > 0.2 ~ "2. Use with caution",
# hh_cv < 0.2 ~ "1. Use"),
# group = "Multi-racial households",
# analysis_year = 2016)
#
#
# sss16 <- rbind(g1_16, g2_16, g3_16, g4_16, g5_16, g6_16, g7_16, g8_16, g10_16, g9_16) ; sss16
#
# sss16 %>%
# select(group, analysis_year, self_sufficient_flag, hh) %>%
# pivot_wider(names_from = self_sufficient_flag, values_from = hh) %>%
# mutate(total_hh = (`Self-sufficient` + `Not self-sufficient`),
# share_self_sufficient = `Self-sufficient` / total_hh)
#
#
# s <- rbind(sss18, sss16) %>%
# select(analysis_year, group, self_sufficient_flag, hh) %>%
# pivot_wider(names_from = self_sufficient_flag, values_from = hh) %>%
# mutate(total_hh = (`Self-sufficient` + `Not self-sufficient`),
# share_self_sufficient = `Self-sufficient` / total_hh) %>%
# arrange(group)
#
# s %>%
# ggplot(aes(x = analysis_year, y = share_self_sufficient, color = group)) +
# geom_line(size = 2)
#
#
#
#
#
# ## https://stackoverflow.com/questions/45947787/create-new-variables-with-mutate-at-while-keeping-the-original-ones
# # repwt %>%
# # mutate_at(vars(REPWT1:REPWT80), funs(./3))
#
#
# # repwtsub <- repwt %>%
# # filter(YEAR %in% c(2013, 2014, 2015, 2016, 2017, 2018)) %>%
# # # mutate_at(vars(REPWT1:REPWT80), funs(./3)) %>%
# # # mutate_at(vars(REPWTP1:REPWTP80, REPWTP), funs(./3)) %>%
# # select(-c(YEAR, CBSERIAL, STRATA, HHWT, CLUSTER, STATEFIP, GQ, PERWT))
#
# # sssraw <- rio::import("usa_00088.csv")
#
# # pums_sub <- pums %>%
# # filter(YEAR %in% c(2013, 2014, 2015, 2016, 2017, 2018),
# # MET2013 == 38900) %>%
# # mutate_at(vars(HHWT, PERWT), funs(./3))
#
#
# # lep <- pums %>%
# # select(SAMPLE, SERIAL, PERNUM, AGE, LANGUAGE, SPEAKENG) %>%
# # mutate(hh_id = paste0(SAMPLE, SERIAL),
# # lep_helper1 = case_when(AGE < 14 | SPEAKENG == "N/A (Blank)" ~ 0L,
# # SPEAKENG %in% c("Does not speak English", "Yes, speaks well", "Yes, but not well") ~ 0L,
# # T ~ 1L)) %>%
# # group_by(SAMPLE, SERIAL) %>%
# # mutate(lep_helper2 = sum(lep_helper1),
# # lep_hh = if_else(lep_helper2 == 0, "LEP household", "Non-LEP household"))
#
#
#
# # s_design2 <- s %>%
# # as_survey_rep(weights = HHWT,
# # repweights = starts_with("REPWT"),
# # combined_weights = T,
# # type = "JK1",
# # scale = 4/80,
# # rscales = ncol('REPWT[0-9]+'))
#
#
# # test <- s_design %>%
# # filter(COUNTYFIP == 51, STATEFIP == 41) %>%
# # group_by(YEAR) %>%
# # summarize(hh = survey_total(na.rm = T)) %>%
# # mutate(hh_moe = hh_se * 1.645,
# # hh_cv = hh_se / hh,
# # hh_reliability = case_when(hh_cv > 0.4 ~ "3. Unreliabile",
# # hh_cv <= 0.4 & hh_cv > 0.2 ~ "2. Use with caution",
# # hh_cv < 0.2 ~ "1. Use"))
|
2bba00d2aded15915780eeb0e199ac6ad7375026
|
e4228d2482a085d3355964c278269a082959e038
|
/R/tcplMthdList.R
|
16592c0e238f9957274519dbdf71e92b8fa0a847
|
[
"MIT"
] |
permissive
|
USEPA/CompTox-ToxCast-tcpl
|
08d9667ee76532382f2ef2fe7da2e7b8ebc26b2b
|
a8582c61883ba6e6f25b503cfa1f1b37605e3b29
|
refs/heads/main
| 2023-08-30T12:53:21.736913
| 2023-08-24T13:40:02
| 2023-08-24T13:40:02
| 89,386,154
| 23
| 13
|
NOASSERTION
| 2023-09-13T14:07:15
| 2017-04-25T17:05:00
|
R
|
UTF-8
|
R
| false
| false
| 966
|
r
|
tcplMthdList.R
|
#-------------------------------------------------------------------------------
# tcplMthdList:
#-------------------------------------------------------------------------------
#' @rdname mthd_funcs
#' @export
#-------------------------------------------------------------------------------
# tcplMthdList: list the available methods for a given level/data type,
# dropping the audit columns from the result.
#-------------------------------------------------------------------------------
#' @rdname mthd_funcs
#' @export
tcplMthdList <- function(lvl, type = "mc") {
  mthd_tbl <- paste0(type, lvl, "_methods")
  ## Suppress warnings: some database fields are not recognized by R and come
  ## back as character, which triggers import warnings.
  dat <- suppressWarnings(
    tcplQuery(paste0("SELECT * FROM ", mthd_tbl, ";"),
              getOption("TCPL_DB"), tbl = mthd_tbl)
  )
  ## Empty result set: warn and return the (empty) table as-is.
  if (nrow(dat) == 0) {
    warning("No ", type, lvl, " methods in the tcpl databases.")
    return(dat[])
  }
  ## Keep everything except the audit/bookkeeping columns.
  audit_cols <- c("created_date", "modified_date", "modified_by")
  keep_cols <- setdiff(names(dat), audit_cols)
  dat <- dat[, .SD, .SDcols = keep_cols]
  dat[]
}
#-------------------------------------------------------------------------------
|
15f014b2c30c0c96034af7189adc8b485a0f2a7b
|
4b2389814b5e717c240d8f80cf0f4f6114a30995
|
/Hwk_Reduced_Rank_FDA.R
|
ef2204f5e1b68f31a297b5a7c9a24ee62b7a031c
|
[] |
no_license
|
jaime-gacitua/HW2ML-Classification
|
6b6c6aac9297e9e8d9e0ec5f357097aa7e0835d9
|
a99607d90e664bcf7b0a7ef997897d5019168039
|
refs/heads/master
| 2020-05-23T08:06:50.847629
| 2016-10-12T21:55:37
| 2016-10-12T21:55:37
| 70,308,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,590
|
r
|
Hwk_Reduced_Rank_FDA.R
|
## Reduced-rank LDA (Fisher Discriminant Analysis) on the HTF vowel data.
## Reproduces the subplots of Figure 4.8 in HTF by choosing which pair of
## Fisher coordinates (a, b) to plot; e.g. a = 1, b = 3 gives the top-left
## sub-figure.
# library() errors loudly when a package is missing; require() (used in the
# original) just returns FALSE and lets the script fail later.
library(DAAG)
library(ggplot2)
library(MASS)
library(caret)

a <- 9   # First Fisher coordinate to plot
b <- 10  # second Fisher coordinate to plot

################################################################
# First download the training data from the HTF website.
# Columns: row.names, y (the class), x.1 to x.10 (predictors).
url <- "http://www-stat.stanford.edu/~tibs/ElemStatLearn/datasets/vowel.train"
vtrain <- read.table(url, header = TRUE, sep = ',')
vtrain <- as.data.frame(vtrain)

# Now download the test data
url <- "http://www-stat.stanford.edu/~tibs/ElemStatLearn/datasets/vowel.test"
vtest <- read.table(url, header = TRUE, sep = ',')
vtest <- as.data.frame(vtest)
#################################################################
# Find the Fisher discriminant directions with MASS::lda, then project the
# training data -- and the per-class mean vectors -- onto coordinates (a, b).
ldatrain <- lda(y~x.1+x.2+x.3+x.4+x.5+x.6+x.7+x.8+x.9+x.10, data=vtrain)
vtrain.lda.values <- data.frame(predict(ldatrain, vtrain[,3:12]))
# BUG FIX: `ldatrain.means` was never defined; the class centroids live in the
# fitted object as `ldatrain$means` (one row per vowel class, columns x.1..x.10).
vtrain.lda.values.means <- data.frame(predict(ldatrain, data.frame(ldatrain$means)))
vtest.lda.values <- data.frame(predict(ldatrain, vtest[,3:12]))
# Pull out the two requested discriminant coordinates for points and centroids
plot.data <- subset(vtrain.lda.values, select= c(paste('x.LD',a,sep=''), paste('x.LD',b,sep='')))
plot.data.means <- subset(vtrain.lda.values.means, select= c(paste('x.LD',a,sep=''), paste('x.LD',b,sep='')))
# Scatter the projected observations colored by class, with the class means
# overplotted as large circles (shape taken literally via scale_shape_identity)
p <- ggplot() +
  geom_point(data = plot.data, aes(
    x = plot.data[,1],
    y = plot.data[,2],
    color=factor(vtrain.lda.values$class), shape=1)) +
  xlab(paste('Coordinate ',a)) +
  ylab(paste('Coordinate ',b)) +
  geom_point(data = plot.data.means, aes(
    x = plot.data.means[,1],
    y = plot.data.means[,2],
    colour=factor(vtrain.lda.values.means$class), shape=1, stroke=5, size=10)) +
  scale_shape_identity() + theme(legend.position="none")
p
#################
# Part B
# Re-fit predictions with LDA restricted to the first `dimen` discriminant
# coordinates (dimen = 1..10), recording train/test misclassification rates.
n_dims <- 10 # was a variable named `rep`, which shadowed base::rep()
errors.train = numeric(length=n_dims) # preallocated error-rate vectors
errors.test = numeric(length=n_dims)
for(t in seq_len(n_dims)){ # seq_len() is safe even if n_dims were 0
  ldatrain.predict <- data.frame(predict(ldatrain, vtrain[,3:12], dimen=t))
  errors.train[t] = 1- confusionMatrix(ldatrain.predict$class, vtrain$y)$overall[1]
  ldatest.predict <- data.frame(predict(ldatrain, vtest[,3:12], dimen=t))
  errors.test[t] = 1- confusionMatrix(ldatest.predict$class, vtest$y)$overall[1]
}
# Plot training (blue) vs test (orange) misclassification rate by dimension.
# NOTE: the original called rm(errplot) first, which warns when errplot does
# not yet exist; plain assignment below overwrites any prior value anyway.
errplot <- ggplot() +
  geom_line(data = data.frame(errors.train), aes(
    x = c(1,2,3,4,5,6,7,8,9,10),
    y = errors.train[1:10]), colour="blue") + scale_shape_identity() +
  geom_point(data = data.frame(errors.train), aes(
    x = c(1,2,3,4,5,6,7,8,9,10),
    y = errors.train[1:10]), shape=16) +
  xlab('Dimension') +
  ylab('Misclassification Rate') +
  geom_line(data = data.frame(errors.test), aes(
    x = c(1,2,3,4,5,6,7,8,9,10),
    y = errors.test[1:10]), colour="orange") +
  geom_point(data = data.frame(errors.test), aes(
    x = c(1,2,3,4,5,6,7,8,9,10),
    y = errors.test[1:10]), shape=16) +
  ggtitle('LDA and Dimension Reduction on the Vowel Data')
errplot
# Confusion matrix for the 2-dimensional classifier on the test set.
# (The original wrapped this single call in a degenerate for(t in 2:2) loop.)
ldatest.predict <- data.frame(predict(ldatrain, vtest[,3:12], dimen=2))
confusionMatrix(ldatest.predict$class, vtest$y)
|
d707fb4f842077d2889f858efa04ba9a6e086fce
|
e4c8af552f8801a088ca91a6cffe77689089d5d7
|
/src/Analysis/other/10b-regress-3day-body-entero-pool.R
|
7390d194cf8035a3db115dca7fd96560680d24eb
|
[] |
no_license
|
jadebc/13beaches-coliphage
|
eb6087b957dbfac38211ac531508860f48094c15
|
3d511ffa91a6dd5256d6832162ea239c1dbbad28
|
refs/heads/master
| 2021-06-17T03:38:00.805458
| 2017-04-27T22:50:06
| 2017-04-27T22:50:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,174
|
r
|
10b-regress-3day-body-entero-pool.R
|
##########################################
# Coliphage analysis - 6 beaches
# v1 by Jade 7/13/15
# This file conducts maximum likelihood regression
# to estimate prevalence ratios
# for enterococcus
# Results pooled across beaches and assay
# 3 day gi illness
##########################################
rm(list=ls()) # NOTE(review): clears the entire workspace; run in a fresh session
library(foreign)
# --------------------------------------
# load and pre-process the
# analysis dataset
# (refer to the base functions script
# for details on the pre-processing)
# --------------------------------------
beaches13=read.csv("~/Documents/CRG/coliphage/13beaches-data/final/13beaches-analysis.csv")
# load base functions
source("~/Documents/CRG/coliphage/13beaches-coliphage/src/Analysis/0-base-functions.R")
data=preprocess.6beaches(beaches13)
# restrict to 6 beaches with coliphage data
beach.list=c("Avalon","Doheny","Malibu","Mission Bay",
             "Fairhope","Goddard")
all=data[data$beach %in% beach.list,]
# drop individuals with no water quality information
all=subset(all,nowq==0)
# subset to non-missing exposure categories
# to make the robust CI calcs work
# (here: keep body-contact swimmers only)
all=subset(all,all$bodycontact=="Yes")
# --------------------------------------
# Calculate the actual Ns for each cell
# and store them for plotting and tables
# --------------------------------------
# Count the observations contributing to an analysis cell: cross-tabulating
# silently drops rows where either value is NA, so the table's grand total is
# the number of rows with BOTH outcome and exposure observed.
regN <- function(outcome,exposurecat) {
  xtab <- table(outcome, exposurecat)
  sum(xtab)
}
# n's pooled by assay and beach ---------------------------------------
# Analysis N for each assay: subjects with non-missing coliphage presence
n3.entero35.fmc = regN(all$gici3[!is.na(all$fmc.pres)],
                       all$entero35[!is.na(all$fmc.pres)])
n3.entero35.fpc = regN(all$gici3[!is.na(all$fpc.pres)],
                       all$entero35[!is.na(all$fpc.pres)])
# pooled n's by risk level---------------------------------------
# F- coliphage (fmc), split by high/low risk beach conditions
data=all[!is.na(all$fmc.pres),]
data.high=subset(data,data$risk=="High")
n3.entero35.fmc.high = regN(data.high$gici3,data.high$entero35)
data.low=subset(data,data$risk=="Low")
n3.entero35.fmc.low = regN(data.low$gici3,data.low$entero35)
# F+ coliphage (fpc), split by high/low risk beach conditions
data=all[!is.na(all$fpc.pres),]
data.high=subset(data,data$risk=="High")
n3.entero35.fpc.high = regN(data.high$gici3,data.high$entero35)
data.low=subset(data,data$risk=="Low")
n3.entero35.fpc.low = regN(data.low$gici3,data.low$entero35)
# --------------------------------------
# Estimates pooled across beach
# (can't use the mpreg fn because we
# need the actual glm returned object
# for the LR tests)
# 3-day GI illness (gici3; header previously said 10-day)
# --------------------------------------
# f- coliphage --------------------------------
# Log-link Poisson regression: coefficients are log prevalence ratios.
# cl() (from 0-base-functions.R) gives household-clustered robust SEs.
all.fit3.entero.fmc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=all[!is.na(all$entero35) &
    !is.na(all$fmc.pres),])
# NOTE(review): `all[cond]` below appears to be missing the row comma
# (`all[cond, ]`) -- confirm against what cl() expects for its data argument
all.VC3.entero.fmc <- cl(all[!is.na(all$entero35) & !is.na(all$fmc.pres)],
    fm=all.fit3.entero.fmc, cluster=
    all$hhid[!is.na(all$entero35) & !is.na(all$fmc.pres)])
all.overall.fit3.entero.fmc <- NULL # (placeholder comment removed); see next line
overall.fit3.entero.fmc <- coeftest(all.fit3.entero.fmc, all.VC3.entero.fmc)
summary(all.fit3.entero.fmc)
overall.fit3.entero.fmc
aic.entero.fmc=AIC(all.fit3.entero.fmc)
# f+ coliphage --------------------------------
all.fit3.entero.fpc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=all[!is.na(all$entero35) &
    !is.na(all$fpc.pres),])
# NOTE(review): same missing row comma pattern as above -- verify
all.VC3.entero.fpc <- cl(all[!is.na(all$entero35) & !is.na(all$fpc.pres)],
    fm=all.fit3.entero.fpc, cluster=
    all$hhid[!is.na(all$entero35) & !is.na(all$fpc.pres)])
overall.fit3.entero.fpc <- coeftest(all.fit3.entero.fpc, all.VC3.entero.fpc)
summary(all.fit3.entero.fpc)
overall.fit3.entero.fpc
aic.entero.fpc=AIC(all.fit3.entero.fpc)
# --------------------------------------
# Estimates pooled across beach and stratified by conditions
# (can't use the mpreg fn because we
# need the actual glm returned object
# for the LR tests)
# 3-day GI illness
# all beaches ----------------
# F- coliphage #####################
# high risk conditions --------------------------------
# Restrict to rows with both indicator and F- data, then fit within stratum;
# cl() returns a household-clustered robust vcov used by coeftest().
data=all[!is.na(all$entero35) & !is.na(all$fmc.pres),]
data.high=subset(data,data$risk=="High")
all.fit3.entero.high.fmc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=data.high)

all.VC3.entero.high.fmc <- cl(data.high,fm=all.fit3.entero.high.fmc, cluster=data.high$hhid)
overall.fit3.entero.high.fmc <- coeftest(all.fit3.entero.high.fmc, all.VC3.entero.high.fmc)
summary(all.fit3.entero.high.fmc)
overall.fit3.entero.high.fmc
aic.entero.high.fmc=AIC(all.fit3.entero.high.fmc)

# low risk conditions --------------------------------
data.low=subset(data,data$risk=="Low")
all.fit3.entero.low.fmc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=data.low)

all.VC3.entero.low.fmc <- cl(data.low,fm=all.fit3.entero.low.fmc, cluster=data.low$hhid)
overall.fit3.entero.low.fmc <- coeftest(all.fit3.entero.low.fmc, all.VC3.entero.low.fmc)
summary(all.fit3.entero.low.fmc)
overall.fit3.entero.low.fmc
aic.entero.low.fmc=AIC(all.fit3.entero.low.fmc)

# F+ coliphage #####################
# high risk conditions --------------------------------
# Same structure as above, but restricted to rows with F+ coliphage data.
data=all[!is.na(all$entero35) & !is.na(all$fpc.pres),]
data.high=subset(data,data$risk=="High")
all.fit3.entero.high.fpc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=data.high)

all.VC3.entero.high.fpc <- cl(data.high,fm=all.fit3.entero.high.fpc, cluster=data.high$hhid)
overall.fit3.entero.high.fpc <- coeftest(all.fit3.entero.high.fpc, all.VC3.entero.high.fpc)
summary(all.fit3.entero.high.fpc)
overall.fit3.entero.high.fpc
aic.entero.high.fpc=AIC(all.fit3.entero.high.fpc)

# low risk conditions --------------------------------
data.low=subset(data,data$risk=="Low")
all.fit3.entero.low.fpc <- glm(gici3~entero35+agecat+female+racewhite+gichron+anim_any+gicontactbase+
    rawfood+beach,family=poisson(link="log"),data=data.low)

all.VC3.entero.low.fpc <- cl(data.low,fm=all.fit3.entero.low.fpc, cluster=data.low$hhid)
overall.fit3.entero.low.fpc <- coeftest(all.fit3.entero.low.fpc, all.VC3.entero.low.fpc)
summary(all.fit3.entero.low.fpc)
overall.fit3.entero.low.fpc
aic.entero.low.fpc=AIC(all.fit3.entero.low.fpc)
# --------------------------------------
# save the results
# exclude glm objects and data frames
# (they are really large)
# --------------------------------------
# Persist sample sizes, robust coefficient tables, vcov matrices and
# AICs for downstream table/figure scripts.
save(

  n3.entero35.fmc,n3.entero35.fpc,

  n3.entero35.fmc.high,n3.entero35.fmc.low,
  n3.entero35.fpc.high,n3.entero35.fpc.low,

  overall.fit3.entero.fmc,overall.fit3.entero.fpc,

  overall.fit3.entero.high.fmc,overall.fit3.entero.high.fpc,
  overall.fit3.entero.low.fmc,overall.fit3.entero.low.fpc,

  all.VC3.entero.fmc,all.VC3.entero.fpc,
  all.VC3.entero.high.fmc, all.VC3.entero.low.fmc,
  all.VC3.entero.high.fpc, all.VC3.entero.low.fpc,

  aic.entero.fmc,aic.entero.fpc,
  aic.entero.low.fmc,aic.entero.low.fpc,
  aic.entero.high.fmc,aic.entero.high.fpc,

  file="~/Documents/CRG/coliphage/results/rawoutput/regress-3day-body-entero-pool.Rdata"
)
|
063e35087be6e671c78dd4123b335cf4622c2c61
|
f8c3946a0bc31d2830bf2863aa46b42325807015
|
/data/cibm.utils/examples/read-abk.R
|
69e398877ad5cade305525762184a7cba1ceb7ce
|
[] |
no_license
|
vfpimenta/corruption-profiler
|
87c2fde7025cb54341af03eba12007a0267b12b3
|
1366406ce21b05d88b72174aef9bb987921eb7e1
|
refs/heads/master
| 2021-01-18T04:07:58.830460
| 2018-06-01T22:03:37
| 2018-06-01T22:03:37
| 85,761,525
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
read-abk.R
|
# Example: reading an ABK file that ships with the cibm.utils package.
library("cibm.utils")

# Locate the bundled example data file, then read it with read.abk().
abk_path <- system.file("extdata", "2695.abk", package = "cibm.utils")
a2695 <- read.abk(abk_path)

# Preview a few rows and columns -- note that the values are numeric.
a2695[c(1:6, 100), 1:9]
|
f2a3a90c576466b115f935d19abc1a8870499b5f
|
bf8e155d3f6a835082c7a17d9c9fa2a8c4e0331e
|
/R/SavePlotDialog.R
|
039da88ddaf893388c4d4e1930474377eceb6259
|
[] |
no_license
|
sebkopf/dfv
|
94ba5e2fab9013e42601eeff4ac8865a9969f1bc
|
55fcc88a81e26ef039ef3dd9a6707386b306e02b
|
refs/heads/master
| 2016-09-02T02:39:00.117472
| 2015-12-10T20:45:57
| 2015-12-10T20:45:57
| 14,060,261
| 0
| 0
| null | 2014-02-04T07:00:04
| 2013-11-02T03:40:37
|
R
|
UTF-8
|
R
| false
| false
| 5,777
|
r
|
SavePlotDialog.R
|
#' @include ModalDialog.R
NULL

# GUI class for the save-plot dialog; inherits the modal dialog GUI.
SavePlotDialogGui <- setClass("SavePlotDialogGui", contains="ModalDialogGui")

# Build the dialog layout: a folder tree (top) and an options pane (bottom),
# separated by a resizable paned group. Widgets are registered via
# setWidgets() so the module can read/write them later.
setMethod("makeMainGui", "SavePlotDialogGui", function(gui, module) {
  mainGrp <- ggroup(horizontal=FALSE, cont=getWinGroup(gui, module), spacing=0, expand=TRUE)
  treeGrp <- ggroup(horizontal=FALSE, expand=TRUE)
  optionsGrp <- gframe("Options", horizontal=FALSE)
  setWidgets(gui, module, tbPane = gpanedgroup(treeGrp, optionsGrp, container=mainGrp, expand=TRUE, horizontal=FALSE))

  # options group
  detailsGrp <- glayout(cont = optionsGrp, expand = TRUE)
  detailsGrpGTK<-getToolkitWidget(detailsGrp) # gtk object
  detailsGrpGTK['border-width']<-5 # border width

  detailsGrp[(i<-1), 1] <- "Filename:"
  detailsGrp[i, 2] <- setWidgets(gui, module, filename = gedit("", cont=detailsGrp))
  detailsGrp[i, 3] <- setWidgets(gui, module, extension = glabel("", cont=detailsGrp))
  detailsGrp[(i<-i+1), 1] <- "Saved formats:"
  detailsGrp[i, 2:3, expand=TRUE] <- (tableGrp <- ggroup(cont = detailsGrp, expand = TRUE))

  # options table
  getElements(gui, module, 'optionsTable')$makeGui(tableGrp, selectionHandler = function(...) {
    # get values of width and height in table and load the same name 'width' and 'height' text widgets with it
    getModule(gui, module)$loadWidgets(
      as.list(getElements(gui, module, 'optionsTable')$getSelectedValues(c('width', 'height')))
    )
  })

  detailsGrp[(i<-i+1), 1] <- "Height [inches]:"
  detailsGrp[i, 2] <- setWidgets(gui, module, height = gedit("", cont=detailsGrp, coerce.with=as.numeric))
  detailsGrp[(i<-i+1), 1] <- "Width [inches]:"
  detailsGrp[i, 2] <- setWidgets(gui, module, width = gedit("", cont=detailsGrp, coerce.with=as.numeric))

  # directory selection
  # Lists subfolders of the current path (relative to getwd()) for the
  # gtree browser and shows the active folder in the info bar.
  fileBrowser.items <- function(path = NULL, user.data=NULL) {
    if (is.null(path))
      path <- getwd()
    else
      path <- file.path(getwd(), do.call("file.path", args=as.list(path)))
    showInfo(gui, module, paste("Folder:", path), timer=NULL, okButton=FALSE)
    # keep only directories (isdir == TRUE) from the listing
    folders <- subset(data.frame(
      Folder=dir(path, include.dirs=TRUE),
      Path=dir(path, include.dirs=TRUE, full.names=TRUE),
      file.info(dir(path, include.dirs=TRUE, full.names=TRUE))[,c(1:2)],
      stringsAsFactors=FALSE), isdir==TRUE)
    # figure out number of subdirectories (drives the tree expanders)
    folders$Subdirs <- logical(nrow(folders))
    if (nrow(folders) > 0)
      folders$Subdirs <- apply(folders, 1, function(x) length(which(file.info(dir(x[2], full.names=T))$isdir)))
    return(folders[c("Folder", "Path", "Subdirs")])
  }
  # check for subfolders
  fileBrowser.hasOffspring <- function(children, user.data=NULL, ...) return(children$Subdirs > 0) # which items have subdirectories
  fileBrowser.icons <- function(children,user.data=NULL, ...) return(rep("gtk-directory", length=nrow(children))) # FIXME: could implement some indicator which folders have already been used

  # tree
  tree <- gtree(fileBrowser.items, fileBrowser.hasOffspring, chosencol=2, icon.FUN = fileBrowser.icons, container=treeGrp, expand=TRUE)
  setWidgets(gui, module, plotsPathIndex = tree) # link tree to plotsPathIndex

  # tree click handler: remember the selected folder, falling back to getwd()
  addHandlerClicked(tree, handler=function(h,...) {
    if (!is.null(val <- svalue(tree)))
      setData(gui, module, plotsPath = val)
    else
      setData(gui, module, plotsPath = getwd()) # set back to working directory
    showInfo(gui, module, paste("Folder: ", getData(gui, module, 'plotsPath')), timer=NULL, okButton=FALSE)
  })
})
# Modal dialog for saving plots to PDF.
# Extends ModalDialog with a directory tree, a filename entry and a table
# of previously used width x height presets.
SavePlotDialog <- setRefClass(
  'SavePlotDialog',
  contains = 'ModalDialog',
  methods = list(

    # Constructor: apply dialog-specific settings after the parent init.
    initialize = function(gui = new("SavePlotDialogGui"), ...){
      callSuper(gui = gui, ...)
      ### overwrite default settings for SavePlotDialog
      setSettings(
        windowSize = c(450, 550),
        windowTitle = "Saving to PDF ...",
        ok.icon = "gtk-save", # overwrite
        ok.label = "Save",
        ok.tooltip = "Save PDF(s).",
        protect = TRUE
      )
      # new option (not protected, can be overwritten by user preference)
      setSettings(tbPane = 0.4)
      # default data: start in the working directory with an 8x6 inch PDF
      setData(
        plotsPath = getwd(),
        plotsPathIndex = integer(0),
        filename = "",
        extension = ".pdf",
        width = 8,
        height = 6
      )
    },

    # Build the GUI: create the DataTable of size presets, then delegate
    # to the parent makeGui().
    makeGui = function() {
      options <- DataTable$new()
      options$setData(
        frame = data.frame( # all the options for formats
          width = c(4, 8, 16),
          height = c(4, 6, 12),
          Dimensions = '',
          stringsAsFactors = FALSE),
        selectedRows = 2
      )
      # raw width/height columns are hidden; only Dimensions is displayed
      options$setSettings(invisibleColumns = c('height', 'width'))
      setElements(optionsTable = options)
      callSuper()
    },

    # Refresh the human-readable Dimensions column before showing the GUI.
    loadGui = function() {
      getElements('optionsTable')$setData(frame =
        mutate(getElements('optionsTable')$getData('frame'),
               Dimensions = paste0(height, "x", width, " (height: ", height,
                                   " inches, width: ", width, " inches)")))
      callSuper()
    },

    # Persist GUI state; remember any new width x height combination by
    # appending it to the presets table and selecting it.
    saveGui = function() {
      callSuper()
      # save width and height in options only if this combination is new
      if (nrow(subset(getElements('optionsTable')$getData('frame'),
                      width == data$width & height == data$height)) == 0) {
        # fixed typo in debug message ("widht" -> "width")
        dmsg(class(.self), ": adding new width x height option to table")
        getElements('optionsTable')$setData(
          selectedRows = nrow(getElements('optionsTable')$getData('frame')) + 1,
          frame = rbind(
            getElements('optionsTable')$getData('frame'),
            data.frame(width = data$width, height = data$height,
                       Dimensions = '', stringsAsFactors = F))
        )
      }
    }
  )
)
# Testing
#t <- SavePlotDialog$new()$makeGui()
|
65d0a0ff53a8f043af056d0a6812bfde77acc473
|
0ce98d31209c407007715713dd8e20fdcfdb3fe0
|
/R/print.CatTable.R
|
0233144489b85795aa31e2a29037b732b728b3a6
|
[] |
no_license
|
kaz-yos/tableone
|
c5cd484e41540b7198680a26abd4ff4cd15708b8
|
e2e7cccdc8b9d51c8cb2bb1da91f4663bed5e128
|
refs/heads/master
| 2023-05-12T22:26:52.823370
| 2022-04-15T14:24:00
| 2022-04-15T14:27:35
| 16,124,394
| 200
| 48
| null | 2023-05-06T23:59:15
| 2014-01-22T01:06:44
|
R
|
UTF-8
|
R
| false
| false
| 16,668
|
r
|
print.CatTable.R
|
##' Format and print \code{CatTable} class objects
##'
##' \code{print} method for the \code{CatTable} class objects created by \code{\link{CreateCatTable}} function.
##'
##' @param x Object returned by \code{\link{CreateCatTable}} function.
##' @param digits Number of digits to print in the table.
##' @param pDigits Number of digits to print for p-values (also used for standardized mean differences).
##' @param quote Whether to show everything in quotes. The default is FALSE. If TRUE, everything including the row and column names are quoted so that you can copy it to Excel easily.
##' @param missing Whether to show missing data information.
##' @param explain Whether to add explanation to the variable names, i.e., (\%) is added to the variable names when percentage is shown.
##' @param printToggle Whether to print the output. If FALSE, no output is created, and a matrix is invisibly returned.
##' @param noSpaces Whether to remove spaces added for alignment. Use this option if you prefer to align numbers yourself in other software.
##' @param format The default is "fp" frequency (percentage). You can also choose from "f" frequency only, "p" percentage only, and "pf" percentage (frequency).
##' @param showAllLevels Whether to show all levels. FALSE by default, i.e., for 2-level categorical variables, only the higher level is shown to avoid redundant information.
##' @param cramVars A character vector to specify the two-level categorical variables, for which both levels should be shown in one row.
##' @param dropEqual Whether to drop " = second level name" description indicating which level is shown for two-level categorical variables.
##' @param test Whether to show p-values. TRUE by default. If FALSE, only the numerical summaries are shown.
##' @param exact A character vector to specify the variables for which the p-values should be those of exact tests. By default all p-values are from large sample approximation tests (chisq.test).
##' @param smd Whether to show standardized mean differences. FALSE by default. If there are more than one contrasts, the average of all possible standardized mean differences is shown. For individual contrasts, use \code{summary}.
##' @param CrossTable Whether to show the cross table objects held internally using gmodels::CrossTable function. This will give an output similar to the PROC FREQ in SAS.
##' @param formatOptions A list of options, which will be passed to \code{\link[base]{format}}. Can be used to modify the \code{big.mark}, \code{decimal.mark}, \code{big.interval} etc. The default is \code{list(scientific = FALSE)}. The options digits, nsmall, justify and trim are not available. (Experimental)
##' @param ... For compatibility with generic. Ignored.
##' @return A matrix object containing what you see is also invisibly returned. This can be assinged a name and exported via \code{write.csv}.
##' @author Kazuki Yoshida, Alexander Bartel
##' @seealso
##' \code{\link{CreateTableOne}}, \code{\link{CreateCatTable}}, \code{\link{summary.CatTable}}
##' @examples
##'
##' ## Load
##' library(tableone)
##'
##' ## Load Mayo Clinic Primary Biliary Cirrhosis Data
##' library(survival)
##' data(pbc)
##' ## Check variables
##' head(pbc)
##'
##' ## Create an overall table for categorical variables
##' catVars <- c("status","ascites","hepato","spiders","edema","stage")
##' catTableOverall <- CreateCatTable(vars = catVars, data = pbc)
##'
##' ## Simply typing the object name will invoke the print.CatTable method,
##' ## which will show the sample size, frequencies and percentages.
##' ## For 2-level variables, only the higher level is shown for simplicity.
##' catTableOverall
##'
##' ## If you need to show both levels for some 2-level factors, use cramVars
##' print(catTableOverall, cramVars = "hepato")
##'
##' ## Use the showAllLevels argument to see all levels for all variables.
##' print(catTableOverall, showAllLevels = TRUE)
##'
##' ## You can choose form frequencies ("f") and/or percentages ("p") or both.
##' ## "fp" frequency (percentage) is the default. Row names change accordingly.
##' print(catTableOverall, format = "f")
##' print(catTableOverall, format = "p")
##'
##' ## To further examine the variables, use the summary.CatTable method,
##' ## which will show more details.
##' summary(catTableOverall)
##'
##' ## The table can be stratified by one or more variables
##' catTableBySexTrt <- CreateCatTable(vars = catVars,
##'                                    strata = c("sex","trt"), data = pbc)
##'
##' ## print now includes p-values which are by default calculated by chisq.test.
##' ## It is formatted at the decimal place specified by the pDigits argument
##' ## (3 by default). It does <0.001 for you.
##' catTableBySexTrt
##'
##' ## The exact argument toggles the p-values to the exact test result from
##' ## fisher.test. It will show which ones are from exact tests.
##' print(catTableBySexTrt, exact = "ascites")
##'
##' ## summary now includes both types of p-values
##' summary(catTableBySexTrt)
##'
##' ## If your work flow includes copying to Excel and Word when writing manuscripts,
##' ## you may benefit from the quote argument. This will quote everything so that
##' ## Excel does not mess up the cells.
##' print(catTableBySexTrt, exact = "ascites", quote = TRUE)
##'
##' ## If you want to center-align values in Word, use noSpaces option.
##' print(catTableBySexTrt, exact = "ascites", quote = TRUE, noSpaces = TRUE)
##'
##' @export
print.CatTable <-
function(x,                      # CatTable object
         digits = 1, pDigits = 3, # Number of digits to show
         quote        = FALSE,    # Whether to show quotes

         missing      = FALSE,    # Whether to append a % missing column
         explain      = TRUE,     # Whether to show explanation in variable names
         printToggle  = TRUE,     # Whether to print the result visibly
         noSpaces     = FALSE,    # Whether to remove spaces for alignments

         format       = c("fp","f","p","pf")[1], # Format f_requency and/or p_ercent
         showAllLevels = FALSE,
         cramVars     = NULL,     # variables to be crammed into one row
         dropEqual    = FALSE,    # Do not show " = second level" for two-level variables

         test         = TRUE,     # Whether to add p-values
         exact        = NULL,     # Which variables should be tested with exact tests

         smd          = FALSE,    # Whether to add standardized mean differences

         CrossTable   = FALSE,    # Whether to show gmodels::CrossTable

         formatOptions = list(scientific = FALSE), # Options for formatting

         ...) {

    ## x and ... required to be consistent with generic print(x, ...)
    CatTable <- x

### Check the data structure first

    ## CatTable has a strata(list)-variable(list)-table(dataframe) structure
    ## Get the position of the non-null element
    logiNonNullElement <- !sapply(CatTable, is.null)
    ## Stop if all elements are null.
    if (sum(logiNonNullElement) == 0) {stop("All strata are null strata. Check data.")}
    ## Get the first non-null position
    posFirstNonNullElement <- which(logiNonNullElement)[1]
    ## Save variable names using the first non-null element
    varNames <- names(CatTable[[posFirstNonNullElement]])
    ## Check the number of variables (list length)
    nVars <- length(varNames)

    ## Returns a numeric vector: 1 for approx test variable; 2 for exact test variable
    exact <- ModuleHandleDefaultOrAlternative(switchVec       = exact,
                                              nameOfSwitchVec = "exact",
                                              varNames        = varNames)

    ## Check format argument. If it is broken, choose "fp" for frequency (percent)
    ## NOTE(review): '|' is elementwise; '||' would be more idiomatic for this
    ## scalar condition, but behavior is the same for length-1 inputs.
    if (!length(format) == 1 | !format %in% c("fp","f","p","pf")) {
        warning("format only accepts one of fp, f, p, or pf. Choosing fp.")
        format <- "fp"
    }

    ## Set FormatOptions, delete reserved options
    formatOptions$digits  <- digits
    formatOptions$nsmall  <- digits
    formatOptions$justify <- NULL
    formatOptions$trim    <- NULL

    ## Obtain the strata sizes in a character vector. This has to be obtained from the original data
    ## Added as the top row later
    strataN <- sapply(CatTable,
                      FUN = function(stratum) { # loop over strata
                          ## each stratum is a list of one data frame for each variable
                          ## Obtain n from all variables and all levels (list of data frames)
                          n <- unlist(sapply(stratum, getElement, "n"))
                          ## Pick the first non-null element
                          n[!is.null(n)][1]
                          ## Convert NULL to 0
                          n <- ifelse(is.null(n), "0", n)
                          ## Format n (sample size is an integer; no decimals)
                          formatOptions$nsmall <- 0
                          n <- do.call(base::format, c(list(x = n,
                                                            trim = TRUE),
                                                       formatOptions
                                                       )
                                       )
                          ## return as string
                          as.character(n)
                      },
                      simplify = TRUE) # vector with as many elements as strata

### Formatting for printing

    ## Variables to format using digits option
    ## Full list c("n","miss","p.miss","freq","percent","cum.percent")
    varsToFormat <- c("p.miss","percent","cum.percent")

    ## Obtain collpased result by looping over strata
    ## within each stratum, loop over variables
    CatTableCollapsed <-
        ModuleCatFormatStrata(CatTable      = CatTable,
                              digits        = digits,
                              varsToFormat  = varsToFormat,
                              cramVars      = cramVars,
                              dropEqual     = dropEqual,
                              showAllLevels = showAllLevels,
                              formatOptions = formatOptions)

### Obtain the original column width in characters for alignment in print.TableOne
    ## Name of the column to keep
    widthCol <- c("nCharFreq","nCharFreq","nCharPercent","nCharPercent")[format == c("fp","f","p","pf")]

    vecColWidths <- sapply(CatTableCollapsed,
                           FUN = function(LIST) {
                               ## Get the width of the column (freq or percent, whichever comes left)
                               out <- attributes(LIST)[widthCol]
                               ## Return NA if null
                               if (is.null(out)) {
                                   return(NA)
                               } else {
                                   return(as.numeric(out))
                               }
                           },
                           simplify = TRUE)

    ## Fill the null element using the first non-null element's dimension (Make sure to erase data)
    CatTableCollapsed[!logiNonNullElement] <- CatTableCollapsed[posFirstNonNullElement]
    ## Access the filled-in data frames, and erase them with place holders.
    for (i in which(!logiNonNullElement)) {
        ## Replace all elements with a place holder variable by variable
        CatTableCollapsed[[i]][] <- lapply(CatTableCollapsed[[i]][],
                                           function(var) {
                                               var <- rep("-", length(var))
                                           })
    }

    ## Choose the column name for the right format
    nameResCol <- c("freqPer","freq","percent","perFreq")[format == c("fp","f","p","pf")]

    ## Create output matrix without variable names with the right format
    out <- do.call(cbind, lapply(CatTableCollapsed, getElement, nameResCol))
    out <- as.matrix(out)

    ## Add column names if multivariable stratification is used. (No column names added automatically)
    if (length(attr(CatTable, "dimnames")) > 1) {
        colnames(out) <- ModuleCreateStrataNames(CatTable)
    }

    ## Set the variables names
    rownames(out) <- CatTableCollapsed[[posFirstNonNullElement]][,"var"]
    ## Get positions of rows with variable names
    ## Used for adding p values in place
    logiNonEmptyRowNames <- CatTableCollapsed[[posFirstNonNullElement]][, "firstRowInd"] != ""

    ## Add p-values when requested and available
    if (test & !is.null(attr(CatTable, "pValues"))) {

        ## Pick test types used (used for annonation)
        testTypes <- c("","exact")[exact]

        ## Pick the p-values requested, and format like <0.001
        pVec <- ModulePickAndFormatPValues(TableObject   = CatTable,
                                           switchVec     = exact,
                                           pDigits       = pDigits,
                                           formatOptions = formatOptions)

        ## Create an empty p-value column and test column
        out <- cbind(out,
                     p = rep("", nrow(out))) # Column for p-values
        ## Put the values at the non-empty positions
        out[logiNonEmptyRowNames,"p"] <- pVec

        ## Create an empty test type column, and add test types
        out <- cbind(out,
                     test = rep("", nrow(out))) # Column for test types
        ## Put the test types  at the non-empty positions (all rows in continuous!)
        out[logiNonEmptyRowNames,"test"] <- testTypes
    }

    ## Add SMDs when requested and available
    if (smd & !is.null(attr(CatTable, "smd"))) {

        ## Create an empty column
        out <- cbind(out,
                     SMD = rep("", nrow(out))) # Column for p-values
        ## Put the values at the non-empty positions
        out[logiNonEmptyRowNames,"SMD"] <-
            ModuleFormatPValues(attr(CatTable, "smd")[,1],
                                pDigits       = pDigits,
                                formatOptions = formatOptions)
    }

    ## Add percentMissing when requested and available
    if (missing & !is.null(attr(CatTable, "percentMissing"))) {

        ## Create an empty column
        out <- cbind(out,
                     Missing = rep("", nrow(out))) # Column for p-values
        ## Put the values at the non-empty positions
        out[logiNonEmptyRowNames,"Missing"] <- ModuleFormatPercents(attr(CatTable, "percentMissing"),
                                                                    digits = 1, formatOptions = formatOptions)
    }

    ## Add freq () explanation if requested
    if (explain) {
        ## Choose the format of the explanation string
        explainString <- c(" (%)", "", " (%)", " % (freq)")[format == c("fp","f","p","pf")]
        ## Only for rows with row names
        rownames(out)[logiNonEmptyRowNames] <- paste0(rownames(out)[logiNonEmptyRowNames],
                                                      explainString)
    }

    ## Keep column names (strataN does not have correct names
    ## if stratification is by multiple variables)
    outColNames <- colnames(out)
    ## rbind sample size row, padding necessary "" for p value, etc
    nRow <- c(strataN, rep("", ncol(out) - length(strataN)))
    out <- rbind(n = nRow, out)
    ## Put back the column names (overkill for non-multivariable cases)
    colnames(out) <- outColNames

    ## Add level names if showAllLevels is TRUE.
    ## This adds the level column to the left, thus, after nRow addition.
    ## Need come after column naming.
    if (showAllLevels) {
        out <-
            cbind(level = c("", CatTableCollapsed[[posFirstNonNullElement]][,"level"]),
                  out)
    }

    ## Add stratification information to the column header depending on the dimension
    names(dimnames(out)) <- ModuleReturnDimHeaders(CatTable)

    ## Remove spaces if asked.
    out <- ModuleRemoveSpaces(mat = out, noSpaces = noSpaces)

    ## Modular version of quote/print toggle.
    out <- ModuleQuoteAndPrintMat(matObj = out,
                                  quote = quote, printToggle = printToggle)

    ## Print CrossTable() if requested
    if (CrossTable) {
        junk <- lapply(attributes(CatTable)$xtabs, gmodels::CrossTable)
    }

    ## Add attributes for column widths in characters
    attributes(out) <- c(attributes(out),
                         list(vecColWidths = vecColWidths,
                              ## Add one FALSE for sample size row
                              logiNameRows = c(FALSE, logiNonEmptyRowNames)))

    ## return a matrix invisibly
    return(invisible(out))
}
|
3eb945a247d53fa56d9afbf9436e1fc6261c199a
|
687a8bdf3119ef7a13449fa4a721f2c894472acb
|
/finalpapersubmission/finalcoderepo/ubiproject.R
|
c68a8236eae335ee658253ce92c366cc959fbf03
|
[] |
no_license
|
zliu2000/econometrics_final_project
|
abbecc609bbd39225d373a4b358e1ee0e706b271
|
0f3bcefea472b02ef49c3c3b99d2606d47294d8e
|
refs/heads/master
| 2022-11-26T19:05:27.537620
| 2020-07-26T03:09:24
| 2020-07-26T03:09:24
| 282,562,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,491
|
r
|
ubiproject.R
|
# ECON 21030 Econometrics - Honors
# Spring 2020, Final Project
# This version: 05/27/2020
# Author: Zhengyang (Jim) Liu
################################################################################
################################ (0) ENVIRONMENT ###############################
################################################################################
# Project directory layout: raw inputs under raw/, outputs under out/
root <- "/Users/jimliu/Desktop/econ2103"
maindir <- paste0(root, "/final_project")
outdir <- paste0(maindir, "/out")
rawdir <- paste0(maindir, "/raw")

#libraries
library(MASS)
library(AER)
library(sandwich)
library(lmtest)
library(tidyverse)
library(haven)
library(plm)
library(xlsx)
library(readxl)
library(miceadds)
library(survey)
library(ipumsr)
library(Synth)
library(dplyr)

################################################################################
#DATA CLEANING
################################################################################
# Read inputs from the raw data directory
setwd(rawdir)
# ------------------------------------------------------------------
# Read the CPS extract (IPUMS) and build a state-by-year panel of the
# female employment ratio plus covariate shares (industry sectors,
# age groups, education, female share of the sample).
# ------------------------------------------------------------------
#reading in data using ipumsr package
ddi <- read_ipums_ddi("cps_00004.xml")
data <- read_ipums_micro(ddi)

# keep only the variables used below
myvars <- c("YEAR", "MONTH", "STATEFIP", "PERNUM", "AGE", "SEX", "RACE",
            "EMPSTAT", "LABFORCE", "IND1950", "UHRSWORKT", "WKSTAT", "EDUC")
data <- data[myvars]

# Helper: count rows of `subdata` by YEAR, rename the count column to
# `name`, and left-join onto the running `grouped` table of yearly counts.
add_yearly_count <- function(grouped, subdata, name) {
  cnt <- count(subdata, YEAR)
  names(cnt)[names(cnt) == "n"] <- name
  left_join(grouped, cnt, by = c("YEAR" = "YEAR"))
}

# iterate over state FIPS codes (gaps are codes not assigned to states)
for (i in c(1, 2, 4:6, 8:13, 15:42, 44:51, 53:56)) {
  statedata <- subset(data, STATEFIP == i)
  femstatedata <- subset(data, STATEFIP == i & SEX == 2)

  # total sample sizes (for reference; shares use per-year counts below)
  population <- nrow(statedata)
  fempopulation <- nrow(femstatedata)

  # base table: total observations per year for this state
  grouped <- count(statedata, YEAR)

  # Industry sector shares (IND1950 groupings):
  # sec1: agriculture/forestry/fishing, mining, construction
  grouped <- add_yearly_count(grouped, subset(statedata, 100 <= IND1950 & IND1950 < 300), "sec1")
  # sec2: manufacturing
  grouped <- add_yearly_count(grouped, subset(statedata, 300 <= IND1950 & IND1950 < 400), "sec2")
  # sec3: transportation, communications, utilities, wholesale/retail trade
  grouped <- add_yearly_count(grouped, subset(statedata, 400 <= IND1950 & IND1950 < 700), "sec3")
  # sec4: finance, insurance, real estate, business/repair/personal services
  grouped <- add_yearly_count(grouped, subset(statedata, 700 <= IND1950 & IND1950 < 850), "sec4")
  # sec5: entertainment/recreation, professional services, public admin,
  # active duty military (EMPSTAT == 01 folded in, as in the original)
  grouped <- add_yearly_count(grouped,
                              subset(statedata, (850 <= IND1950 & IND1950 < 937) | EMPSTAT == 01),
                              "sec5")

  # Age-group shares: 16-19, 20-24, 25-64, 65+
  grouped <- add_yearly_count(grouped, subset(statedata, 16 <= AGE & AGE <= 19), "age1")
  grouped <- add_yearly_count(grouped, subset(statedata, 20 <= AGE & AGE <= 24), "age2")
  grouped <- add_yearly_count(grouped, subset(statedata, 25 <= AGE & AGE <= 64), "age3")
  # BUG FIX: the original counted the 16-19 subset (age1) again here,
  # so the 65+ share was wrong; count the 65+ subset instead.
  grouped <- add_yearly_count(grouped, subset(statedata, 65 <= AGE), "age4")

  # Education shares: less than HS / HS or equivalent / some college+
  grouped <- add_yearly_count(grouped, subset(statedata, 0 < EDUC & EDUC < 73), "edu1")
  grouped <- add_yearly_count(grouped, subset(statedata, EDUC == 73), "edu2")
  grouped <- add_yearly_count(grouped, subset(statedata, 80 <= EDUC & EDUC < 999), "edu3")

  # female share of the state sample
  grouped <- add_yearly_count(grouped, femstatedata, "fem")

  covariates <- c("sec1", "sec2", "sec3", "sec4", "sec5",
                  "age1", "age2", "age3", "age4",
                  "edu1", "edu2", "edu3", "fem")
  # turn raw counts into shares of the yearly state sample
  grouped[covariates] <- grouped[covariates] / grouped$n
  grouped$stateid <- i

  # Outcome of interest: female employment ratio per year
  # (EMPSTAT 10-19 = employed; denominator is all sampled women that year).
  # NOTE(review): relies on count() returning years in the same order for
  # numerator and denominator -- holds because count() sorts by YEAR.
  femp <- subset(femstatedata, 10 <= EMPSTAT & EMPSTAT < 20)
  fempc <- count(femp, YEAR)
  fempc$n <- fempc$n / count(femstatedata, YEAR)$n
  names(fempc)[names(fempc) == "n"] <- "femp"
  grouped <- left_join(grouped, fempc, by = c("YEAR" = "YEAR"))

  # accumulate into the full state panel
  if (i == 1) {
    statepanel <- grouped
  } else {
    statepanel <- rbind(statepanel, grouped)
  }
}
#synthetic control package
#https://www.rdocumentation.org/packages/Synth/versions/1.1-5/topics/synth
#https://www.rdocumentation.org/packages/Synth/versions/1.1-5/topics/dataprep
#statepanel to be balanced: keep only years observed for every state
statepanelfoo <- make.pbalanced(as.data.frame(statepanel), balance.type="shared.times", index=c("stateid","YEAR"))
predictors <-c("sec1", "sec2", "sec3", "sec4", "sec5", "age1", "age2", "age3", "age4", "edu1","edu2","edu3", "fem")

# Build the Synth input: Alaska (stateid 2) is treated; all other states
# form the donor pool; pre-period 1977-1981 (dividend starts 1982).
dataprep.out<-
  dataprep(
  foo = statepanelfoo,
  predictors = predictors,
  predictors.op = "mean",
  dependent = "femp",
  unit.variable = "stateid",
  time.variable = "YEAR",
  treatment.identifier = 2,
  controls.identifier = c(1,4:6,8:13,15:42,44:51,53:56),
  time.predictors.prior = c(1977:1981),
  time.optimize.ssr = c(1977:1981),
  time.plot = 1977:2020
)

# run synth
synth.out <- synth(data.prep.obj = dataprep.out)

# Get result tables
synth.tables <- synth.tab(
  dataprep.res = dataprep.out,
  synth.res = synth.out
)

# results tables:
print(synth.tables)

# plot results:
# synthetic Alaska vs Alaska
path.plot(synth.res = synth.out,
          dataprep.res = dataprep.out,
          Ylab = c("Female Employment Ratio"),
          Xlab = c("year"),
          Ylim = c(0,1),
          Legend = c("Alaska","synthetic Alaska"),
)

#Weights from synthetic control
#(Maryland)24: .063, #(Hawaii)15:.810 #(Wyoming)56:.127
# NOTE(review): weights are hard-coded from a prior synth run; re-run and
# update if the data change. Also note this weighted sum is applied to ALL
# columns (including stateid/YEAR), relying on identical row order per state.
syncontrol <- subset(statepanelfoo, stateid==24)*.063 +
  .81*subset(statepanelfoo, stateid==15)+.127*subset(statepanelfoo, stateid==56)
#adding dummies for control (synthetic) and treated state (alaska)
syncontrol$alaska <- 0
alaskapanel <- subset(statepanelfoo, stateid==02)
#adding dummies for treated state (alaska)
alaskapanel$alaska <- 1
#combining into one panel
finalpanel <- rbind(alaskapanel, syncontrol)

# DiD dummies: post = years from 1982 onward; treatment = Alaska x post
post <- as.numeric(finalpanel["YEAR"]>=1982)
#adding dummy variables
finalpanel<-cbind(finalpanel, post)
treatment <- as.numeric((finalpanel["alaska"]==1) & (finalpanel["post"]==1))
finalpanel<-cbind(finalpanel, treatment)

outcome <- "femp"
regvariables <- c("alaska","post","treatment", predictors)
# our modeling effort,
# fully parameterized!
f <- as.formula(
  paste(outcome,
        paste(regvariables, collapse = " + "),
        sep = " ~ "))
print(f)

# DiD with covariates; HC1 heteroskedasticity-robust standard errors
reg1 <- lm(f, data=finalpanel)
#storing heteskedastic robust coefficients
reg1c <- coeftest(reg1, vcov = vcovHC(reg1,type="HC1"))
reg1c

#excluding controls
reg2 <- lm(femp~alaska+post+treatment, data=finalpanel)
#storing heteskedastic robust coefficients
reg2c <- coeftest(reg2, vcov = vcovHC(reg2,type="HC1"))
reg2c

# export the analysis panel
setwd(outdir)
write.xlsx(finalpanel, "finalpanel.xlsx")
|
4fad11d42d5ac121d7687b84727db5a5cddad501
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RCPmod/examples/cooks.distance.regimix.Rd.R
|
40a85b66152ed410b4b5e410f3936ac07eb0af70
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 873
|
r
|
cooks.distance.regimix.Rd.R
|
# Auto-extracted help-page example for RCPmod::cooks.distance.regimix.
# The executable part is wrapped in "## Not run" because it is slow.
library(RCPmod)
### Name: cooks.distance.regimix
### Title: Calculates leave-some-out statistics for a regimix object,
###   principally a version of Cook's distance and cross-validated
###   predictive logl
### Aliases: cooks.distance.regimix
### Keywords: misc
### ** Examples
## Not run:
##D #not run as R CMD check complains about the time taken.
##D #This code will take a little while to run (<1 minute on my computer)
##D #For leave-one-out cooks distance, use oosSize=1
##D #for serious use times will need to be larger.
##D system.time({
##D example( regimix);
##D cooksD <- cooks.distance( fm, oosSize=10, times=25)
##D })
##D example( regimix) #will fit a model and store in fm
##D #for serious use times will need to be larger.
##D #For leave-one-out cooks distance, use oosSize=1
##D cooksD <- cooks.distance( fm, oosSize=10, times=5)
## End(Not run)
|
afa875a1d0d24060f05a99a0c3ca0f8b5460de81
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/embed/examples/step_lencode_glm.Rd.R
|
426acca0597b6cf1e00c2b99de74e34b17122d2b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 445
|
r
|
step_lencode_glm.Rd.R
|
# Auto-extracted help-page example for embed::step_lencode_glm:
# likelihood-encode the `location` factor against the `Class` outcome.
library(embed)
### Name: step_lencode_glm
### Title: Supervised Factor Conversions into Linear Functions using
###   Likelihood Encodings
### Aliases: step_lencode_glm tidy.step_lencode_glm
### Keywords: datagen
### ** Examples
library(recipes)
library(dplyr)
data(okc)
# Build a recipe whose location column is replaced by a GLM-based encoding.
glm_est <- recipe(Class ~ age + location, data = okc) %>%
step_lencode_glm(location, outcome = vars(Class))
# See https://tidymodels.github.io/embed/ for examples
|
8383a6d91e029b93a0acd5f4e5b01552cf4a8b77
|
7f7e928be8f8e54f507cf7a7da20389ecc677c8d
|
/GoogleTrendsScaled.R
|
5d1e89a18bd9a2c845ef8f5262e4148598766aba
|
[] |
no_license
|
ielbert/GoogleTrendsScaled
|
51f8bdf0cc0a4221d6a2058c8ba5ee42ba04dcbf
|
bbe7ea7ada93ea7f40f3d2c1ca018a74229e3265
|
refs/heads/master
| 2016-09-06T12:22:59.395034
| 2016-02-03T15:13:15
| 2016-02-03T15:13:15
| 35,433,723
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,390
|
r
|
GoogleTrendsScaled.R
|
######### Using GTrendsR only for more control
library(GTrendsR)
library(RCurl)
library(rjson)
library(reshape2) #For melt
library(plyr) #For ldply
library(dplyr)
# Open an authenticated Google session for Google Trends requests.
# Returns the curl handle, which getTrend() reads from the global
# environment as `ch`.
# NOTE(review): the credential strings are placeholders and must be
# replaced with a real account before use.
initTrend <- function()
{
# Log in via GTrendsR; `ch` carries the session cookies.
ch <- GTrendsR::gconnect("Your Gogole email", "Your Google Password")
# Warm up the session by fetching the Google homepage (result unused).
authenticatePage2 <- getURL("http://www.google.com", curl = ch)
return(ch)
}
# Fetch a Google Trends time series for `term`, optionally queried together
# with a `baseline` term so the two series share a common scale.
#
# term     -- search term or category id (e.g. "/m/0524b41").
# baseline -- optional second term queried in the same request; when
#             non-empty and scale=TRUE, each data column is divided by the
#             baseline column.
# date     -- Trends date-range string (e.g. 'today 6-m').
# geo      -- region code; empty string means worldwide.
# params   -- extra query parameters appended to the request.
# scale    -- whether to rescale results relative to the baseline.
#
# Returns a data frame with columns year, month, day and one column per
# returned series.
# NOTE(review): relies on the global curl handle `ch` created by
# initTrend() — confirm initTrend() ran before calling this.
getTrend <- function(term, baseline = '',
date = 'today 6-m',
geo = '', #Leave empty for world
params=list(),
scale = TRUE)
{
query <- paste(baseline, term, sep=',')
pp <- c(list(q = query,
date = date,
cat = "0-18", #0-18 = Shopping
geo = geo,
#content = 1,
cid="TIMESERIES_GRAPH_0",
# cmpt='q',
#tz='',
#graph = 'all_csv',
export = 3 #Data only
),
params)
trendsURL <- "http://www.google.com/trends/fetchComponent"
resultsText <- getForm(trendsURL, .params = pp, curl = ch)
## Sometimes we reach quota limit, in that case stop!
# NOTE(review): despite the comment, this only prints a message; it does
# not actually stop() — parsing continues on the error page.
if (any(grep("QUOTA", resultsText))) {
print("Reached Google Trends quota limit! Please try again later.")
}
vec <- strsplit(resultsText, "\\\n{2,}")[[1]]
headers <- unname(sapply(vec, function(v) strsplit(v, "\\\n")[[1]][1]))
# Strip the JSONP wrapper and repair the payload so it parses as JSON:
# "new Date(y,m,d)" becomes an array, and empty cells become zero cells.
json <- sub('^.*google\\.visualization\\.Query\\.setResponse\\(', '', resultsText)
json <- sub(');$', '', json)
json <- gsub('new Date\\(([0-9,]+)\\)', '[\\1]', json)
json <- gsub(',,', ',{"v":0,"f":"0"},', json)
rawtable <- fromJSON(json)$table
cols <- melt(rawtable$cols)
# Series names are the column labels other than the Date column.
datacols <- as.character(cols[cols[[2]] == 'label' & cols$value != 'Date',]$value)
trend <- ldply(rawtable$rows,
function(r)
{
vals <- unlist(llply(r$c, function(col) col$v))
#Pad with trailing zeroes if data is missing
return(c(vals, rep(0, 3 + length(datacols) - length(vals)))) #3 - for y,m,d
}
)
colnames(trend) <- c('year', 'month', 'day', datacols)
trend$month <- trend$month + 1 #JavaScript starts month numbers from 0
#If baseline is provided return ratio from baseline
# Column 4 (3+1) is the baseline series; divide every other data column by it.
if (baseline != '' & scale)
for ( attr in datacols[-1])
trend[[attr]] <- trend[[attr]] / trend[, 3+1]
return(trend)
}
#Google-generated search categories
# Compare several TV-show categories against a baseline category so all
# series end up on a comparable scale.
terms <- c(baseline="/m/0524b41" #Game of Thrones
,"/m/0gxrn7l" #The Newsroom
,"/m/0hr22w5" #Orange is the New Black
,"/m/0h3rv9x" #House of Cards
)
# Authenticated curl handle consumed by getTrend() via the global env.
ch <- initTrend()
trend.baseline <- getTrend(terms['baseline'], date = 'today 24-m')
trends <- trend.baseline
trend.interval <- 'today 6-m'
scale <- FALSE
# Fetch each non-baseline term and append its series to `trends`.
for (term in terms[-1])
{
trend <- getTrend(term, terms['baseline'], date = trend.interval, scale=scale)
term <- tail(colnames(trend), 1) #May be prettified by Google
# When scaling, multiply back by the baseline series; otherwise keep raw.
trends[[term]] <- trend[, term] * ifelse(scale, trend.baseline[, tail(colnames(trend.baseline), 1)], 1)
# Random pause between requests to avoid hitting the Trends quota.
Sys.sleep(sample(1:3,1))
}
trends %>% head
#Graph the trends
library(ggplot2)
# Long format -> one line per term, indexed by calendar date.
trends %>%
melt(id=c('year', 'month', 'day'),
variable.name = "term", value.name = "trend") %>%
mutate(week=as.Date(ISOdate(year, month, day))) %>%
ggplot(aes(x=week, y=trend, color=term)) +
geom_line() +
theme(axis.text.x = element_text(angle = -45, hjust = 0))
|
4bb85b48a8cabd9b64e39fde13ece3712c68d755
|
02c727c80d9842eb479751345c6cf69623f7705e
|
/Claimants_RCode.R
|
d6c61c0eef1ec3b18eabd84127bf9ba2160c5c28
|
[] |
no_license
|
sanmitjadhav/Logistic-Regression-in-R-Data-Science-Machine-Learning-
|
4dbc79baf6f9c70932bc65f48f171b3fc6866aec
|
3ca4a940d07b446c0efafcbfaf27f44ebab53ee3
|
refs/heads/master
| 2022-11-15T05:03:06.863574
| 2020-07-06T06:54:52
| 2020-07-06T06:54:52
| 277,465,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
Claimants_RCode.R
|
# Logistic regression on the claimants data set: predict ATTORNEY from the
# remaining columns, evaluate accuracy, and draw a ROC curve.
claimants <- read.csv(file.choose())# Choose the claimants Data set
View(claimants)
# Drop the first column (an id column) before modeling.
data=claimants[,-1]
View(data)
# Remove rows with any missing values.
claimants <- na.omit(data)
View(claimants)
model <- glm(ATTORNEY~.,data=claimants,family = "binomial")
summary(model)
# Confusion matrix table
prob <- predict(model,type=c("response"),claimants)
View(prob)
# Threshold fitted probabilities at 0.5.
confusion<-table(prob>0.5,claimants$ATTORNEY)
confusion
confusion# Model Accuracy
Accuracy<-sum(diag(confusion)/sum(confusion))
Accuracy#70.52
1-Accuracy
# Misclassification rate via the off-diagonal cells.
sum(confusion[cbind(2:1, 1:2)])/sum(confusion)
?diag
##
pred_values <- NULL
pred_values
yes_no <- NULL
# NOTE(review): 1096 is a hard-coded row count — assumed to equal
# nrow(claimants) after na.omit(); confirm if the input file changes.
for (i in 1:1096){
pred_values[i] <- ifelse(prob[i]>=0.5,1,0)
yes_no[i] <- ifelse(prob[i]>=0.5,"yes","no")
}
# Attach probabilities and class predictions back onto the data frame.
claimants[,"prob"] <- prob
View(prob)
View(claimants)
claimants[,"pred_values"] <- pred_values
View(pred_values)
View(claimants)
claimants[,"yes_no"] <- yes_no
View(claimants)
View(claimants[,c(2,7,8,9)])
# Accuracy
acc <- table(claimants$ATTORNEY,pred_values)
acc
Accuracy<-sum(diag(acc)/sum(acc))
Accuracy # 70.62
# ROC Curve
#install.packages("ROCR")
library(ROCR)
rocrpred<-prediction(prob,claimants$ATTORNEY)
rocrperf<-performance(rocrpred,'tpr','fpr')
plot(rocrperf,colorize=T,text.adj=c(1000,0))
|
2ac5dbf28f0ebc0f07d7b451b5a8d5ae9187b2c4
|
cf2efa64bdcaad29d118177a8370174bf2aa3c3c
|
/examples/core.R
|
37a806304b6e80c2e9e06fba6a7b34e67ba6029c
|
[] |
no_license
|
rOpenGov/europarl
|
30837efe1c5b1e0c886b745d02e0f739c6f63d49
|
f6bda62a433f8c5595078f3b93df877d706cac6f
|
refs/heads/master
| 2023-07-22T10:48:43.859213
| 2023-07-13T13:27:50
| 2023-07-13T13:27:50
| 97,875,276
| 9
| 2
| null | 2021-10-13T21:48:03
| 2017-07-20T20:23:31
|
R
|
UTF-8
|
R
| false
| false
| 1,888
|
r
|
core.R
|
# Navigate from `url` by following the hyperlink matching `link`,
# and return the parsed HTML document of the resulting subpage.
subpage <- function(url, link) {
session <- follow_link(html_session(url), link)
read_html(session$url)
}
#return genders & current status
# change, current status out
# Scrape a deputy's European Parliament profile (Polish-language site) and
# return c(status, gender), where status is "active"/"inactive" and gender
# is "Male"/"Female".
# Gender is inferred from the grammatical form of the Polish word for
# "Member": "Członek" (masculine) vs "Członkini" (feminine).
get_gender_active <- function(name) {
url <- "http://www.europarl.europa.eu/meps/pl/directory.html?filter=all&leg=0#"
to_return <- c("","")
# page <- subpage(url, name)
link <- name
# Follow the directory link bearing the deputy's name.
s <- html_session(url)
s <- s %>% follow_link(link)
url_1 <- s$url
page <- read_html(url_1)
# The ".more_info" node holds the current-status label, if any.
text <- page %>%
html_nodes(".more_info") %>%
html_text()
text <- trimws(text) #clean text, to check! gsub
text[1]
# Empty status text => no current mandate => inactive; gender must be
# read from the all-terms history page instead.
if(text[1] == "") {
to_return[1] <- "inactive"
# "Przegląd wszystkich kadencji" = "Overview of all terms".
s <- s %>% follow_link("Przegląd wszystkich kadencji")
page <- read_html(s$url)
h <- page %>%
html_nodes("h4") %>%
html_text()
x <- match("Członek", h)
if(!is.na(x))
to_return[2] <- "Male"
else
to_return[2] <- "Female"
}
else if(text[1] == "Członek") {
to_return[1] <- "active"
to_return[2] <- "Male"
}
else if(text[1] == "Członkini") {
to_return[1] <- "active"
to_return[2] <- "Female"
}
else {
# Some other active role (e.g. vice-chair); fall back to scanning the
# h4 headers of the current page for the masculine form.
to_return[1] <- "active"
h <- page %>%
html_nodes("h4") %>%
html_text()
x <- match("Członek", h)
if(!is.na(x))
to_return[2] <- "Male"
else
to_return[2] <- "Female"
}
return(to_return)
}
# Look up a deputy by name in the European Parliament directory and return
# the identifier embedded in the URL of the deputy's profile page.
get_id_deputy <- function(name) {
directory_url <- "http://www.europarl.europa.eu/meps/en/directory.html?filter=all&leg=0#"
# Follow the directory entry bearing the deputy's name.
session <- follow_link(html_session(directory_url), name)
# Profile URLs look like .../meps/en/<id>/...: drop everything up to
# "/en/", then everything from the next "/" onward.
deputy_id <- sub("/.*", "", sub(".*/en/", "", session$url))
deputy_id
}
|
a3ea7b747af5a0490e073f26f8e1c44fb4288730
|
ca0f6de8b85a8e82b6107906ff3f4c862f3eb92f
|
/data-raw/watershed/floodplain_utils.R
|
8a9b27f1b8fd56f26c0f2f2ca66ad030a62a96cb
|
[] |
no_license
|
isabellekavanagh/cvpiaHabitat
|
c583acdf9e2d587a47a2848aa2f9ff99cf526c15
|
083705017b618466847b299f7fb9addb8a1619ca
|
refs/heads/master
| 2021-07-03T02:14:43.895654
| 2020-04-24T18:53:54
| 2020-04-24T18:53:54
| 180,662,606
| 0
| 0
| null | 2020-07-18T00:04:59
| 2019-04-10T20:55:37
|
HTML
|
UTF-8
|
R
| false
| false
| 10,056
|
r
|
floodplain_utils.R
|
library(tidyverse)
library(readxl)
library(glue)
# TODO check that metadata sheet is up to date
# Module-level watershed metadata table consumed by every scaling function
# below (rearing lengths, gradient splits, flow-scaling factors, etc.).
metadata <- read_excel('data-raw/watershed/CVPIA_FloodplainAreas.xlsx', sheet = 'MetaData',
col_types = c('text', 'text', 'text', 'text',
rep('numeric', 17), 'text', 'numeric', 'text'), na = 'na')
# function for partially modeled watersheds---------------------------------
# ws = watershed
# df = flow to area relationship dataframe for watershed
# Extrapolate a flow-to-floodplain-area curve from the modeled reach to the
# full rearing extent, apportioned by low/high gradient length. Reads the
# module-level `metadata` table. Returns a data frame with flow_cfs plus
# one *_floodplain_acres column per species present in the watershed.
scale_fp_flow_area_partial_model <- function(ws, df) {
watershed_metadata <- filter(metadata, watershed == ws)
# Species presence is signaled by a non-NA rearing length in metadata.
spring_run_present <- !is.na(watershed_metadata$SR_rearing_length_mi)
steelhead_present <- !is.na(watershed_metadata$ST_rearing_length_mi)
fp_area <- df$modeled_floodplain_area_acres
# Area per mile over the modeled reach, used to extrapolate to the rest.
fp_area_per_mile_modeled <- fp_area/watershed_metadata$FR_length_modeled_mi
# fall run floodplain area
low_grad_len_FR <- watershed_metadata$FR_low_gradient_length_mi
high_grad_len_FR <- watershed_metadata$FR_high_gradient_length_mi
#.1 is downscaling for high gradient
fp_area_FR <- (fp_area_per_mile_modeled * low_grad_len_FR) +
(fp_area_per_mile_modeled * high_grad_len_FR * 0.1)
result <- data.frame(
flow_cfs = df$flow_cfs,
FR_floodplain_acres = fp_area_FR
)
if (spring_run_present) {
# spring run floodplain area
low_grad_len_SR <- watershed_metadata$SR_low_gradient_length_mi
high_grad_len_SR <- watershed_metadata$SR_high_gradient_length_mi
fp_area_SR <- (fp_area_per_mile_modeled * low_grad_len_SR) +
(fp_area_per_mile_modeled * high_grad_len_SR * 0.1)
result <- bind_cols(result, SR_floodplain_acres = fp_area_SR)
}
# steel head floodplain area
if (steelhead_present) {
low_grad_len_ST <- watershed_metadata$ST_low_gradient_length_mi
high_grad_len_ST <- watershed_metadata$ST_high_gradient_length_mi
fp_area_ST <- (fp_area_per_mile_modeled * low_grad_len_ST) +
(fp_area_per_mile_modeled * high_grad_len_ST * 0.1)
result <- bind_cols(result, ST_floodplain_acres = fp_area_ST)
}
return(
mutate(result, watershed = ws)
)
}
# function for non-modeled watersheds---------------------------------
# ws = watershed
# Build a flow-to-floodplain-area curve for a watershed with no hydraulic
# model of its own, by scaling a proxy watershed's modeled curve (Deer
# Creek, Cottonwood Creek, or Tuolumne River) using the Dec-Jun mean-flow
# ratio stored in `metadata`. Returns the same shape of data frame as
# scale_fp_flow_area_partial_model().
scale_fp_flow_area <- function(ws) {
watershed_metadata <- filter(metadata, watershed == ws)
spring_run_present <- !is.na(watershed_metadata$SR_rearing_length_mi)
steelhead_present <- !is.na(watershed_metadata$ST_rearing_length_mi)
# appropriate proxy from watershed, df has flow to area curve
proxy_watershed <- watershed_metadata$scaling_watershed
if (proxy_watershed == 'deer_creek') {
temp_df <- read_excel('data-raw/watershed/CVPIA_FloodplainAreas.xlsx', sheet = 'DeerCreek')
proxy_watershed_metadata <- filter(metadata, watershed == 'Deer Creek')
}
if (proxy_watershed == 'cottonwood_creek') {
temp_df <- read_excel('data-raw/watershed/CVPIA_FloodplainAreas.xlsx', sheet = 'CottonwoodCreek')
proxy_watershed_metadata <- filter(metadata, watershed == 'Cottonwood Creek')
}
if (proxy_watershed == 'tuolumne_river') {
temp_df <- read_excel('data-raw/watershed/CVPIA_FloodplainAreas.xlsx', sheet = 'TuolumneRiver')
proxy_watershed_metadata <- filter(metadata, watershed == 'Tuolumne River')
}
# Bank-full flow = largest flow with zero modeled floodplain area; only
# flows at or above it inundate the floodplain.
bank_full_flow <- temp_df %>%
filter(modeled_floodplain_area_acres == 0) %>%
summarise(max = max(flow_cfs)) %>%
pull(max)
df <- temp_df %>%
filter(flow_cfs >= bank_full_flow)
# scale flow
scaled_flow <- df$flow_cfs * watershed_metadata$dec_jun_mean_flow_scaling
# fall run area
# divide floodplain area by watershed length of proxy watershed to get area/mile, scale to hydrology
scaled_area_per_mile_FR <- (df$modeled_floodplain_area_acres / proxy_watershed_metadata$FR_length_modeled_mi) *
watershed_metadata$dec_jun_mean_flow_scaling
# apportion area by high gradient/low gradient, .1 is downscaling for high gradient
fp_area_FR <- (scaled_area_per_mile_FR * watershed_metadata$FR_low_gradient_length_mi) +
(scaled_area_per_mile_FR * watershed_metadata$FR_high_gradient_length_mi * 0.1)
result <- data.frame(
flow_cfs = scaled_flow,
FR_floodplain_acres = fp_area_FR
)
if (spring_run_present) {
# spring run floodplain area
scaled_area_per_mile_SR <- (df$modeled_floodplain_area_acres / proxy_watershed_metadata$SR_length_modeled_mi) *
watershed_metadata$dec_jun_mean_flow_scaling
fp_area_SR <- (scaled_area_per_mile_SR * watershed_metadata$SR_low_gradient_length_mi) +
(scaled_area_per_mile_SR * watershed_metadata$SR_high_gradient_length_mi * 0.1)
result <- bind_cols(result, SR_floodplain_acres = fp_area_SR)
}
if (steelhead_present) {
# steelhead floodplain area
scaled_area_per_mile_ST <- (df$modeled_floodplain_area_acres / proxy_watershed_metadata$ST_length_modeled_mi) *
watershed_metadata$dec_jun_mean_flow_scaling
fp_area_ST <- (scaled_area_per_mile_ST * watershed_metadata$ST_low_gradient_length_mi) +
(scaled_area_per_mile_ST * watershed_metadata$ST_high_gradient_length_mi * 0.1)
result <- bind_cols(result, ST_floodplain_acres = fp_area_ST)
}
return(
mutate(result, watershed = ws)
)
}
# modeling details------------------------------------
# Produce a human-readable paragraph describing how the floodplain curve
# for watershed `ws` and species `species` ("fr" fall run, "sr" spring run,
# "st" steelhead) was derived, based on the `method` field in `metadata`.
# Returns a glue string, or NULL (with a warning) when spring run are
# absent from the watershed.
print_model_details <- function(ws, species) {
watershed_doc_vars <- filter(metadata, watershed == ws)
if (species == 'sr' & is.na(watershed_doc_vars$SR_length_modeled_mi)) {
warning(sprintf('There are no spring run in %s.', ws))
return(NULL)
}
watershed_method <- watershed_doc_vars$method
model_name <- watershed_doc_vars$model_name
flow_scale <- round(watershed_doc_vars$dec_jun_mean_flow_scaling * 100)
high_grad_factor <- watershed_doc_vars$high_gradient_floodplain_reduction_factor
watershed_name <- ws
# Pull the species-specific lengths/areas used in the narrative below.
if (species == 'fr') {
rearing_length <- round(watershed_doc_vars$FR_rearing_length_mi, 1)
channel_area_modeled <- watershed_doc_vars$FR_channel_area_of_length_modeled_acres
low_grad <- round(watershed_doc_vars$FR_low_gradient_length_mi, 1)
high_grad <- round(watershed_doc_vars$FR_high_gradient_length_mi, 1)
modeled_length <- round(watershed_doc_vars$FR_length_modeled_mi, 1)
}
if (species == 'sr') {
rearing_length <- round(watershed_doc_vars$SR_rearing_length_mi, 1)
channel_area_modeled <- watershed_doc_vars$SR_channel_area_of_length_modeled_acres
low_grad <- round(watershed_doc_vars$SR_low_gradient_length_mi, 1)
high_grad <- round(watershed_doc_vars$SR_high_gradient_length_mi, 1)
modeled_length <- round(watershed_doc_vars$SR_length_modeled_mi, 1)
}
if (species == 'st') {
rearing_length <- round(watershed_doc_vars$ST_rearing_length_mi, 1)
channel_area_modeled <- watershed_doc_vars$ST_channel_area_of_length_modeled_acres
low_grad <- round(watershed_doc_vars$ST_low_gradient_length_mi, 1)
high_grad <- round(watershed_doc_vars$ST_high_gradient_length_mi, 1)
modeled_length <- round(watershed_doc_vars$ST_length_modeled_mi, 1)
}
# One narrative template per derivation method.
if (watershed_method == 'full_model_nmfs') {
return(
glue('The entire mapped rearing extent of {rearing_length} miles was modeled',
' using {model_name}. The high quality depth and high quality velocity',
' ("Pref11") "BankArea" result was used as the floodplain area. High quality',
' velocities were assumed to be less than or equal to 0.15 meters per second,',
' and high quality depths were assumed to be between 0.2 meters and 1.5 meters.')
)
}
if (watershed_method == 'full_model') {
return(
glue("The entire mapped rearing extent of {rearing_length} miles was modeled using {model_name}.",
" An active channel area of {channel_area_modeled} acres estimated through remote",
" sensing analysis was subtracted from total inundated area to obtain inundated floodplain area.")
)
}
if (watershed_method == 'part_model') {
return(
glue('A {modeled_length} mile portion of the entire mapped rearing extent of {rearing_length}',
' miles was modeled using {model_name}. Of the entire mapped rearing extent,',
' {low_grad} miles were classified as low gradient and {high_grad} miles were classified',
' as high gradient based on a geomorphic analysis of long profile slopes and valley widths.',
' The floodplain area per unit length was determined for the modeled extent and used to',
' approximate areas for the non-modeled extent. The area per unit length was scaled by a ',
' factor of {high_grad_factor} for the high gradient extent.',
' There was no scaling factor applied to the low gradient extent.')
)
}
# "scaled_dc" / "scaled_cc" / "scaled_tr" -> proxy-watershed narrative.
if (str_detect(watershed_method, 'scaled')) {
proxies <- c('Deer Creek', 'Cottonwood Creek', 'Tuolumne River')
names(proxies) <- c('dc', 'cc', 'tr')
proxy_ws <- proxies[str_remove(watershed_method, 'scaled_')]
return(
glue(' There was no watershed specific hydraulic modeling available for {watershed_name}.',
' A flow to inundated floodplain area relationship was generated for {watershed_name}',
' by scaling the flow to inundated floodplain area relationship for {proxy_ws}.',
' This scaling used the ratio of mean flow from December to June between the modeled',
' and unmodeled watershed. Flows and corresponding inundated floodplain areas per unit length' ,
' were calculated for {watershed_name} as {flow_scale}% of {proxy_ws}.' ,
' Of the entire mapped {rearing_length} miles rearing',
' extent in {watershed_name}, {low_grad} miles were classified as low gradient and',
' {high_grad} miles were classified as high gradient based on a geomorphic analysis' ,
' of long profile slopes and valley widths. The area per unit length was scaled by a ',
' factor of {high_grad_factor} for the high gradient extent.',
' There was no scaling factor applied to the low gradient extent.')
)
}
}
|
6a612f549128a36b9191c543e2d58889f02d18a4
|
d3f02cbbe322bba1a116549f4568f51d3048125a
|
/source/rescue.R
|
ac8e0ff762f0caf1a84e2c1ad82b5d78bfe8c57b
|
[] |
no_license
|
zuojung/rube_research
|
f840684bc2abed60d5ef29e5f86d62bb404be64f
|
bdf80033f23842aeed2de5fc782bb10361f3ed6a
|
refs/heads/master
| 2021-06-11T10:21:39.765580
| 2017-02-28T20:17:06
| 2017-02-28T20:17:06
| 81,767,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,298
|
r
|
rescue.R
|
## When WinBUGS fails, but "bin" is set, the results up to within "bin" iterations
## of the crash will be saved in codaIndex.txt and coda#.txt.
## With what=MCMC, rescue() places the last "n" recorded iterations into a
## (partially complete) rube object. With what=startVals, the last recorded
## values (or n before that if n>0) will be recorded in a list for uses
## as starting values for another run.
## The value n=NA indicates take all possible iterations when what=MCMC;
## otherwise it indicates to take the last recorded values as the starting values.
### WILL NOT WORK CORRECTLY WITH ARRAYS
# Recover partial WinBUGS output from coda files in the working directory.
# what -- "MCMC" builds a partial rube/bugs object from the last n
#         iterations; "startVals" returns one list of parameter values per
#         chain, suitable as inits for a re-run.
# n    -- how many iterations back to reach (see comments above).
# Reads codaIndex.txt and coda<k>.txt for each chain k. Requires stringr
# (str_locate / str_locate_all).
rescue <- function(what=c("MCMC","startVals"), n=NA) {
# coda file layout differs under JAGS, so refuse to run with R2jags loaded.
if ("package:R2jags" %in% search()) stop("rescue() does not work with jags")
what <- match.arg(what)
# Just get starting values from last successful recorded iteration
if (what == "startVals") {
### WILL NOT WORK CORRECTLY WITH ARRAYS
if (is.na(n)) n <- 0
if (n<0) n <- -n
# K = number of chains (one coda<k>.txt per chain).
K <- length(list.files(pattern="coda[[:digit:]]+[.]txt"))
index <- read.table("codaIndex.txt")
# N = iterations recorded per parameter; M = number of parameters.
N <- index[1,3]
if (n<0 || n>=N) stop("bad n")
M <- nrow(index)
# One row per parameter: the (N-n)-th recorded iteration of each.
rows <- seq(from=N-n, by=N, length=M)
lst <- NULL
for (k in 1:K) {
# Build "name=value" strings for chain k, then eval them into a list.
code <- paste(index[,1], "=", read.table(paste("coda",k,".txt",sep=""))[rows,2], sep="")
# Drop the deviance node; it is not a model parameter.
dev <- which(substring(code,1,9)=="deviance=")
if (length(dev)>0) code <- code[-dev]
# A "[" in the name marks an array element, handled separately below.
leftBracket <- str_locate(code, "\\[")[,1]
isArr <- !is.na(leftBracket)
hasArr <- any(isArr)
if (hasArr) {
arr <- code[isArr]
code <- code[!isArr]
}
# handle non-arrays
lst <- c(lst, list(eval(parse(text=paste("list(",paste(code,collapse=","),")")))))
# handle arrays
if (hasArr) {
arrNames <- unique(substring(arr,1,leftBracket[isArr]-1))
for (arrName in arrNames) {
this <- arr[arrNames==arrName]
eqls <- str_locate(this,"=")[,1]
if (any(is.na(eqls))) stop("error parsing array equal sign")
brackets <- str_locate(this,"\\[.*\\]")
if (any(is.na(brackets[,1]))) stop("error parsing array equal sign")
# indices = the comma-separated subscripts; right = the value text.
indices <- substring(this, brackets[,1]+1, brackets[,2]-1)
right <- substring(this, eqls+1, nchar(this)-1)
# Dimensionality = number of commas in the subscript + 1.
commas <- str_locate_all(indices[1],",")
dm <- length(commas[[1]][,1]) + 1
if (dm<1) stop("error parsing array commas")
if (dm>3) stop("arrays with >3 dimensions not coded yet")
if (dm==1) {
ind <- try(suppressWarnings(as.numeric(indices)), silent=TRUE)
if (is(ind,"try-error") || any(is.na(ind))) stop("can't read array indices")
vals <- try(suppressWarnings(as.numeric(right)), silent=TRUE)
if (is(vals,"try-error") || any(is.na(vals))) stop("can't read array values")
tmpA <- rep(NA,max(ind))
tmpA[ind] <- vals
} else if (dm==2) {
c1 <- sapply(commas, function(x)x[1,1])
ind1 <- try(suppressWarnings(as.numeric(substring(indices,1,c1-1))), silent=TRUE)
if (is(ind1,"try-error") || any(is.na(ind1))) stop("can't read array indices")
ind2 <- try(suppressWarnings(as.numeric(substring(indices,c1+1))), silent=TRUE)
if (is(ind2,"try-error") || any(is.na(ind2))) stop("can't read array indices")
vals <- try(suppressWarnings(as.numeric(right)), silent=TRUE)
if (is(vals,"try-error") || any(is.na(vals))) stop("can't read array values")
rmax <- max(ind1)
cmax <- max(ind2)
tmpA <- matrix(NA, rmax, cmax)
# NOTE(review): indices appear transposed here (ind2 before ind1) —
# presumably matching coda's column-major ordering; confirm.
tmpA[cbind(ind2,ind1)] <- vals
} else {
c1 <- sapply(commas, function(x)x[1,1])
ind1 <- try(suppressWarnings(as.numeric(substring(indices,1,c1-1))), silent=TRUE)
if (is(ind1,"try-error") || any(is.na(ind1))) stop("can't read array indices")
c2 <- sapply(commas, function(x)x[2,1])
ind2 <- try(suppressWarnings(as.numeric(substring(indices,c1+1,c2-1))), silent=TRUE)
if (is(ind2,"try-error") || any(is.na(ind2))) stop("can't read array indices")
ind3 <- try(suppressWarnings(as.numeric(substring(indices,c2+1))), silent=TRUE)
if (is(ind3,"try-error") || any(is.na(ind3))) stop("can't read array indices")
vals <- try(suppressWarnings(as.numeric(right)), silent=TRUE)
if (is(vals,"try-error") || any(is.na(vals))) stop("can't read array values")
rmax <- max(ind1)
cmax <- max(ind2)
tmax <- max(ind3)
tmpA <- array(NA, c(rmax, cmax, tmax))
tmpA[cbind(ind3,ind2,ind1)] <- vals
}
lst[[k]] <- c(lst[[k]], list(tmpA))
names(lst[[k]])[length(lst[[k]])] <- arrName
}
}
}
return(lst)
}
## what=MCMC: create a rube object
K <- length(list.files(pattern="coda[[:digit:]]+[.]txt"))
index <- read.table("codaIndex.txt")
if (is.na(n)) n <- index[1,3]
if (n<2 || n>index[1,3]) stop("bad n")
allNames <- as.character(index[,1])
#lbLoc <- str_locate(allNames, "\\[")[,1]
#baseNames <- allNames
#baseNames[!is.na(lbLoc)] <- substring(baseNames[!is.na(lbLoc)], 1, lbLoc[!is.na(lbLoc)]-1)
#uNames <- unique(baseNames)
M <- nrow(index)
Names <- index[,1]
# For each parameter, take its last n recorded rows from the coda files.
rows <- apply(index, 1, function(r, p) {m=as.numeric(r[3]); return(paste(m-p+1,":",m,sep=""))}, p=n)
rows <- eval(parse(text=paste("c(",paste(rows,collapse=","),")")))
# mat: (M parameters x n iterations) stacked by parameter, one column per chain.
mat <- matrix(NA, M*n, K)
for (k in 1:K) mat[,k] <- read.table(paste("coda",k,".txt",sep=""))[rows,2]
# Rearrange into the standard bugs sims.array layout: iteration x chain x parameter.
sims.array <- array(NA, c(n,K,M))
for (m in 1:M) {
for (k in 1:K) {
sims.array[,k,m] <- mat[(m*n-n+1):(m*n),k]
}
}
# collapse arrays
#if (!exists("str_locate")) library(stringr)
#Sel <- str_locate(Names, "\\[.+\\]$")
#if (any(!is.na(Sel[,1]))) {
#  stop("arrays not implemented")
#}
dimnames(sims.array) <- list(NULL,NULL,Names)
# Assemble a minimal rube/bugs object; burn-in and thinning are unknown
# after a crash, so they are recorded as NA.
rslt <- list(sims.array=sims.array)
dm <- dim(rslt$sims.array)
rslt$n.keep <- dm[1]
rslt$n.chains <- dm[2]
rslt$summary <- summarize(sims.array)
rslt$n.burnin <- NA
rslt$n.thin <- NA
rslt$isDIC <- FALSE
class(rslt) <- c("rube","bugs")
return(rslt)
}
|
7610c128d67c247fe149f77b8f52406f47ae6c36
|
1d849c62f86b3f820a16625badb20510903776c0
|
/lab1.R
|
b5e1a944f1a3f1ca35cc8546f8c4de01dd623068
|
[] |
no_license
|
vampzmage/DataAnalytics2021_Hongbo_Zhao
|
20197f099d71fe3118bc089c8352dc8e8661889a
|
e9f0cff8f5a030644295eb7136810def62ecfab3
|
refs/heads/main
| 2023-02-27T15:34:58.337373
| 2021-02-11T14:24:53
| 2021-02-11T14:24:53
| 335,978,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,338
|
r
|
lab1.R
|
# Lab 1: data-frame basics on a toy weather table, then exploratory
# plots (histogram, density, ECDF, Q-Q) of the 2010 EPI data.
days <- c('Mon', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun') #days
temp <- c(28, 30.5, 32, 31.2, 29.3, 27.9, 26.4) #temperature
snowed <- c('T','T', 'F', 'F', 'T', 'T', 'F') #snowed that day ('T'/'F' strings)
help("data.frame")
RPI_Weather_Week <- data.frame(days, temp, snowed) #create dataframe
RPI_Weather_Week
head(RPI_Weather_Week) #head of the data frame
str(RPI_Weather_Week) #look at structure
summary(RPI_Weather_Week) #summary of the dataframe
RPI_Weather_Week[1,] #showing the 1st row and all columns
RPI_Weather_Week[,1] #showing 1st col and all rows
RPI_Weather_Week[,'snowed']
RPI_Weather_Week[,'days']
RPI_Weather_Week[,'temp']
RPI_Weather_Week[1:5,c("days", "temp")]
RPI_Weather_Week$temp
# Bug fix: `snowed` holds the character values 'T'/'F', so comparing
# against logical TRUE never matched any row; compare against 'T'.
subset(RPI_Weather_Week, subset=snowed=='T')
sorted.snowed <- order(RPI_Weather_Week['snowed'])
sorted.snowed
RPI_Weather_Week[sorted.snowed,]
#RPI_Weather_Week[descending_snowed,]
dec.snow <- order(-RPI_Weather_Week$temp)
dec.snow
#creating dataframes
empty.DataFrame <- data.frame()
v1 <- 1:10
v1
letters
v2 <- letters[1:10]
df <- data.frame(col.name.1 = v1, col.name2 = v2)
df
#importing data and exporting data
#writing to a csv
write.csv(df, file = 'saved_df1.csv')
df2 <- read.csv('saved_df1.csv')
df2
EPI_data <- read.csv("C:/Users/shend/Desktop/DataAna/lab/lab1/2010EPI_data.csv")
View(EPI_data)
attach(EPI_data)
fix(EPI_data)
EPI
tf <- is.na(EPI)
E <- EPI[!tf] # EPI values with NAs removed
summary(EPI)
fivenum(EPI, na.rm=TRUE)
stem(EPI)
hist(EPI)
hist(EPI, seq(30., 95., 1.0), prob=TRUE)
lines(density(EPI, na.rm=TRUE, bw=1.))
rug(EPI)
plot(ecdf(EPI), do.points=FALSE, verticals=TRUE)
par(pty="s")
qqnorm(EPI); qqline(EPI)
x <- seq(30, 95, 1)
qqplot(qt(ppoints(250), df = 5), x, xlab = "Q-Q plot for t dsn")
qqline(x)
# Bug fix: the vector was created as `Eland` but then used as `ELand`
# (R names are case-sensitive), so hist(ELand) failed with "object not
# found"; use one consistent name.
EPILand <- EPI[!Landlock]
ELand <- EPILand[!is.na(EPILand)]
hist(ELand)
hist(ELand, seq(30., 95., 1.0), prob=TRUE)
plot(ecdf(EPI), do.points=FALSE, verticals=TRUE)
par(pty="s")
qqnorm(EPI); qqline(EPI)
x <- seq(30, 95, 1)
qqplot(qt(ppoints(250), df= 5), x, xlab= "Q-Q plot for t dsn")
qqline(x)
plot(ecdf(EPI_data$EPI), do.points=FALSE, verticals= TRUE)
plot(ecdf(EPI_data$EPI), do.points=TRUE, verticals= TRUE)
par(pty="s")
qqnorm(EPI_data$EPI)
qqline(EPI_data$EPI)
x <- seq(30, 95, 1)
x
x2 <- seq(30, 95, 2)
x2
x2 <- seq(30, 96, 2)
x2
qqplot(qt(ppoints(250), df=5), x, xlab= "Q-Q plot")
qqline(x)
|
c56176dcc25762b52e00283d744dd029c4e13434
|
2d4c23464da9708b267ca744dd979d02925b3739
|
/5_SVM_Prediction.r
|
1558e43999c357ecfda4c218adb363b31075739e
|
[] |
no_license
|
hellogithub2018/wine_analysis-
|
b69b5a7fdd55f16fb79d4227e028f866741e4ac3
|
496e82f4c14fb04a8269810497541bbfeebe8533
|
refs/heads/master
| 2020-03-21T23:16:48.836264
| 2017-05-28T17:37:05
| 2017-05-28T17:37:05
| 139,176,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
5_SVM_Prediction.r
|
library(e1071)
library(xlsx)
library(corrplot)
library(lattice)
library(ggplot2)
library(caret)
library(rpart)
library(rpart.plot)
library(randomForest)
library(ineq)
# Train an SVM classifier of wine quality on the training workbook and
# predict quality labels for the test workbook.
t1 = read.xlsx("/Users/AjayKulkarni/Study/Master of Science/Sem 1/CSI 777/data/winequality/new_f/Training.xlsx",1)
t2 = read.xlsx("/Users/AjayKulkarni/Study/Master of Science/Sem 1/CSI 777/data/winequality/new_f/Testing.xlsx",1)
#x_data <- t1[,c(2,8,10,11,12)] # Model 2
#y_data <- t2[,c(2,8,10,11,12)] # Model 2
# Model 1 feature subset (columns 2,3,7,9,10,11) plus quality (column 12).
x_data <- t1[,c(2,3,7,9,10,11,12)] # Model 1
y_data <- t2[,c(2,3,7,9,10,11,12)] # Model 1
# Quality is cast to a factor so svm() performs classification.
model <- svm(as.factor(quality) ~ .,data=x_data)
pred <- predict(model,newdata=y_data)
|
3616d3bf6bffa7c49aaa975696b8609b07998ba6
|
d432e3106e2b9d7806ac8097a781a4d51953e778
|
/models/eda.R
|
a692855421c3116c74872dc97c748d0866a85cfa
|
[] |
no_license
|
Moeymoeymoeymoey/STAT154-Final-Project
|
cd9cf563ad4bec98baa7d3fbc61aa32fcc46acb0
|
94c55dc71c1e9a63825326b48a76599bbd91e22f
|
refs/heads/master
| 2021-01-20T13:04:07.559721
| 2017-05-06T07:38:04
| 2017-05-06T07:38:04
| 90,443,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,814
|
r
|
eda.R
|
library(tm)
library(nnet)
library(e1071)
library(ggplot2)
# Load the Yelp business/review training data, look at the star-rating
# distributions, and lowercase + de-punctuate the review text.
business.train = read.csv("yelp_academic_dataset_business_train.csv", header = T)
business.review = read.csv("yelp_academic_dataset_review_train.csv", header = T)
qplot(data = business.train, stars, main = "Business Stars")
qplot(data = business.review, stars, main = "Review Stars")
text = as.vector(business.review$text)
lower_txt = tolower(text)
# Replace every punctuation character with a space before tokenizing.
lower_txt = gsub("[[:punct:]]", " ", lower_txt)
# One character vector of tokens per review.
split = strsplit(lower_txt, split = " ")
# Drop empty-string elements from a character vector of tokens.
remove_empty_str <- function(tokens) {
  tokens[tokens != ""]
}
txt_split = lapply(split, FUN = remove_empty_str)
# Token count per review. `lengths()` replaces the original loop, which
# grew `word_count` one element at a time and hard-coded the row count
# (116474) — wrong whenever the input file changes size.
word_count = lengths(txt_split)
business.review$review_length = word_count
qplot(data = business.review, review_length, main = "Review Length")
# Stop-word list for cleanReview(); the empty string is included so any
# residual empty tokens are also filtered out.
stopWords = c(stopwords("en"), "")
# Clean a single review string: lowercase it, strip punctuation, tokenize
# on spaces, remove stop words, and re-join with single spaces.
#
# review     -- one review as a character string.
# stop_words -- tokens to remove (defaults to the global `stopWords`).
#
# Returns the cleaned review as a single string.
cleanReview <- function(review, stop_words = stopWords) {
  # Bug fix: the original lowered the *global* `text` instead of the
  # `review` argument, and filtered against the global `stopWords`
  # instead of the `stop_words` parameter — so the argument and the
  # parameter were both silently ignored.
  lower_txt <- tolower(review)
  # Remove punctuation - (might want to keep !)
  lower_txt <- gsub("[[:punct:]]", " ", lower_txt)
  # Tokenize review
  tokens <- strsplit(lower_txt, ' ')[[1]]
  # Remove stop words
  clean_tokens <- tokens[!(tokens %in% stop_words)]
  paste(clean_tokens, collapse = ' ')
}
# Clean a whole tm corpus at once: lowercase, drop numbers and
# punctuation, remove English stop words (plus "the"/"and"), and collapse
# whitespace. Returns the transformed corpus (value of the last tm_map).
cleanCorpus = function(corpus){
# You can also use this function instead of the first.
# Here you clean all the reviews at once using the
# 'tm' package. Again, a lot more you can add to this function...
review_corpus = tm_map(corpus, content_transformer(tolower))
review_corpus = tm_map(review_corpus, removeNumbers)
review_corpus = tm_map(review_corpus, removePunctuation)
review_corpus = tm_map(review_corpus, removeWords, c("the", "and", stopwords("english")))
# Last expression is returned implicitly.
review_corpus = tm_map(review_corpus, stripWhitespace)
}
|
449b5aaeefe4d77289b1b6f8ddd38258a7acbf49
|
b45043e9d6e2ee18c79253121ad4a8075cc6633f
|
/plot2.R
|
8fddd87c0924189c5009887d24d03c67ce55d713
|
[] |
no_license
|
redjoy-17/ExData_Plotting1
|
32de9b2a46da64113f261d22596f53aa240d93eb
|
4df8fd598765da2f10db907d4f3757a37550ff1f
|
refs/heads/main
| 2023-01-30T09:06:30.491789
| 2020-12-12T17:35:33
| 2020-12-12T17:35:33
| 320,884,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
plot2.R
|
#Script2
# Plot global active power over a two-day window and save a PNG copy.
directory<-"/Users/Gaia/Downloads"
setwd(directory)  # NOTE(review): setwd() in a script is machine-specific
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".")
library("dplyr")
data$Date<-as.Date (data$Date,format= "%d/%m/%Y")
data<-subset(data, Date >= "2007-02-01" & Date <= "2007-02-02" )
data$DateTime=as.POSIXct(paste(data$Date,data$Time))
# Bug fix: the variable was assigned to the misspelled name `acrive_power`,
# so the plot() call below failed with "object 'active_power' not found".
active_power<-as.numeric(data$Global_active_power)
plot(x=data$DateTime,y=active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.copy(png,file="plot2.png",width=480,height=480)
dev.off()
|
4577c0d4d808031d052c7a7d4e0973fd1ed0d31d
|
319a13e48a7e26e5ab660c9dba30521834203dab
|
/RFiles/rgamma.R
|
1e8754ad38be3ee3e88b333e49f649b575f159e5
|
[] |
no_license
|
marco-tn/StaticticsBasic
|
9b4914bec56571754b54481e53f389654178cb3b
|
6dba8d4ac01759cd1e7af302386b9a34f3594475
|
refs/heads/master
| 2020-08-03T13:06:57.357421
| 2019-09-30T03:00:07
| 2019-09-30T03:00:07
| 211,762,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
rgamma.R
|
par(family = "HiraginoSans-W4") # avoid garbled (mojibake) Japanese text in plots
# Gamma distribution
set.seed(123)
rgamma(10, shape = 3, rate = 1)
# Statistical properties
nu <- 4
alpha <- 2
x <- rgamma(10000, shape = nu, rate = alpha) # draw 10000 gamma random numbers
mean(x) # close to nu/alpha = 2 (law of large numbers)
hist(x, freq = FALSE, breaks = 50, col = "gray", border = "white",
     main = bquote(paste("ガンマ分布 ", Gamma(.(nu), .(alpha))))) # histogram (density scale)
curve(dgamma(x, shape = nu, rate = alpha), add = TRUE,
      col = "red", lwd = 3) # theoretical probability density function
legend(5, 0.4, legend = c("観測値", "理論値"),
       col = c("gray", "red"), lwd = 3) # add a legend (observed vs. theoretical)
|
f00f09ff98c434185dadc8cc8ea452c74a89c1de
|
acf43911124dfbe1871a9e5e419581ea0dd1d317
|
/analysis/sl-psychopy-analysis/rt_slope/nov_pilot/scripts/nov_pilot_rt_slope_cleaning.R
|
a68a829fa46edec7d742acd12e0ff2297f97afb0
|
[] |
no_license
|
aluu6/qlab
|
0919fd482ef151c9f18eee6d5c8c905e8f89f9f2
|
20a8a4572e2bd44c16db8eddb3ccdebb4119c73f
|
refs/heads/master
| 2020-06-12T01:10:24.387816
| 2019-06-27T19:15:38
| 2019-06-27T19:15:38
| 192,003,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,430
|
r
|
nov_pilot_rt_slope_cleaning.R
|
# FMRI Pilot Analysis
# Violet Kozloff
# March 15th
# This script cleans the auditory and visual files from the November pilot
# NOTE: Does not include participant 9, who did not respond in several conditions
# NOTE: Original psychopy files have been renamed to have a letter as a leading character
# NOTE: Excludes the file 007_auditory_2, which is missing RT data
# ****************************************************************************
#for test purposes only
#setwd("/Users/jasinskagroup/Desktop/QLAB/psychopy_sl_beh-master/rt_slope/nov_pilot/scripts")
# Remove objects in environment
# NOTE(review): rm(list=ls()) in a shared script is discouraged; prefer
# running in a fresh R session instead.
rm(list=ls())
# Clean auditory files ----------------------------------------------------------
# Prepare file paths (relative to the scripts/ directory)
auditory_input_path <- "../original_data/auditory/"
auditory_files <- list.files(path=auditory_input_path, pattern="*.csv")
auditory_output_path <- "../cleaned_data/auditory/"
# create a new file containing only the relevant columns in the output folder
auditory_cleaning <- function(file) {
current_file <- read.csv(file)
value <- c("soundFile","trialnum","condition","random_block_key_resp.rt","random_block_key_resp.keys","structured_block_key_resp.rt","structured_block_key_resp.keys","starget","Run","PartID","ttarget","expName")
newdata <- current_file[value]
this_path<-file.path(auditory_output_path, basename(file))
write.csv(newdata, file=(this_path))
}
# Apply function to all auditory files
for (file in auditory_files)
{
  auditory_cleaning(paste0(auditory_input_path,file))
}
# Clean visual files ----------------------------------------------------------
# to test
# NOTE(review): rm(list=ls()) here wipes everything defined above (including
# the auditory paths and function); the visual section redefines its own.
rm(list=ls())
# Prepare file paths
visual_input_path <- "../original_data/visual/"
visual_files <- list.files(path=visual_input_path, pattern="*.csv")
visual_output_path <- "../cleaned_data/visual/"
visual_cleaning <- function(file) {
current_file <- read.csv(file)
value <- c("image","trialnum","condition","l_block_trial_key_resp.keys","l_block_trial_key_resp.rt","v_block_trial_key_resp.keys","v_block_trial_key_resp.rt","vtarget","Run","PartID","ltarget","expName")
newdata <- current_file[value]
this_path<-file.path(visual_output_path, basename(file))
write.csv(newdata, file=(this_path))
}
# Apply function to all visual files (comment fixed: previously said "auditory")
for (file in visual_files)
{
  visual_cleaning(paste0(visual_input_path,file))
}
|
5fb18e0ce69e78d2f224c7a3da0d6d556d2c5958
|
08b2fbbcae2905aa361001743e78784727369046
|
/man/add_global_p.tbl_uvregression.Rd
|
623d5ba7cef90d6716af56d31e9f70740fff2e10
|
[
"MIT"
] |
permissive
|
shijianasdf/gtsummary
|
d0bd29d40b31d64ba77c4f19ff4aa74e37d92b1e
|
a6b4bf41553a336223c0d2656135349a2bf1de98
|
refs/heads/master
| 2022-12-11T16:06:26.278085
| 2020-09-14T18:11:35
| 2020-09-14T18:11:35
| 295,585,427
| 2
| 0
|
NOASSERTION
| 2020-09-15T01:50:55
| 2020-09-15T01:50:54
| null |
UTF-8
|
R
| false
| true
| 2,034
|
rd
|
add_global_p.tbl_uvregression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_global_p.R
\name{add_global_p.tbl_uvregression}
\alias{add_global_p.tbl_uvregression}
\title{Adds the global p-value for categorical variables}
\usage{
\method{add_global_p}{tbl_uvregression}(
x,
type = NULL,
include = everything(),
keep = FALSE,
quiet = NULL,
...
)
}
\arguments{
\item{x}{Object with class \code{tbl_uvregression} from the
\link{tbl_uvregression} function}
\item{type}{Type argument passed to \link[car:Anova]{car::Anova}. Default is \code{"III"}}
\item{include}{Variables to calculate global p-value for. Input may be a vector of
quoted or unquoted variable names. tidyselect and gtsummary select helper
functions are also accepted. Default is \code{everything()}.}
\item{keep}{Logical argument indicating whether to also retain the individual
p-values in the table output for each level of the categorical variable.
Default is \code{FALSE}}
\item{quiet}{Logical indicating whether to print messages in console. Default is
\code{FALSE}}
\item{...}{Additional arguments to be passed to \link[car:Anova]{car::Anova}.}
}
\value{
A \code{tbl_uvregression} object
}
\description{
This function uses \link[car:Anova]{car::Anova} with argument
\code{type = "III"} to calculate global p-values for categorical variables.
}
\section{Example Output}{
\if{html}{\figure{tbl_uv_global_ex2.png}{options: width=50\%}}
}
\examples{
tbl_uv_global_ex2 <-
trial[c("response", "trt", "age", "grade")] \%>\%
tbl_uvregression(
method = glm,
y = response,
method.args = list(family = binomial),
exponentiate = TRUE
) \%>\%
add_global_p()
}
\seealso{
Other tbl_uvregression tools:
\code{\link{add_nevent.tbl_uvregression}()},
\code{\link{add_q}()},
\code{\link{bold_italicize_labels_levels}},
\code{\link{inline_text.tbl_uvregression}()},
\code{\link{modify}},
\code{\link{tbl_merge}()},
\code{\link{tbl_stack}()},
\code{\link{tbl_uvregression}()}
}
\author{
Daniel D. Sjoberg
}
\concept{tbl_uvregression tools}
|
1b0ff836886bf768bbe49dfe56930aca476a60a8
|
fa3d752f2667846a58f871e381251b46ab4031f7
|
/Scripts/Figures/top-to-pathway.R
|
ad92d77ad34a8252e06d2770456574ee8edeeadf
|
[
"MIT"
] |
permissive
|
rhong3/Segundo_Melanoma
|
c645b345da898ed56b22ae35eb71c73cadb5255f
|
603d991d6537cb7bffdd6c78a96de14d1a34c87a
|
refs/heads/master
| 2022-04-30T02:19:22.370079
| 2022-03-21T18:23:45
| 2022-03-21T18:23:45
| 234,380,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,109
|
r
|
top-to-pathway.R
|
# 10 genes pathway involvement
# Map the top-10 genes to Reactome pathways, flag which GSEA summary pathways
# they hit, and write ranked summaries back out.
toplist = c('ADAM10', 'SCAI', 'TEX30', 'HMOX1', 'CDK4', 'CTNND1', 'DDX11', 'FGA', 'PAEP', 'PIK3CB')
library("org.Hs.eg.db")
library("reactome.db")
# Bug fix: read_csv() is used below but readr was never attached.
library(readr)
# Gene symbol -> Entrez ID -> Reactome pathway names (AnnotationDbi::select).
annotation <- select(org.Hs.eg.db, keys=toplist, columns=c('SYMBOL', 'ENTREZID'), keytype="SYMBOL")
annotation.path <- select(reactome.db, keys = unique(annotation$ENTREZID), columns=c("PATHID", "PATHNAME"), keytype='ENTREZID')
anno = merge(annotation, annotation.path, by='ENTREZID')
anno$PATHNAME = gsub('Homo sapiens: ', '', anno$PATHNAME)
# NOTE(review): `no_pathway` is defined but never used below — confirm intent.
no_pathway = c('TEX30', 'PAEP')
relax <- read_csv("Results/relax_ICA_GSEA_summary.csv")
strict <- read_csv("Results/strict_ICA_GSEA_summary.csv")
# One 0/1 column per top gene: does the gene map to this GSEA pathway?
for (m in unique(anno$SYMBOL)){
  temp = anno[anno$SYMBOL == m,]
  relax[m] = as.numeric(relax$pathway %in% temp$PATHNAME)
  strict[m] = as.numeric(strict$pathway %in% temp$PATHNAME)
}
# Number of top genes hitting each pathway; keep pathways with >= 1 hit.
relax['toplists_num'] = rowSums(relax[,12:21])
strict['toplists_num'] = rowSums(strict[,12:21])
relax = relax[order(relax$toplists_num, decreasing = TRUE),]
strict = strict[order(strict$toplists_num, decreasing = TRUE),]
relax = relax[relax$toplists_num > 0, ]
strict = strict[strict$toplists_num > 0, ]
# Up-weight (x100) genes that also appear in the per-IC GSEA gene lists.
for (i in 1:nrow(relax)){
  temp = read_csv(paste("Results/", relax[i, 'group'], '/GSEA/relax/', relax[i, 'IC'], '_', relax[i, 'clinical'], '_lite.csv', sep=''))
  inter = intersect(temp$Gene.name, toplist)
  for (f in inter){
    relax[i, f] = relax[i, f]*100
  }
}
relax$importance_score = rowSums(relax[,12:21])
for (i in 1:nrow(strict)){
  temp = read_csv(paste("Results/", strict[i, 'group'], '/GSEA/strict/', strict[i, 'IC'], '_', strict[i, 'clinical'], '_lite.csv', sep=''))
  inter = intersect(temp$Gene.name, toplist)
  for (f in inter){
    strict[i, f] = strict[i, f]*100
  }
}
strict$importance_score = rowSums(strict[,12:21])
relax = relax[order(relax$importance_score, decreasing = TRUE),]
strict = strict[order(strict$importance_score, decreasing = TRUE),]
write.csv(relax, "Results/toplist_relax_ICA_GSEA_summary.csv", row.names = FALSE)
write.csv(strict, "Results/toplist_strict_ICA_GSEA_summary.csv", row.names = FALSE)
|
ab89b80d182c895362aad1bb5e94c6de31f57d31
|
eaa49925c9c3db2b41bab152c0488fd957a588d3
|
/R/Rscript.R
|
8a6b4654a1e8c8efd8ccab5aa36743d09a6f9c5d
|
[] |
no_license
|
gmonette/testrep
|
7fd00661aa160906a72c502e4ff013647c6373e5
|
0ab82dcf8a368eac973e07a824c48d2f26e518d7
|
refs/heads/master
| 2021-01-10T10:54:06.792883
| 2016-11-07T04:05:31
| 2016-11-07T04:05:31
| 46,613,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25
|
r
|
Rscript.R
|
This is an R script file
|
5e77240e6309681b9e700644b12248f58bce499a
|
c77938ab77375bd8a524daba269e83a201c22cdf
|
/modules/focalCalculation/focalCalculation.R
|
66432b4e6299afe78aa4e2e931f74fcc08267013
|
[] |
no_license
|
tati-micheletti/borealBirdsAndForestry
|
dbe385d5129770caec12e986371fc76f2317801e
|
27d11df65759ed720d2a6646132597a117c3fc39
|
refs/heads/master
| 2021-03-19T17:13:20.851222
| 2020-09-17T19:07:29
| 2020-09-17T19:07:29
| 121,669,535
| 0
| 4
| null | 2018-08-02T21:52:30
| 2018-02-15T19:03:31
|
HTML
|
UTF-8
|
R
| false
| false
| 5,485
|
r
|
focalCalculation.R
|
defineModule(sim, list(
name = "focalCalculation",
description = paste0("This module reorganizes tiles from another module",
"(matches the exact tile location from all different lists),",
"applies a binary function to certain rasters of all tiles ",
"of a list of a list of rasters, applies a focal function",
" to these tiles and resamples them, merge, and returns",
"a list of resample tiles masked to a given value",
" (in this case, per year)."),
keywords = c("focal", "big data", "raster"),
authors = c(person("Tati", "Micheletti", email = "tati.micheletti@gmail.com", role = c("aut", "cre")),
person("Ian", "Eddy", email = "ian.eddy@canada.ca", role = c("aut", "cre"))),
childModules = character(0),
version = list(SpaDES.core = "0.2.2", focalCalculation = "0.0.1"),
spatialExtent = raster::extent(rep(NA_real_, 4)),
timeframe = as.POSIXlt(c(NA, NA)),
timeunit = "year",
citation = list("citation.bib"),
documentation = list("README.txt", "focalCalculation.Rmd"),
reqdPkgs = list("raster"),
parameters = rbind(
defineParameter("forestClass", "numeric", 1:6, NA, NA,
"Relevant forest classes in land cover map"),
defineParameter("disturbanceClass", "numeric", 2, 0, NA,
paste0("the class value(s) corresponding to input",
" disturbanceType for which to compute focal statistics")),
defineParameter("focalDistance", "numeric", 100, NA, NA,
paste0("The distance at which to compute focal statistics, in units of",
" the input rastesr CRS. This will be used to ",
"create a matrix with circular weights summing to 1)")),
defineParameter("resampledRes", "numeric", 250, NA, NA,
"Resolution to which the final focal raster should be resample to"),
defineParameter(".useCache", "logical", FALSE, NA, NA,
"Should this entire module be run with caching activated?"),
defineParameter("useParallel", "character", NULL, NA, NA,
"Should we parallelize tile processing?"),
defineParameter("nNodes", "numeric", 2, NA, NA,
"How many nodes to use when parallelizing?")
),
inputObjects = bind_rows(
expectsInput(objectName = "rastersList", objectClass = "character",
desc = paste0("Character listing the raster paths to the tiles from",
" Raster3"))
),
outputObjects = bind_rows(
createsOutput(objectName = "focalYearList", objectClass = "list",
desc = paste0("This list is structured in a way that the ",
"masked value passed is the first level of ",
"the list, which is composed of a full patched",
" raster with all merged tiles"))
)
))
## event types
# - type `init` is required for initialiazation
# SpaDES event dispatcher for the focalCalculation module.
# `init` sets up the output list and schedules the first focal pass;
# `focalOperations` computes (cached) focal statistics for the current year's
# tiles, stores the merged result under "Year<t>", and reschedules itself for
# the next year.
doEvent.focalCalculation = function(sim, eventTime, eventType) {
  switch(
    eventType,
    init = {
      sim$focalYearList <- list()
      # schedule future event(s)
      sim <- scheduleEvent(sim, time(sim), "focalCalculation", "focalOperations")
    },
    focalOperations = {
      # Cached call: the cacheId encodes the largest focal distance and the
      # simulation year so each year's result is cached independently.
      # NOTE(review): P(sim)$recoverTime is not declared in this module's
      # defineModule() parameters — presumably NULL here; confirm intent.
      sim$focalYearList[[paste0("Year", time(sim))]] <- Cache(applyFocalToTiles,
                                                              listTilePaths = sim$rastersList,
                                                              pathData = dataPath(sim),
                                                              pathCache = cachePath(sim),
                                                              forestClass = P(sim)$forestClass,
                                                              focalDistance = P(sim)$focalDistance,
                                                              disturbanceClass = P(sim)$disturbanceClass,
                                                              recoverTime = P(sim)$recoverTime,
                                                              resampledRes = P(sim)$resampledRes,
                                                              useParallel = P(sim)$useParallel,
                                                              nNodes = P(sim)$nNodes,
                                                              currentYear = time(sim),
                                                              cacheId = paste0("focalToTiles", max(P(sim)$focalDistance),
                                                                               "m", time(sim)))
      sim <- scheduleEvent(sim, time(sim) + 1, "focalCalculation", "focalOperations")
    },
    # Fallback for unrecognized event types.
    warning(paste("Undefined event type: '", current(sim)[1, "eventType", with = FALSE],
                  "' in module '", current(sim)[1, "moduleName", with = FALSE], "'", sep = ""))
  )
  return(invisible(sim))
}
# Supply default inputs when no other module provides them: a cached dummy
# list of random rasters so the module can run stand-alone.
.inputObjects <- function(sim) {
  if (!suppliedElsewhere("rastersList", sim)) {
    sim$rastersList <- Cache(createRandomRasterList, rastersPerList = 5, numberOfLists = 3)
    # Warn the user (in yellow) that dummy data is being used.
    message(crayon::yellow(paste0("List of tile paths not found (no other module is creating it). ",
                                  "Using a dummy list of rasters")))
  }
  return(invisible(sim))
}
|
c0e43514fe160992c972232481be92a9c483d1c1
|
1c366b0df210fecfdd4e16049bc3b28c961e6595
|
/install.R
|
254ca36c600e0ce81d935b105ab255a782405bcf
|
[] |
no_license
|
asancpt/workshop-nonmem-2017
|
f13476469a25b71f2c5d3187fac06a9e2ff0b457
|
c1654be70d652442ebeb363691626b510e03305b
|
refs/heads/master
| 2021-06-11T13:50:01.010553
| 2017-02-22T00:52:12
| 2017-02-22T00:52:12
| 82,635,468
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
install.R
|
# Install the nmw package from a local Windows binary and attach it.
# NOTE(review): the path is machine-specific (c:/NMW2017) — adjust per machine.
install.packages("c:/NMW2017/nmw_0.1.0.zip", repos = NULL, type = "win.binary")
library(nmw)
|
9788c040ff8d675f9671b77fcb259b21b292d1b6
|
b4c34a229ddcfc5e33e25baea1d28364faf17270
|
/man/readDescFile.Rd
|
ee449e5bf4bf1218cc7a3c6edf3a79ffefebbd36
|
[
"Apache-2.0"
] |
permissive
|
ammar257ammar/ArrayAnalysis-Bioconductor
|
f5e1ec1397d033155784d5b7f803513c394aa3ff
|
4efd1bb11136d7c8108998ce20c31506e5830efb
|
refs/heads/master
| 2023-03-26T21:16:03.584761
| 2021-03-25T15:57:37
| 2021-03-25T15:57:37
| 351,494,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
readDescFile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_Stat.R
\name{readDescFile}
\alias{readDescFile}
\title{Read desc file. Return array names and groups}
\usage{
readDescFile(descfile, outprint = FALSE)
}
\arguments{
\item{descfile}{(Status=required)}
\item{outprint}{(datatype=logical, Default=FALSE)}
}
\value{
a dataframe with description matrix
}
\description{
Read desc file. Return array names and groups
}
\examples{
#example here
}
|
cc841cc0fbfbb8beb60866fa69addd911438e413
|
cce54237311aab66c2914e4992eb844fe5d260c7
|
/Code from Ephraim/AntNetwork.r
|
c95246015c53efb2469976baf0b8a03fb8da9afa
|
[] |
no_license
|
MLBartley/Ant-Research
|
765ec5fb04366c289b950ba8614472cb26f5b302
|
921e560d6802bdc930ec622834a51d39a2c12487
|
refs/heads/master
| 2020-04-05T23:16:03.410628
| 2019-12-05T19:13:54
| 2019-12-05T19:13:54
| 42,834,698
| 0
| 0
| null | 2017-11-03T18:04:38
| 2015-09-21T00:08:07
|
HTML
|
UTF-8
|
R
| false
| false
| 2,138
|
r
|
AntNetwork.r
|
##############################
##
## Read in data
##
## antlist.rg2 = A "list" where each object is an individual ant
## ID = ant ID
## type = forager, nest, or Queen
## cells = vector of cell IDs the ant was in, in temporal order
## t.in.cell = time spent in the cell
## time = actual time the ant moved into the cell
## format: MonthDay.Fraction of day
## day = day of observation (8 days total)
##
##############################
load("antlist.rg2.Rdata")
str(antlist.rg2)
##############################
##
## Create raster of nest with cell labels
##
##############################
library(raster)
nx=11
ny=7
extent.mat=matrix(c(1-.5,nx+.5,1-.5,ny+.5),nrow=2,byrow=T)
ee=extent(extent.mat)
ee
rast=raster(ee,nrows=ny,ncol=nx,crs="+proj=longlat +datum=WGS84")
values(rast) <- 0
wall.pts=cbind(6,1:6)
values(rast)[cellFromXY(rast,wall.pts)] <- NA
wall.pts=cbind(2:10,4)
values(rast)[cellFromXY(rast,wall.pts)] <- NA
image(rast,main="Nest Raster with Cell Labels")
xy=xyFromCell(rast,1:77)
text(xy[,1],xy[,2])
##############################
##
## Get "contacts" when ants are in the same cell
##
## Columns:
## i = ID of first ant
## j = ID of second ant
## start = time stamp when contact started
## end = time stamp when contact ended
## length = end-start
##
##############################
source("get.contacts.r")
C=get.contacts(antlist.rg2)
## remove NA's
na.idx=which(is.na(C[,3]))
c=C[-na.idx,]
c.rg2=data.frame(c)
str(c.rg2)
summary(c.rg2)
## histogram of length of contact
hist(c.rg2[,5],breaks=200)
## table of the absolute number of contacts between each pair
table(c.rg2[,1:2])
###############################
##
## Plotting observed contact networks aggregated over each day
##
###############################
library(network)
days=sort(unique(floor(c.rg2$start)))
days
network.day=list()
for(i in 1:length(days)){
day=days[i]
idx.day=which(c.rg2$start>day &c.rg2$start<day+1)
network.day[[i]]=network(c.rg2[idx.day,],vertex.attr=5,vertex.attrnames="length",directed=FALSE)
}
par(mfrow=c(2,4))
for(i in 1:8){
plot(network.day[[i]],main=days[i])
}
|
c28a1f2a178a7a08019c976f44693af18aa5c542
|
450d1e2f3f661fb725f5dd86c243967d825ccaf4
|
/ContinousDistn.R
|
f4d2e3b1d24783c4bd4136be5542d3e0887f9a4d
|
[] |
no_license
|
wli289/R
|
5a18f85ca34c06adb87a422740d2fa3736702041
|
2b327e387784f35a113857e67c63e298d8889601
|
refs/heads/master
| 2021-05-14T05:25:19.510529
| 2018-01-04T06:01:48
| 2018-01-04T06:01:48
| 116,221,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,782
|
r
|
ContinousDistn.R
|
# Basics R
# Continuous distributions in R, splicing and mixing
#install.packages("actuar")
library(actuar)
library(VGAM)
###############################################################
# Example 1: gamma distribution
# define a grid
x <- seq(0,1000,by=1)
# define a set of scale and shape parameters
scaleparam <- seq(100,250,by=50)
shapeparam <- 2:5
# varying the shape parameter
plot(x, dgamma(x, shape = shapeparam[1], scale = 100), type = "l", ylab = "Gamma density")
for(k in 2:length(shapeparam)){
lines(x,dgamma(x,shape = shapeparam[k], scale = 100), col = k)
}
legend("topright", c("shape=2", "shape=3", "shape=4", "shape=5"), lty=1, col = 1:4)
title("Pdf gamma density, with scale=100, and varying shape")
###############################################################
# varying the scale parameter
plot(x, dgamma(x, shape = 2, scale = scaleparam[1]), type = "l", ylab = "Gamma density")
for(k in 2:length(scaleparam)){
lines(x,dgamma(x,shape = 2, scale = scaleparam[k]), col = k)
}
legend("topright", c("scale=100", "scale=150", "scale=200", "scale=250"), lty=1, col = 1:4)
title("Pdf gamma density, with shape=2, and varying scale")
###############################################################
## Example 2: Pareto density
z<- seq(0,3000,by=1)
scaleparam <- seq(2000,3500,500)
shapeparam <- 1:4
# varying the shape parameter
plot(z, dparetoII(z, loc=0, shape = shapeparam[1], scale = 2000), ylim=c(0,0.002),type = "l", ylab = "Pareto density")
for(k in 2:length(shapeparam)){
lines(z,dparetoII(z,loc=0, shape = shapeparam[k], scale = 2000), col = k)
}
legend("topright", c("shape=1", "shape=2", "shape=3", "shape=4"), lty=1, col = 1:4)
title("Pdf Pareto density, with scale=2000, and varying shape")
###############################################################
#varying the scale parameter
plot(z, dparetoII(z, loc=0, shape = 3, scale = scaleparam[1]), type = "l", ylab = "Pareto density")
for(k in 2:length(scaleparam)){
lines(z,dparetoII(z,loc=0, shape = 3, scale = scaleparam[k]), col = k)
}
legend("topright", c("scale=2000", "scale=2500", "scale=3000", "scale=3500"), lty=1, col = 1:4)
title("Pdf Pareto density, with shape=3, and varying scale")
###############################################################
## Example 3: Weibull density
z<- seq(0,400,by=1)
scaleparam <- seq(50,200,50)
shapeparam <- seq(1.5,3,0.5)
# varying the shape parameter
plot(z, dweibull(z, shape = shapeparam[1], scale = 100), ylim=c(0,0.012), type = "l", ylab = "Weibull density")
for(k in 2:length(shapeparam)){
lines(z,dweibull(z,shape = shapeparam[k], scale = 100), col = k)
}
legend("topright", c("shape=1.5", "shape=2", "shape=2.5", "shape=3"), lty=1, col = 1:4)
title("Pdf Weibull density, with scale=100, and varying shape")
###############################################################
#varying the scale parameter
plot(z, dweibull(z, shape = 3, scale = scaleparam[1]), type = "l", ylab = "Weibull density")
for(k in 2:length(scaleparam)){
lines(z,dweibull(z,shape = 3, scale = scaleparam[k]), col = k)
}
legend("topright", c("scale=50", "scale=100", "scale=150", "scale=200"), lty=1, col = 1:4)
title("Pdf Weibull density, with shape=3, and varying scale")
###############################################################
## Example 4:GB2
gb2density <- function(x,shape1,shape2,shape3,scale){
  # Density of the generalized beta of the second kind (GB2) distribution,
  # computed on the log scale for numerical stability.
  #
  # x      : vector of evaluation points (> 0)
  # shape1, shape2, shape3 : GB2 shape parameters
  # scale  : GB2 scale parameter (> 0)
  mu <- log(scale)
  sigma <- 1/shape3
  xt <- (log(x)-mu)/sigma
  # log(1 + exp(xt)) overflows for large xt; for xt > 23 it equals xt to
  # machine precision. Bug fix: the original referenced an undefined `yt`
  # here, which made this branch error at runtime.
  logexpxt <- ifelse(xt > 23, xt, log(1+exp(xt)))
  logdens <- shape1*xt - log(sigma) - log(beta(shape1,shape2)) - (shape1+shape2)*logexpxt - log(x)
  exp(logdens)
}
x<- seq(0,400,by=1)
alpha1<-5
alpha2<-4
gamma <-2
theta <- seq(150,250,50)
# varying the scale parameter
plot(x, gb2density(x, shape1=alpha1,shape2=alpha2,shape3=gamma, scale = theta[1]),
type = "l", ylab = "Gen Beta 2 density",
main =
expression(paste("GB2 density with ", alpha[1], "=5,", alpha[2], "=4,", alpha[3],
"=2, and varying scale (",theta, ") parameters")) )
for(k in 2:length(theta)){
lines(z,gb2density(x,shape1=alpha1,shape2=alpha2,shape3=gamma, scale = theta[k]), col = k)
}
legend("topleft", c("theta=150", "theta=200", "theta=250"), lty=1, cex=0.6,col = 1:3)
###############################################################
## Example 5: A mixed density
## specify density of a mixture of 2 gamma distributions
# Density of a two-component gamma mixture evaluated elementwise at x:
# a1 * Gamma(alphaGamma1, thetaGamma1) + a2 * Gamma(alphaGamma2, thetaGamma2).
MixtureGammaDensity <- function(x, a1, a2, alphaGamma1, thetaGamma1, alphaGamma2, thetaGamma2){
  comp1 <- dgamma(x, shape = alphaGamma1, scale = thetaGamma1)
  comp2 <- dgamma(x, shape = alphaGamma2, scale = thetaGamma2)
  a1 * comp1 + a2 * comp2
}
w <- 1:30000/100
a1<-0.5
a2<-0.5
alpha1 <- 4
theta1 <- 7
alpha2 <- 15
theta2 <- 7
MixGammadens <- MixtureGammaDensity(w, a1,a2,alpha1, theta1, alpha2, theta2)
plot(w, MixGammadens, type = "l")
###############################################################
# Example 6: density obtained through splicing
## combine an Exp on (0,c) with a Pareto on (c,\infty)
SpliceExpPar <- function(x, c, v, theta, gamma, alpha){
  # Spliced density at scalar x: an exponential component (rate 1/theta,
  # renormalised to put mass v on [0, c)) below the splice point c, and a
  # Pareto-II based tail term weighted by (1 - v) above it.
  # NOTE(review): `gamma` is accepted but never used in the body — confirm.
  # NOTE(review): dparetoII/pparetoII come from VGAM (loaded at the top of
  # the file); x must be scalar — the caller vectorises via apply().
  # NOTE(review): the tail term divides the Pareto density by its survival
  # function at x (a hazard), not by survival at c — verify this is the
  # intended splicing normalisation.
  if(0<=x & x<c){return(v * dexp(x, 1/theta)/pexp(c,1/theta))}else
    if(x>=c){return((1-v)*dparetoII(x,loc=0, shape = alpha, scale = theta)/(1-pparetoII(x,loc=0, shape = alpha, scale = theta)))}
}
x <- t(as.matrix(1:2500/10))
spliceValues <- apply(x,2,SpliceExpPar, c = 100, v = 0.6, theta = 100, gamma = 200, alpha = 4)
plot(x,spliceValues, type = 'l')
###############################################################
## Example 7: Gamma vs Pareto Tails
y <- 100:10000/100
fgamma <- dgamma(y,shape=1/3,scale=15) #Gamma density function with mean=5 and variance=75
fpareto <- dpareto(y, shape = 3, scale = 10) #Pareto density function with mean=5 and variance=75
plot(y,fgamma,lwd=2,type="l",ylim=c(0,0.2))
lines(y, fpareto, col = "red", lwd = 2) ## but tail behavior is different
|
f3767581c11b8c2b1e4325baa4d11fd8aaed3972
|
3903745168bccf83d719c0e548a7d5f3e3ae23f2
|
/new/5_disturbance_graphs.R
|
43987067b2478edfcde77ab28ae45fe21cf97146
|
[] |
no_license
|
remoteforests/remoteforests_disturbance
|
210837a2a466436071b021bdf5efd7e63bfebdfb
|
fce58ee2c7fea8706f8d90c29dc3e86ddaf1f0da
|
refs/heads/master
| 2023-07-19T17:37:15.078421
| 2023-07-12T16:00:52
| 2023-07-12T16:00:52
| 127,264,617
| 3
| 0
| null | 2023-05-11T17:12:21
| 2018-03-29T08:53:55
|
R
|
UTF-8
|
R
| false
| false
| 2,001
|
r
|
5_disturbance_graphs.R
|
# 0. setup ----------------------------------------------------------------
library(tidyverse);library(pool);library(ggrepel)
source("new/pw.R")
# 5. GRAPHS ---------------------------------------------------------------
# 5. 1. data --------------------------------------------------------------
data.all <- tbl(KELuser, "dist_event") %>% mutate(peak = "yes") %>%
right_join(., tbl(KELuser, "dist_chrono"), by = c("dist_chrono_id" = "id")) %>%
inner_join(., tbl(KELuser, "dist_plot"), by = c("dist_plot_id" = "id")) %>%
inner_join(., tbl(KELuser, "plot"), by = c("plot_id" = "id")) %>%
select(plotid, ncores, type, year, ca_pct, kde, peak) %>%
collect()
# 5. 2. plotting ----------------------------------------------------------
# One PDF page per plot id: disturbance chronology histograms + KDE with peaks.
pdf("new/plots.pdf", width = 18, height = 10, pointsize = 12, onefile = TRUE)
for (p in unique(data.all$plotid)) {
  n <- data.all %>% filter(plotid %in% p) %>% distinct(., ncores) %>% pull()
  data.gg <- data.all %>% filter(plotid %in% p)
  print(
    ggplot(data.gg) +
      geom_histogram(aes(year, weight = ca_pct), breaks = seq(1590, 2010, 10), fill = "grey80") +
      geom_histogram(aes(year, weight = ca_pct), binwidth = 1, fill = "grey20") +
      geom_line(aes(year, kde), size = 1, colour = "grey20") +
      geom_point(data = data.gg %>% filter(peak %in% "yes"), aes(year, kde), shape = 21, colour = "grey20", fill = "#78c2ef", size = 3) +
      geom_text_repel(data = data.gg %>% filter(peak %in% "yes"), aes(year, kde, label = year), colour = "#78c2ef", size = 3) +
      geom_hline(aes(yintercept = 10), linetype = 2, colour = "grey80") +
      # Bug fix: two consecutive coord_cartesian() calls do not combine — the
      # second replaces the first, silently dropping the x-axis limits. Both
      # limits now go into a single call.
      coord_cartesian(xlim = c(1590, 2010), ylim = c(0, 100)) +
      xlab("Year") + ylab("Canopy area (%)") +
      ggtitle(paste(p, "(number of cores:", n, ")", sep = " ")) +
      theme_bw() +
      facet_wrap(~type)
  )
  cat(p)
  remove(n, data.gg, p)
}
dev.off()
# ! close database connection ---------------------------------------------
poolClose(KELuser)
|
f09b34a10b7795fd4253b32ee4ae561231c6b56f
|
977e25b030bc27e923f52b08305a6dec2cfd02fd
|
/financial_trading/c01_trading_basics/basics.r
|
e98fe158207394c2424b50941146a7aebdfefd49
|
[] |
no_license
|
printfCRLF/rr
|
d4cd813fafef7d64da2722ade9e14220c12e17ff
|
4116f726f5ad7a8cadbe6841d13abbdb998ee294
|
refs/heads/master
| 2021-04-15T15:08:37.032087
| 2019-07-12T08:29:26
| 2019-07-12T08:29:26
| 126,468,211
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 150
|
r
|
basics.r
|
# Download daily SPY prices (dividend/split adjusted) and plot the close.
# Bug fix: the original getSymbols() call was missing the commas between the
# from/to/src/adjust arguments, which is a syntax error.
getSymbols("SPY",
           from = "2000-01-01",
           to = "2016-06-30",
           src = "yahoo",
           adjust = TRUE)
plot(Cl(SPY))
|
bdc906a82d2d1729c8543cbcd717971067d43047
|
1205947be035b71dbe297a2a9f3ca99254244d35
|
/Leaflet/couche geo.R
|
e434e3d8d4d0963423eceddf0531f72737a74842
|
[] |
no_license
|
vivienroussez/Maille_habitat
|
08389903053240633ff643ed0e66c57bd6f18db6
|
af0194f313be5c8e7988ad1f304310c944cbf9d5
|
refs/heads/master
| 2020-03-18T19:24:21.728438
| 2018-06-27T06:50:59
| 2018-06-27T06:50:59
| 135,152,333
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,523
|
r
|
couche geo.R
|
require(tidyverse)
require(foreach)
require(doParallel)
require(maptools)
# setwd("M:/ETUDES/Zonage logement/")
#setwd("D:/Zone logement/")
# load("com_zones.RData")
# load("../Indicateurs/data/Base.RData")
# load("Zonages/Zonages_reg.RData")
######################################################################
### Ajout des variables correspondant aux simulations interrégionales
######################################################################
imp <- dir("Zonages/CSV")
imp <- imp[substr(imp,1,6)=="ZInter"]
sim <- gsub( "ZInter_","",gsub(".csv","",imp) )
zon <- list()
for (i in 1:length(imp)){
zon[[i]] <- read.csv(file=paste("Zonages/CSV/",imp[i],sep=""), sep=";", dec=',')
#names(zon)[i]<-imp[i]
names(zon[[i]])[1] <- "codgeo"
names(zon[[i]])[2] <- paste("zone","_inter_",sim[i],sep="")
}
# # déjà fait ?
# zonages <- merge(zonages,zon[[1]],by.x="codgeo",by.y="codgeo",all.x=T)
# for (i in 2:length(zon))
# {
# zonages <- merge(zonages,zon[[i]],by.x="codgeo",by.y="codgeo",all.x=T)
# }
# zonages %>% head()
rm(i,imp,sim)
spdf_zonage <- merge(sansile,zonages,by.x="Codgeo",by.y="codgeo")
lesquelles <- names(spdf_zonage) %>% grep(pattern = "inter") ### A changer selon la version de zonage
### (reg ou interReg) qu'on veut traiter
var <- names(spdf_zonage)[lesquelles]
# Dissolve the polygons of `spdf` by the grouping vector `crit` and return a
# SpatialPolygonsDataFrame whose single attribute column is named `nom`
# (row names match the dissolved polygon IDs).
fusion_spdf<- function(spdf,crit,nom){
  merged <- unionSpatialPolygons(spdf, crit)
  ids <- row.names(merged)
  attrs <- data.frame(zone = ids, row.names = ids)
  names(attrs) <- nom
  SpatialPolygonsDataFrame(merged, attrs)
}
# Build the dissolved layer for every simulation, in parallel to save time.
cl <- makeCluster(detectCores()-1)
clusterEvalQ(cl,require(maptools))
# NOTE(review): this evaluates the *string* "fusion_spdf" on the workers and
# does not export the function; clusterExport(cl, "fusion_spdf") is probably
# what was intended -- it may only work here because foreach/doParallel
# exports the function automatically. Verify.
clusterEvalQ(cl,"fusion_spdf")
registerDoParallel(cl)
system.time({
  lst_zon <- foreach(i=1:length(var)) %dopar% fusion_spdf(spdf_zonage,spdf_zonage@data[,var[i]],nom=var[i])
})
stopCluster(cl)
# Sequential equivalent kept for reference/benchmarking:
# system.time({
#   lst_zon2 <- list()
#   for (i in 1:length(var)) { lst_zon2[[i]] <- fusion_spdf(spdf=spdf_zonage,crit=spdf_zonage@data[,var[i]],nom=var[i])}
# })
names(lst_zon) <- var
##############################################################################
### Build one layer per restitution grid (for the shiny app)
#load("Zonages/Visuzon/dat.RData")
geo <- select(sansile@data,-Surface,-cat_au,-ur)
geo <- lapply(geo,as.factor) %>% as.data.frame()
# Read the reference shapefiles and keep a single id column in each.
dep <- readShapePoly("M:/GEOGRAPHIE/GEO2016/Couches/DEP.shp")
dep@data <- data.frame(dep=dep@data[,"Num_dep"])
reg2016 <- readShapePoly("M:/GEOGRAPHIE/GEO2016/Couches/REG.shp")
reg2016@data <- data.frame(reg2016=reg2016@data[,"Num_reg"])
AU <- readShapePoly("M:/GEOGRAPHIE/GEO2016/Couches/AU.shp")
AU@data <- data.frame(AU=AU@data$au)
ze <- readShapePoly("M:/GEOGRAPHIE/GEO2016/Couches/ZE.shp")
ze@data <- data.frame(ze=ze@data$ze)
bv <- readShapePoly("M:/GEOGRAPHIE/GEO2016/Couches/BV.shp")
bv@data <- data.frame(bv=bv@data$bv)
# EPCI and (old) region layers are dissolved from the commune layer directly.
EPCI <- unionSpatialPolygons(sansile,sansile@data$epci)
d <- data.frame(EPCI=row.names(EPCI))
row.names(d) <- d$EPCI
EPCI <- SpatialPolygonsDataFrame(EPCI,d)
reg <- unionSpatialPolygons(sansile,sansile@data$reg)
d <- data.frame(reg=row.names(reg))
row.names(d)<-d$reg
reg <- SpatialPolygonsDataFrame(reg,d)
lst_mailles <- list("dep"=dep,"AU"=AU,"EPCI"=EPCI,"reg"=reg,"reg2016"=reg2016,"ze"=ze,"bv"=bv)
# Drop the regional-simulation columns: only the inter-regional ones are kept.
zonages <- select(zonages,-starts_with("zone_R"))
# Persist everything for the two visualization apps.
save(zonages,lst_zon,geo,lst_mailles,sansile,file="Zonages/Visuzon/dat.RData")
save(zonages,lst_zon,geo,lst_mailles,sansile,file="Zonages/VisuMaille/dat.RData")
|
99ad5d7db84461f7880c745d7a35b0c661db8224
|
87bda86c8f157f8eb02bb6eac56621d20009625f
|
/R/kissr.R
|
8eaee495bac6873976aa4212f00c2e8fe7f44f4c
|
[] |
no_license
|
jack-palmer/kissr
|
3da4935b3b4260703b2132ea85d01668b4e4936d
|
f57b22665e9fd597e45454122c50eb32556d1c36
|
refs/heads/master
| 2020-12-28T23:51:00.347963
| 2016-07-18T17:32:27
| 2016-07-18T17:32:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 614
|
r
|
kissr.R
|
#' kissr: A package for loading report data from Kiss Metrics
#'
#' The kissr package provides one function:
#' `read`
#'
#' You can call the read function with a \link{KissReports} object to get a list
#' of all reports associated with your KissMetrics account, and you can call the
#' read function with a \link{KissReport} object to get the actual data from a
#' report.
#'
#' Please note the KissMetrics API requires some time to respond; kissr will wait
#' for up to 150 seconds for KissMetrics to fully generate a report. If it takes
#' longer, kissr will time out.
#'
#' @docType package
#' @name kissr
NULL
|
b503bf943cdea71df5506d5bf678fd9ddb9e83a8
|
d44ba73766486241f2e2f05f9a8e2f289d195a40
|
/man/check_deviation_set.Rd
|
530020c7e7fbc65859f55e2ccae21ee7fd088495
|
[] |
no_license
|
TGuillerme/spptest
|
080536dfa0fe28d00a62735d30ae231696cd78eb
|
0e76a605262c52fcb7582b053aacafbd023c7393
|
refs/heads/master
| 2020-03-29T01:42:27.174391
| 2016-12-16T12:49:57
| 2016-12-16T12:49:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 321
|
rd
|
check_deviation_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deviation.r
\name{check_deviation_set}
\alias{check_deviation_set}
\title{Check the object.}
\usage{
check_deviation_set(deviation_set)
}
\arguments{
\item{deviation_set}{A potential deviation_set object.}
}
\description{
Check the object.
}
|
3a68a82906c9cbcf2d6f048d9bda94c4ba7eeb8c
|
18fe33b772956c677e07bd7f508c7fc2181e22ed
|
/tests/testthat/test-optimize-leaf.R
|
05fde24e5f5cd7d79dcbc7a6bf5cc5176d124e0e
|
[
"MIT"
] |
permissive
|
muir-lab/leafoptimizer
|
d4667bba98c44696971c0dfa4de793c9e754ea17
|
0929db5a42dc237155ec8708baf357c1e5aaebce
|
refs/heads/master
| 2023-04-07T06:41:08.539178
| 2021-09-07T20:28:14
| 2021-09-07T20:28:14
| 92,799,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,961
|
r
|
test-optimize-leaf.R
|
# Tests for optimize_leaf(). The block below is a disabled smoke-test sweep of
# every combination of traits; it is kept for reference until revised.
context("optimize_leaf")
library(leafoptimizer)
# Revise and uncomment
# bake_par <- make_bakepar()
# constants <- make_constants()
# enviro_par <- make_enviropar()
# leaf_par <- make_leafpar()
#
# carbon_costs <- list(H2O = 1000, SR = 0)
#
# ol1 <- optimize_leaf("g_sc", carbon_costs, bake_par, constants, enviro_par,
# leaf_par)
#
# ol2 <- optimize_leaf("leafsize", carbon_costs, bake_par, constants, enviro_par,
# leaf_par)
#
# ol3 <- optimize_leaf("sr", carbon_costs, bake_par, constants, enviro_par,
# leaf_par)
#
# ol4 <- optimize_leaf(c("g_sc", "leafsize"), carbon_costs, bake_par, constants,
# enviro_par, leaf_par)
#
# ol5 <- optimize_leaf(c("g_sc", "sr"), carbon_costs, bake_par, constants,
# enviro_par, leaf_par)
#
# ol6 <- optimize_leaf(c("leafsize", "sr"), carbon_costs, bake_par, constants,
# enviro_par, leaf_par)
#
# ol7 <- optimize_leaf(c("g_sc", "leafsize", "sr"), carbon_costs, bake_par,
# constants, enviro_par, leaf_par)
test_that("optimize_leaf calculates T_leaf and A correctly", {
  # Build the full parameter set once, plus a unitless copy (upars) for the
  # internal (non-units) code path.
  cs <- make_constants()
  lp <- make_leafpar(cs)
  bp <- make_bakepar()
  ep <- make_enviropar()
  pars <- c(cs, lp, bp, ep)
  upars <- pars %>% purrr::map_if(function(x) is(x, "units"), drop_units)
  # Calculate T_leaf, A, and E ----
  # Reference leaf temperature via the tealeaves package (with units)...
  tl1 <- tealeaves::tleaf(
    tealeaves::leaf_par(pars),
    tealeaves::enviro_par(pars),
    tealeaves::constants(pars))$T_leaf
  # ...and via the internal unitless solver. NOTE(review): `find_tleaf(., ., .)`
  # passes the same upars list as all three arguments -- presumably
  # (leaf_par, enviro_par, constants) are all looked up from that combined
  # list; confirm against find_tleaf()'s signature.
  upars$T_leaf <- upars %>%
    find_tleaf(., . , .) %>%
    magrittr::use_series("T_leaf")
  tl2 <- upars$T_leaf
  expect_equal(drop_units(tl1), tl2)
  # Fix T_leaf in both parameter sets before computing assimilation.
  pars$T_leaf <- tl1
  upars$T_leaf <- tl2
  # Reference assimilation via the photosynthesis package (with units)...
  A1 <- photosynthesis::photo(
    photosynthesis::leaf_par(pars),
    photosynthesis::enviro_par(pars),
    photosynthesis::bake_par(pars),
    photosynthesis::constants(pars))$A
  # ...and via the internal unitless path (bake temperature-adjusted
  # parameters, then solve for A).
  ph <- upars %>%
    c(bake(., ., ., unitless = TRUE)) %>%
    find_A()
  A2 <- ph$A
  expect_equal(drop_units(A1), A2)
})
# This test is currently disabled (body fully commented out); it compared the
# transpiration term E from the units-aware and the baked/unitless energy
# balance code paths. Re-enable once the baked-parameter interface settles.
test_that("carbon_balance calculates E correctly", {
  # cs <- make_constants()
  # lp <- make_leafpar(cs)
  # bp <- make_bakepar()
  # ep <- make_enviropar()
  #
  # T_leaf <- set_units(300, "K")
  #
  # blp <- lp %>%
  # c(T_leaf = T_leaf) %>%
  # photosynthesis::bake(bp, cs, unitless = FALSE)
  #
  # baked_pars <- c(cs, lp[!(names(lp) %in% names(blp))], blp, ep) %>%
  # purrr::map_if(function(x) is(x, "units"), drop_units)
  #
  # eb1 <- tealeaves::energy_balance(T_leaf, lp, ep, cs, quiet = TRUE,
  # unitless = FALSE, components = TRUE)
  # E1 <- eb1$components$E
  # T_leaf %<>% drop_units()
  # eb2 <- tealeaves::energy_balance(T_leaf, baked_pars, baked_pars, baked_pars, quiet = TRUE,
  # components = TRUE, unitless = TRUE, check = FALSE)
  # E2 <- eb2$components$E
  # expect_equal(E1, E2)
})
|
e815baa791d25a6f4341823c16ef31d40aea1e55
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/device_validation/sig_align.R
|
0902815d6808525f278370a45b6b13cfd1195617
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061
| 2021-02-28T05:33:08
| 2021-02-28T05:33:08
| 32,551,526
| 7
| 1
| null | 2020-08-17T22:37:43
| 2015-03-19T23:25:01
|
OpenEdge ABL
|
UTF-8
|
R
| false
| false
| 13,956
|
r
|
sig_align.R
|
# Device-validation signal alignment: aligns heart-rate, energy and step
# signals from several wearables (Basis, Fitbit, Microsoft Band, Apple Watch)
# against a gold-standard recording, then plots the aligned traces together.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is fragile; consider removing.
library(XML)
library(ggplot2)
library(reshape2)
library(scales)
Sys.setenv(TZ='GMT')
source('sig_align_helpers.R')
# Subject id and the time window displayed in the plots.
# NOTE(review): "PDT" is not an Olson timezone name; as.POSIXct() may silently
# fall back to the system zone here -- verify (e.g. use "America/Los_Angeles").
subject="6"
first=as.POSIXct(strptime(as.character("20150921071000"),"%Y%m%d%H%M%S"),tz="PDT")
last=as.POSIXct(strptime(as.character("20150921080000"),"%Y%m%d%H%M%S"),tz="PDT")
# Per-subject input file locations (tab-separated exports).
subject_dir=paste("/home/anna/scg3/subject",subject,"/",sep="")
gs_name=paste("gold_standard_details_",subject,".tsv",sep="")
states_name=paste("states_",subject,".tsv",sep="")
basis_name=paste("basis_",subject,".tsv",sep="")
fitbit_name=paste("fitbit_",subject,".tsv",sep="")
microsoft_name=paste("microsoft_",subject,".tsv",sep="")
apple_name=paste("apple_",subject,".tsv",sep="")
#READ IN GOLD STANDARD DATA
# Columns: timestamp (YYYYmmddHHMMSS), heart rate (BPM), energy (kcal).
gold_standard_det=data.frame(read.table(paste(subject_dir,gs_name,sep=""),header=T,sep='\t',stringsAsFactors = FALSE))
names(gold_standard_det)=c("gold_standard_startDate","gold_standard_hr","gold_standard_energy")
gold_standard_det$gold_standard_startDate=strptime(as.character(gold_standard_det$gold_standard_startDate),"%Y%m%d%H%M%S")
gold_standard_det$gold_standard_startDate=as.POSIXct(gold_standard_det$gold_standard_startDate,tz="PDT")
gold_standard_det$gold_standard_hr=as.numeric(gold_standard_det$gold_standard_hr)
gold_standard_det$gold_standard_energy=as.numeric(gold_standard_det$gold_standard_energy)
#READ IN THE STATE DATA
# One row per activity state (exercise stage), with start/end times and the
# gold-standard steady-state HR and energy for that state.
states=data.frame(read.table(paste(subject_dir,states_name,sep=""),header=T,sep='\t',stringsAsFactors=FALSE,row.names=1))
states$startTime=strptime(as.character(states$startTime),"%Y%m%d%H%M%S")
states$endTime=strptime(as.character(states$endTime),"%Y%m%d%H%M%S")
states$startTime=as.POSIXct(states$startTime,tz="PDT")
states$endTime=as.POSIXct(states$endTime,tz="PDT")
names(states)=c("startTime","endTime","GoldStandardHRSteadyState","GoldStandardEnergySteadyState")
#READ IN BASIS DATA
basis=data.frame(read.table(paste(subject_dir,basis_name,sep=""),sep="\t",header=T,stringsAsFactors = FALSE))
names(basis)=c("basis_startDate","basis_energy", "basis_gsr","basis_hr","basis_temp","basis_steps")
basis$basis_startDate=as.POSIXct(strptime(as.character(basis$basis_startDate),"%Y%m%d%H%M%S"),tz="PDT")
basis$basis_hr=as.numeric(basis$basis_hr)
basis$basis_energy=as.numeric(basis$basis_energy)
basis$basis_steps=as.numeric(basis$basis_steps)
#FITBIT DATA
fitbit=data.frame(read.table(paste(subject_dir,fitbit_name,sep=""),sep='\t',header=TRUE,stringsAsFactors = FALSE))
fitbit$Date=as.POSIXct(strptime(fitbit$Date,"%Y%m%d%H%M%S"),tz="PDT")
names(fitbit)=c("fitbit_date","fitbit_hr","fitbit_energy","fitbit_steps")
fitbit$fitbit_hr=as.numeric(fitbit$fitbit_hr)
fitbit$fitbit_energy=as.numeric(fitbit$fitbit_energy)
fitbit$fitbit_steps=as.numeric(fitbit$fitbit_steps)
#MICROSOFT DATA
microsoft=data.frame(read.table(paste(subject_dir,microsoft_name,sep=""),sep="\t",header=TRUE,stringsAsFactors = FALSE))
microsoft$Time=as.POSIXct(strptime(microsoft$Time,"%Y%m%d%H%M%S"),tz="PDT")
names(microsoft)=c("microsoft_date","microsoft_hr","microsoft_energy","microsoft_steps")
microsoft$microsoft_hr=as.numeric(microsoft$microsoft_hr)
microsoft$microsoft_energy=as.numeric(microsoft$microsoft_energy)
microsoft$microsoft_steps=as.numeric(microsoft$microsoft_steps)
#READ IN APPLE DATA
# NOTE: Apple export uses a different timestamp format ("%Y-%m-%d %H:%M:%S").
apple=data.frame(read.table(paste(subject_dir,apple_name,sep=""),sep='\t',header=TRUE, stringsAsFactors = FALSE))
apple$Date=as.POSIXct(strptime(apple$Date,"%Y-%m-%d %H:%M:%S"),tz="PDT")
apple$apple_hr=as.numeric(apple$apple_hr)
apple$apple_energy=as.numeric(apple$apple_energy)
apple$apple_steps=as.numeric(apple$apple_steps)
############################################################################################################################################
# Compute, per device and per signal, the sample offset that best aligns the
# device trace with the reference (cross-correlation sweep from
# sig_align_helpers.R). Positive offset = device trace leads the reference.
## Align the heart rate data ##
hr_basis_offset=cor.cross.sweep(basis$basis_hr,gold_standard_det$gold_standard_hr)
hr_fitbit_offset=cor.cross.sweep(fitbit$fitbit_hr,gold_standard_det$gold_standard_hr)
hr_apple_offset=cor.cross.sweep(apple$apple_hr,gold_standard_det$gold_standard_hr)
hr_microsoft_offset=cor.cross.sweep(microsoft$microsoft_hr,gold_standard_det$gold_standard_hr)
## Align the energy data ##
energy_basis_offset=cor.cross.sweep(basis$basis_energy,gold_standard_det$gold_standard_energy)
energy_fitbit_offset=cor.cross.sweep(fitbit$fitbit_energy,gold_standard_det$gold_standard_energy)
energy_apple_offset=cor.cross.sweep(apple$apple_energy,gold_standard_det$gold_standard_energy)
energy_microsoft_offset=cor.cross.sweep(microsoft$microsoft_energy,gold_standard_det$gold_standard_energy)
## Align the step data -- no gold standard exists for steps. NOTE(review): the
## original comment said "align to basis", but the code aligns to *fitbit*;
## the comment below reflects the code. Confirm which reference was intended.
## Steps are aligned to the fitbit trace. ##
steps_basis_offset=cor.cross.sweep(basis$basis_steps,fitbit$fitbit_steps)
steps_apple_offset=cor.cross.sweep(apple$apple_steps,fitbit$fitbit_steps)
steps_microsoft_offset=cor.cross.sweep(microsoft$microsoft_steps,fitbit$fitbit_steps)
##############################################################################################################################################
##############################################################################
# ADJUST SIGNAL POSITIONS BASED ON CALCULATED OFFSET FROM THE REFERENCE.
# For each device signal: a negative offset means the device trace starts
# after the reference, so NA padding is prepended; otherwise the first
# `offset` samples are dropped. The result is then trimmed/padded with NA to
# the reference length so all columns line up sample-for-sample.
#HR
#basis
hr_length=length(gold_standard_det$gold_standard_hr)
if(hr_basis_offset < 0)
{
  # BUGFIX: was rep("NA", ...) -- the *string* "NA" coerced the whole vector
  # to character; use a true numeric missing value instead.
  prefix=rep(NA_real_,-1*hr_basis_offset)
  hr_basis=c(prefix,basis$basis_hr)
}else
{
  # BUGFIX: `1+off:len` parses as `1+(off:len)` because `:` binds tighter
  # than `+`, which shifted every index by one and read one past the end.
  # The drop-the-first-`offset`-samples intent is `(1+off):len`.
  hr_basis=basis$basis_hr[(1+hr_basis_offset):length(basis$basis_hr)]
}
hr_basis=hr_basis[1:min(length(hr_basis),hr_length)]
if (length(hr_basis) < hr_length)
{
  pad=rep(NA,hr_length-length(hr_basis))
  hr_basis=c(hr_basis,pad)
}
#fitbit
if(hr_fitbit_offset < 0)
{
  prefix=rep(NA,-1*hr_fitbit_offset)
  hr_fitbit=c(prefix,fitbit$fitbit_hr)
}else
{
  # precedence fix, see basis branch above
  hr_fitbit=fitbit$fitbit_hr[(1+hr_fitbit_offset):length(fitbit$fitbit_hr)]
}
hr_fitbit=hr_fitbit[1:min(length(hr_fitbit),hr_length)]
if (length(hr_fitbit) < hr_length)
{
  pad=rep(NA,hr_length-length(hr_fitbit))
  hr_fitbit=c(hr_fitbit,pad)
}
#microsoft
if(hr_microsoft_offset < 0)
{
  prefix=rep(NA,-1*hr_microsoft_offset)
  hr_microsoft=c(prefix,microsoft$microsoft_hr)
}else
{
  # precedence fix, see basis branch above
  hr_microsoft=microsoft$microsoft_hr[(1+hr_microsoft_offset):length(microsoft$microsoft_hr)]
}
hr_microsoft=hr_microsoft[1:min(length(hr_microsoft),hr_length)]
if (length(hr_microsoft) < hr_length)
{
  pad=rep(NA,hr_length-length(hr_microsoft))
  hr_microsoft=c(hr_microsoft,pad)
}
#apple
if(hr_apple_offset < 0)
{
  prefix=rep(NA,-1*hr_apple_offset)
  hr_apple=c(prefix,apple$apple_hr)
}else
{
  # precedence fix, see basis branch above
  hr_apple=apple$apple_hr[(1+hr_apple_offset):length(apple$apple_hr)]
}
hr_apple=hr_apple[1:min(length(hr_apple),hr_length)]
if (length(hr_apple) < hr_length)
{
  pad=rep(NA,hr_length-length(hr_apple))
  hr_apple=c(hr_apple,pad)
}
# Assemble the aligned heart-rate signals next to the gold standard and save.
hr_df=data.frame(gold_standard_det$gold_standard_startDate,gold_standard_det$gold_standard_hr,hr_basis,hr_fitbit,hr_microsoft,hr_apple,stringsAsFactors = FALSE)
names(hr_df)=c("Date","GoldStandard","Basis","Fitbit","Microsoft","Apple")
save(hr_df,file=paste("hr_df",subject,sep="_"))
#ENERGY!
# Same alignment scheme as the HR section: prepend NA for a negative offset,
# drop leading samples for a non-negative one, then trim/pad to the
# gold-standard length.
#basis
energy_length=length(gold_standard_det$gold_standard_energy)
if(energy_basis_offset < 0)
{
  # BUGFIX: was rep("NA", ...) -- the string "NA" coerced the whole vector to
  # character (which is also why the data.frame call below wraps everything
  # in as.numeric); use a true numeric missing value instead.
  prefix=rep(NA_real_,-1*energy_basis_offset)
  energy_basis=c(prefix,basis$basis_energy)
}else
{
  # BUGFIX: `1+off:len` parses as `1+(off:len)` (`:` binds tighter than `+`),
  # shifting every index by one and reading one past the end.
  energy_basis=basis$basis_energy[(1+energy_basis_offset):length(basis$basis_energy)]
}
energy_basis=energy_basis[1:min(length(energy_basis),energy_length)]
if (length(energy_basis) < energy_length)
{
  pad=rep(NA,energy_length-length(energy_basis))
  energy_basis=c(energy_basis,pad)
}
#fitbit
if(energy_fitbit_offset < 0)
{
  prefix=rep(NA,-1*energy_fitbit_offset)
  energy_fitbit=c(prefix,fitbit$fitbit_energy)
}else
{
  # precedence fix, see basis branch above
  energy_fitbit=fitbit$fitbit_energy[(1+energy_fitbit_offset):length(fitbit$fitbit_energy)]
}
energy_fitbit=energy_fitbit[1:min(length(energy_fitbit),energy_length)]
if (length(energy_fitbit) < energy_length)
{
  pad=rep(NA,energy_length-length(energy_fitbit))
  energy_fitbit=c(energy_fitbit,pad)
}
#microsoft
if(energy_microsoft_offset < 0)
{
  prefix=rep(NA,-1*energy_microsoft_offset)
  energy_microsoft=c(prefix,microsoft$microsoft_energy)
}else
{
  # precedence fix, see basis branch above
  energy_microsoft=microsoft$microsoft_energy[(1+energy_microsoft_offset):length(microsoft$microsoft_energy)]
}
energy_microsoft=energy_microsoft[1:min(length(energy_microsoft),energy_length)]
if (length(energy_microsoft) < energy_length)
{
  pad=rep(NA,energy_length-length(energy_microsoft))
  energy_microsoft=c(energy_microsoft,pad)
}
#apple
if(energy_apple_offset < 0)
{
  prefix=rep(NA,-1*energy_apple_offset)
  energy_apple=c(prefix,apple$apple_energy)
}else
{
  # precedence fix, see basis branch above
  energy_apple=apple$apple_energy[(1+energy_apple_offset):length(apple$apple_energy)]
}
energy_apple=energy_apple[1:min(length(energy_apple),energy_length)]
if (length(energy_apple) < energy_length)
{
  pad=rep(NA,energy_length-length(energy_apple))
  energy_apple=c(energy_apple,pad)
}
# Assemble the aligned energy signals next to the gold standard and save.
# The as.numeric() wrappers are kept as a belt-and-braces guard.
energy_df=data.frame(gold_standard_det$gold_standard_startDate,as.numeric(gold_standard_det$gold_standard_energy),as.numeric(energy_basis),as.numeric(energy_fitbit),as.numeric(energy_microsoft),as.numeric(energy_apple),stringsAsFactors = FALSE)
names(energy_df)=c("Date","GoldStandard","Basis","Fitbit","Microsoft","Apple")
save(energy_df,file=paste("energy_df",subject,sep="_"))
#STEPS
# Steps have no gold standard; the fitbit trace is the alignment reference
# (see the offset-computation section). Same pad/drop/trim scheme as HR.
#basis
steps_length=length(fitbit$fitbit_steps)
if(steps_basis_offset < 0)
{
  # BUGFIX: was rep("NA", ...) -- the string "NA" coerced the whole vector to
  # character; use a true numeric missing value instead.
  prefix=rep(NA_real_,-1*steps_basis_offset)
  steps_basis=c(prefix,basis$basis_steps)
}else
{
  # BUGFIX: `1+off:len` parses as `1+(off:len)` (`:` binds tighter than `+`),
  # shifting every index by one and reading one past the end.
  steps_basis=basis$basis_steps[(1+steps_basis_offset):length(basis$basis_steps)]
}
steps_basis=steps_basis[1:min(length(steps_basis),steps_length)]
if (length(steps_basis) < steps_length)
{
  pad=rep(NA,steps_length-length(steps_basis))
  steps_basis=c(steps_basis,pad)
}
#microsoft
if(steps_microsoft_offset < 0)
{
  prefix=rep(NA,-1*steps_microsoft_offset)
  steps_microsoft=c(prefix,microsoft$microsoft_steps)
}else
{
  # precedence fix, see basis branch above
  steps_microsoft=microsoft$microsoft_steps[(1+steps_microsoft_offset):length(microsoft$microsoft_steps)]
}
steps_microsoft=steps_microsoft[1:min(length(steps_microsoft),steps_length)]
if (length(steps_microsoft) < steps_length)
{
  pad=rep(NA,steps_length-length(steps_microsoft))
  steps_microsoft=c(steps_microsoft,pad)
}
#apple
if(steps_apple_offset < 0)
{
  prefix=rep(NA,-1*steps_apple_offset)
  steps_apple=c(prefix,apple$apple_steps)
}else
{
  # precedence fix, see basis branch above
  steps_apple=apple$apple_steps[(1+steps_apple_offset):length(apple$apple_steps)]
}
steps_apple=steps_apple[1:min(length(steps_apple),steps_length)]
if (length(steps_apple) < steps_length)
{
  pad=rep(NA,steps_length-length(steps_apple))
  steps_apple=c(steps_apple,pad)
}
# Assemble the aligned step signals next to the fitbit reference and save.
steps_df=data.frame(fitbit$fitbit_date,as.numeric(fitbit$fitbit_steps),as.numeric(steps_basis),as.numeric(steps_microsoft),as.numeric(steps_apple),stringsAsFactors = FALSE)
names(steps_df)=c("Date","Fitbit","Basis","Microsoft","Apple")
#steps_df$Date=as.POSIXct(strptime(steps_df$Date,"%Y-%m-%d %H:%M:%S"),tz="PDT")
save(steps_df,file=paste("steps_df",subject,sep="_"))
###PLOT!!###
##HR
# Melt to long format and plot every device trace against the gold standard;
# vertical dashed lines mark the end of each activity state, and the numbered
# annotations label the states along the top of the panel.
mdf<- melt(hr_df, id="Date",measure=c("GoldStandard","Basis","Fitbit","Microsoft", "Apple")) # convert to long format
p=ggplot(data=mdf,
         aes(x=Date, y=value, col=variable)) +
  geom_line() +
  geom_point(data=states,aes(endTime,GoldStandardHRSteadyState,col="GoldStandardHRSteadyState"),size=5)+
  theme_bw(20)+
  theme(axis.text = element_text(size = 18),
        legend.key = element_rect(fill = "navy"),
        legend.text=element_text(size=18),
        legend.background = element_rect(fill = "white"),
        legend.position = c(0.14, 0.80),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title=element_text(size=18,face="bold"))+
  annotate("text", x = states$startTime+50, y = rep(200,length(states$startTime)), label = rownames(states),size=8)+
  xlab("Time")+
  ylab("BPM")+
  scale_fill_discrete(name="Device")+
  geom_vline(data=states, linetype=4, aes(xintercept=as.numeric(endTime)) )+
  xlim(c(first,last))+
  scale_color_manual(values=c("#FF0000", "#66FF33", "#0000FF","#CC9900","#CC9900","#BDA0CB"))
p
#ENERGY!
# Same layout as the HR panel, for the per-sample energy expenditure (kcal).
mdf<- melt(energy_df, id="Date",measure=c("GoldStandard","Basis","Fitbit","Microsoft", "Apple")) # convert to long format
p=ggplot(data=mdf,
         aes(x=Date, y=value, col=variable)) +
  geom_line() +
  geom_point(data=states,aes(endTime,GoldStandardEnergySteadyState,col="GoldStandardEnergySteadyState"),size=5)+
  theme_bw(20)+
  theme(axis.text = element_text(size = 18),
        legend.key = element_rect(fill = "navy"),
        legend.text=element_text(size=18),
        legend.background = element_rect(fill = "white"),
        legend.position = c(0.14, 0.80),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title=element_text(size=18,face="bold"))+
  annotate("text", x = states$startTime+50, y = rep(30,length(states$startTime)), label = rownames(states),size=8)+
  xlab("Time")+
  ylab("Kcal")+
  scale_fill_discrete(name="Device")+
  geom_vline(data=states, linetype=4, aes(xintercept=as.numeric(endTime)) )+
  xlim(c(first,last))+
  scale_color_manual(values=c("#FF0000", "#66FF33", "#0000FF","#CC9900","#CC9900","#BDA0CB"))
p
#STEPS!
# Step counts from each device, aligned to the fitbit reference.
mdf<- melt(steps_df, id="Date",measure=c("Fitbit","Basis","Microsoft","Apple")) # convert to long format
p=ggplot(data=mdf,
         aes(x=Date, y=value, colour=variable))+
  geom_line()+theme_bw(20)+
  theme(axis.text = element_text(size = 18),
        legend.key = element_rect(fill = "navy"),
        legend.text=element_text(size=18),
        legend.background = element_rect(fill = "white"),
        legend.position = c(0.14, 0.80),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title=element_text(size=18,face="bold"))+
  annotate("text", x = states$startTime+50, y = rep(200,length(states$startTime)), label = rownames(states),size=8)+
  xlab("Time")+
  ylab("Steps")+
  scale_fill_discrete(name="Device")+
  geom_vline(data=states, linetype=4, aes(xintercept=as.numeric(endTime)) )+
  scale_color_manual(values=c("#FF0000", "#66FF33", "#0000FF","#BDA0CB"))+
  xlim(c(first,last))
p
|
52e045d299582cf9048253f9e1d9422d6b8c118d
|
196ff4c376e540a9e4ffd3293949af516c51319e
|
/R/fluster_methods.R
|
56629a2e59eb275282e104f18db6649ebe694231
|
[] |
no_license
|
rogerswt/fluster
|
e1163375c2be750d8300e6923f6b3ccc2195ab6c
|
e5c455d698f0997074309b679860779819cb2b89
|
refs/heads/master
| 2021-07-24T04:35:59.018408
| 2021-07-17T20:15:30
| 2021-07-17T20:15:30
| 229,350,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,085
|
r
|
fluster_methods.R
|
#
# fluster_methods.R
#
# declares exposed functions.
#
# 2019-12-18 WTR
#
#
################################################################################
################################################################################
# Copyright Still Pond Cytomics LLC 2019. ##
# All Rights Reserved. No part of this source code may be reproduced ##
# without Still Pond Cytomics' express written consent. ##
################################################################################
################################################################################
##### NOTE ######
# I think that there should be an @import igraph here, but it breaks the build
# Not sure why it's working without it...
#' @importFrom igraph normalize tree parent
#' @import Rtsne
#' @import diptest
#' @import KernSmooth
#' @import igraph
#' @import fields
#' @import flowFP
#' @import cluster
#' @import wadeTools
NULL
#' @title Fingerprint-based Clustering
#' @param fcs The data (either a flowFrame or a flowSet)
#' @param parameters The parameters in fcs used for analysis. Default is all
#' parameters in the input data.
#' @param transformation Specify the transformation used when pre-processing
#' the data. Legal values are tranformations support in [wadeTools::ax()], which are
#' c('biexp', 'asinh', 'log', 'linear', 'none'). This is used to draw appropriate axes
#' in the included visualizations that depict MFI values. Default is 'biexp'.
#' @param nRecursions The number of recursions in calculating the fingerprint (default = 12)
#' @param nclust The number of clusters you want panoply to make. If NULL, fluster
#' will decide based on analysis of the clustering dendrogram.
#' @param merge Logical: should we merge initial clusters based on categorical similarity?
#' will make a guess as to the "best" number of clusters
#' @param graph Logical: should we compute the MST for visualization?
#' @param tsne Logical: should we compute a tSNE embedding for visualization?
#' @param manual_thresholds A named vector of one or more values that will override
#' the internally-calculated positivity thresholds. If NULL (default) use the internally-calculated
#' thresholds.
#' @param modality If previously computed with examine_positivity_thresholds, you can pass
#' in the resulting modality object. If NULL, modality is computed internally.
#' @param sd_fac A factor by which to multiply per-marker cluster standard deviation to determine
#' positivity. Smaller values tend to force a marker to be declared either positive
#' or negative, rather than "undetermined". Default: 1.0.
#' @description Fluster (**F**ingerprint-based c**luster**ing)
#' implements a workflow starting with Cytometric Fingerprint (CF) binning of
#' a flowFrame (or flowSet)
#' using flowFP to a relatively high resolution (default nRecursions of 12 results
#' in 4096 bins). The bin centroids are then computed, using the median value of all
#' of the events in the bin for each of the included parameters. Next, these
#' multivariate bin centroids are clustered using agglommerative hierarchical clustering
#' \code{cluster::agnes()}. Bins in a cluster carry their cellular cargo into
#' the cluster. The resulting data are represented in a graph structure and/or as a tSNE embedding
#' for visualization and interpretation.
#'
#' @return An object of class 'fluster', with the following elements:
#' \describe{
#' \item{mod}{The flowFPModel generated}
#' \item{centers}{Multivariate bin centers}
#' \item{graph}{A graph that can be used for visualization}
#' \item{clustering}{A named list containing cluster membership of the bins}
#' }
#' @examples
#' load(system.file("extdata", "sampled_flowset_young.rda", package = "fluster"))
#' flust_params = c(7:9, 11:22)
#' flust_obj = fluster(fs_young, parameters = flust_params)
#' @export
fluster = function(fcs, parameters = NULL, transformation = 'biexp', nRecursions = 12, nclust = NULL, merge = TRUE, graph = TRUE, tsne = TRUE,
                   manual_thresholds = NULL, modality = NULL, sd_fac = 1.0) {
  # Record the call arguments so downstream functions (and the user) can see
  # how this object was built.
  call.args = list(parameters = parameters, transformation = transformation, nRecursions = nRecursions,
                   nclust = nclust, merge = merge, graph = graph, tsne = tsne,
                   manual_thresholds = manual_thresholds,
                   modality = modality, sd_fac = sd_fac
  )
  # Accept either a single flowFrame or a flowSet (which is aggregated into
  # one frame; the synthetic "Original" column added by coercion is dropped).
  if (is(fcs, "flowFrame")) {
    ff = fcs
  } else if (is(fcs, "flowSet")) {
    message("Aggregating the flowSet...")
    ff = suppressWarnings(as(fcs, "flowFrame"))
    flowCore::exprs(ff) = flowCore::exprs(ff)[,which(flowCore::colnames(flowCore::exprs(ff)) != "Original")]
  } else {
    stop("Argument fcs must either be a flowFrame or a flowSet\n")
  }
  # check parameters: default to all columns; allow numeric column indices.
  if (is.null(parameters)) {
    parameters = flowCore::colnames(ff)
  }
  if (is.numeric(parameters)) {
    parameters = flowCore::colnames(ff)[parameters]
  }
  # High-resolution fingerprint binning (2^nRecursions bins).
  message("computing fingerprint bins...")
  mod = flowFP::flowFPModel(ff, parameters = parameters, nRecursions = nRecursions)
  fp = flowFP::flowFP(ff, mod)
  # Per-bin multivariate centers (medians) and variances.
  message("calculating bin centers...")
  res = calculate_bin_phenotypes(fp = fp, fs = ff)
  mat = t(res$center)
  variance = t(res$variance)
  # Agglomerative hierarchical clustering (agnes) on the bin centers.
  message("clustering bins...")
  ag = agnes(mat)
  # check nclust: if not supplied, pick a cluster count from the dendrogram.
  if (is.null(nclust)) {
    nclust = advise_n_clust(ag, show = FALSE)$n1
    message("advising ", nclust, " clusters...")
  }
  # Cut the tree and record, per cluster, which bins belong to it.
  clusters = cutree(as.hclust(ag), k = nclust)
  c_index = list()
  for (i in 1:nclust) {
    idx = which(clusters == i)
    c_index[[i]] = idx
  }
  clst = list(clst = clusters, c_index = c_index, c_centers = NULL)
  fluster_obj = list(call.args = call.args, fcs = ff, transformation = transformation,
                     parameters = parameters, mod = mod, fp = fp, centers = mat, bvar = variance, agnes_obj = ag,
                     graph = NULL, tsne = NULL, clustering = clst, modality = NULL)
  class(fluster_obj) = "fluster"
  # Determine modality and calculate positivity thresholds, unless a
  # pre-computed modality object was supplied.
  if (is.null(modality)) {
    message("calculating positivity thresholds using cluster extremes...")
    fluster_obj = positivity_thresholds(fluster_obj, ff, manual_thresholds = manual_thresholds)
  } else {
    message("Using previously calculated thresholds")
    # BUGFIX: the supplied modality object was previously discarded, leaving
    # the modality slot NULL (breaking the merge step and display_modality()).
    fluster_obj$modality = modality
  }
  # Optionally merge clusters whose categorical (hi/lo per marker) phenotypes
  # match; nclust is refreshed afterwards.
  if (merge) {
    fluster_obj = merge_categorical_clusters(fluster_obj = fluster_obj, sd_fac = sd_fac)
    nclust = max(fluster_obj$clustering$clst)
    message("merging clusters, resulting in ", nclust, " clusters...")
  }
  # calculate cluster centers, mostly for visualization
  message("Calculating cluster centers...")
  c_centers = matrix(NA, nrow = 0, ncol = length(parameters))
  colnames(c_centers) = parameters
  for (i in 1:nclust) {
    idx = which(fluster_obj$clustering$clst == i)
    c_centers = rbind(c_centers, distributions_bins(fluster_obj, bin_indices = idx)$mn)
  }
  fluster_obj$clustering$c_centers = c_centers
  # Optional visualization artifacts.
  if (graph) {
    message("building a MST representation of clusters...")
    fluster_obj = fluster_add_mst(fluster_obj)
  }
  if (tsne) {
    message("building a tSNE representation of clusters...")
    fluster_obj = fluster_add_tsne(fluster_obj)
  }
  fluster_obj
}
#' @title Make a Plot of Modality
#' @description For each analysis marker, draws the global kernel density
#' estimate together with the densities of the low/high cluster extremes.
#' Positivity thresholds are calculated by finding the compact extremes
#' of marker expressions in various clusters; thresholds are the mid-point
#' between extremes and are drawn as a heavy dot-dash line.
#' @param fluster_obj A fluster object (its `modality` slot must be populated,
#' e.g. by \code{fluster()}).
#' @return Invisibly returns \code{NULL}; called for its plotting side effect.
#' @export
display_modality = function(fluster_obj) {
  modality = fluster_obj$modality
  parameters = names(modality$thresh)
  kde = modality$kde
  # For displaying as polygons, force the first and last points of each kde
  # to 0 so the polygons close cleanly on the baseline.
  klen = length(kde[[1]][[1]]$y)
  for (marker in parameters) {
    kde[[marker]][["global"]]$y[1] = kde[[marker]][["global"]]$y[klen] = 0
    kde[[marker]][["lo"]]$y[1] = kde[[marker]][["lo"]]$y[klen] = 0
    kde[[marker]][["hi"]]$y[1] = kde[[marker]][["hi"]]$y[klen] = 0
  }
  # Calculate a near-square panel layout: one panel per marker (+1 spare).
  n = length(parameters) + 1
  sq = sqrt(n)
  frac = sq - floor(sq)
  if (frac == 0) {
    ac = dn = floor(sq)
  } else {
    ac = floor(sq) + 1
    dn = ceiling(n / ac)
  }
  opar = par(mfrow = c(dn, ac), mar = c(2, 2, 2, 1))
  # ROBUSTNESS FIX: restore graphical parameters even if plotting fails
  # part-way through (previously par(opar) was only reached on success).
  on.exit(par(opar), add = TRUE)
  for (marker in parameters) {
    plot(kde[[marker]][["global"]], type = 'l', lwd = 1, xaxt = 'n', xlab = '', ylab = '', main = marker)
    ax(type = fluster_obj$transformation)
    bcol = "dodgerblue2"
    rcol = "indianred2"
    # Global density in grey; low/high extreme densities in blue/red.
    polygon(kde[[marker]][["global"]], col = make_transparent("black", alpha = .25), border = "black")
    polygon(kde[[marker]][["lo"]], col = make_transparent(bcol), border = bcol)
    polygon(kde[[marker]][["hi"]], col = make_transparent(rcol), border = rcol)
    # Medians of the extremes, and the derived threshold between them.
    xline(modality$med_lo[marker], lty = 'dotdash', col = "dodgerblue2")
    xline(modality$med_hi[marker], lty = 'dotdash', col = "indianred2")
    xline(modality$thresh[marker], lty = 'dotdash', col = "black", lwd = 2)
  }
  invisible(NULL)
}
#' @title Merge Categorically Similar Clusters
#' @description Clusters are labeled with a categorical vector in which each
#' marker is either "hi" or "lo" with respect to a threshold. If a marker is not unambiguously
#' either hi or lo, it's labeled as "un" for "unknown". To receive hi (lo), the
#' cluster center must be sufficiently above (below) the threshold in units of
#' the standard deviation of that marker. Clusters whose categorical
#' phenotypes match are merged into a single cluster.
#' @param fluster_obj A fluster object.
#' @param sd_fac A factor multiplying the standard deviation to determine if that
#' marker is sufficiently above (below) the threshold in order to be labeled "hi" ("lo").
#' @return A fluster object after categorical merging. The pre-merge
#' clustering is preserved in the \code{orig_clustering} slot.
#' @export
merge_categorical_clusters = function(fluster_obj, sd_fac = 1.0) {
  n_clust = max(fluster_obj$clustering$clst)
  # overwrite affected call parameters in fluster_obj
  fluster_obj$call.args$sd_fac = sd_fac
  fluster_obj$call.args$merge = TRUE
  # Get the categorical (hi/lo/un per marker) phenotype of every cluster.
  categ = categorical_phenotype_all_clusters(fluster_obj = fluster_obj, sd_fac = sd_fac)
  # Greedily group clusters with matching categorical phenotypes: take the
  # first unassigned cluster, sweep the remainder for matches, repeat.
  cmerge = list()
  phenotype = list()
  # cvec is a vector of cluster indices. When a cluster joins a merge, it's removed from this vector
  cvec = 1:n_clust
  k = 1
  while (length(cvec) > 0) {
    # get the head of the list of remaining clusters
    ith = cvec[1]
    cmerge[[k]] = ith                  # add ith to the next merge
    phenotype[[k]] = categ[[ith]]      # record the phenotype
    cvec = cvec[which(cvec != ith)]    # remove it from cvec
    j = 1
    while (j <= length(cvec)) {
      jth = cvec[j]
      if (compare_categories(categ[[ith]], categ[[jth]])) {
        cmerge[[k]] = append(cmerge[[k]], jth)  # add jth cluster to cmerge
        cvec = cvec[which(cvec != jth)]         # remove jth cluster from cvec
        j = j - 1                               # don't skip next element
      }
      j = j + 1
    }
    k = k + 1
  }
  # Replace the clustering slot with the merged result, keeping the pre-merge
  # clustering as the base.
  # BUGFIX: the original tested fluster_obj[["original_clustering"]] but saved
  # the slot as "orig_clustering", so the test never matched; worse, when the
  # slot *did* exist, `orig_clustering` was left undefined and the function
  # crashed. Both branches are handled explicitly now.
  # NOTE(review): the repeat-merge path assumes the current cluster indices
  # still correspond to entries of orig_clustering$c_index -- confirm before
  # calling this twice on the same object.
  if (is.null(fluster_obj[["orig_clustering"]])) {
    orig_clustering = fluster_obj$clustering
  } else {
    orig_clustering = fluster_obj$orig_clustering
  }
  c_index = list()
  for (i in 1:length(cmerge)) {
    c_index[[i]] = vector(mode = 'numeric')
    for (j in 1:length(cmerge[[i]])) {
      c_index[[i]] = append(c_index[[i]], orig_clustering$c_index[[cmerge[[i]][j]]])
    }
  }
  # Re-derive the per-bin cluster assignment vector over all fingerprint bins.
  nbins = 2 ^ nRecursions(fluster_obj$mod)
  clst = rep(NA, length = nbins)
  for (i in 1:length(c_index)) {
    clst[c_index[[i]]] = i
  }
  clustering = list(clst = clst, c_index = c_index, phenotype = phenotype, func_phenotype = NULL)
  fluster_obj$orig_clustering = orig_clustering
  fluster_obj$clustering = clustering
  fluster_obj
}
#' @title Add a minimum spanning tree representation to the fluster object
#' @description Builds a full graph over the bin MFIs, reduces it to a minimum
#' spanning tree, collapses it by the current clustering into a community
#' graph, and attaches a Fruchterman-Reingold layout for plotting.
#' @param fluster_obj The result of running fluster()
#' @return A fluster object with the graph slot populated.
#' @export
fluster_add_mst = function(fluster_obj) {
  bin_mfi <- as.list(data.frame(fluster_obj$centers))
  # Full bin-level graph, annotated with per-bin MFI vertex attributes.
  full_graph <- build_graph(mfi = bin_mfi)
  full_graph <- add_mfi_vertex_attributes(full_graph, bin_mfi)
  spanning <- igraph::mst(full_graph)
  # Collapse the MST by cluster membership into a community-level graph.
  n_clusters <- length(fluster_obj$clustering$c_index)
  community <- agnes_to_community(fluster_obj$agnes_obj, nclust = n_clusters)
  cluster_graph <- make_graph_from_community(comm = community, g = spanning)
  fluster_obj$graph <- attach_layout_fr(cluster_graph)
  fluster_obj
}
#' @title Add a tSNE representation to the fluster object
#' @description Embed the cluster centers in 2D with tSNE and store the
#' coordinates in the object's tsne slot.
#' @param fluster_obj The result of running fluster()
#' @return A fluster object with the tsne slot populated.
#' @export
fluster_add_tsne = function(fluster_obj) {
  cluster_centers = fluster_obj$clustering$c_centers
  # Rtsne requires perplexity < (n - 1) / 3; cap at the conventional 30
  tsne_perplexity = min((nrow(cluster_centers) - 1) / 3, 30)
  set.seed(137)  # so we'll get the same map for the same data
  coords = Rtsne(dist(cluster_centers), perplexity = tsne_perplexity)$Y
  colnames(coords) = c("tsne_1", "tsne_2")
  fluster_obj$tsne = coords
  fluster_obj
}
#' @title plot_fluster_graph
#' @description Draw a picture of the result of fluster using graph-based representation of clusters.
#' @param fluster The result of running fluster
#' @param markers Markers to display in the spread
#' @param mode Compute colors using either global or per-marker distributions. Global
#' distributions are color-coded directly by signals produced by the cytometer. Per-marker
#' mode shows colors relative to the positivity threshold for each marker (red colors for
#' above-threshold and blue colors for below-threshold) in units of standard deviation
#' for each marker.
#' @param vs The max size of nodes in the graph
#' @param ms The minimum size of nodes in the graph
#' @param log.size If true, scale node sizes logarithmically
#' @param vertex.frame Logical. Should we draw a frame.
#' @param legend Logical. Draw color legend.
#' @param cex.main Scale factor for titles of the individual markers.
#' @param cex.lab Scale factor for labels.
#' @return N/A.
#' @examples
#' plot_fluster(fluster_obj)
#' @export
plot_fluster_graph = function(fluster, markers = colnames(fluster$centers), mode = c("global", "per-marker"),
                              vs = 10, ms = 5, log.size = FALSE, vertex.frame = TRUE,
                              legend = TRUE, cex.main = 2, cex.lab = 2) {
  mode = match.arg(mode)
  # NOTE(review): the spread is drawn with colnames(fluster$mat), not the
  # markers argument -- markers only influences the legend choice below.
  # Confirm whether ignoring markers here is intentional.
  plot_comm_spread(fluster, markers = colnames(fluster$mat), mode, vs = vs, ms = ms,
                   log.size = log.size, vertex.frame = vertex.frame, cex.main = cex.main)
  if (legend) {
    # markers == 'categorical' selects the cluster legend instead of a color scale
    if (markers[1] != 'categorical') {
      if (mode == 'global') {
        draw_color_scale(cex.lab = cex.lab, transformation = fluster$transformation)
      } else {
        draw_per_marker_scale(dyn_range = 2, cex.lab = cex.lab)
      }
    } else {
      draw_cluster_legend(fluster_obj = fluster, cex.text = cex.lab)
    }
  }
}
#' @title plot_fluster_tsne
#' @description Draw a picture of the result of fluster using tsne representation of clusters.
#' @param fluster The result of running fluster
#' @param markers Markers to include in the spread
#' @param mode Compute colors using either global or per-marker distributions. Global
#' distributions are color-coded directly by signals produced by the cytometer. Per-marker
#' mode shows colors relative to the positivity threshold for each marker (red colors for
#' above-threshold and blue colors for below-threshold) in units of standard deviation
#' for each marker.
#' @param cex Scale factor for node size
#' @param proportional Logical. Scale by the number of events in the cluster
#' @param emph Logical. Emphasize each blob with a black line.
#' @param cex.lab Scale factor for titles of the individual markers
#' @param highlight_clusters IF not NULL, a collection of cluster indices to highlight.
#' @param legend Logical. Whether or not to draw a legend.
#' @param show_cluster_numbers Forwarded to plot_tsne_spread; presumably controls
#' drawing cluster-number labels on the map -- TODO confirm and document fully.
#' @return N/A.
#' @examples
#' plot_fluster(fluster_obj)
#' @export
plot_fluster_tsne = function(fluster, markers = colnames(fluster$centers), mode = c("global", "per-marker"),
                             cex = 20.0, proportional = TRUE, emph = TRUE, cex.lab = 2,
                             highlight_clusters = NULL, legend = TRUE, show_cluster_numbers = NULL) {
  mode = match.arg(mode)
  plot_tsne_spread(fluster, markers, mode, cex, proportional, emph, highlight_clusters, show_cluster_numbers)
  if (legend) {
    # markers == 'categorical' selects the cluster legend instead of a color scale
    if (markers[1] != 'categorical') {
      if (mode == 'global') {
        draw_color_scale(cex.lab = cex.lab, transformation = fluster$transformation)
      } else {
        draw_per_marker_scale(dyn_range = 2, cex.lab = cex.lab)
      }
    } else {
      draw_cluster_legend(fluster_obj = fluster, cex.text = cex.lab)
    }
  }
}
#' @title map_functional_names
#' @description Based on a user-specified table, assign symbolic names to clusters
#' @param fluster_obj The result of running fluster()
#' @param defs_file A file containing the functional definitions.
#' @return A decorated fluster object
#' @export
map_functional_names = function(fluster_obj, defs_file) {
  # read the user-supplied categorical definitions
  fd = retrieve_categorical_definitions(defs_file)
  # attach functional names; return the decorated object visibly
  # (the original ended with an assignment, which returns invisibly)
  assign_functional_names(fluster_obj, fd)
}
#' @title Map a Sample to a Fluster Model
#' @description This function determines, for a single sample, the number of cells in each cluster.
#' @param ff A sample flowFrame
#' @param fluster_obj An object of type "fluster", the result of running fluster()
#' @return Per-cluster counts and fractions
#' @export
fluster_map_sample = function(ff, fluster_obj) {
  # apply the flowFPModel to the sample and extract per-event bin membership
  fp = flowFP(fcs = ff, model = fluster_obj$mod)
  btag = tags(fp)[[1]]
  # count the events that fall in each cluster's bins
  nclust = max(fluster_obj$clustering$clst)
  c_count = vapply(seq_len(nclust), function(cl) {
    sum(btag %in% fluster_obj$clustering$c_index[[cl]])
  }, numeric(1))
  # express counts as fractions of all events and return both
  list(counts = c_count, fractions = c_count / nrow(ff))
}
#' @title Visualize Cluster Phenotypes
#' @description Draw a "phenobar" representation of a cluster phenotype. Bars have
#' a height equal to the median value of the parameter and are
#' color-coded. Error flags represent first and third quartiles of the bin centers
#' belonging to the cluster.
#' @param fluster_obj An object of type "fluster", the result of running fluster()
#' @param parameters Which parameters to include in the plot (default = all parameters)
#' @param cluster Which cluster to plot.
#' @param bin_indices Instead of bin indices in a cluster, specify them directly.
#' @param plot_global_flag Indicate the global distributions.
#' @param show_thresholds Logical. Show per-parameter thresholds.
#' @param show_sd_fac Logical. Superimpose modified error flags if sd_fac != 1.0.
#' @param main Title of plot.
#' @export
fluster_phenobars = function(fluster_obj,
                             parameters = fluster_obj$parameters,
                             cluster = 1, bin_indices = NULL,
                             plot_global_flag = FALSE,
                             show_thresholds = TRUE,
                             show_sd_fac = TRUE,
                             main = paste("Cluster", cluster)) {
  # make an empty plot; one horizontal bar row per parameter
  # (bx(262143) presumably maps the instrument's max channel value into
  # transformed units -- confirm against wadeTools)
  plot(0, 0, pch = '', xlim = c(0, bx(262143)), ylim = c(1 - .3, length(parameters) + .3),
       xaxt = 'n', yaxt = 'n',
       xlab = '', ylab = '',
       main = main)
  wadeTools::ax(1, type = fluster_obj$transformation)
  axis(side = 2, labels = parameters, at = 1:length(parameters), las = 1)
  centers = fluster_obj$centers
  # get the bin indices of the cluster (unless supplied directly)
  if (is.null(bin_indices)) {
    idx = which(fluster_obj$clustering$clst == cluster)
  } else {
    idx = bin_indices
  }
  # summary stats (mn/lo/hi) of the selected bins for each parameter
  val = distributions_bins(fluster_obj, bin_indices = idx)
  # draw the mean as color-coded bars
  col = pcolor(val$mn, min_value = 0, max_value = 5)
  add_bars(vals = val$mn, yvals = 1:length(parameters), col = col)
  # draw the first/third-quartile flags
  for (i in 1:length(parameters)) {
    draw_flag(y = i, q1 = val$lo[i], q3 = val$hi[i], med = NA, cex = 2, lwd = 2)
  }
  # superimpose gray flags widened/narrowed by sd_fac, if it differs from 1
  if (show_sd_fac) {
    sd_fac = fluster_obj$call.args$sd_fac
    if (sd_fac != 1) {
      for (i in 1:length(parameters)) {
        sdev = val$hi[i] - val$lo[i]
        mod_sdev = sd_fac * sdev
        draw_flag(y = i, q1 = val$mn[i] - .5 * mod_sdev, q3 = val$mn[i] + 0.5 * mod_sdev, med = NA, cex = 2, lwd = 2, col = 'gray')
      }
    }
  }
  # mark each parameter's positivity threshold in blue
  if (show_thresholds) {
    for (i in 1:length(parameters)) {
      p = parameters[i]
      draw_thresh(y = i, thresh = fluster_obj$modality$thresh[p], len = .7, col = 'blue', lwd = 2)
    }
  }
  # add global flags (all bins) slightly below each row for reference
  if (plot_global_flag) {
    n_bins = 2^nRecursions(fluster_obj$mod)
    qglbl = distributions_bins(fluster_obj, bin_indices = 1:n_bins)
    for (i in 1:length(parameters)) {
      draw_flag(y = i - .2, q1 = qglbl$lo[i], q3 = qglbl$hi[i], med = qglbl$mn[i], cex = 2, lwd = 2, col = 'gray')
    }
  }
}
#' @title Gate Events in Cluster(s)
#' @description Given a fluster model and a list of its clusters, gate a flowFrame
#' or flowSet so as to include events that belong to these clusters.
#' @param fluster_obj An object of type "fluster", the result of running fluster()
#' @param fcs_obj A flowFrame or flowSet to be gated.
#' @param clusters A list of clusters we want to gate.
#' @return The gated flowFrame or flowSet
#' @export
fluster_gate_clusters = function(fluster_obj, fcs_obj, clusters) {
  # get a list of bins that belong to the indicated clusters
  bvec = which(fluster_obj$clustering$clst %in% clusters)
  # BUGFIX: the original tested is(fcs_obj) == "flowFrame", which compares
  # against the full class vector (an error as an if() condition of length > 1
  # in R >= 4.2, and wrong for subclasses).  Use the two-argument form.
  if (is(fcs_obj, "flowFrame")) {
    ff = fcs_obj
    # apply the flowFP model and keep only events whose bin is in bvec
    fp = flowFP(ff, fluster_obj$mod)
    ev = which(tags(fp)[[1]] %in% bvec)
    exprs(ff) = exprs(ff)[ev, ]
    return(ff)
  } else {
    fs = fcs_obj
    fp = flowFP(fs, fluster_obj$mod)
    # gate each frame of the set against its own tag vector
    for (i in seq_along(fs)) {
      ev = which(tags(fp)[[i]] %in% bvec)
      exprs(fs[[i]]) = exprs(fs[[i]])[ev, ]
    }
    return(fs)
  }
}
# New method to find positivity thresholds. Given a (high resolution) clustering,
# calculate, for each marker, the median and spread of each cluster. Then, identify
# the most negative and most positive (and also tight) clusters. The positivity
# threshold will be the mid-point between them.
#
# We prioritize tight clusters over diffuse clusters by computing a figure of merit,
# which is the normalized median value divided by the spread.
#
# NOTE: One of fluster_obj or ff must not be null. If fluster_obj is null, then
# it is calculated from ff. If fluster_obj is not null, and its internal fcs
# object also isn't null, it is used as ff, and the ff argument is ignored.
#' @title Positivity Thresholds
#' @param fluster_obj An existing fluster object, the result of running fluster().
#' @param fcs A flowFrame or flowSet.
#' @param parameters The parameters you wish to consider.
#' @param manual_thresholds A named vector of one or more values that will override
#' the internally-calculated positivity thresholds.
#' @param show Logical. Should we display the result graphically?
#' @param show_range The range of values to include in calculating the kernel
#' density estimates.
#' @description Given a (high resolution) clustering, calculate, for each marker,
#' the median and spread of each cluster. Then, identify the most negative and most
#' positive (and also tight) clusters. The positivity threshold will be between
#' them, closer to the tighter of the two.
#'
#' We prioritize tight clusters over diffuse clusters by computing a figure of merit,
#' which is the normalized median value divided by the spread.
#'
#' NOTE: One of fluster_obj or ff must not be null. If fluster_obj is null, then
#' it is calculated from ff. If fluster_obj is not null, and its internal fcs
#' object also isn't null, it is used as ff, and the ff argument is ignored.
#'
#' ALSO NOTE: This function internally de-rails and de-negs the data so that the
#' kernel density estimates are not confused by rail or excessivley negative events.
#' This is an experimental feature.
#'
#' @return A fluster object that includes a modality slot.
#'
#' @export
positivity_thresholds = function(fluster_obj = NULL, fcs = NULL,
                                 parameters = NULL,
                                 manual_thresholds = NULL,
                                 show = FALSE, show_range = NULL) {
  if (is.null(fluster_obj)) {
    if (is.null(fcs)) {
      stop("fluster_obj and fcs can't both be null.")
    }
    # collapse a flowSet to a single flowFrame, dropping the bookkeeping column
    if (is(fcs, "flowSet")) {
      fcs = suppressWarnings(as(fcs, "flowFrame"))
      flowCore::exprs(fcs) = flowCore::exprs(fcs)[,which(flowCore::colnames(flowCore::exprs(fcs)) != "Original")]
    }
    if (is.null(parameters)) {
      stop("Need to supply parameters if we're starting with FCS data (and not a fluster object).")
    }
    if (is.numeric(parameters)) {
      parameters = flowCore::colnames(fcs)[parameters]
    }
    fluster_obj = fluster(fcs = fcs, parameters = parameters, merge = FALSE, graph = FALSE, tsne = FALSE)
  }
  ff = fluster_obj$fcs
  parameters = fluster_obj$parameters
  # Sanity check that names of manual thresholds match the parameters.
  # BUGFIX: this check now runs AFTER parameters is finalized from the
  # fluster object; the original checked against the raw argument, which is
  # NULL when a fluster_obj is supplied, spuriously discarding the overrides.
  if (!is.null(manual_thresholds)) {
    n_manual = length(manual_thresholds)
    if (length(which(names(manual_thresholds) %in% parameters)) < n_manual) {
      message("WARNING: names of manual thresholds do not match parameters.  Ignoring...")
      manual_thresholds = NULL
    }
  }
  # # experiment with derail and deneg to avoid misleading threshold determination
  # ff = derail(ff, parameters = parameters)
  # ff = deneg(ff, parameters = parameters, min.value = bx(-1000))
  # fp = flowFP(ff, model = fluster_obj$mod)
  # # fp = fluster_obj$fp
  #
  # tg = tags(fp)[[1]]
  # tmp_flus = fluster_obj
  # tmp_flus$fcs = ff
  # tmp_flus$fp = fp
  #
  # res = spreads_and_meds(tmp_flus)
  tg = tags(fluster_obj$fp)[[1]]
  res = spreads_and_meds(fluster_obj)
  kde = list()   # per-marker kernel density estimates, for visualization
  if (is.null(show_range)) {
    # default display range spans the instrument range of the chosen parameters
    idx = which(flowCore::colnames(ff) %in% parameters)
    bot = min(parameters(ff)$minRange[idx])
    top = max(parameters(ff)$maxRange[idx])
    show_range = c(bot, top)
  }
  med_lo = med_hi = thresh = rep(NA, length = length(parameters))
  names(med_lo) = names(med_hi) = names(thresh) = parameters
  for (marker in parameters) {
    # find the extreme (lowest / highest, preferring tight) clusters
    idx = select_extremes(med = res$med[, marker], spread = res$spread[, marker])
    kde[[marker]] = list()
    kde[[marker]][["global"]] = normalize.kde(bkde(exprs(ff)[, marker], gridsize = 1001, range.x = show_range))
    ev_lo = which(tg %in% fluster_obj$clustering$c_index[[idx$lo]])
    kde[[marker]][["lo"]] = normalize.kde(bkde(exprs(ff)[ev_lo, marker], gridsize = 1001, range.x = show_range))
    wid_lo = IQR(exprs(ff)[ev_lo, marker])
    ev_hi = which(tg %in% fluster_obj$clustering$c_index[[idx$hi]])
    kde[[marker]][["hi"]] = normalize.kde(bkde(exprs(ff)[ev_hi, marker], gridsize = 1001, range.x = show_range))
    wid_hi = IQR(exprs(ff)[ev_hi, marker])
    med_lo[marker] = median(exprs(ff)[ev_lo, marker])
    med_hi[marker] = median(exprs(ff)[ev_hi, marker])
    # place the threshold between the two medians, closer to the tighter peak
    pos_fac = wid_lo / (wid_lo + wid_hi)
    thresh[marker] = med_lo[marker] + pos_fac * (med_hi[marker] - med_lo[marker])
  }
  modality = list(kde = kde, med_lo = med_lo, med_hi = med_hi, thresh = thresh)
  # attach modality to the fluster object
  fluster_obj$modality = modality
  if (show) {
    display_modality(fluster_obj)
  }
  # override manually supplied thresholds.
  # BUGFIX: guard against NULL -- the original 1:length(NULL) loop iterated
  # over c(1, 0) and indexed with NULL names.
  if (!is.null(manual_thresholds)) {
    for (i in seq_along(manual_thresholds)) {
      fluster_obj$modality$thresh[names(manual_thresholds)[i]] = manual_thresholds[i]
    }
  }
  return(fluster_obj)
}
|
a8a2240d19522ca02d22325d5edc08208fb6bfb2
|
7f89e404e52e5a72b4c6caa972f0f195f69c4240
|
/model/calibration.R
|
0271bbde196bc4ac8f94ff0af6ca4a8920c0441d
|
[] |
no_license
|
brandonjoeltan/covstretch
|
59d7f4321d1fb69a0894efb553253e48c903912b
|
4fbd62ec539f66b92a85c726d3229c86f4da5b6a
|
refs/heads/main
| 2023-03-19T01:10:55.321547
| 2021-03-16T22:42:30
| 2021-03-16T22:42:30
| 336,088,960
| 0
| 0
| null | 2021-02-04T21:36:50
| 2021-02-04T21:36:49
| null |
UTF-8
|
R
| false
| false
| 2,734
|
r
|
calibration.R
|
library(tidyverse)
load("data/default_inputs.Rdata")
# Demographics (for comparing HIC vs LIC)
hic_pop <- pbc_spread[countries["High-income countries"],] %>% as.numeric()
lic_pop <- pbc_spread[countries["Low-income countries"],] %>% as.numeric()
# Infection fatality rates by age band (fractions, not percent)
ifr_hic <- c(0.002, 0.006, 0.03, 0.08, 0.15, 0.60, 2.2, 5.1, 9.3)/100
# LIC IFRs scaled age-dependently relative to HIC -- assumes a (3.2/2)
# gradient across the nine bands; TODO confirm source of this factor
ifr_lic <- ifr_hic*(3.2/2)^(5:(-3))
# concave effectiveness function A * x^beta, calibrated so that
# coverage x = 0.5 yields effectiveness 0.8 (given cap A = 0.95)
A=.95
beta=(log(0.8)- log(A))/log(.5)
Q=0.5
# active scenario: low-income countries (HIC lines kept for switching)
#harm<-ifr_hic
harm<-ifr_lic
#pop <- hic_pop/sum(hic_pop)
pop <- lic_pop/sum(lic_pop)
# Allocate vaccine coverage x across age groups, proportional to
# harm^(1/(1-beta)), subject to total supply Q, clamping groups whose
# implied effectiveness A*x^beta would exceed 0.95 to full coverage and
# dropping groups whose effectiveness would fall below 0.5.
# NOTE(review): relies on globals A and beta defined in the setup above,
# and assumes harm is sorted ascending by age -- confirm both.
optimal_policy<- function(pop,harm,Q){
  Q_temp<-Q
  # First pass: peel off the oldest groups one at a time.  Each peeled
  # group gets x = 1 and its population is subtracted from the remaining
  # supply, until the proportional allocation over the rest no longer
  # saturates the effectiveness cap (max A*x^beta < 0.95).
  for (i in 1:length(harm)){
    x<-( Q_temp*harm[1:(length(harm)-i +1) ]^(1/(1-beta)) / sum(pop[1:(length(harm)-i +1)]*harm[1:(length(harm)-i +1)]^(1/(1-beta)) ) )
    #print(x)
    i_star<-i
    if( max( A*x^beta) <0.95 ) {
      break
    }
    Q_temp<-Q_temp-pop[(length(harm)-i +1)]
  }
  x_temp<-x
  # Second pass: drop the youngest groups from the allocation until the
  # minimum effectiveness over the remaining groups exceeds 0.5.
  for (i in 1:(length(harm)-i_star+ 1)){
    x<-( Q_temp*harm[i:(length(harm)-i_star+ 1) ]^(1/(1-beta)) / sum(pop[i:(length(harm)-i_star+ 1)]*harm[i:(length(harm)-i_star+ 1)]^(1/(1-beta)) ) )
    #print(x)
    i_min<-i
    if( min( A*x^beta) >0.5 ) {
      break
    }
  }
  # assemble: zeros for dropped young groups, proportional middle, ones
  # for the saturated old groups
  x_star<- c( rep(0,i_min-1),x,rep(1,i_star-1))
  # edge case: every group saturated in the first pass
  if (i_star==9){x_star<-c(x_temp,rep(1,i_star-1))}
  return(x_star)
  #A*x_star^beta
}
# Age-band labels used in both result tables.
# NOTE(review): the bands jump from "70-80" to "90+" (no "80-90") -- kept
# exactly as in the original; confirm the intended labels.
age_groups <- c("0-10","10-20","20-30","30-40","40-50","50-60","60-70","70-80","90+")

# Build a results table: population share per age group plus the optimal
# coverage allocation for each supply level Q in {1.0, 0.9, ..., 0.1}.
# data_frame() is deprecated; tibble() (attached via tidyverse) is its
# drop-in replacement.
build_policy_table <- function(pop_counts, ifr) {
  share <- pop_counts / sum(pop_counts)
  res <- tibble("Age Group" = age_groups, "Population Share" = share)
  for (q_label in c("1.0","0.9","0.8","0.7","0.6","0.5","0.4","0.3","0.2","0.1")) {
    res[[q_label]] <- optimal_policy(share, ifr, as.numeric(q_label))
  }
  res
}

hic_results <- build_policy_table(hic_pop, ifr_hic)
lic_results <- build_policy_table(lic_pop, ifr_lic)
|
2996d9073a5466713f974fd069bffab0127d4524
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BBmisc/examples/collapse.Rd.R
|
d2c924cabe31c3f8979c10b550e0da64d2e1bda1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
collapse.Rd.R
|
library(BBmisc)
### Name: collapse
### Title: Collapse vector to string.
### Aliases: collapse
### ** Examples
# join the elements into one string using the default separator (",")
collapse(c("foo", "bar"))
# join with a custom separator
collapse(c("foo", "bar"), sep = ";")
|
0929fb99784829ad4b94c50c34f4fcb64471dd51
|
77ff13c4c17a8f0c7469cd914eb856ebda6e52a2
|
/man/predicting_sim.Rd
|
e6b93e2254d2c4e75a14652b8f50fae814e44f93
|
[] |
no_license
|
carlonlv/DataCenterSim
|
f88623620c32816e97bd53b78ef6931f66ca8521
|
fa2cc2592969c40d3e8494c2be46a94641b235f1
|
refs/heads/master
| 2022-01-19T12:04:49.255542
| 2022-01-07T19:40:39
| 2022-01-07T19:40:39
| 228,258,775
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,540
|
rd
|
predicting_sim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{predicting_sim}
\alias{predicting_sim}
\title{Simulation of Scheduling Jobs Based On Predictions.}
\usage{
predicting_sim(
object,
x,
xreg,
start_point = 1,
wait_time = 0,
cores,
write_type,
plot_type,
...
)
}
\arguments{
\item{object}{A uni-length sim object that represents a specific parameter setting.}
\item{x}{A matrix of size n by m representing the target dataset for scheduling and evaluations.}
\item{xreg}{A matrix of size n by m representing the dataset that the target dataset depends on for scheduling and evaluations.}
\item{start_point}{A numeric number that represents the starting point of the simulation. Default value is \code{1}.}
\item{wait_time}{A numeric number that represents the time between training and testing. Default value is \code{0}.}
\item{cores}{The number of threads for parallel programming for multiple traces, not supported for windows users.}
\item{write_type}{A character that represents how to write the result of simulation, can be one of "charwise", "tracewise", "paramwise" or "none".}
\item{plot_type}{A character that represents how to plot the result of simulation can be one of "charwise", "tracewise", "paramwise" or "none".}
\item{...}{Characters that represent the name of parent directories that will be passed to \code{write_location_check}.}
}
\value{
An S4 sim result object.
}
\description{
Sequentially training and testing by scheduling a job.
}
\keyword{internal}
|
e787917df0e7cbd4887eeacbfce7373da40be8cb
|
12af7f39927dd30022183067a704e881ceb077b2
|
/scripts/print_session_info.R
|
5ce80b283618983b055f83e505faa29a3e411f11
|
[] |
no_license
|
harmonic-analytics/r-docker-demo
|
082d4be39f388dd46b39cda9ff6bb98928191b53
|
8ddf3cd37242ddd70ce5a0f51c2b7777bf4ab389
|
refs/heads/master
| 2023-01-19T17:23:50.628733
| 2020-11-24T20:19:41
| 2020-11-24T20:19:41
| 315,453,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40
|
r
|
print_session_info.R
|
# attach data.table so it appears in the session info report
library(data.table)
# print R version, platform, locale, and attached packages
print(sessionInfo())
|
fddfe575a2dbfec71a4e04a8d2b7d722105db6be
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MPV/examples/p2.13.Rd.R
|
ed76f16912febded1984b809818790f305d6babe
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 644
|
r
|
p2.13.Rd.R
|
library(MPV)
### Name: p2.13
### Title: Data Set for Problem 2-13
### Aliases: p2.13
### Keywords: datasets
### ** Examples
data(p2.13)
attach(p2.13)
# scatterplot of days vs index, with room below zero for the intervals
plot(days~index, ylim=c(-20,130))
# simple linear regression of days on index
ozone.lm <- lm(days ~ index)
summary(ozone.lm)
# plots of confidence and prediction intervals:
ozone.conf <- predict(ozone.lm, interval="confidence")
lines(sort(index), ozone.conf[order(index),2], col="red")
lines(sort(index), ozone.conf[order(index),3], col="red")
ozone.pred <- predict(ozone.lm, interval="prediction")
lines(sort(index), ozone.pred[order(index),2], col="blue")
lines(sort(index), ozone.pred[order(index),3], col="blue")
detach(p2.13)
|
9f1fbd4c77442af74343d9ce6657beec9c356d87
|
8d22f71a6e7ffdcd47b507bd7ec097c7a8185f4b
|
/docs/app.R
|
405e79fdf86a1892eb1979ce2bb02133e6c1d7e6
|
[
"MIT"
] |
permissive
|
RinteRface/waypointer
|
fe59e223f5b845928d400e3d5c5a31a53524a311
|
c6363e281e8bda192feb0dad8dd3e13c450fecc5
|
refs/heads/master
| 2020-05-01T06:20:05.781988
| 2020-01-30T21:06:38
| 2020-01-30T21:06:38
| 177,327,500
| 11
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
app.R
|
library(shiny)
library(waypointer)
library(fullPage)
# fullPage.js options: per-section background colors, parallax scrolling,
# and free (non-snapping) scrolling so waypoints can trigger mid-section
options <- list(
  sectionsColor = c('#FFF07C', '#E2FCEF', '#FFE2D1', '#ECEBE4', '#C4E7D4'),
  parallax = TRUE,
  autoScrolling = FALSE
)
# Single-page UI: five full-height sections navigated by a side menu.
# use_waypointer() injects the JS needed by the Waypoint objects in server().
ui <- fullPage(
  menu = c("waypointer" = "link1",
           "Trigger" = "link2",
           "Animations" = "section3",
           "Plots" = "section4",
           "Install" = "section5"),
  opts = options,
  tags$head(
    tags$link(
      href = "https://fonts.googleapis.com/css?family=Montserrat",
      rel = "stylesheet"
    )
  ),
  use_waypointer(),
  tags$style("*{font-family: 'Montserrat', sans-serif;}"),
  # section 1: title/intro
  fullSection(
    center = TRUE,
    menu = "link1",
    fullContainer(
      center = TRUE,
      h1("waypointer"),
      br(),
      br(),
      p("Simple animated waypoints for Shiny")
    )
  ),
  # section 2: waypoint-triggered text
  fullSection(
    menu = "link2",
    fullContainer(
      uiOutput("waypoint1")
    )
  ),
  # section 3: animated text plus live scroll-direction readout
  fullSection(
    menu = "section3",
    center = TRUE,
    fullContainer(
      fullRow(
        fullColumn(
          uiOutput("waypoint2")
        ),
        fullColumn(
          verbatimTextOutput("direct")
        )
      )
    )
  ),
  # section 4: waypoint-triggered plot
  fullSection(
    menu = "section4",
    center = TRUE,
    h2("Animate plots"),
    br(),
    plotOutput("waypoint3")
  ),
  # section 5: installation instructions and repo link
  fullSection(
    menu = "section5",
    center = TRUE,
    fullContainer(
      br(),
      h2("Install"),
      br(),
      code("remotes::install_github('RinteRface/waypointer')"),
      br(),
      br(),
      br(),
      tags$a(
        id = "code",
        icon("code"),
        href = "https://github.com/RinteRface/waypointer",
        class = "fa-7x"
      )
    )
  )
)
server <- function(input, output, session) {
  # One Waypoint per output element; each fires when its element scrolls
  # to 50% of the viewport.  start() begins watching immediately.
  w1 <- Waypoint$new(
    "waypoint1",
    offset = "50%"
  )$
    start()
  w2 <- Waypoint$new(
    "waypoint2",
    offset = "50%",
    animation = "fadeInLeft"
  )$
    start()
  w3 <- Waypoint$new(
    "waypoint3",
    offset = "50%",
    animation = "fadeInUp"
  )$
    start()
  # render text only once its waypoint has been triggered
  output$waypoint1 <- renderUI({
    req(w1$get_triggered())
    if(w1$get_triggered())
      h2("Programatically trigger waypoints")
  })
  # animate on trigger, then render the heading
  output$waypoint2 <- renderUI({
    req(w2$get_triggered())
    if(w2$get_triggered()){
      w2$animate()
      h3("Animate when triggered!")
    }
  })
  # live readout of the last scroll direction recorded by w2
  output$direct <- renderPrint({
    w2$get_direction()
  })
  # animate and draw the plot once its waypoint fires
  output$waypoint3 <- renderPlot({
    req(w3$get_triggered())
    if(w3$get_triggered()){
      w3$animate()
      hist(runif(100))
    }
  }, bg = "transparent")
}
shinyApp(ui, server)
|
2be76d1500e069e01946d1b9a20f33660eb3b17d
|
081c62f36f7703d7987218c1c22931e083198e73
|
/myelo/R/scholz12.R
|
90b4552b305881fbb9c82eeca3c6247ced3e3810
|
[] |
no_license
|
radivot/myelo
|
be7ed23a6d1772e55310ced91270aa1d09da6735
|
2498bed404c98f096fcda4075c34a2881265e24b
|
refs/heads/master
| 2022-12-15T00:11:22.751773
| 2022-12-04T14:24:36
| 2022-12-04T14:24:36
| 6,070,078
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,263
|
r
|
scholz12.R
|
#'PKPD G-CSF model of Scholz et al. TBMM 2012, without pegfilgrastim or chemotoxicities
#'
#'This function returns the right hand side of an ordinary differential
#'equation model of G-CSF published by M. Scholz et al. in 2012
#'(in Theoretical Biology and Medical Modelling). Subcutaneous injections are not modeled.
#'The intended use of this function is as an argument to \code{ode()} of the \code{deSolve} package.
#'
#'The model is depicted below
#'\if{html}{\figure{scholz.png}} \if{latex}{\figure{scholz.png}{options: width=5in}}
#'
#' Here S = stem cells, CG = colony forming units of granulocytes
#' and macrophages, PGB = proliferating granulopoietic blasts,
#' MGB = maturing granulopoietic blasts
#' The system is regulated by G-CSF.
#'
#'@param Time The parameters of this model do not depend on time: this argument
#'exists here as a dummy only because deSolve expects it to exist.
#'@param State Vector of current states. The elements of this vector must be
#'named because within this function's definition is
#'\code{with(as.list(State,Pars), {XXX} )} and code in XXX presupposes that it
#'can call named elements as variables with those names.
#'@param Pars Vector of parameter values with \emph{named} (see above)
#'elements.
#'@return A list of length 2 (as expected by deSolve) where the first list
#'element is the vector of derivatives, i.e. the right hand side of the ODEs
#' and the second element of the list is a vector of auxiliary
#'variables that one may wish to track over time.
#'@note This work was supported by the National Cancer Institute and Tufts
#'Integrative Cancer Biology Program under U54CA149233-029689.
#'@author Tom Radivoyevitch (\email{txr24@@case.edu})
#'@seealso \code{\link{myelo-package}, \link{rcj12}}
#'@references M. Scholz, S. Schirm, M. Wetzler, C. Engel
#'and M. Loeffler, Pharmacokinetic and -dynamic modelling of
#'G-CSF derivatives in humans, \emph{Theoretical Biology and Medical Modelling}
#' \bold{9} 32 (2012).
#'@keywords IO
#'@export
#'@examples
#'
#'\dontrun{
#'library(myelo)
#' X0=rep(1,24)
#'names(X0)<-c("g","g1","g2","g3","g4","S","CG","PGB","G4a","G4b","G4c","G4d","G4e",
#' "G5a","G5b","G5c","G5d","G5e","G6a","G6b","G6c","G6d","G6e","GRA")
#'# WARNING: the following is needed because the model is not completely specified!
#'APGBout=APGBin=ACGout=ACGin=GSS=G6nor=GRAnor=1
#'out <- ode(X0,1:300,scholz12, scholzPars)
#'rbind(head(out),tail(out))
#'
#'}
#'
scholz12<-function(Time, State, Pars) {
  # Right-hand side of the Scholz et al. (2012) G-CSF PK/PD ODE system,
  # in the form expected by deSolve::ode().  Pegfilgrastim and chemo
  # toxicity terms from the paper are omitted (commented out below).
  # NOTE(review): relies on free variables APGBout, APGBin, ACGout, ACGin,
  # GSS, G6nor, GRAnor not in State or Pars -- the roxygen examples flag
  # this ("the model is not completely specified"); they must exist in the
  # calling environment.
  with(as.list(c(State, Pars)), {
    # to keep things simple we will leave out the pegfilgrastim and leave out chemo toxicity, i.e. we look at only G-CSF
    # total maturing granulopoietic blasts across the G4/G5/G6 sub-stages
    G6=G6a+G6b+G6c+G6d+G6e
    MGB=G4a+G4b+G4c+G4d+G4e+G5a+G5b+G5c+G5d+G5e+G6
    G=CG+PGB+MGB
    # endogenous G-CSF production, down-regulated by mature cells (G6, GRA)
    arg=wG6*G6+wGRA*GRA/(wG6*G6nor+wGRA*GRAnor)
    Pendo=Pendomax - (Pendomax-Pendomin)*((Pendomax-Pendonor)/(Pendomax-Pendomin))^(arg^Pendob)
    Pref= VFD*gref*(kFu+vGRAFmax/(kGRAFm+VFD*gref))
    # central G-CSF: production minus unspecific and GRA-mediated clearance
    dg=Pref*Pendo-kFu*g-vGRAFmax*g*GRA/GRAnor/(kGRAFm+g)
    gCenRel=g/gref
    # g1..g4 form a 4-stage linear delay chain for the G-CSF signal
    dg1=gCenRel-DFGCSF*g1
    dg2=DFGCSF*(g1-g2)
    dg3=DFGCSF*(g2-g3)
    dg4=DFGCSF*(g3-g4)
    gCenRelDelay=DFGCSF*g4
    # stem cell self-renewal regulation
    Srel=S/Snor
    x=wG*log(G/GSS)+wS*ifelse(Srel>1,Srel-1,log(Srel))
    yS=(-0.5/log(2))*(log((aintS-amaxS)/(aminS-aintS))-log((anorS-amaxS)/(aminS-anorS)))*x+
      0.5*log((anorS-amaxS)/(aminS-anorS))
    yCG=(-0.5/log(2))*(log((aintCG-amaxCG)/(aminCG-aintCG))-log((anorCG-amaxCG)/(aminCG-anorCG)))*x+
      0.5*log((anorCG-amaxCG)/(aminCG-anorCG))
    aS=(amaxS*exp(-yS)+aminS*exp(yS))/(exp(-yS)+exp(yS))
    aCG=(amaxCG*exp(-yCG)+aminCG*exp(yCG))/(exp(-yCG)+exp(yCG))
    thetaS=2*ifelse(Srel>1,1,1/Srel^0.6)
    # p = self-renewal probability; (2p - 1) gives net stem cell growth
    p=pdelta*tanh(-thetaS*(Srel-1)-thetaG*(Srel-1))+0.5
    # psiCX=ifelse(chemoOn,1,0)  # skip toxicity for now
    # psiCX=ifelse(chemoOn&first,ffc,0)
    dS = (2*p-1)*S*aS/TS # - kS*psiCX*S  # skip tox
    Sout=2*(1-p)*S*aS/TS
    # CG compartment: amplification and transit time modulated by delayed G-CSF
    ACG=AmaxCGF - (AmaxCGF-AminCGF)*((AmaxCGF-AnorCGF)/(AmaxCGF-AminCGF))^(gCenRelDelay^AbCGF)
    TCG=TmaxCGF - (TmaxCGF-TminCGF)*((TmaxCGF-TnorCGF)/(TmaxCGF-TminCGF))^(gCenRelDelay^AbCGF)
    dCG = Sout*ACGin - CG*ACG/TCG # - kCG*psiCX*CG  # skip chemotox
    CGout=ACGout*CG*ACG/TCG
    # PGB compartment, same regulation scheme
    APGB=AmaxPGBF - (AmaxPGBF-AminPGBF)*((AmaxPGBF-AnorPGBF)/(AmaxPGBF-AminPGBF))^(gCenRelDelay^AbPGBF)
    TPGB=TmaxPGBF - (TmaxPGBF-TminPGBF)*((TmaxPGBF-TnorPGBF)/(TmaxPGBF-TminPGBF))^(gCenRelDelay^AbPGBF)
    dPGB = CGout*APGBin - PGB*APGB/TPGB #- kPGB*psiCX*PGB
    PGBout=APGBout*PGB*APGB/TPGB
    # G4 maturation stage: 5 sequential sub-compartments (linear chain)
    AG4=AnorG4F
    TG4=TmaxG4F - (TmaxG4F-TminG4F)*((TmaxG4F-TnorG4F)/(TmaxG4F-TminG4F))^(gCenRelDelay^AbG6F)
    dG4a =PGBout - G4a*5/TG4 # - kMGB*psiCX*G4a
    G4aout=G4a*5*AG4/TG4
    dG4b =G4aout - G4b*5/TG4 #- kMGB*psiCX*G4b
    G4bout=G4b*5*AG4/TG4
    dG4c =G4bout - G4c*5/TG4 #- kMGB*psiCX*G4c
    G4cout=G4c*5*AG4/TG4
    dG4d =G4cout - G4d*5/TG4 #- kMGB*psiCX*G4d
    G4dout=G4d*5*AG4/TG4
    dG4e =G4dout - G4e*5/TG4 #- kMGB*psiCX*G4e
    G4out=G4e*5*AG4/TG4
    # G5 maturation stage: 5 sequential sub-compartments
    AG5=AnorG5F
    TG5=TmaxG5F - (TmaxG5F-TminG5F)*((TmaxG5F-TnorG5F)/(TmaxG5F-TminG5F))^(gCenRelDelay^AbG6F)
    dG5a =G4out - G5a*5/TG5 #- kMGB*psiCX*G5a
    G5aout=G5a*5*AG5/TG5
    dG5b =G5aout - G5b*5/TG5 #- kMGB*psiCX*G5b
    G5bout=G5b*5*AG5/TG5
    dG5c =G5bout - G5c*5/TG5 # - kMGB*psiCX*G5c
    G5cout=G5c*5*AG5/TG5
    dG5d =G5cout - G5d*5/TG5 #- kMGB*psiCX*G5d
    G5dout=G5d*5*AG5/TG5
    dG5e =G5dout - G5e*5/TG5 #- kMGB*psiCX*G5e
    G5out=G5e*5*AG5/TG5
    # G6 maturation stage: 5 sequential sub-compartments feeding GRA
    AG6=AmaxG6F - (AmaxG6F-AminG6F)*((AmaxG6F-AnorG6F)/(AmaxG6F-AminG6F))^(gCenRelDelay^AbG6F)
    TG6=TmaxG6F - (TmaxG6F-TminG6F)*((TmaxG6F-TnorG6F)/(TmaxG6F-TminG6F))^(gCenRelDelay^AbG6F)
    dG6a =G5out - G6a*5/TG6 #- kMGB*psiCX*G6a
    G6aout=G6a*5*AG6/TG6
    dG6b =G6aout - G6b*5/TG6 #- kMGB*psiCX*G6b
    G6bout=G6b*5*AG6/TG6
    dG6c =G6bout - G6c*5/TG6 #- kMGB*psiCX*G6c
    G6cout=G6c*5*AG6/TG6
    dG6d =G6cout - G6d*5/TG6 #- kMGB*psiCX*G6d
    G6dout=G6d*5*AG6/TG6
    dG6e =G6dout - G6e*5/TG6 #- kMGB*psiCX*G6e
    G6out=G6e*5*AG6/TG6
    # circulating granulocytes (GRA) with first-order clearance
    # psiPred=ifelse(predOn,1,0)
    TGRA=TnorGRA #*(1+TpredGRA*psiPred)
    dGRA=G6out-GRA/TGRA
    # derivatives in State order, plus tracked auxiliary totals
    return(list(c(dg,dg1,dg2,dg3,dg4,dS,dCG,dPGB,dG4a,dG4b,dG4c,dG4d,dG4e,dG5a,dG5b,dG5c,dG5d,dG5e,
                  dG6a,dG6b,dG6c,dG6d,dG6e,dGRA),
                c(G=G,CG=CG,PGB=PGB)))
  })
}
|
78542be39cc83a9c025fb49a3169fb7b67bc090e
|
7929670b01dddcf9bec30812e12867090048c23e
|
/corso di statistica/Esercizio 3.15.R
|
37b0f73b94d9ff0c5a4237be4a0e3cbd4d68d3a0
|
[] |
no_license
|
DMMP0/R-stuff-from-school
|
1fc193756d438957e67db63e29a33196cdb2d964
|
cd1ec2197eee4b2d86c0fc0c06343364bec18183
|
refs/heads/master
| 2023-01-03T20:08:01.288399
| 2020-10-24T14:51:57
| 2020-10-24T14:51:57
| 302,140,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,159
|
r
|
Esercizio 3.15.R
|
set.seed(203818)
# A fair coin is tossed three times.  Compute (by Monte Carlo) the probability of:
# a) getting heads three times;
# b) not getting heads three times;
# c) getting tails at least once;
# d) getting heads at least once.
# ("TESTA" = heads, "CROCE" = tails; estimates printed as count, total, ratio)
money <- c("TESTA", "CROCE")
volte <- 10000
num <- 0; den <- 0
# a) all three tosses are heads
for (i in 1:volte)
{
  out <- sample(money, 3, TRUE)
  den <- den + 1
  if (out[1]=="TESTA" & out[2]=="TESTA" & out[3]=="TESTA")
    num <- num+1
}
cat(num,den,num/den,"\n")
# observed: 1183 10000 0.1183 (theoretical 1/8 = 0.125)
# b) NOT all three tosses are heads (complement of a)
num <- 0; den <- 0
for (i in 1:volte)
{
  out <- sample(money, 3, TRUE)
  den <- den + 1
  if (!(out[1]=="TESTA" & out[2]=="TESTA" & out[3]=="TESTA"))
    num <- num+1
}
cat(num,den,num/den,"\n")
# observed: 8801 10000 0.8801 (theoretical 7/8 = 0.875)
# c) at least one toss is tails
num <- 0; den <- 0
for (i in 1:volte)
{
  out <- sample(money, 3, TRUE)
  den <- den + 1
  if ((out[1]=="CROCE" | out[2]=="CROCE" | out[3]=="CROCE"))
    num <- num+1
}
cat(num,den,num/den,"\n")
# observed: 8742 10000 0.8742
# d) at least one toss is heads
num <- 0; den <- 0
for (i in 1:volte)
{
  out <- sample(money, 3, TRUE)
  den <- den + 1
  if ((out[1]=="TESTA" | out[2]=="TESTA" | out[3]=="TESTA"))
    num <- num+1
}
cat(num,den,num/den,"\n")
# observed: 8721 10000 0.8721
# end
|
da9742f695dfecce4a61851128aafb930229bda5
|
778913bea65f651c02369ba0d34b000f8bc1c191
|
/Exercício_Small e Melium Data.R
|
1502285bc05d7b63195ce078d4f153c88f871413
|
[] |
no_license
|
LivaniaDantas/etl_com_r
|
59079bc0ff14252e0e14cacbd78309fd6c1e38f0
|
9ed54ecbd77d2b6acf370965928417e71dd65e6b
|
refs/heads/master
| 2023-05-30T19:54:49.060361
| 2021-07-05T14:51:50
| 2021-07-05T14:51:50
| 355,997,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,332
|
r
|
Exercício_Small e Melium Data.R
|
library(data.table)

# Create a deliberately voluminous synthetic dataset ("large data")
casos <- 2e7
# Build the data.table with the number of rows defined above
largeData <- data.table(a=rpois(casos, 3),
                        b=rbinom(casos, 1, 0.7),
                        c=rnorm(casos),
                        d=sample(c("básico","fundamental","médio","superior"), casos, replace=TRUE),
                        e=rnorm(casos),
                        f=rpois(casos, 4),
                        g=rnorm(casos)
)
# Size of the object in memory
object.size(largeData)
# Inspect the first rows
head(largeData)
# Persist the dataset to disk (comma-separated)
write.table(largeData,"bases_originais/largeData.csv",sep=",",row.names=FALSE,quote=FALSE)

# Exercise: read the large dataset back with an optimised process
# (plain read.csv vs. data.table::fread, which samples the file to guess
# column types) and compare timings, making sure column types are still
# interpreted correctly.
enderecoBase <- 'bases_originais/largeData.csv'

# Direct extraction via read.csv.
# FIX: the original used read.csv2(), which expects ';' as the field
# separator and ',' as the decimal mark; the file above is written with
# sep="," so read.csv2 would collapse every row into a single column.
# read.csv matches the format actually written.
system.time(extracaoDireta <- read.csv(enderecoBase))
# Recorded output:
#  user  system elapsed
# 42.67    3.72   96.69
str (extracaoDireta)

# Optimised extraction via fread (samples the file automatically)
system.time(extracaoOtimizada <- fread(enderecoBase))
# Recorded output:
#  user  system elapsed
#  5.95    3.28  127.77
str (extracaoOtimizada)

############################################
# Read the large data through the ff package (file-backed data frame) and
# run a couple of basic statistics plus a linear regression on a sample.
library(ff)
# Install ffbase only when it is absent: the original called
# install.packages() unconditionally, hitting the network and modifying
# the user library on every run.
if (!requireNamespace("ffbase", quietly = TRUE)) {
  install.packages("ffbase")
}
library(ffbase)

# Create the ff file-backed data frame
system.time(extracaoff <- read.csv.ffdf(file=enderecoBase))
# Recorded output:
#  user  system elapsed
# 57.55    8.81  144.42

# Class of the object
class(extracaoff)
# Recorded output:
# [1] "ffdf"

# In-memory size (the data itself stays on disk)
object.size(extracaoff)
# Recorded output:
# 28520 bytes

# Basic statistics on column 2 (b)
mean(extracaoff[,2])
# Recorded output:
# [1] 0.7000566
median(extracaoff[,2])
# Recorded output:
# [1] 1

# Linear regression on a random sample of 100000 rows
extracaoffAmostra <- extracaoff[sample(nrow(extracaoff), 100000) , ]
head(extracaoffAmostra)
regressao <- lm(a ~ b + f, data=extracaoffAmostra)
summary (regressao)
# Recorded regression output:
# Residuals:
#     Min      1Q  Median      3Q     Max
# -3.0440 -1.0023 -0.0016  1.0070 11.0023
# Coefficients:
#              Estimate Std. Error t value Pr(>|t|)
# (Intercept) 2.9783780  0.0148766 200.205   <2e-16 ***
# b           0.0007425  0.0119861   0.062   0.9506
# f           0.0046369  0.0027361   1.695   0.0901 .
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Residual standard error: 1.736 on 99997 degrees of freedom
# Multiple R-squared: 2.875e-05, Adjusted R-squared: 8.754e-06
# F-statistic: 1.438 on 2 and 99997 DF, p-value: 0.2375
|
b140682efe88ddf49530f41884d1f69d149201c6
|
3dfdf797ca9575185e5f4b2fb11464658c872865
|
/scripts/manhattan_plots_Ta.R
|
96e946111571160092b9b64ffcb942671e4ad292
|
[] |
no_license
|
Africrop/gwas_african_rice
|
91309e28e64ce014e9ee4f82bae274d14fb964f2
|
a34e44930cbb374fb859ddbcd8849911617cb6e6
|
refs/heads/master
| 2021-03-30T21:43:00.152077
| 2018-11-05T09:44:06
| 2018-11-05T09:44:06
| 124,543,211
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,653
|
r
|
manhattan_plots_Ta.R
|
###################################################################################################################################
#
# Copyright 2017 IRD and Grenoble-Alpes University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# You should have received a copy of the CeCILL-C license with this program.
#If not see <http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.txt>
#
# Intellectual property belongs to IRD and Grenoble-Alpes University
#
# Written by Philippe Cubry, Nhung Ta and Yves Vigouroux
#
###################################################################################################################################
# This script file was developed to produce plots for the association analysis
# It will also produce tables of candidates loci using a FDR threshold
# except for MLMM where the Bonferroni threshold suggested by the authors is used
# Information about arguments :
# the variable to test
#### Getting the arguments from the script invocation ####
# args[1] = phenotype variable name; args[2] = optional FDR percentage.
args <- commandArgs(TRUE)
pheno_variable <- args[1]
# Default the FDR to 5 (percent) when no second argument is supplied.
# Rewritten as a plain if/else: the original
#   ifelse(is.na(args[2]), fdr <- 5, fdr <- as.numeric(args[2]))
# only worked through the side effect of evaluating assignments inside a
# vectorized function, which is fragile and non-idiomatic for a scalar.
if (is.na(args[2])) {
  fdr <- 5
} else {
  fdr <- as.numeric(args[2])
}
print(paste("FDR is set to", fdr))
# Defining libraries paths
.libPaths(c("/usr/local/R-3.3.3/lib64/R/library", .libPaths()))
print("loading libraries")
library(RColorBrewer); cols <- brewer.pal(9,"Set1")  # palette shared by all manhattan plots
library(qqman)
library(Cairo)
library(qvalue)
#### Panel function for pairs(): prints the Pearson correlation and its p-value ####
# Intended as the upper.panel of a pairs() plot; draws text only.
panel.cor <- function(x, y, digits = 5, cex.cor, ...)
{
  # Remember the panel's user coordinates and restore them on exit.
  old_usr <- par("usr")
  on.exit(par(old_usr))
  par(usr = c(0, 1, 0, 1))
  # Correlation coefficient, printed in the upper half of the panel.
  r_value <- cor(x, y)
  r_label <- paste("r= ", format(c(r_value, 0.123456789), digits = digits)[1], sep = "")
  text(0.5, 0.6, r_label)
  # p-value from the correlation test, printed in the lower half; very
  # small values are reported as "<0.00001".
  p_value <- cor.test(x, y)$p.value
  p_label <- paste("p= ", format(c(p_value, 0.123456789), digits = digits)[1], sep = "")
  if (p_value < 0.00001) p_label <- paste("p= ", "<0.00001", sep = "")
  text(0.5, 0.4, p_label)
}
#### Reading data ####
print("reading data")
# Association results and the MLMM significance threshold, both produced by
# the upstream analysis for this phenotype (paths are <pheno>/<file>.txt).
data <- read.table(paste(pheno_variable,"/results_",pheno_variable,".txt",sep=""))
mlmm.threshold <- read.table(paste(pheno_variable,"/mlmm_threshold_",pheno_variable,".txt",sep=""))
#### Computing FDR ####
print("computing FDR")
# For each association method, compute q-values at the requested FDR and
# store results in dynamically named variables:
#   qv.<method>.<phenotype>         -> qvalue object
#   candidates.<method>.<phenotype> -> row indices of significant SNPs
for(a in c("lfmm","cate","emma","gapit")){
assign(paste("qv.",a,".",pheno_variable,sep=""),
qvalue(p = data[paste("pv.",a,sep="")],
fdr = fdr/100))
assign(paste("candidates.",a,".",pheno_variable,sep=""),
which(get(paste("qv.",a,".",pheno_variable,sep=""))$significant))
}
#### Manhattan plots ####
##### PDF plots #####
print("producing PDF manhattan plots")
pdf(file = paste(pheno_variable,"/manhattan_plots_",pheno_variable,"_by_method.pdf",sep=""),paper = 'a4r')
par(mfrow=c(1,2))
# For each method: manhattan plot (genome-wide line drawn at the -log10
# p-value of the weakest FDR-significant SNP, or omitted when there are no
# candidates) followed by the corresponding qq-plot.
#LFMM
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.lfmm",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.lfmm.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.lfmm.",pheno_variable,sep="")),
paste("pv.lfmm",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"LFMM"))
qq(pvector = data$pv.lfmm,main=paste("qqplot LFMM",pheno_variable))
#CATE
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.cate",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.cate.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.cate.",pheno_variable,sep="")),
paste("pv.cate",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"cate"))
qq(pvector = data$pv.cate,main=paste("qqplot cate",pheno_variable))
#EMMA
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.emma",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.emma.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.emma.",pheno_variable,sep="")),
paste("pv.emma",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"emma"))
qq(pvector = data$pv.emma,main=paste("qqplot emma",pheno_variable))
#GAPIT
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.gapit",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.gapit.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.gapit.",pheno_variable,sep="")),
paste("pv.gapit",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"GAPIT/MLM"))
qq(pvector = data$pv.gapit,main=paste("qqplot GAPIT/MLM",pheno_variable))
par(mfrow=c(1,1))
# Pairwise scatter of the p-values from the four methods, with Pearson r
# and p-value in the upper panels (see panel.cor above).
pairs(data[,c("pv.lfmm","pv.cate","pv.emma","pv.gapit")], upper.panel = panel.cor)
dev.off()
#### PNG format #####
print("producing PNG manhattan plots")
# Same four manhattan/qq panels as the PDF above, rendered to a single A4
# PNG page (4 rows x 2 columns) via Cairo.
CairoPNG(filename = paste(pheno_variable,"/manhattan_plots_",pheno_variable,".png",sep=""),units = "cm",width = 21,height = 29.7,res = 600)
par(mfrow=c(4,2))
#LFMM
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.lfmm",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.lfmm.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.lfmm.",pheno_variable,sep="")),
paste("pv.lfmm",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"LFMM"))
qq(pvector = data$pv.lfmm,main=paste("qqplot LFMM",pheno_variable))
#CATE
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.cate",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.cate.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.cate.",pheno_variable,sep="")),
paste("pv.cate",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"cate"))
qq(pvector = data$pv.cate,main=paste("qqplot cate",pheno_variable))
#EMMA
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.emma",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.emma.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.emma.",pheno_variable,sep="")),
paste("pv.emma",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"emma"))
qq(pvector = data$pv.emma,main=paste("qqplot emma",pheno_variable))
#GAPIT
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.gapit",snp = "SNP",
genomewideline = ifelse(test = length(get(paste("candidates.gapit.",pheno_variable,sep="")))>0,
yes = -log10(max(data[get(paste("candidates.gapit.",pheno_variable,sep="")),
paste("pv.gapit",sep="")])),
no = FALSE),
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"GAPIT/MLM"))
qq(pvector = data$pv.gapit,main=paste("qqplot GAPIT/MLM",pheno_variable))
dev.off()
#### Correlation plots between associations methods #####
print("producing correlations plots between association methods")
# Pairwise scatter of the four methods' p-values (Pearson r / p-value in
# the upper panels, via panel.cor), rendered to a PNG.
CairoPNG(filename = paste(pheno_variable,"/corr_plots_",pheno_variable,".png",sep=""),units = "cm",width = 21,height = 29.7,res = 600)
par(mfrow=c(1,1))
pairs(data[,c("pv.lfmm","pv.cate","pv.emma","pv.gapit")], upper.panel = panel.cor)
dev.off()
#### Diagnostic qqplots #####
print("producing qq-plots")
CairoPNG(filename = paste(pheno_variable,"/diagnostic_plots_",pheno_variable,".png",sep=""),units = "cm",width = 21,height = 29.7,res = 600)
par(mfrow=c(4,2))
# Helper: observed-vs-expected p-value qq-plot with a y = x reference line.
# Expected p-values are a sorted uniform sample of the same length.
# NOTE: the original chained `plot(...) + abline(...)`; with base graphics
# plot() returns NULL, so the `+` only "worked" by accident and printed a
# spurious numeric(0) at top level. abline() is now a separate statement.
diagnostic_qqplot <- function(pvals, title) {
  plot(y = sort(runif(n = length(pvals))),
       x = sort(pvals),
       ylab = "Expected p-values",
       xlab = "Observed p-values",
       main = title)
  abline(0, 1, col = "red")
}
#AOV
diagnostic_qqplot(data$pv.aov, "qqplot for Analysis of Variance")
#LFMM
diagnostic_qqplot(data$pv.lfmm, "qqplot for LFMM")
#CATE
diagnostic_qqplot(data$pv.cate, "qqplot for CATE")
#EMMA
diagnostic_qqplot(data$pv.emma, "qqplot for EMMA")
#GAPIT
diagnostic_qqplot(data$pv.gapit, "qqplot for GAPIT/MLM")
#MLMM extBIC
diagnostic_qqplot(data$pv.mlmm.extBIC, "qqplot for MLMM/extBIC")
#MLMM mbonf
diagnostic_qqplot(data$pv.mlmm.mbonf, "qqplot for MLMM/mbonf")
dev.off()
#### MLMM plots ####
print("producing MLMM plots")
# MLMM panels use the Bonferroni-style threshold suggested by the MLMM
# authors (read earlier from mlmm_threshold_<phenotype>.txt) rather than
# the FDR-derived line used for the other methods.
CairoPNG(filename = paste(pheno_variable,"/mlmm_",pheno_variable,".png",sep=""),units = "cm",width = 21,height = 29.7,res = 600)
par(mfrow=c(2,2))
#For extBIC
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.mlmm.extBIC",snp = "SNP",
genomewideline = mlmm.threshold$V1,
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"MLMM extBIC"))
qq(pvector = data$pv.mlmm.extBIC,main=paste("qqplot MLMM extBIC",pheno_variable))
#For mbonf
manhattan(x = data,chr = "Chromosome.gapit",bp = "Pos",p = "pv.mlmm.mbonf",snp = "SNP",
genomewideline = mlmm.threshold$V1,
suggestiveline = FALSE, col=cols,main=paste(pheno_variable,"MLMM mbonf"))
qq(pvector = data$pv.mlmm.mbonf,main=paste("qqplot MLMM mbonf",pheno_variable))
dev.off()
#### Saving candidates lists ####
print("saving candidates lists")
# One candidate table per FDR-based method: the rows of `data` whose index
# appears in candidates.<method>.<phenotype> (computed above).
for(a in c("lfmm","cate","emma","gapit")){
write.table(x=(data[get(paste("candidates.",a,".",pheno_variable,sep="")),
c("SNP","Chr","Pos",paste("pv.",a,sep=""))]),
file = paste0(pheno_variable,"/candidates_",pheno_variable,"_",a,"_fdr",fdr,"perc.txt"),
quote = FALSE,
row.names = FALSE)
}
# MLMM candidates use the authors' threshold on the -log10 p-value scale.
write.table(x=data[which(-log10(data$pv.mlmm.extBIC) > mlmm.threshold$V1),
c("SNP","Chr","Pos","pv.mlmm.extBIC")],
file = paste0(pheno_variable,"/candidates_",pheno_variable,"_mlmm_extBIC.txt"),
quote = FALSE,
row.names = FALSE)
write.table(x=data[which(-log10(data$pv.mlmm.mbonf) > mlmm.threshold$V1),
c("SNP","Chr","Pos","pv.mlmm.mbonf")],
file = paste0(pheno_variable,"/candidates_",pheno_variable,"_mlmm_mbonf.txt"),
quote = FALSE,
row.names = FALSE)
print("Done")
|
5325f9ea23fbe31aa20a73026881a0de15079223
|
7b3389755018027cfc74b86bddbb62fdcb1f0ef1
|
/MerlijnIndividual/plotClusters.R
|
b0d100b625fd7883612a7cd3b3e40b77ba0ca9e3
|
[] |
no_license
|
Merlijn-van-Breugel/Machine-Learning
|
81d3eb96c375ad5bfc3c61ae24ec80d15bad86d2
|
e110a44ea8ae77051adabf3f905a68147670a9bf
|
refs/heads/master
| 2021-01-12T11:44:00.632472
| 2016-12-20T10:39:09
| 2016-12-20T10:39:09
| 72,281,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,265
|
r
|
plotClusters.R
|
# Plot a 2-D cluster solution: object scores, centroids and confidence ellipses
# Plot a 2-D cluster solution: object scores coloured by cluster, centroid
# markers (diamonds), 95% confidence ellipses, and optional text labels for
# labeled category scores.
#
# Args:
#   objectscores      - matrix/data.frame with columns D1 and D2 (assumed -- confirm with caller).
#   centers           - cluster centroids with columns D1 and D2, one row per cluster.
#   clusters          - cluster assignment for each row of objectscores.
#   catscores.labeled - list of data.frames with D1, D2, Label (and Variable)
#                       columns; only used when plotlabels > 1.
#   plotlabels        - 1: points only; 2: add labels; 3: labels coloured by variable.
#
# Returns: a ggplot object.
plotClusters <- function(objectscores, centers, clusters, catscores.labeled, plotlabels){

  dfplot <- as.data.frame(cbind(objectscores, clusters))

  # 95% confidence ellipse around each cluster centroid.
  # FIX: cbind() names the assignment column "clusters"; the original used
  # dfplot$cluster, which only worked via $ partial matching (and silently
  # breaks for tibbles). Refer to the column by its full name.
  conf.rgn <- do.call(rbind, lapply(unique(dfplot$clusters), function(t)
    data.frame(cluster = as.character(t),
               ellipse(cov(dfplot[dfplot$clusters == t, 1:2]),
                       centre = as.matrix(centers[t, 1:2]),
                       level = 0.95),
               stringsAsFactors = FALSE)))

  # Stack the labeled category scores into a single data.frame
  if (plotlabels > 1){
    dfplot.labels <- do.call("rbind", catscores.labeled)
  }

  # Base plot: object scores, centroid diamonds, ellipse outlines
  plot.clusters <- ggplot() +
    geom_point(data = dfplot, aes(D1, D2, color = as.factor(clusters))) +
    geom_point(size = 0.5) +
    scale_shape(solid = FALSE) +
    xlab("Dimension 1") +
    ylab("Dimension 2") +
    theme_bw() +
    theme(aspect.ratio = 1) +
    theme(legend.position = "bottom") +
    geom_point(data = as.data.frame(centers), aes(D1, D2, color = as.factor(rownames(centers))), shape = 18, size = 4) +
    guides(colour = guide_legend(override.aes = list(size = 5))) +
    geom_path(data = conf.rgn, aes(D1, D2, color = as.factor(cluster))) +
    guides(colour = guide_legend(override.aes = list(size = 5)))

  # Optional label layers (repelled text so labels don't overlap)
  if (plotlabels == 2){
    plot.clusters <- plot.clusters +
      geom_text_repel(data = dfplot.labels, aes(D1, D2, label = Label)
                      , arrow = arrow(length = unit(0.01, 'npc'))) +
      geom_point(data = dfplot.labels, aes(x = 0),
                 y = 0, size = 0)
  } else if (plotlabels == 3){
    plot.clusters <- plot.clusters +
      guides(colour = guide_legend(override.aes = list(size = 5))) +
      geom_text_repel(data = dfplot.labels, aes(D1, D2, label = Label
                                                , color = Variable)
                      , arrow = arrow(length = unit(0.01, 'npc')))
  }

  # Legend title depends on whether variable labels are drawn (plotlabels == 3).
  # Clearer than the original `plot.clusters + if (...) ... else ...` chaining.
  legend.name <- if (plotlabels < 3) "Clusters" else "Clusters and variables"
  plot.clusters <- plot.clusters + scale_colour_discrete(name = legend.name)

  return(plot.clusters)
}
|
b3667d328db556ca26eac40c6b80a6ac52ff3c33
|
e4c89b9cd1d77ba0365c0b4dd76f4368bf388139
|
/tests/testthat/test_gmt_import.R
|
8b047befbd7dddac916868c87126483eb8eb8a40
|
[
"MIT"
] |
permissive
|
jhchung/geneARTP
|
ea7b659db69deb9143e6cfc35cff1db9c4a6c9b9
|
008a206f466ccc6f4a861104acc369d877acf2ec
|
refs/heads/master
| 2020-12-24T14:17:43.188915
| 2015-09-18T19:27:20
| 2015-09-18T19:27:20
| 30,929,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
test_gmt_import.R
|
library(geneARTP)

# A single GMT record: set name, description, then three member genes,
# all tab-separated on one line.
test_that("Test if GMT file is imported correctly",{
  gmt_line <- "gene_set1\tGene set description\tGene1\tGene2\tGene3"
  # Parsed result is a one-element named list; the element holds the genes.
  expect_match(names(format_gmt(gmt_line)), "gene_set1")
  expect_equal(length(format_gmt(gmt_line)), 1)
  expect_equal(length(format_gmt(gmt_line)[[1]]), 3)
})
|
a6d014c36332a4ffac9100b5c4295e51e0f90783
|
090ff155d4d2ab91ddabc7a84c5206c45f4de211
|
/fit_period.R
|
2ea10c9e563cce899a0be5e35eea06a2f2251a2e
|
[] |
no_license
|
jcval94/Tesis
|
89db0e64bc51aa47e6c053d6eb0c8008fc66b168
|
cd9764811b6054c4f163aaafc45b80d971acb6ac
|
refs/heads/master
| 2021-07-12T04:39:28.303815
| 2020-08-09T05:30:43
| 2020-08-09T05:30:43
| 190,092,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,753
|
r
|
fit_period.R
|
# Dev helper (RStudio only): takes the document currently open in the
# editor, writes it to ./new.R, overwrites that file with its own
# parsed/deparsed form, and opens the result in the editor.
# Silently does nothing outside RStudio (the try() swallows the error).
cll<-function ()
{
ctx <- try(rstudioapi::getActiveDocumentContext(), silent = TRUE)
if (!inherits(ctx, "try-error")) {
contenido <- ctx[["contents"]]
file_w <- file(paste0(getwd(),"/new.R"))
# NOTE(review): the second writeLines() reopens the connection and
# OVERWRITES the first write, so only the parsed version survives --
# confirm this is intended. The connection object is also never close()d.
# parse("new.R") reads relative to the working directory, which must match
# the getwd() used above.
writeLines(((contenido)), file_w)
writeLines(as.character(parse("new.R")), file_w)
utils::file.edit(paste0(getwd(),"/new.R"))
}
return(invisible())
}
# Exploratory check: can a periodogram locate the modes of a bimodal density?
x <- c(rnorm(1000),rnorm(1000,3))
y <- density(x)
Y = c(y[['y']])
X = c(y[['x']])
# crr <- min(y[['x']])
# X <- c(y[['x']]-crr)
# X <- c(X, X + max(X))
# Top-10 periodogram peaks of the density curve.
# NOTE(review): this `p` is shadowed by the loop variable just below.
p <- TSA::periodogram(Y, plot = F)
dds <- data.frame(freq = 1/p$freq, spec = p$spec, orden = 1:length(p$spec))
dds <- head(dds[order(-dds$spec), ], 10)
periodos <- round(dds[['freq']],0)
# Print the x-locations indexed by each candidate period
for (p in periodos) {
print(X[p])
}
library(TSA)
library(forecast)
# Classical decomposition of the density curve, treated as a seasonal series
y.dc <- decompose(ts(Y, frequency = 171))
plot(y.dc)
frequency(Y)
plot(y)
# Manual periodogram via FFT
N = length(Y)
I = abs(fft(Y)/sqrt(N))^2
P = (4/N) * I
f = (0:floor(N/2))/N
plot(f, I[1:((N/2) + 1)], type = "h", xlab = "frequency", ylab = "", main = "Periodogram of CO2 data after trend removal", col = "blue")
# Periods (1/freq) of the strongest spectral components
D_F = data.frame(Freq = f, Spec = I[1:((N/2) + 1)])
1/head(D_F[order(-D_F$Spec),'Freq'])
###########################
# Estimate candidate periodicities of a series by aggregating periodogram
# peaks computed over growing prefixes (70%..100%) of the series.
#
# Args:
#   ts    - numeric series (despite the name, not necessarily a ts object).
#   place - number of top spectral peaks kept per prefix.
# Returns a list:
#   [[1]] unique candidate periods (as numeric frequencies)
#   [[2]] data.frame keyed by "freq_order", with the peak spectrum
#         normalised by the overall maximum.
periodicidad <- function(ts, place = 10) {
ddT <- data.frame(freq = c(), spec = c(), orden = c())
# Prefix lengths from 70% of the series up to its full length
ords <- floor(length(ts) * 0.7):length(ts)
for (lu in ords) {
p <- TSA::periodogram(ts[1:lu], plot = F)
dds <- data.frame(freq = 1/p$freq, spec = p$spec, orden = 1:length(p$spec))
dds <- head(dds[order(-dds$spec), ], place)
ddT <- rbind(ddT, dds)
}
ddT <- ddT[order(-ddT$spec), ]
Maxi <- max(ddT$spec)
# Drop the first two (trend-dominated) ordinates; keep the 15 strongest peaks
ddT <- head(ddT[ddT$orden > 2, ], 15)
ddT$Freq_Orden <- paste0(ddT$freq, "_", ddT$orden)
# Maximum spectrum per (freq, order) key, normalised by the global maximum
ddT <- suppressWarnings(reshape2::dcast(ddT, Freq_Orden ~ ., max, value.var = "spec"))
ddT$. <- ddT$./Maxi
ddT <- ddT[order(-ddT$.), ]
return(list(unique(as.numeric(do.call("rbind", strsplit(ddT$Freq_Orden, "_"))[, 1])), ddT))
}
# Quick smoke test of periodicidad() on the LPP2005REC financial returns data
library(timeSeries)
data(LPP2005REC)
df <- LPP2005REC
periodicidad(df, place = 10)
length(df)
#------------------------------------------------------
# Next attempt: train a model that recovers the parameters of a bimodal
# distribution (two-component Gaussian mixture fitted by EM)
# https://www.youtube.com/watch?v=nktiUUd6X_U
# very good reference
# https://tinyheero.github.io/2016/01/03/gmm-em.html
library("ggplot2")
library("dplyr")
library("reshape2")
options(scipen = 999)
set.seed(1)
# Two overlapping Gaussian components, 50 points each.
# NOTE(review): data_frame() is deprecated in current dplyr (use tibble()).
comp1.vals <- data_frame(comp = "A",
vals = rnorm(50, mean = 1, sd = 0.5))
comp2.vals <- data_frame(comp = "B",
vals = rnorm(50, mean = 1.5, sd = 0.5))
vals.df <- bind_rows(comp1.vals, comp2.vals)
# One-dimensional strip plot of both components
vals.df %>%
ggplot(aes(x = vals, y = "A", color = factor(comp))) +
geom_point(alpha = 0.4) +
scale_color_discrete(name = "Source of Data") +
xlab("Values") +
theme(axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
legend.position = "top")
# Synthetic bimodal data used for the EM fit below
wait <- c(rnorm(103,0,4),rnorm(130,7,6))
plot(density(wait))
# k-means (k = 2) provides the initial hard assignment of points to components
wait.kmeans <- kmeans(wait, 2)
wait.kmeans.cluster <- wait.kmeans$cluster
wait.df <- data_frame(x = wait, cluster = wait.kmeans.cluster)
# Initial per-component mean, variance, sd and size
wait.summary.df <- wait.df %>%
group_by(cluster) %>%
summarize(mu = mean(x), variance = var(x), std = sd(x), size = n())
#################
# Initial mixing weights proportional to the cluster sizes
wait.summary.df <- wait.summary.df %>%
mutate(alpha = size / sum(size))
#################################################
#' Expectation Step of the EM Algorithm
#'
#' Calculate the posterior probabilities (soft labels) that each of the two
#' Gaussian components has for every data point, together with the total
#' log-likelihood of the data under the current mixture parameters.
#'
#' @param x Numeric vector of observations.
#' @param mu.vector Vector containing the mean of each component.
#' @param sd.vector Vector containing the standard deviation of each component.
#' @param alpha.vector Vector containing the mixing weights of each component.
#' @return Named list containing the loglik and posterior.df
e_step <- function(x, mu.vector, sd.vector, alpha.vector) {
  # Weighted density of each component at every observation
  dens1 <- dnorm(x, mu.vector[1], sd.vector[1]) * alpha.vector[1]
  dens2 <- dnorm(x, mu.vector[2], sd.vector[2]) * alpha.vector[2]
  mixture <- dens1 + dens2
  # Responsibilities (posterior component probabilities) and the
  # log-likelihood, i.e. the sum of log mixture densities.
  list("loglik" = sum(log(mixture)),
       "posterior.df" = cbind(comp1.post = dens1 / mixture,
                              comp2.post = dens2 / mixture))
}
#' Maximization Step of the EM Algorithm
#'
#' Re-estimate the parameters of both Gaussian components from the soft
#' assignments (responsibilities) produced by the E-step.
#'
#' @param x Input data.
#' @param posterior.df Posterior probability data.frame, one column per component.
#' @return Named list containing the mean (mu), variance (var), and mixing
#'   weights (alpha) for each component.
m_step <- function(x, posterior.df) {
  resp1 <- posterior.df[, 1]
  resp2 <- posterior.df[, 2]
  # Effective number of points assigned to each component
  n1 <- sum(resp1)
  n2 <- sum(resp2)
  # Responsibility-weighted means and (biased) variances
  mu1 <- sum(resp1 * x) / n1
  mu2 <- sum(resp2 * x) / n2
  var1 <- sum(resp1 * (x - mu1)^2) / n1
  var2 <- sum(resp2 * (x - mu2)^2) / n2
  list("mu" = c(mu1, mu2),
       "var" = c(var1, var2),
       "alpha" = c(n1, n2) / length(x))
}
# EM iterations: alternate E and M steps (at most 50 rounds) until the
# log-likelihood improves by less than 1e-6 between iterations.
for (i in 1:50) {
if (i == 1) {
# Initialization from the k-means summary computed above
e.step <- e_step(wait, wait.summary.df[["mu"]], wait.summary.df[["std"]],
wait.summary.df[["alpha"]])
m.step <- m_step(wait, e.step[["posterior.df"]])
cur.loglik <- e.step[["loglik"]]
loglik.vector <- e.step[["loglik"]]
} else {
# Repeat E and M steps till convergence
# (m_step returns variances, so sqrt() converts them to the standard
# deviations that e_step expects)
e.step <- e_step(wait, m.step[["mu"]], sqrt(m.step[["var"]]),
m.step[["alpha"]])
m.step <- m_step(wait, e.step[["posterior.df"]])
loglik.vector <- c(loglik.vector, e.step[["loglik"]])
loglik.diff <- abs((cur.loglik - e.step[["loglik"]]))
if(loglik.diff < 1e-6) {
break
} else {
cur.loglik <- e.step[["loglik"]]
}
}
}
# Log-likelihood trace across iterations (auto-printed at top level)
loglik.vector
# Density of one weighted Gaussian mixture component; used as the `fun`
# argument of stat_function() to overlay fitted components on a histogram.
plot_mix_comps <- function(x, mu, sigma, lam) {
  dnorm(x, mean = mu, sd = sigma) * lam
}
# Histogram of the data with both fitted mixture components overlaid
# (red = component 1, blue = component 2), using the final m.step estimates.
data.frame(x = wait) %>%
ggplot() +
geom_histogram(aes(x, ..density..), binwidth = 1, colour = "black",
fill = "white") +
stat_function(geom = "line", fun = plot_mix_comps,
args = list(m.step$mu[1], sqrt(m.step$var[1]),
lam = m.step$alpha[1]),
colour = "red", lwd = 1.5) +
stat_function(geom = "line", fun = plot_mix_comps,
args = list(m.step$mu[2], sqrt(m.step$var[2]),
lam = m.step$alpha[2]),
colour = "blue", lwd = 1.5) +
ylab("Density") +
xlab("Values") +
ggtitle("Final GMM Fit")
|
a72ef5b59f43997c1ef0f7b50b609842e2fd068f
|
3700b4903b68eb5cd21ceb8d470427708644d2ef
|
/sec sem bks/StatisticsforDataAnalysis/unit-II/rnorm.R
|
bc1cb248e56d26404230c9960f3ce1ad4ab34612
|
[] |
no_license
|
repleeka/MTech-CSE-books
|
b3dce00e47ea1d624647c90807d974290ce34ab1
|
e9e8e52c271ddb1a3c318cb454094664d75265bd
|
refs/heads/master
| 2021-12-15T01:16:00.070094
| 2017-07-07T15:22:13
| 2017-07-07T15:22:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
rnorm.R
|
# Repeated-sampling demo: draw five samples of size 50 from N(10, 1) and
# overlay the theoretical density curve on each histogram.
set.seed(21)
# Use to reproduce the data in the figure
par(mfrow = c(2, 3))           # 2 x 3 grid of panels (one stays empty)
x <- pretty(c(6.5, 13.5), 40)  # common grid for the density curve
for (i in seq_len(5)) {
  y <- rnorm(50, mean = 10, sd = 1)
  hist(y, prob = TRUE, xlim = c(6.5, 13.5), ylim = c(0, 0.5), main = "")
  lines(x, dnorm(x, 10, 1))
}
par(mfrow = c(1, 1))
|
0da0f14ea46e6705c114919242b201f81d71e28b
|
c2b419e168de4bc1be340f049f26edb5bce6be64
|
/R/RcppExports.R
|
7eab95f84b3f767c0faa2a513bb936e0498fc28d
|
[
"MIT"
] |
permissive
|
Ilia-Kosenkov/Dipol2Red
|
922d889f0da0fbb87983c0efde982c612e5e1ce2
|
916da01ac57b639a136ca2d5f8ffe94086b0c4aa
|
refs/heads/master
| 2021-12-14T16:50:18.346248
| 2021-12-07T12:37:01
| 2021-12-07T12:37:01
| 337,994,287
| 0
| 0
|
NOASSERTION
| 2021-02-11T10:19:49
| 2021-02-11T10:19:48
| null |
UTF-8
|
R
| false
| false
| 501
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Auto-generated thin wrapper: forwards all arguments unchanged to the
# native routine registered as '_Dipol2Red_d2r_fsigma_2'. Argument
# semantics are defined in the C++ source (not visible here).
d2r_fsigma_2 <- function(input, date_col, obs_col, what, extra_vars, eps, itt_max) {
.Call('_Dipol2Red_d2r_fsigma_2', PACKAGE = 'Dipol2Red', input, date_col, obs_col, what, extra_vars, eps, itt_max)
}

# Auto-generated thin wrapper around '_Dipol2Red_d2r_correct_pol';
# arguments pass through unchanged to C++.
d2r_correct_pol <- function(data, px_corr, py_corr, angle_corr) {
.Call('_Dipol2Red_d2r_correct_pol', PACKAGE = 'Dipol2Red', data, px_corr, py_corr, angle_corr)
}
|
2569f88a24e4a770f3dc7371a8b4ab7eaf4f9b01
|
afdc42af8a468f29b56269626dccbe5859d0e595
|
/R_src/unblkcmt/main/main.R
|
89f05ee3004823a7c5207e45fc7d77c41ff5767a
|
[] |
no_license
|
pengyu/pytools
|
feb44c3da2922dae5a51d19abe2fbc6c05a5076f
|
578af6b6135f1fc999c89ca0ae0ca314cbdbfc76
|
refs/heads/master
| 2021-03-12T23:49:06.527392
| 2013-04-16T03:15:03
| 2013-04-16T03:15:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 98
|
r
|
main.R
|
# Demo driver: load unblkcmt() from the parent directory and run it on two
# small multi-line code strings.
source('../unblkcmt.R')
x=10
y=20
# presumably unblkcmt() strips/evaluates a "block comment" wrapper around
# the code string -- see ../unblkcmt.R for the actual semantics (not
# visible from this file)
unblkcmt(
'
x+y
')
unblkcmt(
'
a=1
b=2
a+b
')
|
b49fb004c3835b8f3204d8a5c35e4b8b8c655270
|
15a72bd7450ebf51e4b65fc0eaccd12f868f0920
|
/man/bugs.data.Rd
|
eafd51135d9cf3ec5e073bf19d14dbda4a826a51
|
[] |
no_license
|
cran/R2OpenBUGS
|
68a992b28b7934a01be1667af22dbde506dd90eb
|
21402f0d531767b04db24ba94021f3d5d0507e6a
|
refs/heads/master
| 2021-06-04T13:42:26.061056
| 2020-04-02T16:31:15
| 2020-04-02T16:31:15
| 17,681,794
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 916
|
rd
|
bugs.data.Rd
|
\name{bugs.data}
\alias{bugs.data}
\title{Writing input for OpenBUGS}
\description{Write file for \pkg{OpenBUGS} to read.}
\usage{
bugs.data(data, dir = getwd(), digits = 5, data.file = "data.txt")
}
\arguments{
\item{data}{either a named list (names corresponding to variable names
in the \code{model.file}) of the data for the \pkg{OpenBUGS} model,
\emph{or} a vector or list of the names of the data objects used by
the model}
\item{dir}{the directory to write the file \file{data.txt} to}
\item{digits}{number of significant digits used for \pkg{OpenBUGS}
input, see \code{\link{formatC}}}
\item{data.file}{name for the file R writes the data into.}
}
\value{The name of \code{data.file} is returned and as a side effect,
the data file is written}
\seealso{The main function to be called by the user is \code{\link{bugs}}.}
\keyword{file}
\keyword{IO}
|
dba22bcabffd7f0b3afea76883327e3b2e43b575
|
16bd5de71408bb24e941831522be2ee24dead30f
|
/R4ML/R/r4ml.sampling.R
|
5d3b8abc69e8f415ffea58d0ac76fe023fa8acfc
|
[
"Apache-2.0"
] |
permissive
|
kant/r4ml
|
3f8ff34737da3073c573d4c9c88b74fefa25badd
|
f5dc4d27eedecba953456e33efd89fe1f89ee5e1
|
refs/heads/master
| 2020-03-28T08:42:05.446324
| 2018-06-07T18:28:13
| 2018-06-07T18:28:13
| 147,983,208
| 0
| 0
|
Apache-2.0
| 2018-09-09T01:30:28
| 2018-09-09T01:30:28
| null |
UTF-8
|
R
| false
| false
| 5,918
|
r
|
r4ml.sampling.R
|
#
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#' @include zzz.R
NULL
## note: implement all the sample algo later
#' Generate random samples from an r4ml.frame or r4ml.matrix or spark SparkDataFrame
#'
#' Two sampling methods are supported:
#'
#' 1. Random sampling: Generate a random subset of the given dataset with
#' the specified size in the form of a percentage.
#'
#' 2. Partitioned sampling: Split the given dataset into the specified
#' number of randomly generated non-overlapping subsets.
#'
#' @name r4ml.sample
#' @title Random sampling
#' @param data (r4ml.frame, r4ml.matrix, or SparkDataFrame): Dataset to sample from
#' @param perc (numeric): For random sampling, an atomic value between (0, 1)
#' that represents the sampling percentage. For partitioned sampling, a
#' vector of numerics in the interval (0, 1), such that their sum is
#' exactly 1.
#' @param experimental (logical)
#' @param cache (logical): If TRUE, output is cached
#' @return For random sampling, a single r4ml.frame/r4ml.matrix/SparkDataFrame is returned. For
#' partitioned sampling, a list of r4ml.frames or r4ml.matrices or Spark SparkDataFrame is returned, and each
#' element in the list represents a partition.
#' @export
#' @examples \dontrun{
#'
#' # Generate a 10% random sample of data
#' iris_r4ml_df <- as.r4ml.frame(iris)
#' sample_iris_r4ml_df <- r4ml.sample(iris_r4ml_df, 0.1)
#'
#' # Generate the 50 samples
#' sample_50_iris_r4ml_df <- r4ml.sample(iris_r4ml_df, 50/nrow(iris_r4ml_df))
#'
#' # Randomly split the data into training (70%) and test (30%) sets
#' iris_r4ml_df_split <- r4ml.sample(iris_r4ml_df, c(0.7, 0.3))
#' nrow(iris_r4ml_df_split[[1]]) / nrow(iris_r4ml_df)
#' nrow(iris_r4ml_df_split[[2]]) / nrow(iris_r4ml_df)
#'
#' # Randomly split the data for 10-fold cross-validation
#' iris_cv <- r4ml.sample(iris_r4ml_df, rep(0.1, 10))
#' }
# Random or partitioned sampling of an r4ml.frame / r4ml.matrix /
# SparkDataFrame (see the roxygen block above for the user-facing docs).
#
# perc of length 1       -> single random subset at that rate.
# perc of length 2 + experimental -> faster sample/except two-way split.
# perc of length >= 2    -> non-overlapping partitions via a uniform
#                           random column and cumulative-weight buckets.
r4ml.sample <- function(data, perc, experimental = FALSE,
                        cache = FALSE) {
  logSource <- "r4ml.sample"
  # --- Parameter validation ---
  # Normalise missing arguments to NULL so a single empty-check covers both
  # "not supplied" and "supplied as NULL/empty".
  if (missing(data)) {
    data <- NULL
  }
  if (missing(perc)) {
    perc <- NULL
  }
  if (.r4ml.isNullOrEmpty(data)) {
    r4ml.err(logSource, "A dataset must be specified")
  }
  if (.r4ml.isNullOrEmpty(perc)) {
    r4ml.err(logSource, "perc must be specified.")
  }
  # Scalar condition: use short-circuiting && rather than element-wise &.
  if (!inherits(data, "r4ml.frame") && !inherits(data, "r4ml.matrix") && !inherits(data, "SparkDataFrame")) {
    r4ml.err(logSource, "The specified dataset must either be an r4ml.frame, r4ml.matrix, or SparkDataFrame.")
  }

  # Convert the raw SparkDataFrame result(s) back to the type of the input
  # dataset, optionally caching each output.
  outputType <- function(...) {
    data_type <- class(data)
    castDF <- function(df) {
      casted_df <- df
      if (data_type == 'r4ml.frame') {
        casted_df <- as.r4ml.frame(df, repartition = FALSE)
      } else if (data_type == 'r4ml.matrix') {
        casted_df <- as.r4ml.matrix(df)
      } else if (data_type == 'SparkDataFrame') {
        casted_df <- df
      } else {
        r4ml.err(logSource, "Unsupported type " %++% data_type %++% " passed in")
      }
      if (cache & !casted_df@env$isCached) {
        dummy <- SparkR::cache(casted_df)
      }
      casted_df
    }
    args <- list(...)
    # sapply is kept deliberately: its simplification appears to unwrap a
    # single result so the one-sample case returns the object itself rather
    # than a one-element list (TODO confirm against callers before changing).
    out_type <- sapply(args, castDF)
    out_type
  }

  if (length(perc) == 1) {
    # Simple random sampling at rate perc (without replacement).
    if (perc > 1) {
      r4ml.err(logSource, "perc must be <= 1")
    }
    df1 <- SparkR::sample(data, FALSE, perc)
    return(outputType(df1))
  } else if (length(perc) == 2 && experimental == TRUE && # this feature doesn't always work on a cluster
             (class(data) %in% c('r4ml.frame', 'r4ml.matrix', 'SparkDataFrame'))) {
    # Slightly faster two-way split: sample once and take the set complement.
    if (abs(sum(perc) - 1.0) >= 1e-6) {
      r4ml.err(logSource, "Random splits must sum to 1")
    }
    perc_1 <- perc[1]
    df1 <- SparkR::sample(data, FALSE, perc_1)
    df2 <- SparkR::except(data, df1)
    # (removed a dead `out <- c(df1, df2)` that was immediately overwritten)
    out <- do.call(outputType, list(df1, df2))
    return (out)
  } else if (length(perc) >= 2) {
    # Partitioned sampling: split into length(perc) non-overlapping subsets.
    if (abs(sum(perc) - 1.0) >= 1e-6) {
      r4ml.err(logSource, "sum of perc weights must be equal 1")
    }
    rcolname <- "__r4ml_dummy_runif__"
    if (rcolname %in% SparkR::colnames(data)) {
      r4ml.err(logSource, "data already has column " %++% rcolname)
    }
    # create the uniform random (0,1) draw in the column rcolname
    aug_data <- SparkR::withColumn(data, rcolname, SparkR::rand())
    aug_data_cnames <- SparkR::colnames(aug_data)
    # One fold per [lb, ub) bucket of the cumulative weights; the helper
    # filters rows whose uniform draw falls in the bucket and drops the
    # temporary random column again.
    create_data_fold <- function (bounds) {
      lb <- bounds[[1]]
      ub <- bounds[[2]]
      predicate <- lb %++% " <= " %++% rcolname %++% " and " %++% rcolname %++% " < " %++% ub
      filter_aug_data <- SparkR::filter(aug_data, predicate)
      filter_data <- SparkR::select(filter_aug_data,
                                    aug_data_cnames[aug_data_cnames != rcolname])
      filter_data
    }
    perc_lbound <- cumsum(perc) - perc
    perc_ubound <- cumsum(perc)
    folded_data <- lapply(Map(c, perc_lbound, perc_ubound), create_data_fold)
    out <- do.call(outputType, folded_data)
    return(out)
  } else {
    r4ml.err(logSource, "Other forms of sampling not implemented yet")
    #@TODO
  }
  return (NULL)
}
|
64d13ca09e133678113ab02f0ea144e78ec0911f
|
8c2390dfd1f98368dabac26964d8f0e08db55f7d
|
/cachematrix.R
|
70d354a762ca50c1c21db4e81e9ac69702576663
|
[] |
no_license
|
caomengchu/ProgrammingAssignment2
|
def49f50b86a1c6126c7c5ed0a64eca7002f8a21
|
ef906106d750ebb66e7503a3f5124e37938ed9d7
|
refs/heads/master
| 2020-12-25T00:27:45.283450
| 2015-02-19T03:22:52
| 2015-02-19T03:22:52
| 30,999,230
| 0
| 0
| null | 2015-02-19T03:09:27
| 2015-02-19T03:09:27
| null |
UTF-8
|
R
| false
| false
| 1,060
|
r
|
cachematrix.R
|
## Below is a solution that computes the inverse of a given matrix quickly by combining a cache with a traditional
## inverse calculation. It consists of two functions: makeCacheMatrix and cacheSolve.
## makeCacheMatrix takes a matrix, creates several accessor functions, and returns them as a list.
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions:
##   set(y)         -- replace the stored matrix and invalidate the cached inverse
##   get()          -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()       -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # BUG FIX: must use <<- so the cached inverse in the enclosing
    # environment is invalidated; the original plain <- only created a
    # throwaway local, leaving a stale inverse after set().
    inv <<- NULL
  }
  get <- function() x
  setinv <- function(inverse) {
    # BUG FIX: must use <<- to store the inverse in the enclosing
    # environment; the original local assignment meant getinv() always
    # returned NULL and the cache never worked.
    inv <<- inverse
  }
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of a given matrix: it first checks whether the inverse
## is already in the cache. If yes, it returns the cached value; if no, it computes the inverse and saves it in the cache.
## Return the inverse of the special "matrix" x (built by makeCacheMatrix).
## A previously cached inverse is reused; otherwise the inverse is computed
## with solve() and written back into the cache before being returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data ")
    return(cached)
  }
  mat <- x$get()
  inverse <- solve(mat)
  x$setinv(inverse)
  inverse
}
|
3dae761af9fb9c632335e5ee9092563d1ed407bd
|
b08db46f82fade26c685e2896362e4eb4d3bda3c
|
/man/show_stats.Rd
|
4c1c0004e9b6fd92247ab49a8fd1f509a83387d6
|
[] |
no_license
|
mvadu/influxdbr2
|
c27b5637c74a9dbc11c59dbb737040a13123015d
|
57a4e94c8968ca6111cb3433fc9fb0dc8a355965
|
refs/heads/master
| 2021-06-09T13:29:10.992503
| 2017-01-08T16:48:01
| 2017-01-08T16:48:01
| 69,948,873
| 2
| 2
| null | 2017-01-08T16:48:02
| 2016-10-04T08:58:36
|
R
|
UTF-8
|
R
| false
| true
| 583
|
rd
|
show_stats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/influxdb_diagnostics.R
\name{show_stats}
\alias{show_stats}
\title{show_stats}
\usage{
show_stats(con)
}
\arguments{
\item{con}{An influx_connection object (s. \code{influx_connection}).}
}
\value{
A list of data.frame objects.
}
\description{
Show stats
}
\details{
This function is a convenient wrapper for showing stats
by calling \code{influx_query} with the corresponding query.
}
\author{
Dominik Leutnant (\email{leutnant@fh-muenster.de})
}
\seealso{
\code{\link[influxdbr2]{influx_connection}}
}
|
a95762f89ccd2fc492e6047bb392238e82d7b32d
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/RSDA/R/display.sym.table.R
|
89531a77d41947c9022911da0ad68cadbc8c67f1
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68
|
r
|
display.sym.table.R
|
## Display a symbolic data table in human-readable form.
##
## @param sym.data A symbolic data object; only its `meta` component is used.
## @return The `meta` element of `sym.data`.
display.sym.table <- function(sym.data) {
  sym.data$meta
}
|
dac289204ec3982c304692de2aaf422fa4c71c26
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qtlcharts/examples/iheatmap.Rd.R
|
50bb8a6910898426f5ecf064ab4a807fc8381f07
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 348
|
r
|
iheatmap.Rd.R
|
library(qtlcharts)
### Name: iheatmap
### Title: Interactive heat map
### Aliases: iheatmap
### Keywords: hplot
### ** Examples
n <- 101
x <- y <- seq(-2, 2, len=n)
# Vectorized surface evaluation: outer() computes f(x[i], y[j]) for every
# (i, j) pair at once, replacing the original O(n^2) scalar double loop
# while producing the identical n-by-n matrix.
z <- outer(x, y, function(a, b) a * b * exp(-a^2 - b^2))
## No test:
iheatmap(z, x, y)
## End(No test)
|
72750839db51f33c6860b75542e556c4f7101d67
|
5a70e57be476118ad2d4e0c3f5f67a375279e560
|
/tests/testthat/test-public-service.R
|
6d45e69e7497a900304e15249ff2d896296352d1
|
[] |
no_license
|
dgruenew/aws.s3
|
cd41d01bbf503ba4b159aa9b548a78630cafa6a4
|
3b855d3330f54f9c4ac4b476aa0085e6d653e6e1
|
refs/heads/master
| 2021-03-05T11:13:23.264439
| 2020-03-10T16:32:23
| 2020-03-10T16:32:23
| 246,118,024
| 2
| 0
| null | 2020-03-09T18:57:43
| 2020-03-09T18:57:42
| null |
UTF-8
|
R
| false
| false
| 146
|
r
|
test-public-service.R
|
context("Public service tests")
# Supplying invalid AWS credentials should cause bucketlist() to raise an
# error rather than return silently.
test_that("intentional bad keys", {
expect_error(bucketlist(key = 'BAD KEY', secret = 'BAD SECRET'))
})
|
ce144ddee31193936e18d8b2a0928021e298f6e1
|
cfa9a6c3519a17bcded7cb5091be11c02739434d
|
/man/is.op.Rd
|
2bebc816b17157489ba2c3d8a3b0caf8cb1e5d2f
|
[] |
no_license
|
cran/ggloop
|
5f3529804c9c94c35788689d17dd733615b6672f
|
e3aaa56cbd19c4c9d6f1d2598a48eb3571d16a75
|
refs/heads/master
| 2021-01-11T03:49:08.312424
| 2016-10-20T01:58:31
| 2016-10-20T01:58:31
| 71,409,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 473
|
rd
|
is.op.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.eval.R
\name{is.op}
\alias{is.op}
\title{Determine if an input uses an arithmetical operator (\code{/}, \code{+},
\code{-}, \code{*}, \code{^}).}
\usage{
is.op(lst)
}
\arguments{
\item{lst}{A list object to be tested.}
}
\description{
Matches the argument against the \code{ops} string using \code{grep}. Any matches are
subsequently noted and the unique list is returned.
}
|
b715d0ba2829294542119ff1ae93986004ae5659
|
de66d28db21fd3b4a7c7e0e6f753be5a9c6d2ba5
|
/_blos/_to_merge_/query2csv.r
|
71322caf1911f58752478d60737aca448c716b31
|
[] |
no_license
|
blavoie/oracle-scripts
|
d6a0df1291e5ca16f66fd82c9f2528f2a5bcd388
|
c2476d1645d19a95928b40d8abc2cb7d85aa84ea
|
refs/heads/master
| 2021-01-10T21:15:03.145717
| 2017-08-08T16:27:34
| 2017-08-08T16:27:34
| 38,309,901
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
query2csv.r
|
# Connect to an Oracle database over JDBC, fetch the full EMP table, and
# dump it to a local CSV file.
library(RJDBC)
# NOTE(review): the driver-jar path, host, and credentials are hard-coded --
# parameterize (e.g. via environment variables) before reuse.
drv <-JDBC("oracle.jdbc.driver.OracleDriver","/path/to/jdbc/ojdbc6.jar")
conn<-dbConnect(drv,"jdbc:oracle:thin:@grahn-dev.us.oracle.com:1521:orcl","scott","tiger")
data <-dbGetQuery(conn, "select * from emp")
# row.names=FALSE keeps the CSV limited to the actual query columns.
write.table(data, file="out.csv", sep = ",", row.names=FALSE)
|
12b3b0cdda76c7ab56287a7e9f5f9efbdce78bb5
|
6aa1786f3098a283acfea631cf21f2bcc865234c
|
/Air_bnb/app.R
|
67aa7aae17d06bb31a60fc1a61bb1cda8f4e06fa
|
[] |
no_license
|
vharsheny/Vharsheny-Datascience
|
45d03df801e996379968d9c9e87839b75f550ba0
|
3c1114bdbd3e62d15f2d3baa6b767cbbe550d111
|
refs/heads/main
| 2023-05-31T22:19:45.230551
| 2021-06-04T06:04:17
| 2021-06-04T06:04:17
| 326,940,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,904
|
r
|
app.R
|
library(tidyverse)
library(janitor)
library(leaflet)
library(ggmap)
library(corrplot)
library(RColorBrewer)
library(ggcorrplot)
library(shiny)
library(shinydashboard)
library(DT)
# shinydashboard layout: a sidebar with five tabs (Home, Room availability,
# Price, Room Type, Map) and a body holding the matching tab contents.
ui<-dashboardPage(dashboardHeader(title = "Airbnb NYC"
),
dashboardSidebar(sidebarMenu(id='sidebar',
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
),
menuItem("Home", tabName = "Home", icon = icon("home")),
menuItem("Room availablity", tabName = "Room_availablity", icon = icon("area-chart")),
menuItem("Price", tabName = "Price", icon = icon("line-chart")),
menuItem("Room Type", tabName = "Room_Type", icon = icon("line-chart")),
menuItem("Map", tabName = "Map", icon = icon("line-chart"))
#menuItem("Map", tabName = "Map", icon = icon("line-chart"))
)),
dashboardBody(
tabItems(
# Home: data preview table plus correlation plot (rendered server-side).
tabItem(tabName = "Home",
uiOutput("table"),
uiOutput("corr1")
#uiOutput("swing1")
),
# Availability per borough.
tabItem(tabName = "Room_availablity",
uiOutput("location")
),
# Price tab: slider + borough checkboxes feed the df2() reactive.
tabItem(tabName = "Price",
fluidRow(
column(4,
h3("Filter by Price"),
sliderInput("price", label = "Price:",
min = 1, max = 4000, value = c(1,3000))
),
column(4,
h3("Filter by Neighbourhood Group"),
checkboxGroupInput("neighbourhood_group", label="Neighbourhood Group:",
choices = c("Bronx","Brooklyn","Manhattan","Queens","Staten Island"),
selected = c("Bronx","Brooklyn"))
),
),
uiOutput("price")
),
# Room Type tab: room-type + borough checkboxes feed the df1() reactive.
tabItem(tabName = "Room_Type",
fluidRow(
column(4,
h3("Filter by Room Type"),
checkboxGroupInput("room_type", label="Room Type:",
choices = c("Private room","Entire home/apt","Shared room"),
selected = c("Private room","Entire home/apt"))
),
column(4,
h3("Filter by Neighbourhood Group"),
checkboxGroupInput("neighbourhood_group1", label="Neighbourhood Group:",
choices = c("Bronx","Brooklyn","Manhattan","Queens","Staten Island"),
selected = c("Bronx","Brooklyn"))
),
),
uiOutput("room_type")
),
# Map tab: price/availability sliders + room-type boxes feed the df() reactive.
tabItem(tabName = "Map",
fluidRow(
column(4,
h3("Filter by Price"),
sliderInput("price1", label = "Price:",
min = 1, max = 4000, value = c(1,3000))
),
column(4,
h3("Filter by Availability"),
sliderInput("availability_365", label = "Availability:",
min = 1, max = 366, value = c(1,300))
),
column(4,
h3("Filter by Room Type"),
checkboxGroupInput("room_type1", label="Room Type:",
choices = c("Private room","Entire home/apt","Shared room"),
selected = c("Private room","Entire home/apt"))
),
),
leafletOutput("mymap")
)
)
)
)
# Server logic: reads the Airbnb NYC 2019 listings CSV, then renders the
# overview table, correlation heatmap, per-tab summary charts, and the
# leaflet map, each driven by the tab's filter inputs.
server <- function(input, output, session) {
# Raw listings; read from the working directory.
anb1<- reactive({
"AB_NYC_2019.csv" %>%
read_csv()
})
# Home tab: first five rows as a preview table.
output$tb1<-renderDataTable({
anb1() %>% head(5)
})
output$table<-renderUI({
tags$div(
tags$h3("Table",style="color:black"),
DTOutput("tb1")
)
})
# Numeric-only view of the data used for the correlation matrix.
anb2 <-reactive({
select(anb1(),-c(name,neighbourhood_group,neighbourhood,host_name,last_review,room_type,id,host_id,reviews_per_month))
#select(anb1(),-c(name,neighbourhood_group,neighbourhood,host_name,last_review,room_type,latitude,longitude,reviews_per_month,id,host_id))
})
output$corr<-renderPlot({
airbnbcor<-cor(anb2())
ggcorrplot(airbnbcor, hc.order = TRUE, type = "full",
lab = TRUE)
})
output$corr1 <- renderUI({
tags$div(
tags$h3("Correlation of Data",style="color:black"),
plotOutput("corr")
)
})
#1. location wise room availability
observe({
output$plot1<-renderPlot({
anb1()%>%
group_by(neighbourhood_group) %>%
summarise(count=availability_365 %>% sum) %>%
arrange(desc(neighbourhood_group)) %>%
ungroup %>%
slice(1:5) %>%
arrange(desc(count)) %>%
ggplot(aes(x=neighbourhood_group %>% factor() %>% fct_reorder(count),
y=count))+
geom_col(position = position_dodge())+
geom_col(fill="#06cc4b")+
theme_minimal()+
geom_text(aes(label = count), position = position_dodge(width = 1),
vjust = -0.5, size = 3.0)+
theme(
plot.title = element_text(size = 20),
axis.text = element_text(size = 10),
plot.subtitle = element_text(size = 10),
legend.position = "bottom")+
labs(title="Availability of Rooms ",subtitle="Based on Location",
x="Location",y="Room availability")
})
output$location<-renderUI({
tags$div(
tags$h3("Romms Available at various location",style="color:black"),
plotOutput("plot1")
)
})
})
#2.average price for each area ====
# Listings filtered by the Price tab's slider range and borough checkboxes.
df2 <- reactive({
df2 <- anb1() %>%
filter((price >= input$price[1] & price <= input$price[2]) &
(neighbourhood_group %in% input$neighbourhood_group)) #within the checkbox numeric values
return(df2)
})
observe({
output$plot2<-renderPlot({
df2()%>%
group_by(neighbourhood_group) %>%
# NOTE(review): 'no.rm' is not an argument of mean() -- it is silently
# absorbed by '...', so NA prices are NOT removed here; likely meant
# na.rm = TRUE. Confirm before changing.
summarise(count=price%>% mean (no.rm=T) %>% round(2)) %>%
ungroup() %>%
slice(1:5) %>%
arrange(desc(count)) %>%
ggplot(aes(x=count,
y=neighbourhood_group %>% factor() %>% fct_reorder(count)))+
geom_col(position = position_dodge())+
geom_col(fill="orange")+
coord_flip()+
theme_minimal()+
geom_text(aes(label = count), position = position_dodge(width = 1),
vjust = -0.5, size = 3.5)+
theme(
plot.title = element_text(size = 15),
axis.text = element_text(size = 15),
plot.subtitle = element_text(size = 10),
legend.position = "bottom")+
labs(title="Average price of each Location",x="Price",y="Location")
})
output$price<-renderUI({
tags$div(
plotOutput("plot2")
)
})
})
#3. ranking by room type
# Listings filtered by the Room Type tab's borough and room-type checkboxes.
df1 <- reactive({
df1 <- anb1() %>%
filter((neighbourhood_group %in% input$neighbourhood_group1) &
(room_type %in% input$room_type)) #within the checkbox numeric values
return(df1)
})
observe({
output$plot3<-renderPlot({
df1() %>%
group_by(room_type,neighbourhood_group) %>%
summarise(count=c(number_of_reviews) %>% sum %>% round(2)) %>%
arrange(desc(count)) %>%
#mutate(count=count/100) %>%
mutate(room=room_type %>% factor() %>% fct_reorder(count)) %>%
ggplot(aes(x =neighbourhood_group,
y=count,
fill=room,
))+
geom_col(position = position_dodge())+
scale_fill_manual(values = c("#06cc4b", "#349ded","#F6972C"))+
labs(title = "Room Type of Each Location",
subtitle = "Based on Reviews",
x = "Location",
y = "Reviews",
fill = "Room Type")+
geom_text(aes(label = count %>% round(2)), position = position_dodge(width = 1),
vjust = -0.5, size = 3)+
theme_minimal() +
theme(
plot.title = element_text(size = 20),
axis.text = element_text(size = 10),
plot.subtitle = element_text(size = 10),
legend.position = "bottom")
})
output$room_type<-renderUI({
tags$div(
plotOutput("plot3")
)
})
})
# anb1 %>%
# group_by(neighbourhood_group, room_type)%>%
# summarise(#Number = n(),
# MedianPrice = median(price, na.rm = T)) %>%
# ggplot() +
# geom_point(aes(x = neighbourhood_group, y = room_type , color = MedianPrice)) +
# xlab("") + ylab("")+ theme_minimal(base_size = 13) +
# theme(strip.background = element_blank(), strip.text = element_text(color = "transparent")) +
# ggtitle("Relationship Between Property Type, Room Type,\nArea, Rating, and Price")
# map
#Creating Listings across NYC
# Listings filtered by the Map tab's price/availability sliders and
# room-type checkboxes.
df <- reactive({
df <- anb1() %>%
filter((price >= input$price1[1] & price <= input$price1[2]) &
(availability_365 >= input$availability_365[1] & availability_365 <= input$availability_365[2]) &
(room_type %in% input$room_type1)) #within the checkbox numeric values
return(df)
})
observe({
output$mymap <- renderLeaflet({
# NOTE(review): the popup mixes anb1() (unfiltered) columns with df()
# (filtered) columns -- the name/host shown may not match the marker's
# price/room type once filters are applied; verify against the data.
leaflet(df()) %>%
addTiles() %>%
addMarkers(~longitude, ~latitude,labelOptions = labelOptions(noHide = F),clusterOptions = markerClusterOptions(),
popup = paste0("<b> Name: </b>", anb1()$name , "<br/><b> Host Name: </b>",
anb1()$host_name, "<br> <b> Price: </b>", df()$price, "<br/><b> Room Type: </b>", df()$room_type)) %>%
setView(-74.00, 40.71, zoom = 12) %>%
addProviderTiles("CartoDB.Positron")
})
})
}
shinyApp(ui,server)
|
f20bfef5eda19157dac32960386d7db6efd1fddc
|
3f7e5afc088b578c99d04219b68907439291b0ea
|
/R/doyday.R
|
21d8eb35aa54855885fffb4cea9e4d50bbae8688
|
[] |
no_license
|
cran/insol
|
53c6d9cae154efafc0fda8bd99045cc91fc94026
|
4fe6f26351af22121a5319b4a8b4950b66ca6226
|
refs/heads/master
| 2021-06-04T03:14:31.090918
| 2021-02-10T14:30:05
| 2021-02-10T14:30:05
| 17,696,790
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
doyday.R
|
## Convert a (year, day-of-year) pair -- or, with a single argument, a
## fractional year whose decimal part encodes the position within the year --
## into a POSIXlt date-time.
##
## doyday(year, doy): 'doy' may be fractional; the fraction becomes H:M:S.
## doyday(year.dy):   the decimal part of the single argument is scaled by
##                    the number of days in that year (leap-aware).
doyday <-
function(year,doy){
  # No arguments at all: print usage and return NULL invisibly.
  if (nargs() < 1) {
    cat("USAGE: doyday(year,doy) \nUSAGE: doyday \n(year.dy)")
    return()
  }
  yr <- floor(year)
  if (nargs() == 1) {
    # Single fractional argument: decimal part * days-in-year = day of year.
    days_in_year <- ifelse((yr %% 4 == 0 & yr %% 100 != 0) | yr %% 400 == 0, 366, 365)
    frac_day <- (year - yr) * days_in_year
  } else {
    frac_day <- doy
  }
  # Peel off whole days, hours, minutes, seconds from the fractional day.
  whole_day <- floor(frac_day)
  hours <- (frac_day - whole_day) * 24
  whole_hour <- floor(hours)
  minutes <- (hours - whole_hour) * 60
  whole_min <- floor(minutes)
  seconds <- (minutes - whole_min) * 60
  whole_sec <- floor(seconds)
  strptime(paste(yr, whole_day, whole_hour, whole_min, whole_sec),
           format = "%Y %j %H %M %S")
}
|
f4190c740a021d1b908a0724f8021ce5988a8fb9
|
14fb055b9993d2df2b648dc785892cb37b7a313a
|
/01_model_generator/maria_generator.R
|
b928ea6234e0c741ef8f8a1582919eaf9111fe18
|
[] |
no_license
|
MARIA-Pipeline/MARIAProject
|
d263fbb5602097e3878c0282e6f2676217933631
|
f9a2011fb9fdfb321ff07d0af8fe436fee35937f
|
refs/heads/master
| 2021-01-10T17:00:26.845856
| 2015-12-08T09:00:06
| 2015-12-08T09:00:06
| 47,611,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,963
|
r
|
maria_generator.R
|
# Guillaume Lobet - University of Liege
# ArchiSimple Batch
# The aim of this script is to run ArchiSimple in a batch mode in order to create
# any number of root systems, with any combinaison of parameters.
# The script works as follow:
# - Looping over the parameter set (user defined)
# - For each combinaison, create the param.txt file used by ArchiSimple
# - Run ArchiSimple and generate the corresponding RSML file
# - Once all the root systems are generated, create the images using RSML_reader.jar
# - Once all the images are generated, analyse them using MARIAJ.jar
# - Then, use a Random Forest algorithm to define the linear equation for each variable
# - Finaly, perfom a k-means clustering in the simulation data to be re-used during the image fitting
library(data.table)
options(scipen=999) # Disable scientific notation
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Where is ArchiSimple
setwd("/Users/guillaumelobet/Dropbox/research/projects/research/MARIA/maria_scripts/01_model_generator/") # Where is the ArchiSimple folder?
eval = c(F,T) # To check the number root system that will be generated, use c(F). To generate then, use c(F,T)
verbatim <- F
# Range of parameters
P_nbMaxPrim_range <- c(1,40) # Number of primary axes. Put 40 to have a monocot, 1 to have a dicot
P_type_range <- c(1,2) # Type of simulation (1 = 3D, 2 = 2D, 3 = shovelomics)
repetitions <- c(1) # number of repetitions for each parameter set
P_duree <- 12 # Total length lenght of the simulation [days] 12 is a good compromise between the speed of the simulation and the final size of the root system
# Influence total size
P_penteVitDiam_range <- seq(from=12, to=35, by=10) # Slope between the diameter and the root growth [-]
P_propDiamRamif_range <- seq(from=0.5, to=0.9, by=0.4) # Relation between the diameter of a parent and a child root
P_distRamif_range <- seq(from=4, to=6, by=2) # Distance between two successive lateral roots [mm]
P_coeffCroissRad_range <- seq(from=0, to=0, by=0.5) # Coefficient of radial growth. 0 for monocots
P_diamMax_range <- seq(from=0.4, to=1.6, by=0.8) # Max diameter for the primary roots [mm]
P_maxLatAge_range <- seq(from=15, to=15, by=10) # Maximal age growing age for the laterals [days]
# Influence root exploration
P_angLat_range <- seq(from=0.4, to=1.5, by=1) # Emission angle for the laterals [radian]
P_intensiteTropisme_range <- seq(from=0.001, to=0.031, by=0.02) # strenght of the gravitropic response
P_angInitMoyVertPrim_range <- seq(from=1.3, to=1.7, by=0.4) # Emission angle for the principal roots. Between 0 and PI/2 [radian]
P_tertiary <- 0 # Generate the tertiary roots
stochasticity <- T # Try to have parameters covering the whoel space of posibilities by varying them.
create_rsml <- F # Create the RSML files ArchiSimple
create_images <- T # Using RSML_reader.jar
analyse_images <- T # Using MARIA_J.jar
machine_learning <- F # Machine learning to define the equations for the fitting
create_clusters <- F # k-means clustering to the image fitting
# Clustering parameters
n_clusters <- 50 # Number of clusters to create
# Random Forest parameters
n_trees <- 10 # the number of trees for a random forest model
# (this factor affects the most the running time and precision, for large data sets a value of 5-20 is fine)
n_var <- 5 # the number of most important variables of a random forest that shall be used as
# predictors in the linear model ( to much variables causes overfitting )
accu <- 0.95 # the level of desired precision that is expected a response variable to have in order to considered
# as a predictor variable in the remodeling step two
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Name of the simulation
name <- ""
# Root system parameters
## General
P_penteDureeVieDiamTMD <- 2000
P_coeffCroissRad <- 0.5 # Coefficient of radial growth
P_probaMaxArret <- 0.6 # Probability of a root to stop growing
P_TMD <- 0.1
## Primary roots
P_diamMax <- 1.2 # Max diameter for the primary roots [mm]
P_penteVitDiam <- 15 #51 # Slope between the diametr and the root growth [-]
P_angInitMoyVertPrim <- 1.4 # Emission angle for the principal roots. Between 0 and PI/2 [radian]
P_slopePrimAngle <- -0.3 # Slope btw the age of the and the insertion angle of the primary
P_vitEmissionPrim <- 1 # Speed of emission of the primary roots [root/day]
P_simultEmiss <- 0 # Emmission of seminal roots, 3 days after the start (0 = NO, 1 = YES)
P_nbSeminales <- 5 # Number of seminal roots
P_nbMaxPrim <- 40 # Max number of primary axis
## Secondary roots
P_intensiteTropisme <- 0.001 # strenght of the gravitropic response
P_propDiamRamif <- 0.8 # Relation between the diameter of a parent and a child root
P_distRamif <- 2 # Distance between two successive lateral roots [mm]
P_maxLatAge <- 10 # Maximal age growing age for the laterals [days]
P_angLat <- 1.3 # Emission angle for the laterals [radian]
P_diamMin <- 0.014 # Min diameter for a root [mm]
P_coeffVarDiamRamif <- 0.2 # Variation coefficient for the diameter fof the laterals
# Simulation parameters
P_type <- 1 # Type of simuulation (1 = 3D, 2 = 2D, 3 = shovelomics)
P_IC_meca <- 0.03 # Mecanical impedence of the soil
P_shovel <- 70 # Depth of sampling for the shovelomics
# Loop over the parameters space
counter <- 0
t <- Sys.time()
# Evaluate the number of simulations
tot_sim <- 0
percent <- 0
for(e in eval){
countss <- 1
######################## Create the RSML files
if(create_rsml){
setwd("./archisimple/") # Set the correct repository
for(P_nbMaxPrim in P_nbMaxPrim_range){
for(P_type in P_type_range){
message(paste0("STARTING TYPE ",P_type," SIMULATIONS FOR ",P_nbMaxPrim," PRIMARIES"))
for(P_penteVitDiam in P_penteVitDiam_range){
for(P_angInitMoyVertPrim in P_angInitMoyVertPrim_range){
for(P_intensiteTropisme in P_intensiteTropisme_range){
for(P_propDiamRamif in P_propDiamRamif_range){
for(P_distRamif in P_distRamif_range){
for(P_coeffCroissRad in P_coeffCroissRad_range){
for(P_diamMax in P_diamMax_range){
for(P_maxLatAge in P_maxLatAge_range){
for(P_angLat in P_angLat_range){
for(i in repetitions){ # Repetitions
if(countss < 300e9){
countss <- countss+1
if(e){
# check if dicot or monocot root
species <- "dicot"
if(P_nbMaxPrim > 1){
P_coeffCroissRad <- 0
species <- "monocot"
}
# Change the name pre-fix based on the simualtion type
if(P_type == 1) basename <- paste("/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/rsml/", species, "-3D", sep="")
if(P_type == 2) basename <- paste("/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/rsml/", species, "-2D", sep="")
if(P_type == 3) basename <- paste("/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/rsml/", species, "-shov", sep="")
# Setup the name of the file, containing the principal info about the simulation
name <- paste(basename, "-",
P_penteVitDiam,"-",
P_vitEmissionPrim, "-",
P_angInitMoyVertPrim, "-",
P_intensiteTropisme, "-",
P_propDiamRamif, "-",
P_distRamif,"-",
P_coeffCroissRad, "-",
P_diamMax, "-",
P_angLat, "-",
P_maxLatAge, "-r",
i,
sep="")
if(verbatim) message(name)
var <- c(P_duree,
P_simultEmiss,
P_vitEmissionPrim,
P_nbSeminales,
P_nbMaxPrim,
P_diamMin,
P_diamMax,
P_penteVitDiam,
P_intensiteTropisme,
P_distRamif,
P_propDiamRamif,
P_coeffVarDiamRamif,
P_probaMaxArret,
P_TMD,
P_penteDureeVieDiamTMD,
P_coeffCroissRad,
P_angLat,
P_tertiary,
name,
P_type,
P_IC_meca,
P_shovel,
P_maxLatAge,
P_angInitMoyVertPrim,
P_slopePrimAngle
)
cat(var, file=paste("param.txt",sep=""), sep='\n') # Create the input file for Archisimple
system("./ArchiSimp5Maria") # Run Archisimple
}
counter <- counter+1
# Counter to track the evolution of the simulations
if(e){
prog <- ( counter / tot_sim ) * 100
if(prog > percent){
message(paste(round(prog), "% of root systems generated"))
percent <- percent + 1
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
setwd("../") # Back to the old repo
}
if(e){
######################## Create and save the images
# Render each generated RSML root system to an image and write the
# ground-truth trait table via the external RSML_reader jar.
if(create_images){
message("Creating root images and ground truth data")
# NOTE(review): absolute, machine-specific paths -- parameterize before
# running on another machine.
system('java -Xmx4000m -jar RSML_reader.jar "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/rsml/" "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/images/" "../07_maria_shiny/data/root_all_data.csv"')
#system('java -Xmx4000m -jar RSML_reader.jar "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/rsml/" "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/images/" "/Users/guillaumelobet//Desktop/root_data.csv"')
}
######################## Analyse the images
# Extract image-based descriptors with the MARIAJ jar.
if(analyse_images){
message("Analysing root images")
system('java -Xmx4000m -jar MARIAJ.jar "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/all/images/" "../07_maria_shiny/data/root_all_estimators.csv" "3"')
#system('java -Xmx4000m -jar MARIAJ.jar "/Users/guillaumelobet/Desktop/Work/archisimple/outputs/images/" "/Users/guillaumelobet/Desktop/root_estimators.csv" "3"')
}
######################## Machine learning to retrieve the parameters
if(machine_learning){
message("Machine learning started")
source("maria_learning.r")
descriptors <- read.csv("../07_maria_shiny/data/root_estimators.csv")
parameters <- read.csv("../07_maria_shiny/data/root_data.csv")
colnames(parameters)[colnames(parameters) == "width"] <- "true_width"
colnames(parameters)[colnames(parameters) == "depth"] <- "true_depth"
diff <- nrow(descriptors) - nrow(parameters)
if(diff < 0) parameters <- parameters[0:nrow(descriptors),]
if(diff > 0) descriptors <- descriptors[0:nrow(parameters),]
parameters <- parameters[,-c(6,19)] # Remove the non varying patameters (n primary for instance)
# Add the simulation parameters to the set of variables to estimate
for(i in 1:nrow(parameters)){
args <- strsplit(as.character(parameters$image[i]), "-")[[1]]
parameters$P_penteVitDiam[i] <- args[3] #--------------
parameters$P_angInitMoyVertPrim[i] <- args[5] #--------------
parameters$P_intensiteTropisme[i] <- args[6] #--------------
parameters$P_propDiamRamif[i] <- args[7] #--------------
parameters$P_distRamif[i] <- args[8] #--------------
parameters$P_diamMax[i] <- args[10] #--------------
parameters$P_angLat[i] <- args[11]
parameters$P_maxLatAge[i] <- args[12]
}
for(i in 18:25) parameters[,i] <- as.numeric(parameters[,i])
rs <- cbind(descriptors, parameters)
# Get the set of linear equations with the whole dataset
regs <- maria_learning(fname = rs,
f_vec = c(73:96),
p_vec = c(2:71,73:96),
nrt = n_trees,
ntop = n_var,
acc = accu)
to_est <- names(regs[[1]])
# Format the dataset
maria_regs <- list()
for(te in to_est){
maria_regs[[te]] <- list(fit1 = regs[[1]][[te]],
fit2 = regs[[2]][[te]],
# error = error[[te]],
x = param_test[[te]])
}
# Save the results to be reused in shiny
maria_regs <- regs
save(maria_regs, file="../07_maria_shiny/data/maria_regs.RData")
regs$best
# Vizualize the results
descriptors <- read.csv("../07_maria_shiny/data/root_estimators.csv")
results <- test_training(regs, descriptors)
to_est <- to_est[-c(5,18)]
# Look at the quality of the estimations
par(mfrow=c(4,4), mar=c(4,4,4,3))
for(te in to_est){
print(te)
estimation <- results[[te]]
truth <- parameters[[te]]
fit <- lm(estimation ~ truth)
title <- paste(te, " || ", round(summary(fit)$r.squared, 3))
plot(truth, estimation, main = title, col="#00000050")
abline(a = 0, b = 1, lty=2, col="red", lwd=2)
abline(fit)
}
colnames(results) <- paste0("est_",colnames(results))
to_match <- cbind(descriptors,results)
g_truth <- cbind(descriptors,parameters[-1])
write.csv(to_match, "~/Desktop/estimators.csv")
write.csv(parameters, "~/Desktop/data.csv")
}
# Create k-means clusters
if(create_clusters){
message("Clusering started")
descriptors <- fread("../07_maria_shiny/data/root_estimators.csv")
save(descriptors, file="../07_maria_shiny/data/maria_descriptors.RData")
descriptors <- data.frame(descriptors)
parameters <- fread("../07_maria_shiny/data/root_data.csv")
save(parameters, file="../07_maria_shiny/data/maria_parameters.RData")
parameters <- data.frame(parameters)
colnames(parameters)[colnames(parameters) == "width"] <- "true_width"
colnames(parameters)[colnames(parameters) == "depth"] <- "true_depth"
sim_data <- cbind(parameters, descriptors[2:ncol(descriptors)])
#to_cluster <- na.omit(sim_data[,2:(ncol(sim_data)-4)]) # listwise deletion of missing
to_cluster <- scale(sim_data[,2:(ncol(sim_data)-4)])
# K-Means Cluster Analysis
fit <- kmeans(to_cluster, 5) # 5 cluster solution
# get cluster means
clusters <- aggregate(to_cluster, by=list(fit$cluster), FUN=mean)
save(clusters, file="../07_maria_shiny/data/maria_clusters.RData")
# append cluster assignment
sim_data <- data.frame(sim_data, fit$cluster)
save(sim_data, file="../07_maria_shiny/data/maria_sim_data.RData")
}
}
tot_sim <- counter
counter <- 0
percent <- 0
if(!e) message(paste(tot_sim," root systems will be created..."))
}
# Print the results
# NOTE(review): 'diff' shadows base::diff here, and difftime picks its own
# units -- the hard-coded "seconds" label may be wrong for long runs.
diff = Sys.time() - t
print(paste(tot_sim," root systems were generated and analysed in ",diff," seconds"), quote=F)
#------------------------------------------------------------------------
# References
# Pagès et al. (2013).
# Calibration and evaluation of ArchiSimple, a simple model of root system architecture,
# 290, 76–84. doi:10.1016/j.ecolmodel.2013.11.014
|
8a78f019607dbd8130852d85ccc0db339c4def12
|
2a61cb94f08f6b2b95bc29a5ee798507d1b7bac6
|
/plot1.R
|
e7d7dcd9fb2c9e6bdd0900e0fbf8bc1bb423bb61
|
[] |
no_license
|
erijozsef/ExData_Plotting1
|
cad339f138340d0b7672174faadb48f06e35b1a2
|
079c5fd0211ec2307c0092be2058be4845c9b97f
|
refs/heads/master
| 2020-12-28T22:22:23.175963
| 2015-11-07T20:53:12
| 2015-11-07T20:53:12
| 45,728,443
| 0
| 0
| null | 2015-11-07T08:25:05
| 2015-11-07T08:25:04
| null |
UTF-8
|
R
| false
| false
| 507
|
r
|
plot1.R
|
# Load the packages
library(dplyr)
library(readr)
# Set working directory
# NOTE(review): hard-coded setwd() makes the script machine-specific --
# prefer running from the project root or taking the path as an argument.
setwd("E:/Documents/R programs/ExData_Plotting1")
# Read the data
# Semicolon-delimited file; '?' marks missing values, columns forced to
# character/character/7-numeric per the dataset layout.
df <- read_delim("../dataset/household_power_consumption.txt", delim=';', col_types="ccnnnnnnn", na="?")
# Filter the data
# Keep only the two target days (dates are stored as d/m/yyyy strings).
df1 <- filter(df, Date=="1/2/2007" | Date=="2/2/2007")
# Create histogram file
png( "plot1.png", width=480, height=480 )
hist(df1$Global_active_power, col="red", main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
3771a577757706272faa4cec9b52a310b00c6724
|
79c1230450df725058abbeb278aaaa5f1da35743
|
/R/RSDdist.R
|
ca5151983364e71e65e56512a6308e33da55e682
|
[] |
no_license
|
jaspershen/statTarget-2
|
fb72dadaabc9a2115d2f09cdfab5dd2dfb429296
|
7dd60ad81612b8d097be800a2255844761c56972
|
refs/heads/master
| 2021-01-12T04:25:42.064307
| 2016-11-18T05:36:50
| 2016-11-18T05:36:50
| 77,608,156
| 1
| 1
| null | 2016-12-29T11:40:22
| 2016-12-29T11:40:22
| null |
UTF-8
|
R
| false
| false
| 4,548
|
r
|
RSDdist.R
|
RSDdist <-function(sample.rsd,sample.nor.rsd,QC.rsd,QC.nor.rsd) {
#browser()
colour1<-NULL
colour2<-NULL
colour1[(sample.nor.rsd/sample.rsd)>1]<-"#D55E00"
colour1[(sample.nor.rsd/sample.rsd)==1]<-"grey48"
colour1[(sample.nor.rsd/sample.rsd)<1]<-"#009E73"
colour2[(QC.nor.rsd/QC.rsd)>1]<-"#D55E00"
colour2[(QC.nor.rsd/QC.rsd)==1]<-"grey48"
colour2[(QC.nor.rsd/QC.rsd)<1]<-"#009E73"
s.rsd.up<-sum(colour1=="#D55E00")*100/length(colour1)
s.rsd.no<-sum(colour1=="grey48")*100/length(colour1)
s.rsd.down<-sum(colour1=="#009E73")*100/length(colour1)
q.rsd.up<-sum(colour2=="#D55E00")*100/length(colour2)
q.rsd.no<-sum(colour2=="grey48")*100/length(colour2)
q.rsd.down<-sum(colour2=="#009E73")*100/length(colour2)
dirout.w = paste(getwd(), "/statTarget/shiftCor/RSDresult", sep="")
pdf(file.path(dirout.w,"RSD variation.pdf"),width=10,height=7)
layout(matrix(c(1,2),ncol=2))
#par(mar=c(5,5,4,2))
plot(sample.rsd,sample.nor.rsd,xlab="RSD (Before correction)",
ylab="RSD (After correcion)",
col=colour1,cex.lab=0.8,cex.axis=0.8,main="RSD variation of samples",
cex.main=1,pch=19)
abline(0,1,lwd=1,lty=2)
abline(h=30,lwd=1,lty=2)
abline(v=30,lwd=1,lty=2)
legend("topleft",c(paste("Increased RSD after correction:",
round(s.rsd.up,2),"%"),
paste("No changed RSD after correction:",
round(s.rsd.no,2),"%"),
paste("Decreased RSD after correction:",
round(s.rsd.down,2),"%")),
col=c("#D55E00","grey48","#009E73"),pch=19,cex= 0.6)
plot(QC.rsd,QC.nor.rsd,xlab="RSD (Before correction)",
ylab="RSD (After correction)",
col=colour2,cex.lab=0.8,cex.axis=0.8,
main="RSD variation QC",cex.main=1,pch=19)
abline(0,1,lwd=1,lty=2)
abline(h=30,lwd=1,lty=2)
abline(v=30,lwd=1,lty=2)
legend("topleft",c(paste("Increased RSDafter correction:",
round(q.rsd.up,2),"%"),
paste("Not changed RSD after correction:",
round(q.rsd.no,2),"%"),
paste("Decreased RSD after correction:",
round(q.rsd.down,2),"%")),
col=c("#D55E00","grey48","#009E73"),pch=19,cex= 0.6)
dev.off()
##
s.rsd.dis<-sapply(seq(0,1.9,0.1),function (x){
sum(sample.rsd>x&sample.rsd<=x+0.1)})*100/length(sample.rsd)
s.nor.rsd.dis<-sapply(seq(0,1.9,0.1),function (x){
sum(sample.nor.rsd>x&sample.nor.rsd<=x+0.1)})*100/length(sample.nor.rsd)
q.rsd.dis<-sapply(seq(0,1.9,0.1),function (x){
sum(QC.rsd>x&QC.rsd<=x+0.1)})*100/length(QC.rsd)
q.nor.rsd.dis<-sapply(seq(0,1.9,0.1),function (x){
sum(QC.nor.rsd>x&QC.nor.rsd<=x+0.1)})*100/length(QC.nor.rsd)
rsd.dis<-rbind(s.rsd.dis,s.nor.rsd.dis,q.rsd.dis,q.nor.rsd.dis)
colnames(rsd.dis)<-paste(paste(seq(0,190,10),
seq(10,200,10),sep="-"),"%",sep="")
rownames(rsd.dis)<-c("sample","sample.nor","QC","QC.nor")
#write.csv(rsd.dis,file.path(path,"RSD distribution.csv"))
#par(new=TRUE)
pdf(file.path(dirout.w,"RSD distribution.pdf"),width=8,height=6)
layout(matrix(c(1,2),ncol=2))
#par(mar=c(3,6,4,5))
barp <- barplot(rsd.dis[1:2,],horiz=TRUE,beside=TRUE,
col=c("#D55E00","#009E73"),border = c("#D55E00","#009E73"),
names.arg=paste(seq(0,190,10),seq(10,200,10),sep="-"),ylab="RSD (%)",
las=2,cex.lab=0.6,cex.main=0.8,
cex.axis = 0.6,cex.names = 0.5,main="Samples")
text(rsd.dis[1:2,]+1, barp,
labels=round(rsd.dis[1:2,],0), xpd=TRUE,cex = 0.3,col="grey48")
legend("topright",c("Before Correction","After Correction"),
col=c("#D55E00","#009E73"),
lty=0.5,pch= 15,cex=0.75,horiz =FALSE,bty="n")
#par(mar=c(5,6,4,5))
barp <- barplot(rsd.dis[3:4,],horiz=TRUE,
beside=TRUE,col=c("#D55E00","#009E73"),
border = c("#D55E00","#009E73"),
names.arg=paste(seq(0,190,10),seq(10,200,10),sep="-"),ylab="RSD (%)",
las=2,cex.lab=0.6,cex.main=0.8,cex.axis = 0.6,
cex.names = 0.5,main="Quality Controls (QC)")
text(rsd.dis[3:4,]+1, barp, labels=round(rsd.dis[3:4,],0),
xpd=TRUE,cex = 0.3,col="grey48")
legend("topright",c("Before Correction","After Correction"),
col=c("#D55E00","#009E73"),lty=0.5,pch= 15,
cex=0.75,horiz =FALSE,bty="n")
#dev.copy2pdf(file.path(dirout.w,"RSD distribution.pdf"),width=8,height=6)
dev.off()
}
|
50afdb65c51172f65d048a0f3bcb8cbb25761287
|
ff0a2852a43d34bc01f564299d3e4d1751eec56f
|
/hdl_trial.R
|
dd008fe50316873bb5337650efc0190d7cc25d20
|
[] |
no_license
|
shinilraina/Summer_project
|
85ac5755b1e52ac4cff7a5e5ce2f5ec19c66ce36
|
17b2b26775972581ed3c1b0c69432d6a0d9eb0e1
|
refs/heads/main
| 2023-07-15T13:13:00.986967
| 2021-09-06T10:25:01
| 2021-09-06T10:25:01
| 374,995,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,219
|
r
|
hdl_trial.R
|
rm(list=ls())
setwd("~/Summer_project/HDL")
.libPaths(c("/rds/general/user/sr4515/home/R/x86_64-redhat-linux-gnu-library/3.6",
"/usr/lib64/R/library",
"/usr/share/R/library" ))
start_time <- Sys.time()
library(devtools)
#install_github("zhenin/HDL/HDL")
#install.packages("doSNOW")
library(doSNOW)
library(HDL)
library(GenomicSEM)
#How the function works
#source("~/Summer_project/HDL/HDL.run.R")
gwas2.example <- readRDS("~/Summer_project/HDL/gwas2.array.example.rds")
gwas2.example=gwas2.example[1:10,]
gwas1.example <- readRDS("~/Summer_project/HDL/gwas1.array.example.rds")
gwas1.example=gwas1.example[1:10,]
#untar("uk_biobank/UKB_imputed_SVD_eigen99_extraction.tar.gz")
LD.path <- "~/Desktop/Summer_project/Data/UKB_imputed_SVD_eigen99_extraction"
#list.files(LD.path)
res.HDL <- HDL.rg.parallel(gwas1.example, gwas2.example, LD.path, numCores = 2)
#res.HDL
#Do the HDL function on the actual datasets
#Load and format the GWAS datasets to be compatible with the HDL function
source("/rds/general/project/hda_students_data/live/Group2/Shinil/Summer_project/Scripts/load_significant_datasets.R")
alz.data<-alz.sig
p.data<-p.sig
#alz.data
#als.data
#epilepsy.data
#lewy.data
#ms.data
#p.data
#stroke.data
#Put it in a loop so it goes over each combination automatically
alz.data<-alz.data[1:10,]
p.data<-p.data[1:10,]
#stroke.data<-stroke.data[1:100,]
#The format of the dataset should be snp,a1, a2,n,beta,se,(Z-score)
#head(alz.data)
alz.data<-alz.data[,c(3:8)]
#head(alz.data)
colnames(alz.data)<-c("SNP","a1","a2","or","se","p")
#head(p.data)
for (i in 1:nrow(p.data)){
p.data$N[i]<-sum(p.data$N_cases[i],p.data$N_controls[i])
}
colnames(p.data)<-c("SNP","A1","A2","maf","b","se","p","N_cases","N_controls","N")
p.data$SNP<- gsub(".*:", "rs", p.data$SNP)
LD.path <- "~/Summer_project/HDL/UKB_imputed_SVD_eigen99_extraction"
output="/rds/general/project/hda_students_data/live/Group2/Shinil/Summer_project/processed/hdl_trial.Rout"
result.HDL <- HDL.rg.parallel(gwas1, gwas2, LD.path, numCores = 2)
result.HDL
data_list<-c("alz.data","als.data","epilepsy.data","lewy.data","ms.data","p.data","stroke.data")
combn(data_list,2)
end_time <- Sys.time()
end_time - start_time
|
b3b7610114e2cbbe9257e5b03db23d707c198bbb
|
5c0eff341989318352bd4cbec4a1ecb38e7fa0e7
|
/10_2019/Part1_GR_2.R
|
35d6c70b9137a341b9d236be84804aaaf6724402
|
[] |
no_license
|
janiszewskibartlomiej/R-Postgraduate_studies_on_WSB
|
e98180715827ea55f346dd8f9a7e49159925ca47
|
6d22288dcd845547d424337a1f4601494955d51f
|
refs/heads/master
| 2020-08-28T05:15:50.391256
| 2020-03-02T09:03:36
| 2020-03-02T09:03:36
| 217,602,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,798
|
r
|
Part1_GR_2.R
|
##########
# PART 1 #
##########
#---- hello world ----
print("Hello WSB")
#---- calculator ----
1 + 1
2 - 1
2 / 2
5 %% 2
#---- math / stat functions ----
sum(1, 2, 3)
mean(c(1,2,3)) # srednia
median(c(1,2,3)) #mediana
min(c(1,2,3))
c <- c(1,2,3) # c to jest vector
#----- variables ----
a <- 3 # przypisanie wartości do a
a <- 4
#---- data types-----
## logical
TRUE
FALSE
a <- TRUE
b <- FALSE
class(a)
b
## numeric
c <- 12
c <- 12.5
class(c)
class(d)
## text
e <- "my smaple text"
class(e)
## vectors
aa <- c(1,2,3)
class(aa)
vec
# choose 1st element from vector
aa[1]
# choose first two elemenets from vector
aa[1:2]
# what happened?
tst <- c(1, "bla", TRUE)
tst[2]
class(tst)
#wektor musi byc zawsze w tym samym typie powyzszy przyklad - wszytsko jest tekstem poniewaz jeden element jest tekstem
## lists
my_list <- list(c(1,2,3), c(4,5,6,7))
#bardziej zaawansowane obiety sa pokazywanen w data a nie values
# choose first object from list
my_list[[1]]
# choose 2nd and 3rd element from 1st object
my_list[[1]][2:3]
## matrix
my_matrix <- matrix(c(1,2,3,4,5,6), nrow = 2, ncol = 3)
my_matrix1 <- matrix(c(1,2,3,4,5,6), nrow = 2, ncol = 3, byrow = TRUE)
my_matrix
my_matrix1
my_matrix[1,] # wszytskie elemnety z wiersza
my_matrix1[1,]
my_matrix1[,2] #wszytstkie wiersze z 2 kolumny
## arrays - tu jest budowanie w przestrzeni
my_array <- array(data = c("green", "blue"), dim = c(2,3,4)) #dim - 2 wiersze 3 kolumny
#i powtorz 4 razy
my_array
## factors - typ wynikowy pokazuje ile jest unkalnych wynkow
vec1 <- c("dom", "kot", "pies", "kot", "kot", "pies")
factor(vec1)
my_factor <- factor(vec1)
my_factor
## data frames - ramka danych
#df jest sklejeniem kolumnowym - jest sortowanie i lupa to jest super
df <- data.frame(ID = c(1,2,3,4),
Miasto = c("Gdańsk", "Gdynia", "Sopot", "Gdańsk"),
Cena = c(10,20,20,40))
# unique values
vec2 <- c(1,2,3,4,4)
unique((vec2))
#---- operators ----
# math
v1 <- c(5.5 , 6)
v2 <- c(3, 4)
v1 + v2
v1 - v2
v1 * v2 #1 element z 1 elem v2 drugi z drugim
v1 / v2
v1 %% v2 #reszta z dzielenia
v1 ^ v2
# relation
v1 > v2
v1 < v2
v1 == v2
v1 <= v2
v1 >= v2
v1 != v2
# logical
t1 <- ( 1 + 2 > 4)
t2 <- ( 3 + 4 == 7)
t1
t2
t1 & t2 #wspolna wartosc dla obu
t1 | t2 # operator OR
t1 || t2 # operator OR
# other operators
2:4 #tworzy liczby od lewej do prawej - od 2 do 4 włacznie
v1 <- 8
v2 <- 12
t <- 1:10
t
v1
v2
v1 %in% t # operator in
v2 %in% t
#--- if clause ----
if (1 > 2) {
print("jest super na wsb")
} else if (1 ==2){
print("ujdzie")
} else {
print("nie az tak super")
}
#---- loops ----
# for
for (i in 1:10) {
print(i)
}
# while
i = 1
while (i < 7) {
print(i)
i = i + 1
}
#---- functions -----
mySum <- function(a,b=10) {
result <- a+b
return(result)
}
mySum(5, b = 4)
mySum(a = 5)
#----- text operations -----
# correct
a <- 'Start and end with single quote'
print(a)
b <- "Start and end with double quotes"
print(b)
c <- "single quote ' in between double quotes"
print(c)
d <- 'Double quotes " in between single quote'
print(d)
# wrong
# e <- 'Mixed quotes"
# print(e)
#
# f <- 'Single quote ' inside single quote'
# print(f)
#
# g <- "Double quotes " inside double quotes"
# print(g)
# concatenate (text)
text1 <- "First part of the sentence"
text2 <- "Second part of the sentence"
text1
test2
paste(text1, text2, sep = ". ")
x <- c("a", "b", "c", "d")
y <- c("w", "x", "y", "z")
paste(x, y, sep = "")
paste(x, y, sep = ";", collapse = "%") # collapse skleja caly tekst w jeden bardzo prydaten do zapytan np sql
# basic text operations
x <- "Sales in August 2018: 46500"
nchar(x)
tolower(x)
toupper(x)
#---- vector operations -----
# sequence
v <- 5:10
v1 <- 6.6 : 12.6
v
v1
seq(from = 5, to = 10, by = 0.1)
# choose elements
t <- c("MO", "TU", "WE", "TH", "FR", "SA", "SO")
t
t[t == "MO" | t == "TU"]
t[-c(t =="MO")] # usuwanie danych
t[-1]
#---- lists -----
myList2 <- list(c(1,2,3),
FALSE,
sin)
myList2
# name a list
names(myList2) <- c("Numbers", "Logic", "Funtion")
myList2
# call list
myList2$Numbers
# concat list
temList <- list(1,2,3)
newList <- c(myList2, temList)
newList
# list to vector
tst1 <- unlist(newList)
#---- matrix ----
# create new matrix by row
m <- matrix(1:6, nrow = 2, byrow = TRUE)
# create new matrix by column
m1 <- matrix(1:6, nrow = 2, byrow = FALSE)
m1
# create row and column names
col1 <- c("col1", "col2", "col3")
row1 <- c("row1", "row2")
# assign row and column names to matrix
m1 <- matrix(1:6, nrow = 2, byrow = FALSE, dimnames = list(row1, col1))
m1
# call matrix elements
m1[1,1]
# row 2
m1[2,]
# column 1
m1[,1]
# factor variables
v3 = c("Gdansk", "Gdynia", "Sopot", "Sopot")
v4 = factor(v3)
is.factor(v4)
is.factor(v3)
#as.character(v4)
levels(v4)
#------ data frames -----
df1 <- data.frame(City = c("Gdansk", "Gdynia", "Sopot", "Gdansk", "Gdynia", "Sopot", "Sopot", "Gdansk"),
Sales = c(500, 400, 200, 400, 200, 250, 200, 100 ),
Employees = c(6, 4, 1, 6, 4, 1, 2, 3 ))
# create new data frame
df2 <- data.frame(City_new = df1$City,
Slaes_new = df1$Sales)
# choose rows / columns
df1[2,]
df1[1:5,]
df1[1:5, 3]
# add new column do data frame
df3 <- cbind(df1, test = c(1,2,3,4,5,6,7,8))
# add new row do data frame
df4 <- rbind(df1, c("Gdansk", 100, 1))
# rbind, cbind
# is na
vec1 <- c(23, 45, NA, 45, 12, 34, 56, 21)
vec1
is.na(vec1)
!is.na(vec1)
vec1 == 12
vec1[!is.na(vec1)]
NA #jest vartością pustą null of length 1
NULL # pusty obiekt NULL object - empty one
|
215ed1f3f8cbbf83217e7fc7096b5f9e913f1ffd
|
d98b7d973db4770b573ffcf2e61a37ffa74ecb21
|
/airbnb/stack_models/layer1_hdda1.R
|
0b82c57ee35110956facee1ec833da1ca783e913
|
[] |
no_license
|
brandenkmurray/kaggle
|
a27a85f172c5ecd58d9fc58219b3e31400be597e
|
30924c37e15772b6e7125b341931d7c775b07d0b
|
refs/heads/master
| 2021-01-10T14:18:07.768888
| 2017-08-29T00:20:48
| 2017-08-29T00:20:48
| 44,857,026
| 30
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,182
|
r
|
layer1_hdda1.R
|
library(readr)
library(data.table)
library(caret)
library(reshape2)
library(dplyr)
library(kknn)
library(Matrix)
library(doParallel)
setwd("/home/branden/Documents/kaggle/airbnb")
threads <- ifelse(detectCores()>8,detectCores()-3,detectCores()-1)
ts1Trans <- data.table(read.csv("./data_trans/ts1_pp_v4.csv"))
xgbImpVars <- read_csv("./stack_models/xgb7Imp.csv")
load("./data_trans/cvFoldsList.rda")
#ndcg metric from air's script
ndcg5 <- function(preds, dtrain) {
labels <- getinfo(dtrain,"label")
num.class = 12
pred <- matrix(preds, nrow = num.class)
top <- t(apply(pred, 2, function(y) order(y)[num.class:(num.class-4)]-1))
x <- ifelse(top==labels,1,0)
dcg <- function(y) sum((2^y - 1)/log(2:(length(y)+1), base = 2))
ndcg <- mean(apply(x,1,dcg))
return(list(metric = "ndcg5", value = ndcg))
}
# Logloss function
LogLoss <- function(actual, predicted, eps=1e-15) {
predicted[predicted < eps] <- eps;
predicted[predicted > 1 - eps] <- 1 - eps;
-1/nrow(actual)*(sum(actual*log(predicted)))
}
# Only do KNN with summary variables and Departments
kknnPre <- preProcess(ts1Trans[filter==0,4:ncol(ts1Trans), with=FALSE], method=c("zv","center","scale"))
t1kknn <- predict(kknnPre, ts1Trans[filter==0,4:ncol(ts1Trans), with=FALSE])
# s1knn <- data.frame(matrix(rep(0, ncol(t1knn)*nrow(s1Trans)), ncol=ncol(t1knn), nrow=nrow(s1Trans)))
# colnames(s1knn) <- names(t1knn)
# s1knn[,colnames(s1knn) %in% colnames(t1knn)] <- s1Trans[,colnames(s1Trans) %in% colnames(s1knn),with=FALSE]
s1kknn <- predict(kknnPre, ts1Trans[filter==2,4:ncol(ts1Trans), with=FALSE])
t1kknn$class <- ts1Trans[filter==0, "class",with=FALSE]
t1kknn$class <- as.factor(t1kknn$class)
kknn4_stack_preds <- matrix(0, nrow=nrow(t1kknn), ncol=12)
logLossTable <- data.frame(fold=seq(1:length(cvFolds)), LogLoss=rep(0, length(cvFolds)))
for (i in 1:length(cvFolds)){
kknn4 <- kknn(as.factor(class) ~ .,
train=t1kknn[cvFolds[[i]],],
test=t1kknn[-cvFolds[[i]],],
k=300,
distance=1,
kernel="triweight")
kknn4_stack_preds[-cvFolds[[i]],] <- kknn4$prob
logLossTable[i,2] <- LogLoss(model.matrix(~class-1, t1kknn[-cvFolds[[i]],"class", with=FALSE]), kknn4$prob)
}
cvPreds <- kknn4_stack_preds
samp <- read.csv('sample_submission.csv')
cnames <- paste("kknn4", names(samp)[2:ncol(samp)], sep="_")
colnames(cvPreds) <- cnames
cvPreds <- cbind(ts1Trans[filter==0,"VisitNumber",with=FALSE], cvPreds)
write.csv(cvPreds, "./stack_models/cvPreds_kknn4.csv", row.names=FALSE)
# LogLoss(model.matrix(~class-1, t1kknn[-cvFolds[[5]],"class", with=FALSE]), kknn4_stack_preds$prob)
# LogLoss(model.matrix(~class-1, t1kknn[,"class", with=FALSE]), kknn4_stack_preds)
kknn4full <- kknn(as.factor(class) ~ .,
train=t1kknn,
test=s1kknn,
k=300,
distance=1,
kernel="triweight")
save(kknn4full, file="./stack_models/kknn4full.rda")
testPreds <- kknn4full$prob
colnames(testPreds) <- cnames
testPreds <- cbind(ts1Trans[filter==2,"VisitNumber",with=FALSE], testPreds)
write.csv(testPreds, "./stack_models/testPreds_kknn4full.csv", row.names=FALSE)
|
f018522cbe7de29f5f2babcb59c4bb6e32e88ec6
|
1af700299fe5a8cf8b7999b24df48a845a9afa2f
|
/plot4.R
|
4ab2d1eb4e972c95eb62ee81d83bc7df48a1d202
|
[] |
no_license
|
S2P6/ExData_Plotting1
|
1dd5e36d1b1232101cef4ce6b9319314ebf931d8
|
6a63ac3fbe994e0fe854877f5f805d4b5f925ad9
|
refs/heads/master
| 2021-01-19T03:38:54.691551
| 2015-02-08T23:16:27
| 2015-02-08T23:16:27
| 30,483,010
| 0
| 0
| null | 2015-02-08T06:22:17
| 2015-02-08T06:22:16
| null |
UTF-8
|
R
| false
| false
| 1,281
|
r
|
plot4.R
|
setwd(paste(getwd(),"/RCourse", sep = ""))
##reading all the lines
fn <- "household_power_consumption.txt"
Alldata <- read.csv2(fn, sep=";",na.strings="?", dec=".")
datemin <- strptime("01/02/2007","%d/%m/%Y")
##cutting with two dates
datemax <- strptime("02/02/2007","%d/%m/%Y")
ourdata <- Alldata[strptime(Alldata$Date,"%d/%m/%Y")<= datemax,]
ourdata <- ourdata[strptime(ourdata$Date,"%d/%m/%Y")>= datemin,]
y<-ourdata$Global_active_power
dt <- strptime(paste(ourdata$Date,ourdata$Time),"%d/%m/%Y %H:%M:%S")
sm1<-ourdata$Sub_metering_1
sm2<-ourdata$Sub_metering_2
sm3<-ourdata$Sub_metering_3
## Plot 4
png(filename = "plot4.png")
par(mfrow = c(2,2))
##Plot 1/4
plot(dt,y, xlab ="", type="l", main ="", ylab = "Global Active Power (kilowatts)")
##Plot 2/4
plot(dt,ourdata$Voltage, xlab ="datetime", type="l", main ="", ylab = "Voltage")
## Plot 3/4
plot(dt,sm1, xlab ="", type="l", main ="", ylab = "Energy sub metering")
lines(dt,sm2, type="l", col = "red")
lines(dt,sm3, type="l", col = "blue")
textlegend <-c("Sub_metering_1", "Sub_metering_2","Sub_metering_3")
legend("topright", lty = 1, col = c("blacK","red","blue"), legend = textlegend)
## Plot 4/4
plot(dt,ourdata$Global_reactive_power, xlab ="datetime", type="l", main ="", ylab="Global_reactive_power")
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.