blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b8ecbe44d50adb414cc346d62aefd9e84e6211a8 | 0e036047a0cf79be1818f2dc901d5ffc5dc2acfb | /01_data_structures.R | 41d8c432447417513662be9cbcd4d95ecd9debac | [] | no_license | MacaryMoon/IntroToR | 6f0d47d29b53d4b765a202cff62f80cb19efbcaa | a1796f854b51cb279ca3b29c18f3e15d35c16515 | refs/heads/master | 2021-05-05T20:07:27.817020 | 2018-01-24T16:07:12 | 2018-01-24T16:07:12 | 117,838,406 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,026 | r | 01_data_structures.R | # Author: Macary
# Date: 10 January 2018
# Purpose: Training script to create and analyse data frames, vectors and factors.
# Read the cats data file from disk into a data frame
cats <- read.csv(file="data/feline-data.csv")
cats
# Address a particular column of a data frame with $
cats$weight
cats$coat
# Arithmetic on a column is vectorised: add 2 kg to each weight
cats$weight+2
# Data types: typeof() gives the storage mode, class() the object class
typeof (cats$weight)
typeof (cats$coat)
class(cats)
# Vectors: vector() defaults to mode "logical"
my_vector <-vector(length=3)
my_vector
my_vector <- vector(mode="character", length=3)
my_vector
# Make a vector by combining values with c()
combine_vector <- c(2,4,8)
combine_vector
# Mixing types coerces everything to character, because all elements
# in a vector need to be in the same data type
combine_vector <-c(2,4,"eight")
char_vector <-c("2" , "4" , "8")
num_vector <-as.numeric(char_vector)
# Exercise: build a named vector of even numbers keyed by the alphabet
MK <-1:26
MK<-MK *2
names(MK)<-letters
MK
# Factors and levels
coats<-c("tabby", "tortoiseshell", "tortoiseshell","black","tabby")
coats
# Inspect the structure of the character vector
str(coats)
# factor() looks at a vector and, if there are repeating units, groups them into categories (levels)
categories <-factor(coats)
class(categories)
typeof(categories)
str(categories)
|
6a3487baa3067ce204db624647b9ca5b5f93cad3 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051713-test.R | d12d23463136510c62bd6edd35fac84a39c94537 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,671 | r | 1610051713-test.R | testlist <- list(rates = c(-2.9718420926367e-147, 7.52609340439459e-304, NaN, -9.77818300079457e-150, -9.77579636327222e-150, -2.23083509972902e-289, 1.67616711008101e-319, 0, 0, 9.72926582174422e-309, 2.84809453888922e-306, 2.84347436052587e-312, 2.47222686857108e-94, 3.5601616320854e-306, 8.90029544463878e-306, 0, -1.14111380630909e+306, 6.985949169073e-310, -2.97578918250622e-288, NaN, 7.19865872045558e-310, -6.05301950308561e-157, 7.37827237216662e-301, -3.89790094419408e-31, 3.78576699573368e-270, 1.00891829346111e-309, 1.38527101433411e-309, -8.81442566346662e-280, -2.30331110816477e-156, -2.30331110816477e-156, -2.30327665540415e-156, -2.30331110816477e-156, 2.56647495592567e-301, 6.95335640686727e-310, -2.30328334270592e-156, -2.30331110816477e-156, -2.30331110816477e-156, -2.30331110816272e-156, -5.5516993870748e+306, 4.65661286890991e-10, 5.43230939247793e-312, -1.00914053742587e-149, 7.74546126861323e-304, -8.8144298992236e-280, 2.56647495437793e-301, -2.30331110816311e-156, 3.7835305088198e+117, 4.00791700745219e-306, -6.64678428257374e-287, -5.463546900558e-108, -5.46354690059085e-108, -5.46354690059085e-108 ), thresholds = c(-9.77818300079457e-150, 3.78987813636309e-312, 3.65365169083783e-306, -9.77818300079457e-150, 7.14190420369699e-304, 5.43226988934558e-312, 0, 0, 0, 3.60739284464096e-313, 2.84809453917917e-306, -5.48612677708843e+303, 2.72888655986149e-312, 3.66145950596111e-314, 
-1.2341419504043e-30, -1.80650535611164e+307, -9.7757963632732e-150, 2.73729184714066e-312, NaN, 3.94108708470682e-312, NA, -2.24767481577173e-289, NaN), x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
674001d3a92206f574e694f7b8c3601a431cdef1 | 5714088f3dd29cf046a5927ae9ae5330a0a3b4d7 | /server.R | fb5e99c91aebf1616eb7b61faedf920078a07599 | [] | no_license | vcueva/shinyappProject | 232a557ff7219be963d430f4e7043473885739ee | a1d9c26f232d2db988fef22c55052c06427cf8eb | refs/heads/master | 2021-01-10T02:10:41.053763 | 2016-02-26T04:19:23 | 2016-02-26T04:19:23 | 52,578,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 812 | r | server.R | library(UsingR)
# Shiny server: simulates normal data on demand and renders an annotated histogram.
shinyServer(
function(input, output) {
# Draw input$number values from N(mu, sigma) each time the "normal" button fires
normalNumbers <- eventReactive(input$normal, {
rnorm(input$number, input$mu, input$sigma)
})
output$newHist <- renderPlot({
nNumbers <- normalNumbers()
hist(nNumbers, xlab='data', col='lightblue',main='Normal Data Histogram')
# Take a random sub-sample of the requested percentage and summarise it
percentage <- input$percentage
samp <- sample(nNumbers, round(percentage*input$number/100))
xBar <- mean(samp)
s <- sd(samp)
# Anchor coordinates for the on-plot text annotations
minDataX <- min(nNumbers)+1
maxDataY <- input$number*.19
# Yellow vertical line = true mean (mu); red line = sample mean (xBar)
lines(c(input$mu, input$mu), c(0, 2000),col="yellow",lwd=5)
lines(c(xBar, xBar), c(0, 2000),col="red",lwd=5)
text(minDataX, maxDataY, paste("xBar = ", round(xBar,2)))
text(minDataX, maxDataY*0.95, paste("s = ", round(s,2)))
}) }
)
|
aa07bd2f89e21131653d1beb75056b361c003a8b | 95384806efc8ff3fe7d81766ee28674f126c1cd2 | /plot4.R | 7e279e2163b04b6bf365e80d189b48f6241f7934 | [] | no_license | natthawute/ExData_Plotting1 | 1a19be8012fba9fe8d1a85652c1c94ee718267f2 | b9873db4bd9dd6f6423dcb8571d47c1edb87edf1 | refs/heads/master | 2021-07-23T22:27:40.630136 | 2017-11-03T16:58:45 | 2017-11-03T16:58:45 | 109,400,986 | 0 | 0 | null | 2017-11-03T13:50:28 | 2017-11-03T13:50:28 | null | UTF-8 | R | false | false | 1,315 | r | plot4.R | data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
# Combine the date and time columns into a single timestamp
data$DateTime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date,format = "%d/%m/%Y")
# Keep only the two target days: 2007-02-01 and 2007-02-02
subset_data <- subset(data, Date>=as.Date("2007-02-01", format="%Y-%m-%d") & Date<=as.Date("2007-02-02", format="%Y-%m-%d"))
# Write a 2x2 panel of plots to plot4.png
png("plot4.png", width = 480, height = 480)
par(mfcol=c(2,2))
# Top-left: global active power over time
plot(subset_data$DateTime, subset_data$Global_active_power, type = "l", main = NA, xlab = NA, ylab = "Global Active Power")
# Bottom-left: the three sub-metering series overlaid
plot(subset_data$DateTime, subset_data$Sub_metering_1, type = "l", main = NA, xlab = NA, ylab = "Energy sub metering")
lines(subset_data$DateTime, subset_data$Sub_metering_2, col = "red")
lines(subset_data$DateTime, subset_data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1, 1, 1, 1), bty = "n")
# Right column: voltage and global reactive power
plot(subset_data$DateTime, subset_data$Voltage, type = "l", main = NA, xlab = "datetime", ylab = "Voltage")
plot(subset_data$DateTime, subset_data$Global_reactive_power, type = "l", main = NA, xlab = "datetime", ylab = "Golbal_reactive_power")
dev.off() |
8c10d04a4fc2e0b0a8e6429d6555905d855d86ce | 19e64db90cbe271e48fd5f592894e4ff7a30dc72 | /cachematrix.R | 0e2460cb1fba34aacc22def90fa0a840684e62f7 | [] | no_license | PCusan/ProgrammingAssignment2 | 351562f756793333e109fe08be3b50c9c2c59210 | 09e41acb07093d1feb4c667cee48119f5ddaed6b | refs/heads/master | 2020-12-26T02:40:35.346322 | 2015-07-26T19:42:54 | 2015-07-26T19:42:54 | 39,738,554 | 0 | 0 | null | 2015-07-26T19:21:52 | 2015-07-26T19:21:51 | null | UTF-8 | R | false | false | 1,178 | r | cachematrix.R | # makeCacheMatrix is a function that returns a list of functions
# Construct a "cache-aware" matrix wrapper: a list of closures that share
# the stored matrix `x` and a cached inverse in the enclosing environment.
#   set(y)        -- store a new matrix and invalidate the cached inverse
#   get()         -- return the stored matrix
#   setinvmat(m)  -- store a computed inverse in the cache
#   getinvmat()   -- return the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinvmat = function(invmat) cached_inverse <<- invmat,
    getinvmat = function() cached_inverse
  )
}
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# A cached inverse is reused when available; otherwise the inverse is
# computed with solve(), stored back in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinvmat()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinvmat(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
e62c336efa95d037ffd725c0b62446b1a0f897e4 | cb4b5e3ea5e2ab76c74d35cba0f20831d065e4bd | /04_build_LM.R | 49302ef9c15cafc6efd9b15d5612bd6810d9b047 | [] | no_license | silverbullet1472/AGBProj | f314ed3f226a30d43ae7fafa6188d041a05b3ffd | da30910b505efcb5f59e03b089a392cb7e74d880 | refs/heads/master | 2022-06-19T10:31:17.479262 | 2020-05-12T11:15:10 | 2020-05-12T11:15:10 | 255,493,198 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,979 | r | 04_build_LM.R | # select metrics(MASS)
# calculate model performance (ten-fold validation)
# build model and save as .Rdata
rm(list = ls())
library(MASS)
# Read the merged predictor/response table
data <- read.csv(file="./merged_data.csv", head=T)
colnames(data)
# Column layout of merged_data.csv:
# FID 1
# plot parameters 2:8
# agb 9
# dem 10:12
# s1 13:24
# p2 25:29
# l8 30:123 (vegetation index -> 116:123)
# Drop incomplete rows and implausibly large biomass values
data <- na.omit(data)
data <- data[data$AveAGB<250,]
# Remove FID and plot-parameter columns; keep AveAGB plus predictors
data[1:8] <- NULL
colnames(data)
# Full linear model, then stepwise metric selection by AIC
fit<-lm(AveAGB~.,data = data)
step.back <- stepAIC(fit,direction = "backward",trace=F)
summary(step.back)
step.for <- stepAIC(fit,direction = "forward",trace=F)
summary(step.for)
step.both <- stepAIC(fit,direction = "both",trace=F)
summary(step.both)
# use metrics selected by backward stepwise regression
sel.lm <- names(step.back$coefficients)
sel.lm
# Drop the "(Intercept)" entry, keeping only predictor names
sel.lm <- sel.lm[2:length(sel.lm)]
sel.lm
save(sel.lm,file="sel.lm.Rdata")
# 10-fold validation ####
# Root-mean-square error between observed values x and predictions y.
rmse <- function(x, y)
{
  residuals <- x - y
  sqrt(sum(residuals * residuals) / length(residuals))
}
# Squared Pearson correlation between x and y (coefficient of determination).
r2 <- function(x, y)
{
  pearson <- cor(x, y)
  pearson * pearson
}
# Restore the selected metric names and keep only AveAGB plus those predictors
load("sel.lm.Rdata")
data <- data[c("AveAGB",sel.lm)]
# 10-fold cross-validation. Returns c(mean train RMSE, mean test RMSE,
# mean train R2, mean test R2) across the 10 folds.
# NOTE(review): reads the global `data` built above rather than taking it
# as an argument; results also depend on the RNG state via sample().
tenfold <- function(){
df <- data.frame(rmse.train=numeric(),
rmse.test=numeric(),
r2.train=numeric(),
r2.test=numeric())
# Randomly assign each row to one of 10 folds
ind <- sample(10, nrow(data), replace=T)
for (i in 1:10){
data.train <- data[ind!=i,]
data.test<- data[ind==i,]
# Fit on the 9 training folds, evaluate on the held-out fold
model.lm<-lm(AveAGB~.,data = data.train)
pred.train <- predict(model.lm,data.train)
pred.test <- predict(model.lm,data.test)
r2.train <- r2(data.train$AveAGB,pred.train)
r2.test <- r2(data.test$AveAGB,pred.test)
rmse.train <- rmse(data.train$AveAGB,pred.train)
rmse.test <- rmse(data.test$AveAGB,pred.test)
df[nrow(df)+1,] <- c(rmse.train,rmse.test,r2.train,r2.test)
}
c(mean(df$rmse.train),mean(df$rmse.test),mean(df$r2.train),mean(df$r2.test))
}
# Cross-validated performance of the selected linear model
result.lm <- tenfold()
result.lm
save(result.lm,file="result.lm.Rdata")
# Final model fitted on the full data set
model.lm<-lm(AveAGB~.,data = data)
save(model.lm,file="model.lm.Rdata")
|
66284ea5e7ab0a6691fd0b2f28dafce7b353adac | 902416fef5aab577b284418b4c8e425c3b554a86 | /Scripts/Pinaster3.R | 709303679af8cf6c0eee296e5398e2510133d0d0 | [] | no_license | aureliodiazherraiz/digital.agri | d574d74310fc5448685d20d603e5628e50a62299 | 796ee5121580ea58f04228347667b86dc584ce1c | refs/heads/main | 2023-03-29T15:04:54.571647 | 2021-03-30T14:11:34 | 2021-03-30T14:11:34 | 351,802,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,107 | r | Pinaster3.R | #### PINASTER ####
names(pinaster3)
# Correlation matrix of the numeric predictors (plot/ID columns excluded)
cor_pinaster3<-round(cor(pinaster3[, -c(1:3,29:32,39:42)], use="complete.obs"),2) %>%
corrplot::corrplot(method = "shade", shade.col = NA, tl.col = "black", tl.srt = 55,
tl.cex = 0.5, number.cex = 0.5, addCoef.col = "black", type = "lower", diag = F,
addshade = "all", order = "FPC")
jpeg(filename = "corr_pinaster.jpeg", width = 1000, height = 1000)
corrplot::corrplot(cor_pinaster3, method = "shade", shade.col = NA, tl.col = "black", tl.srt = 55,
tl.cex = 0.5, number.cex = 0.5, addCoef.col = "black", type = "lower",
diag = F, addshade = "all", order = "FPC") ## to visualise the weight of the variables
dev.off()
# The variables most correlated with AB3_Mgha are soil_mean & stdv, aet_mean & stdv,
# mean_ndvi and tree_dens; the model is formulated with these.
plot(pinaster3$mean_ndvi, pinaster3$AB3_Mgha)
lm(log(AB3_Mgha) ~ mean_ndvi, data = pinaster3) %>% summary()
plot(pinaster3$mean_ndvi, log(pinaster3$AB3_Mgha))
abline(a=1.5618, b=4.0437, lwd=3, col='blue')
# Centre and scale the predictors before the VIF screening
pinaster3.nor<- scale(pinaster3[,-c(1:3,29:32,39:42)],center=T,scale=T) %>% as.data.frame()
### Compute VIF based on the vifcor and vifstep functions
#############################################################
v1 <- vifcor(pinaster3.nor, th=0.8)# 0.8 is the 80% significance threshold
v1
v2 <- vifstep(pinaster3.nor, th=3)
v2
re1 <- exclude(pinaster3.nor,v2)
re1
X11()
plot(re1)
# Correlation matrix of the variables retained after VIF screening
cor_pinaster3v2 <- round(cor(re1, use="complete.obs"),2) %>%
corrplot::corrplot(method = "shade", shade.col = NA, tl.col = "black",
tl.srt = 55, tl.cex = 0.8, number.cex = 0.8,
addCoef.col = "black", type = "lower", diag = F,
addshade = "all", order = "FPC")
#### GLM ####
# Gaussian GLM of pine basal area on the VIF-selected predictors
pinaster3_vif <- glm(AB3_Mgha ~ Tree_dens + mean_ndvi + mo_sup + textura, data = pinaster3.nor)
summary(pinaster3_vif)
performance(pinaster3_vif)
rsq(pinaster3_vif) # R-squared of 0.4732783
# Save the model summary to a Word-readable file
sink("pinaster3_vif_glm.doc")
print(summary(glm(AB3_Mgha ~ Tree_dens + mean_ndvi + mo_sup + textura, data = pinaster3.nor)))
sink() # returns output to the console
#### GLM PREDICTIVE POWER ####
# 80/20 train/test split (seeded for reproducibility)
set.seed(1369)
data_train <- pinaster3.nor %>% sample_frac(.8)
data_test <- setdiff(pinaster3.nor, data_train)
mean(data_train$AB3_Mgha)
mean(data_test$AB3_Mgha)
hist(data_train$AB3_Mgha)
hist(data_test$AB3_Mgha)
pinaster3_vifglm <- glm(AB3_Mgha ~ Tree_dens + mean_ndvi +
mo_sup + textura, data=data_train)
summary(pinaster3_vifglm)
performance(pinaster3_vifglm)
rsq(pinaster3_vifglm) # R-squared of 0.5078
# Training predictions
prediccion_train<-predict.glm(pinaster3_vifglm,newdata = data_train)
# Training MSE
training_mse <- mean((prediccion_train - data_train$AB3_Mgha)^2)
paste("Error (mse) de entrenamiento:", training_mse)# "Error (mse) de entrenamiento: 0.480804459730057"
# Test predictions
prediccion_test<-predict.glm(pinaster3_vifglm,newdata = data_test)
# Test MSE
test_mse <- mean((prediccion_test - data_test$AB3_Mgha)^2)
paste("Error (mse) del test:", test_mse)# "[1] "Error (mse) del test: 0.671095447411617"
# Observed vs predicted on the test split
data_test$prediccion<-prediccion_test
r2test<-lm(prediccion ~ AB3_Mgha, data = data_test)# Adjusted R-squared: 0.359
summary(r2test)
names(data_test)
ggpairs(data_test[, c(31,49)], lower = list(continuous = "smooth"),
diag = list(continuous = "barDiag"), axisLabels = "none")
ggplot(data_test[, c(31,49)],aes(x=AB3_Mgha,y=prediccion))+
geom_point()+
geom_smooth(method="glm", se=T)+
labs(subtitle = "GLM",
y = "Biomass_prediction",
x = "Ppinaster_biomass",
title = "P pinaster biomass prediction") +
stat_cor(aes(label = paste(..rr.label.., ..p.label.., sep = "~`,`~")),
label.x = 2, label.y = -4, size = 5.5) +
stat_regline_equation(label.x = 2, label.y = -4.5, size = 5.5)
#### GAM ####
# Generalised additive model with smooth terms for each selected predictor
pinaster3_vifgam <- gam(AB3_Mgha ~ s(Tree_dens) + s(mean_ndvi) + s(mo_sup) + s(n_sup),
data = pinaster3.nor)
summary(pinaster3_vifgam)# R-sq.(adj) = 0.558
# Save the model summary to a Word-readable file
sink("pinaster3_vif_gam.doc")
print(summary(gam(AB3_Mgha ~ s(Tree_dens) + s(mean_ndvi) + s(mo_sup) + s(n_sup),
data = pinaster3.nor)))
sink() # returns output to the console
# Training predictions
prediccion_train<-predict.gam(pinaster3_vifgam,newdata = data_train)
# Training MSE
training_mse <- mean((prediccion_train - data_train$AB3_Mgha)^2)
paste("Error (mse) de entrenamiento:", training_mse)# "Error (mse) de entrenamiento: 0.415009070180227"
# Test predictions
prediccion_test<-predict.gam(pinaster3_vifgam,newdata = data_test)
# Test MSE
test_mse <- mean((prediccion_test - data_test$AB3_Mgha)^2)
paste("Error (mse) del test:", test_mse)# "[1] "Error (mse) del test: 0.477256704261409"
# Observed vs predicted on the test split
data_test$prediccion<-prediccion_test
r2test<-lm(prediccion ~ AB3_Mgha, data = data_test)# Adjusted R-squared: 0.5349
summary(r2test)
names(data_test)
ggpairs(data_test[, c(31,49)], lower = list(continuous = "smooth"),
diag = list(continuous = "barDiag"), axisLabels = "none")
#### DECISION TREE ####
# CART model; also serves as a quick check of which predictors matter most.
set.seed(1649)
data_train <- pinaster3[, -c(1:3,29:32,39:42)] %>% sample_frac(.8)
data_test <- setdiff(pinaster3[, -c(1:3,29:32,39:42)], data_train)
head(data_train)
mean(data_train$AB3_Mgha)
mean(data_test$AB3_Mgha)
hist(data_train$AB3_Mgha)
hist(data_test$AB3_Mgha)
# Fit the CART model on the training split
pinaster3.tree <- rpart(AB3_Mgha ~ .,data=data_train)
rpart.plot(pinaster3.tree, extra = 100)
prp(pinaster3.tree)
par(xpd = NA)
jpeg("pinaster3.tree.jpg", width=1000, height=820, units="px",
pointsize=5, bg="white",res=300)
plot(pinaster3.tree)
text(pinaster3.tree)
dev.off()
# BUG FIX: these two calls previously referenced `suber.tree` (copy-paste
# from the Q. suber script), which is not defined here; use the fitted
# pinaster3.tree so the complexity-parameter table and plot are produced.
printcp(pinaster3.tree)
plotcp(pinaster3.tree)
|
25278e47342be1d99714c99dcf90715aad6db228 | 4d453169619db0c29de65e355f2a49ece464b8fa | /4.eda/proj2.pm25/plot3.R | d73985e9ce872dd307d9085d428dbc835d185b28 | [] | no_license | gsstuart/datasciencecoursera | 1f11a81f5faf43ce2c350ad1074fb43cd2f99fb0 | ec169fb0982e4f4e0a71bff1dd4dc8d2a02452ad | refs/heads/master | 2020-05-02T14:48:33.386645 | 2017-05-19T20:46:53 | 2017-05-19T20:46:53 | 22,657,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,237 | r | plot3.R | # DIRECTIVE:
# Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
# which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008? Use the ggplot2 plotting system to make a
# plot answer this question.
library(plyr)
library(ggplot2)
# SET WORKING DIRECTORY HERE
setwd('/home/scott/datasciencecoursera/4.eda/proj2.pm25')
# set up for PNGs, if desired (set writePNG to 1 to write plot3.png)
writePNG = 0
if (writePNG) png(filename='plot3.png', width=900)
# load the data set
# for caching purposes, we assume that if the "nei0" variable exists, then the data is already loaded
# and up-to-date.
if (!exists('nei0')) nei0 = readRDS('summarySCC_PM25.rds')
# subset to Baltimore City (fips 24510)
baltimore = nei0[nei0$fips == '24510', c('year','Emissions','type')]
# get the sums per year/type (ddply names the summary column V1)
totalPM = ddply(baltimore, c('year','type'), function(b) sum(b[,'Emissions']))
# generate the plot: one area panel per source type
g = ggplot(totalPM, aes(year, V1))
g + facet_grid(. ~ type) +
geom_area(aes(fill=type), alpha=0.77) +
theme_bw() +
labs(title = expression(PM[2.5] * ' Baltimore City'), y='emissions')
# clean-up PNG if necessary
if (writePNG) dev.off()
|
3cdb453d6a4270be340b4ac05953de25b0019d77 | 8a173889edb40e971c604584c53367b4032c77f2 | /ui.R | 36f69340b8dea4f9fcb72e46dfc6e4dda6b1a72b | [] | no_license | shrutimahajan123/IMDB-Data-Analysis | 5981e0e2e742beebbc11b6cc82dbf276e9b1fee9 | fbdc92223330bc5b5004e98ea1eb7082f2371f87 | refs/heads/main | 2023-06-11T10:47:22.590508 | 2021-06-25T05:02:22 | 2021-06-25T05:02:22 | 380,127,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,082 | r | ui.R | # Title: FIT5147 Visualisation Project
# Author: Shruti Sandeep Mahajan
# Student ID: 31235786
# Tutor: Mohit Gupta
## --------------------------------------
## Importing required libraries
## --------------------------------------
#install.packages("shiny")
library(shiny)
#install.packages("shinydashboard")
library(shinydashboard)
#install.packages("dplyr")
library(dplyr)
#install.packages("leaflet")
library(leaflet)
#install.packages("tidyr")
library(tidyr)
#install.packages("tidyverse")
library(tidyverse)
#install.packages("rgdal")
library(rgdal)
#install.packages("ggplot2")
library(ggplot2)
#install.packages("plotly")
library(plotly)
#install.packages("reshape2")
library(reshape2)
#install.packages("gapminder")
library(gapminder)
#install.packages("shinythemes")
library(shinythemes)
#install.packages("shinyWidgets")
library(shinyWidgets)
#install.packages("shinyalert")
library(shinyalert)
#install.packages("shinycssloaders")
library(shinycssloaders)
#install.packages("shinybusy")
library(shinybusy)
## --------------------------------------
## Reading data
## --------------------------------------
demographic_imdb <- read.csv("demographic_imdb.csv")
demographic_imdb <- demographic_imdb %>% drop_na()
ratings_data <-read.csv("IMDb ratings.csv")
page_votes <-read.csv("gender_votes.csv")
## --------------------------------------
## Transforming data
## --------------------------------------
# Considering data after the year 1924
demographic_imdb <- demographic_imdb %>% filter(year > 1924)
##############
# Retrieving name of the country with count of movies
demo_country_cnt <- as.data.frame(table(demographic_imdb$country))
# Renaming the column names
colnames(demo_country_cnt) <- c("NAME","movie_cnt")
# Changing the name of the countries as per international norms
demographic_imdb$country <-gsub("USA","United States", demographic_imdb$country)
# Changing the name of country to map data on the worldmap
demo_country_cnt$NAME <- gsub("Soviet Union","Russia", demo_country_cnt$NAME)
demo_country_cnt$NAME <- gsub("USA","United States", demo_country_cnt$NAME)
# Loading thematic mapping for countries on world map
dsn <- "TM_WORLD_BORDERS_SIMPL-0.3"
layer1<- "TM_WORLD_BORDERS_SIMPL-0.3"
# Using readOGR() to read the thematic mappings of the file
map_world = readOGR(dsn,layer1)
# Performing cleaning of thematic data to suit our purpose
map_world@data$POP2005 <- map_world@data$POP2005 %>% replace_na(0)
# Convert population to millions, rounded to 3 decimals.
# BUG FIX: `x / 1000000 %>% round(3)` piped the constant 1000000 (not the
# quotient) into round(), because %>% binds tighter than `/`; the intended
# rounding was never applied. Parenthesising the quotient fixes this.
map_world@data$POP2005 <- (as.numeric(as.character(map_world@data$POP2005)) / 1000000) %>% round(3)
# Selecting required features for the data
map_world@data <- data.frame(map_world@data , demo_country_cnt[match(map_world@data$NAME, demo_country_cnt$NAME),])
# Retrieving required features
map_world@data <- map_world@data[,c("FIPS","ISO2","ISO3","LON","LAT","UN","NAME","AREA","POP2005",
"REGION","SUBREGION","movie_cnt")]
# Setting up palette for the world map, with bins over the number of movies
pal_hue <- colorBin( palette="YlGn", domain=map_world@data$movie_cnt ,na.color="transparent", bins=c(0,750,1500,2250,3000,Inf))
######
# Assigning two categories for gender (to be used in page 3)
gender_choice <- c("Male","Female")
## --------------------------------------
## Creating UI
## --------------------------------------
# Top-level UI: a navbar with four tabs (intro, demographic, time, popularity)
ui <- navbarPage(
# Setting aesthetics for tab
position = c("static-top", "fixed-top", "fixed-bottom"),
header = NULL,
footer = NULL,
inverse = TRUE,
fluid = TRUE,
# Title of web-based application
title= h4("Analysis of IMDb data"),
# First tab: introduction text and hero image
tabPanel(h5("Introduction"),
# Shiny alert widget as pop up message for users
useShinyalert(),
# Setting background color
setBackgroundColor("ghostwhite",gradient ="linear"),
# Setting the aesthetics for the Introduction page
fluidRow(
h3(htmlOutput("welcome_page"), align="center", style = "font-family: 'times';color: #000000; margin-left:80px; margin-right:80px;")
),
# Loading image on the web page
div(tags$img(src = "mov.jpg", height = 500, width = 1000), style="text-align:center;"),
),
# Second tab: world map + per-country genre counts
tabPanel(h5("Demographic Analysis"),
fluidRow(column(4,offset=4,h3("Demographic Analysis of IMDb data"))),
fluidRow(
column(2,
wellPanel(title = "User Guide",
solidHeader = TRUE,
status = "primary",
style = "background: yellow",
htmlOutput("usr_guide_pg1"))
),
column(10,(wellPanel(title = "Insights",
solidHeader = TRUE,
status = "primary",
#style = "background:blue",
htmlOutput("Insight_pg1"))))
),
fluidRow(
column(2,wellPanel(selectInput("features","Genre:",unique(demographic_imdb$genre),
multiple = TRUE,
selected = c("Drama","Horror","Thriller","Romance")))
),
column(5,
wellPanel(withSpinner(leafletOutput("country_plot",height=400)),align="center")
),
column(5,
wellPanel(withSpinner(plotOutput("genre_count",height=400)))
)
)),
# Third tab: genre trend line, sunburst and heat map
tabPanel(h5("Time-based Analysis"),
fluidRow(column(4,offset=4,h3("Time based Analysis of IMDb data"))),
fluidRow(
column(2,
wellPanel(title = "User Guide",
solidHeader = TRUE,
status = "primary",
style = "background: yellow",
htmlOutput("usr_guide_pg2"))
),
column(10,wellPanel(title = "Insights",
solidHeader = TRUE,
status = "primary",
htmlOutput("insight_pg2"),
height=200)
)),
fluidRow(
column(2,wellPanel(
selectInput("select_genre",
"Select one or more Genre:",
unique(demographic_imdb$genre),
multiple = TRUE,
selected = c("Drama","Crime","Mystery","Comedy")),
selectInput("sun_year","Select an year",unique(demographic_imdb$year),selected = c("2020")),
selectInput("heat_lang","Select languages",unique(demographic_imdb$language),multiple=TRUE,
selected = c("English","Hindi","French","German","Russian","Spanish")))
),
column(10,wellPanel(withSpinner(plotlyOutput("genre_trend",height=350)))
)
),
fluidRow(
column(5,offset=2,wellPanel(withSpinner(plotlyOutput("sunburst",height=400)))),
column(5,wellPanel(withSpinner(plotlyOutput("heat_map",height=400)))))
),
# Fourth tab: votes by gender/age group (box plot + scatter)
tabPanel(h5("Popularity based Analysis"),
fluidRow(column(4,offset=4,h3("Popularity based Analysis of IMDb data"))),
fluidRow(
column(2,
wellPanel(
title = "User Guide",
solidHeader = TRUE,
status = "primary",
style = "background: yellow",
htmlOutput("usr_guide_pg3"))),
column(10,
wellPanel(title = "Insight",
solidHeader = TRUE,
status = "primary",
htmlOutput("insight_pg3")))),
fluidRow(
column(2,
selectInput("gender_sel","Select Gender",choices=gender_choice,multiple= TRUE,selected =c("Female")),
checkboxInput('all','Select All/None',value =TRUE),
selectInput("age_sel","Select Age group",c("0-18 years","18-30 years","30-45 years","Above 45 years"),
multiple=FALSE, selected=c("18-30 years")),
selectInput("gnr_sel","Select Genre",unique(demographic_imdb$genre),multiple=TRUE,
selected=c("Drama","Comedy","Biography","Action","Animation"))),
column(5,wellPanel(withSpinner(plotlyOutput("box_chart")))),
column(5,wellPanel(withSpinner(plotlyOutput("scatter"))))
))
)
## -----------------------------------------------------
## Configuring the server
## -----------------------------------------------------
server<- function(input,output,session){
# Pop up message when application loads
shinyalert("Please wait..the movies are loading!",
imageUrl ="https://media.istockphoto.com/vectors/cinema-hall-movie-interior-with-coming-soon-text-on-white-screen-and-vector-id1066635730?k=6&m=1066635730&s=612x612&w=0&h=5THdaHrMGqqRVJHYnQFkrKHbCRdExr53Rn1RyrougOk=")
# Text for introduction page
output$welcome_page <- renderUI({HTML(paste("<B> Movies have been contributing to entertainment worldwide for many years.Let us explore the
multitude of features and insights gathered from the IMDb data from the year 1924 to 2020.",
"IMDb has been performing credit indexing for movies worldwide and has a vast dataset with multiple attributes. These atributes will help
us to garner insights into the popularity of movies and audience perspective for various movies ",sep="<br/>"))})
# User guide for page 1
output$usr_guide_pg1 <- renderUI({HTML(paste("<B> User Guide",
"1. Select desired genre(s).",
"2. Click on a country over the map.",
sep="<br/>"))})
# Insights from page 1
output$Insight_pg1 <- renderUI({HTML(paste("Here we aim to get insights into the distribution of number of movies made in different genres
worldwide,i.e. in different countries. This helps us understand the popularity of genres demographically.",
"<B> For example: In India, most of the movies are from Drama genre , followed by Action and Comedy.
Animation can be observed as a popular genre in Japan when compared to other countries.This is particularly
helpful for publication houses to understand the global market to make informed decisions. The distribution of movies
in different genre to total movies in the country can also be observed here.",sep="<br/>"))})
# User guide for page 2
output$usr_guide_pg2 <- renderUI({HTML(paste("<B> User Guide",
"1. Select desired genre(s).",
"2. Select desired year.",
"3. Select desired language(s)",sep="<br/>"))})
# Insights from page 2
output$insight_pg2 <-renderUI({HTML(paste("Here we aim understand the trend of genres across the years
and also get insights into the dependency between genre and languages for a given year.This visualisation is helpful for
understanding the effect languages have on genre and how the viewership has grown over the years.",
"<B> For example: The movies for Comedy genre rose significantly from 2000 to 2004 with most maximum number of movies
made in English language.","These insights are helpful for movie buffs and movie critics to analyse the
distibution of genre and language for various years","",sep="<br/>"))})
# User guide for page 3
output$usr_guide_pg3 <- renderUI({HTML(paste("<B> User Guide",
"1. Select a gender type.",
"2. Select desired age group.",
"3. Select desired genres",sep="<br/>"))})
# Insights from page 3
output$insight_pg3 <- renderUI({HTML(paste("Here we aim to understand the popularity of various genres within different genders and age
group. We also understand how the popularity of genres is changing within age groups wrt duration of the movie.
Duration of a movie is a major factor towards which various age groups and genders can have different affinity.",
"<B> For example: Animation genre has received high votes for the age group of 0-18 years from
both Male and Female gender. However, it can be observed that it did not receive higher ratings
from viewers in the age group of 45 above years.","It can also be observed that with increase in duration, the
votes received for 0-18 age group the average votes were more when copared to age group of 45 and above years.",sep="<br/>"))})
# World map visualisation
output$country_plot <- renderLeaflet({
leaflet(map_world) %>%
# Now add tiles to it
addTiles() %>%
addPolygons(
fillColor = ~pal_hue(movie_cnt),
stroke=TRUE,
fillOpacity = 0.9,
color="white",
weight=0.3,
label = paste( "Country:", map_world@data$NAME,",",
"Number of Movies: ", map_world@data$movie_cnt,
sep=" "),
labelOptions = labelOptions(
style = list("font-weight" = "bold", padding = "4px 8px"),
textsize = "13px",
direction = "auto",
highlight = highlightOptions(weight=5,color="red",fillOpacity = 0.7,bringToFront = TRUE)
),
layerId = map_world@data$NAME) %>%
# Setting the default zoom level for map
setView(lng= 0, lat=30, zoom = 2)%>%
addLegend(pal= pal_hue, values= ~movie_cnt, title="Number of Movies",position = "bottomleft")
})
observe(
{
click = input$country_plot_shape_click
sub = map_world@data %>% filter(NAME %in% click$id)
if(is.null(click) || is.null(sub))
return()
output$genre_count<- renderPlot({
sel_country <-demographic_imdb %>%
filter(country %in% sub$NAME)
data_genre <- sel_country %>%
filter(genre %in% input$features) %>%
group_by(genre)%>%
summarise(Num_genre_mov = length(genre))
ggplot(data= data_genre, aes(x=genre, y=Num_genre_mov)) +
geom_segment(aes(x = genre, y = 0, xend = genre, yend = Num_genre_mov ), color="black") +
geom_point( size=5, color="red", fill=alpha("orange", 0.3), alpha=0.7, shape=21, stroke=2,
hoverinfo = 'text',
text = ~paste('</br> Number of Movies: ', Num_genre_mov))+
theme_light() +
theme(
panel.grid.major.x = element_blank(),
panel.border = element_blank(),
axis.ticks.x = element_blank(),
axis.text.x = element_text(angle=0, hjust=0.5, size=11,colour="black")
) +
xlab("Genre") +
ylab("Number of movies") +
labs(title = paste("Number of movies in selected genre for",click$id))
})
})
# Visualisation for line chart
output$genre_trend <- renderPlotly({
trn_genre <- demographic_imdb %>%
filter(genre %in% input$select_genre) %>%
group_by(genre,year)%>%
summarise(Num_genre_mov = length(genre))
plot_ly(trn_genre, x = ~year, y = ~Num_genre_mov, type = 'scatter', mode = 'lines',
linetype = ~genre,line = list(color = 'Pastel1', width = 2,dash = 'solid'),
hoverinfo = 'text',
text = ~paste('</br> Year: ', year,
'</br> Number of movies: ', Num_genre_mov)) %>%
layout(title = "Trend of Genres",
xaxis = list(title = "Year"),
yaxis = list (title = "Number of movies"))
})
  # Visualisation for Sunburst: year -> language hierarchy, each wedge sized
  # by the number of titles, restricted to the year/genre chosen in the UI.
  output$sunburst <-renderPlotly({
    sun_burst <- demographic_imdb %>%
      filter(year %in% input$sun_year) %>%
      filter(genre %in% input$select_genre) %>%
      group_by(year,genre,language) %>%
      # One row per (year, genre, language); Num_lang_mov counts the titles.
      summarise(Num_lang_mov = length(imdb_title_id))
    plot_ly(
      # labels/parents define the hierarchy: languages nested under years.
      labels = sun_burst$language,
      parents = sun_burst$year,
      values = sun_burst$Num_lang_mov,
      type = 'sunburst',
      maxdepth = 3,
      domain = list(column = 1),
      insidetextorientation='radial',
      extendsunburstcolors = TRUE
    )%>%
      layout(title = "Distribution of movies in different language with years")
  })
  #Visualisation for heat map
  # Heat map of movie counts with genre on x, language on y, restricted to
  # the year/genre/language selections from the sidebar controls.
  output$heat_map <-renderPlotly({
    heat_data <- demographic_imdb %>%
      filter(year %in% input$sun_year) %>%
      filter(genre %in% input$select_genre) %>%
      filter(language %in% input$heat_lang) %>%
      group_by(year,genre,language) %>%
      summarise(Num_lang_mov = length(imdb_title_id))
    # Defensive NA -> 0 replacement so the colour scale stays well-defined
    # (length() itself should not yield NA).
    heat_data$Num_lang_mov[is.na(heat_data$Num_lang_mov)] <- 0
    plot_ly(x=heat_data$genre, y=heat_data$language,
            z = heat_data$Num_lang_mov,
            type = "heatmap",
            colorscale='Bluered_r',
            hoverinfo = 'text',
            text = ~paste('</br> Language: ', heat_data$language,
                          '</br> Genre: ', heat_data$genre,
                          '</br> Number of movies: ', heat_data$Num_lang_mov))%>%
      layout(title = "Distribution of genre with language",
             xaxis = list(title = "Genre"),
             yaxis = list (title = "Language"))
  })
observe({
updateSelectInput(session,"gender_sel",choices= gender_choice,selected=if(input$all)gender_choice)
})
  # Visualisation for Box Plot
  # Grouped box plot of average votes per genre, split by the selected
  # gender(s) and age band. Each branch narrows page_votes to the relevant
  # vote column, renames it to a human-readable label, and melts the result
  # to long form (reshape2::melt on a data frame with one value column).
  # NOTE(review): when input$all is TRUE the final branch overwrites
  # age_votes computed by the gender-specific branches — this ordering is
  # load-bearing.
  # NOTE(review): `input$gender_sel == "Male"` assumes a single selection;
  # a multi-valued selector would make this `if` condition non-scalar —
  # TODO confirm gender_sel is single-select.
  output$box_chart <-renderPlotly({
    # For selected gender = Male
    if(input$gender_sel == "Male"){
      male_age_votes <- page_votes %>% select(genre, males_0age_avg_vote,males_18age_avg_vote,
                                              males_30age_avg_vote,males_45age_avg_vote)
      if(input$age_sel == "0-18 years"){
        male_age_votes <- male_age_votes %>% select(genre,males_0age_avg_vote)
        male_age_votes <- male_age_votes %>% rename(male_under_18_years = males_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        male_age_votes <- male_age_votes %>% select(genre,males_18age_avg_vote)
        male_age_votes <- male_age_votes %>% rename(male_18_to_30_years = males_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        male_age_votes <- male_age_votes %>% select(genre,males_30age_avg_vote)
        male_age_votes <- male_age_votes %>% rename(male_30_to_45_years = males_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        male_age_votes <- male_age_votes %>% select(genre,males_45age_avg_vote)
        male_age_votes <- male_age_votes %>% rename(male_above_45_years = males_45age_avg_vote)
      }
      male_age_votes <- male_age_votes %>% filter(genre %in% input$gnr_sel)
      age_votes <-melt(male_age_votes)
    }
    # For selected gender = Female
    # (c("Female") is equivalent to "Female" here — kept as written.)
    if(input$gender_sel == c("Female")){
      female_age_votes <- page_votes %>% select(genre, females_0age_avg_vote,females_18age_avg_vote,
                                                females_30age_avg_vote,females_45age_avg_vote)
      if(input$age_sel == "0-18 years"){
        female_age_votes <- female_age_votes %>% select(genre,females_0age_avg_vote)
        female_age_votes <- female_age_votes %>% rename(female_under_18_years = females_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        female_age_votes <-female_age_votes %>% select(genre,females_18age_avg_vote)
        female_age_votes <- female_age_votes %>% rename(female_18_to_30_years = females_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        female_age_votes <- female_age_votes %>% select(genre,females_30age_avg_vote)
        female_age_votes <- female_age_votes %>% rename(female_30_to_45_years = females_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        female_age_votes <- female_age_votes %>% select(genre,females_45age_avg_vote)
        female_age_votes <- female_age_votes %>% rename(female_above_45_years = females_45age_avg_vote)
      }
      female_age_votes <- female_age_votes %>% filter(genre %in% input$gnr_sel)
      age_votes <-melt(female_age_votes)
    }
    # For selected gender = Male and Female
    if(input$all == TRUE){
      gndr_age_votes <- page_votes %>% select(genre,males_0age_avg_vote,males_18age_avg_vote,males_30age_avg_vote,males_45age_avg_vote,
                                              females_0age_avg_vote,females_18age_avg_vote,females_30age_avg_vote,females_45age_avg_vote)
      if(input$age_sel == "0-18 years"){
        gndr_age_votes <- gndr_age_votes %>% select(genre,females_0age_avg_vote,males_0age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(male_under_18_years = males_0age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(female_under_18_years = females_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        gndr_age_votes <- gndr_age_votes %>% select(genre,females_18age_avg_vote,males_18age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(male_18_to_30_years = males_18age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(female_18_to_30_years = females_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        gndr_age_votes <- gndr_age_votes %>% select(genre,females_30age_avg_vote,males_30age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(male_30_to_45_years = males_30age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(female_30_to_45_years = females_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        gndr_age_votes <- gndr_age_votes %>% select(genre,females_45age_avg_vote,males_45age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(male_above_45_years = males_45age_avg_vote)
        gndr_age_votes <- gndr_age_votes %>% rename(female_above_45_years = females_45age_avg_vote)
      }
      gndr_age_votes <- gndr_age_votes %>% filter(genre %in% input$gnr_sel)
      age_votes <-melt(gndr_age_votes)
    }
    # Long-form age_votes: `variable` is the renamed demographic column,
    # `value` the average vote; boxmode = "group" places one box per genre
    # side by side within each demographic.
    fig <- plot_ly(age_votes, x=~variable, y = ~value, color = ~genre, type = "box")
    fig <- fig %>%
      layout(title="Genre popularity with Age group and Gender",
             xaxis= list(title="Audience of selected gender and age group"),
             yaxis=list(title= "Number of votes"),
             boxmode = "group")
    fig
  })
  # Visualisation for scatter plot
  # Bubble chart per genre: x = average vote of the selected demographic,
  # y = average duration, bubble size = number of movies. Each branch picks
  # the matching vote column from page_votes and renames it to a common
  # name so the two-stage summarise below can be shared.
  # NOTE(review): as in the box plot, the input$all branch overwrites any
  # gender-specific result, and scalar `==` comparisons on input$gender_sel
  # assume a single-valued selector — TODO confirm.
  output$scatter<-renderPlotly({
    # For selected gender = Male
    if(input$gender_sel == "Male"){
      if(input$age_sel == "0-18 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,males_0age_avg_vote)
        rate_pg <- rate_pg %>% rename(male_vote = males_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,males_18age_avg_vote)
        rate_pg <- rate_pg %>% rename(male_vote = males_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,males_30age_avg_vote)
        rate_pg <- rate_pg %>% rename(male_vote = males_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,males_45age_avg_vote)
        rate_pg <- rate_pg %>% rename(male_vote = males_45age_avg_vote)
      }
      rate_pg <- rate_pg %>% filter(genre %in% input$gnr_sel)
      # Per-title intermediate (grouping keys make this one row per title).
      scat_data<-rate_pg %>% group_by(imdb_title_id,genre, duration,male_vote)%>% summarize(no_mov = length(imdb_title_id),
                                                                                            no_dur = mean(duration),no_votes = mean(male_vote))
    }
    # For selected gender = Female
    if(input$gender_sel == "Female"){
      if(input$age_sel == "0-18 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,females_0age_avg_vote)
        rate_pg <- rate_pg %>% rename(female_vote = females_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,females_18age_avg_vote)
        rate_pg <- rate_pg %>% rename(female_vote = females_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,females_30age_avg_vote)
        rate_pg <- rate_pg %>% rename(female_vote = females_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,females_45age_avg_vote)
        rate_pg <- rate_pg %>% rename(female_vote = females_45age_avg_vote)
      }
      rate_pg <- rate_pg %>% filter(genre %in% input$gnr_sel)
      scat_data<-rate_pg %>% group_by(imdb_title_id,genre, duration,female_vote) %>% summarize(no_mov = length(imdb_title_id),
                                                                                              no_dur = mean(duration),no_votes = mean(female_vote))
    }
    # For selected gender = Male and Female
    if(input$all == TRUE){
      if(input$age_sel == "0-18 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,allgenders_0age_avg_vote)
        rate_pg <- rate_pg %>% rename(all_vote = allgenders_0age_avg_vote)
      }
      if(input$age_sel == "18-30 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,allgenders_18age_avg_vote)
        rate_pg <- rate_pg %>% rename(all_vote = allgenders_18age_avg_vote)
      }
      if(input$age_sel == "30-45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,allgenders_30age_avg_vote)
        rate_pg <- rate_pg %>% rename(all_vote = allgenders_30age_avg_vote)
      }
      if(input$age_sel == "Above 45 years"){
        rate_pg <- page_votes %>% select(imdb_title_id,genre,continent,country,year,duration,allgenders_45age_avg_vote)
        rate_pg <- rate_pg %>% rename(all_vote = allgenders_45age_avg_vote)
      }
      rate_pg <- rate_pg %>% filter(genre %in% input$gnr_sel)
      scat_data<-rate_pg %>% group_by(imdb_title_id,genre, duration,all_vote) %>% summarize(no_mov = length(imdb_title_id),
                                                                                           no_dur = mean(duration),no_votes = mean(all_vote))
    }
    # Collapse the per-title rows to one bubble per genre.
    scat_data <-scat_data %>% group_by(genre) %>% summarize(no_mov = length(imdb_title_id),
                                                            no_dur = mean(duration),no_votes = mean(no_votes))
    scat_data %>%
      plot_ly() %>%
      add_markers(x = ~no_votes,
                  y = ~no_dur,
                  color = ~genre,
                  size = ~no_mov,
                  text = ~paste("Average duration of movie: ", round(no_dur,1),
                                "<br>",
                                "Average votes:", round(no_votes,1),
                                "<br>",
                                "Average Number of movies:", round(no_mov,1)
                  ),
                  hoverinfo = "text") %>%
      layout(title="Genre Popularity for various duration",
             xaxis=list(title="Votes received(scale :1-10)"),
             yaxis=list(title= "Duration(in minutes)"))
  })
}
## -----------------------------------------------------
## Running the Shiny App
## -----------------------------------------------------
shinyApp(ui,server) |
3c26cb1b066741eb870ddfd10e99c021c56f125b | 3340f5d6a5d6edd892a418a7148de60a0766ac54 | /server.R | 1f1428ef9335b9f61b36100c9a93a6c016385e2e | [] | no_license | florbits/shinyFrailtypack | 1794ce0e3c40730f2d0add21a22816f897a95e2e | 9554a2e000eed15bc9527719094ac7ad3a9f1b1f | refs/heads/master | 2020-05-17T14:35:42.689936 | 2015-09-07T09:42:31 | 2015-09-07T09:42:31 | 42,043,762 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,620 | r | server.R | # server.R
shinyServer(
function(input, output,session) {
isFileLoaded<-reactive({
if (is.null(input$datafile))
return (FALSE)
})
  # Records the uploaded file's base name and whether it is an .rda file
  # (both stashed in tempValues), then returns the status string
  # "Uploaded file: <name> with <n> entries".
  isFileRDAfunc <- reactive({
    tempValues$nameAsString<- basename(input$datafile$name)
    # grepl(".rda", ...) uses an unescaped dot, so any name containing
    # "rda" preceded by one character would match — acceptable for the
    # expected file names, but worth noting.
    tempValues$isRDA <-grepl(".rda",tempValues$nameAsString)
    if (tempValues$isRDA==TRUE){
      paste("Uploaded file: ",tempValues$nameAsString," with ",dim(lodadedData())[1]," entries")
    }
    # for future implementations use else if to customize other file extentions messages
    # NOTE: both branches currently build the identical message.
    else paste("Uploaded file: ",tempValues$nameAsString," with ",dim(lodadedData())[1]," entries")
  })
output$fileStatusInfo <- renderText({
if (is.null(input$datafile)==FALSE) filetypeVal<-isFileRDAfunc()
else filetypeVal="FILE is not YET LOADED"
filetypeVal})
  # Populates every model-building widget with the column names of the
  # uploaded data set (choices for the Surv() terms, covariates, cluster /
  # terminal / slope columns). The parameter-less update* calls at the end
  # simply re-trigger the corresponding widgets. No-op until a file name
  # has been recorded by isFileRDAfunc().
  updateDisplayedArguments<-reactive({
    if (!tempValues$nameAsString==''){
      tempValues$columnNames<-names(lodadedData())
      argChoices <- unlist(strsplit(tempValues$columnNames, split=","))
      updateSelectizeInput(session, 'survArguments', choices = argChoices,server = TRUE)
      updateSelectizeInput(session, 'usedArguments', choices = argChoices,server = TRUE)
      updateSelectizeInput(session, 'terminalValue', choices = argChoices,server = TRUE)
      updateSelectizeInput(session, 'clusterColumn', choices = argChoices,server = TRUE)
      updateSelectizeInput(session, 'slopeValue' , choices = argChoices,server = TRUE)
      updateSelectizeInput(session, 'termEvArguments', choices = argChoices,server = TRUE)
      updateSelectInput(session, 'clusteredData')
      updateNumericInput(session,'clustersNb')
      updateSelectInput(session,'alphaValue')
      updateSelectInput(session,'recurrentAGValue')
      updateTextInput(session, 'kappaValues')
      updateSelectInput(session, 'crossValidation')
      updateSelectInput(session,'correlation')
    }
    else argChoices <- 'empty_field'
  })
  # Reads the uploaded file(s) and returns the combined data frame, or NULL
  # when nothing has been uploaded. .rda uploads are load()-ed (the first
  # object saved in the file is taken via get()); anything else is parsed
  # as CSV with a header row.
  lodadedData<-reactive({
    inFile <- input$datafile
    if (is.null(inFile)){
      return(NULL)}
    # fileInput may deliver several paths; each one is read and the results
    # are stacked row-wise.
    frailtyData <- do.call(rbind, lapply(inFile$datapath, function(M){
      if (tempValues$isRDA==TRUE) ws<-get(load(M))
      else ws <-read.csv(M, header=TRUE)
    }))
  })
  # Interactive preview table of the uploaded data. Calling
  # updateDisplayedArguments() first ensures the selectize widgets are
  # (re)populated whenever the table renders.
  output$contents <- renderDataTable({
    updateDisplayedArguments() # the order of called functions is VERY important for display output !
    lodadedData()
    #head(frailtyData, n = input$obs)
  }, options = list(lengthMenu = c(5,10,50,200,500,1000), pageLength = 5)
  )
  # Evaluates the model-call string assembled elsewhere (in
  # tempValues$totalStringToPrint) and returns the fitted model object.
  resultsBlock<-reactive({
    switchCurrentState()
    # Some presets produce a call that refers to `readmissionTransformed`,
    # so the transformed copy is materialised in this environment first —
    # eval() below runs in this same environment and can therefore see it.
    if(is.null(tempValues$transformedData)==FALSE) readmissionTransformed <-tempValues$transformedData
    # NOTE(review): eval(parse(...)) executes the assembled string as R code.
    customFunct <- eval(parse(text=toString(tempValues$totalStringToPrint)))
  })
switchCurrentState<-reactive({
if (tempValues$currentState==TRUE) tempValues$currentState=FALSE
else tempValues$currentState==TRUE
})
output$functionAsText <- renderText({
tempValues$dataFileName=unlist(strsplit(tempValues$nameAsString,".", fixed=TRUE))
eval(parse(text = c("data(",tempValues$dataFileName[1],")")))
if ((is.null(lodadedData())==FALSE) & input$JointGselectedformula =='Joint General default options' & input$modelType =='Joint General' ) {
tempValues$resultsFileName=input$JointGselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time,event) ~ cluster(id) + dukes + charlson + sex + chemo + terminal(death),
formula.terminalEvent = ~ dukes + charlson + sex + chemo, data =",tempValues$dataFileName[1],", jointGeneral = TRUE,
n.knots = 8, kappa = c(2.11e+08, 9.53e+11))")}
else if ((is.null(lodadedData())==FALSE) & input$Jointselectedformula =='JOINT frailty model with gap times' & input$modelType =='Joint' ) {
tempValues$resultsFileName=input$Jointselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + dukes + charlson + sex + chemo + terminal(death),
formula.terminalEvent = ~ dukes + charlson + sex + chemo, data =",tempValues$dataFileName[1],",
n.knots = 8 , kappa = c(2.11e+08,9.53e+11))")}
else if ((is.null(lodadedData())==FALSE) & input$Jointselectedformula =='Stratified JOINT frailty model with gap times' & input$modelType =='Joint' ) {
tempValues$resultsFileName=input$Jointselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + dukes + charlson + strata(sex) + chemo + terminal(death),
formula.terminalEvent = ~ dukes + charlson + sex + chemo, data =",tempValues$dataFileName[1],",
n.knots = 8, kappa = c(2.11e+08,2.11e+08,9.53e+11))")}
else if ((is.null(lodadedData())==FALSE) & input$Jointselectedformula =='JOINT frailty model without alpha parameter' & input$modelType =='Joint' ) {
tempValues$resultsFileName=input$Jointselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + dukes + charlson + sex + chemo + terminal(death),
formula.terminalEvent = ~ dukes + charlson + sex + chemo, data =",tempValues$dataFileName[1],",
n.knots = 8, kappa = c(2.11e+08,9.53e+11), Alpha = 'none')")}
else if ((is.null(lodadedData())==FALSE) & input$Jointselectedformula =='JOINT frailty model for clustered data' & input$modelType =='Joint' ) {
tempValues$resultsFileName=input$Jointselectedformula
tempValues$transformedData <- transform(readmission,group=id%%31+1)
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(t.start, t.stop, event) ~ cluster(group) + num.id(id) + dukes + charlson + sex + chemo + terminal(death),
formula.terminalEvent = ~ dukes + charlson + sex + chemo, data = readmissionTransformed,
recurrentAG = TRUE, n.knots = 10, kappa = c(2.11e+08,9.53e+11))")}
else if ((is.null(lodadedData())==FALSE) & input$Multivariateselectedformula =='MULTIVARIATE frailty model with gap times' & input$modelType =='Multivariate' ) {
tempValues$totalStringToPrint <- paste("multivPenal(Surv(TIMEGAP,INDICREC)~ cluster(PATIENT) + v1 + v2 + event2(INDICMETA) + terminal(INDICDEATH),
formula.Event2 =~ v1 + v2 + v3, formula.terminalEvent =~ v1, data = dataMultiv,
hazard = 'Weibull')")}
else if ((is.null(lodadedData())==FALSE) & input$Coxselectedformula=='COX proportionnal hazard model with gap times' & input$modelType =='Cox') {
tempValues$resultsFileName=input$Jointselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ dukes + charlson + sex + chemo, n.knots = 10, kappa = 1, data =",tempValues$dataFileName[1],",
cross.validation = TRUE)")}
else if ((is.null(lodadedData())==FALSE) & input$Additiveselectedformula =='ADDITIVE frailty model with no correlation between random effects' & input$modelType =='Additive'){
tempValues$resultsFileName=input$Additiveselectedformula
tempValues$totalStringToPrint <- paste("additivePenal(Surv(t1,t2,event) ~ cluster(group) + var1 + var2 + slope(var1), cross.validation = TRUE, data =",tempValues$dataFileName[1],",
correlation = FALSE, n.knots = 10, kappa = 1)")}
else if ((is.null(lodadedData())==FALSE) & input$Additiveselectedformula =='ADDITIVE frailty model with a correlation between random effects' & input$modelType =='Additive'){
tempValues$resultsFileName=input$Additiveselectedformula
tempValues$totalStringToPrint <- paste("additivePenal(Surv(t1,t2,event) ~ cluster(group) + var1 + var2 + slope(var1), cross.validation = TRUE, data =",tempValues$dataFileName[1],",
correlation = TRUE, n.knots = 10, kappa = 1)")}
else if ((is.null(lodadedData())==FALSE) & input$Nestedselectedformula =='NESTED frailty model' & input$modelType =='Nested'){
tempValues$resultsFileName=input$input$Nestedselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(t1, t2, event) ~ cluster(group) + subcluster(subgroup) + cov1 + cov2, data =",tempValues$dataFileName[1],",
n.knots = 8, kappa = 50000, cross.validation = TRUE)")}
else if ((is.null(lodadedData())==FALSE) & input$Nestedselectedformula =='Stratified NESTED frailty model' & input$modelType =='Nested'){
tempValues$resultsFileName=input$Nestedselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(t1, t2, event) ~ cluster(group) + subcluster(subgroup) + cov1 + strata(cov2), data =",tempValues$dataFileName[1],",
n.knots = 8, kappa = c(50000,50000))")}
else if ((is.null(lodadedData())==FALSE) & input$Sharedselectedformula =='Shared frailty model with gap times' & input$modelType =='Shared'){
tempValues$resultsFileName=input$Sharedselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + dukes + charlson + sex + chemo, n.knots = 10, data =",tempValues$dataFileName[1],",
kappa = 1, cross.validation = TRUE)")}
else if ((is.null(lodadedData())==FALSE) & input$Sharedselectedformula =='Stratified shared frailty model with gap times' & input$modelType =='Shared'){
tempValues$resultsFileName=input$Sharedselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + charlson + dukes + chemo + strata(sex), n.knots = 10, data =",tempValues$dataFileName[1],",
kappa = c(2.11e+08,2.11e+08))")}
else if ((is.null(lodadedData())==FALSE) & input$Sharedselectedformula =='Shared frailty model with time-varying effect of covariates' & input$modelType =='Shared'){
tempValues$resultsFileName=input$Sharedselectedformula
tempValues$totalStringToPrint <- paste("frailtyPenal(Surv(time, event) ~ cluster(id) + dukes + charlson + timedep(sex) + chemo, n.knots = 8, data =",tempValues$dataFileName[1],",
kappa = 1, betaknots = 3, betaorder = 1)")}
else if ((is.null(lodadedData())==FALSE) & input$Sharedselectedformula =='Shared frailty model with interval-censored data' & input$modelType =='Shared'){
tempValues$resultsFileName=input$Sharedselectedformula
bcos$event <- ifelse(bcos$left!=bcos$right,1,0)
bcos$group <- c(rep(1:20,4),1:14)
tempValues$totalStringToPrint <- paste("frailtyPenal(SurvIC(left, right, event) ~ cluster(group) + treatment, n.knots = 8, data =",tempValues$dataFileName[1],", kappa = 10000)")}
else if ((is.null(lodadedData())==FALSE) & ((input$JointGselectedformula== 'custom options' & input$modelType =='Joint General') |
(input$Sharedselectedformula == 'custom options' & input$modelType =='Shared') |
(input$Additiveselectedformula== 'custom options' & input$modelType =='Additive') |
(input$Coxselectedformula== 'custom options' & input$modelType =='Cox') |
(input$Jointselectedformula== 'custom options' & input$modelType =='Joint') |
(input$Multivariateselectedformula== 'custom options' & input$modelType =='Multivariate') |
(input$Nestedselectedformula== 'custom options' & input$modelType =='Nested'))){
tempValues$resultsFileName=paste(input$modelType," custom Results")
tempValues$totalStringToPrint <-customFunctionBuilder()
}
else return (NULL)
})
  # Builds a custom model-call string from the individual widget selections
  # (Surv() arguments, covariates, cluster/terminal/slope columns, kappa,
  # knots, ...). The finished string is stored in
  # tempValues$totalStringToPrint and later eval()ed by resultsBlock().
  customFunctionBuilder<-reactive({
    if (!tempValues$nameAsString==''){
      # Defaults for every optional fragment of the call; the model-type
      # branches below overwrite only the fragments they need.
      funcToCall=''
      cluster=c("cluster(",input$clusterColumn,") + ")
      num.id= ''
      formula.terminalEvent =''
      terminal=''
      slope=''
      recurrentAGString=''
      correlation=''
      n.knotsString =input$knotsNb
      stringToPrint=''
      terminalEventString=''
      fileNameAsString = tempValues$dataFileName[1]
      alphaString =''
      jointGeneralValue =''
      kappaString=c(", kappa = c(",input$kappaValues,")")
      cross.validation= "FALSE"
      if(input$modelType=='Joint' | input$modelType=='Joint General'){
        funcToCall <-'frailtyPenal'
        formula.terminalEvent =',formula.terminalEvent = ~'
        selectedTerEvSize<-length(input$termEvArguments)
        if (is.null(input$numID)==FALSE) {
          num.id = paste("+ num.id(",input$numID,") + ")
          # The clustered-data variant refers to `readmissionTransformed`,
          # which resultsBlock() recreates from tempValues$transformedData.
          tempValues$transformedData <- transform(readmission,group=id%%31+1)
        }
        # Join the terminal-event covariates with " + " separators.
        if (selectedTerEvSize>=1){
          for (p in 1:selectedTerEvSize) {
            if (p==selectedTerEvSize) terminalEventString<-c(terminalEventString,input$termEvArguments[p])
            else terminalEventString<-c(terminalEventString,input$termEvArguments[p],' + ')
          }
        }
        alphaString = c(", Alpha = '",input$alphaValue,"'")
        terminal =c(" + terminal(",input$terminalValue,")")
        recurrentAGString=c(", recurrentAG = ",input$recurrentAGValue)
        if (input$modelType=='Joint General') jointGeneralValue = ", jointGeneral = TRUE"
      }
      else if (input$modelType =='Cox'){
        funcToCall <-'frailtyPenal'
        cluster=''
        cross.validation=input$crossValidation
      }
      else if (input$modelType =='Shared' | input$modelType =='Nested') funcToCall <-'frailtyPenal'
      else if (input$modelType=='Additive'){
        funcToCall <-'additivePenal'
        slope =c(" + slope(",input$slopeValue,")")
        cross.validation=input$crossValidation
        correlation=paste(", correlation = ",input$correlation)
      }
      else if (input$modelType=='Multivariate') funcToCall <-'multivPenal'
      else funcToCall <- ''
      # Comma-separated Surv() arguments and " + "-joined covariates,
      # assembled the same way for every model type.
      survString=input$survArguments[1]
      tempValues$columnNames<-names(lodadedData())
      selectedSurvSize<-length(input$survArguments)
      selectedArgSize<-length(input$usedArguments)
      #Part of string common for all the models
      if (selectedSurvSize>=2){
        for (j in 2:selectedSurvSize) {
          survString<-c(survString,',',input$survArguments[j])
        }
      }
      if (selectedArgSize>=1){
        for (i in 1:selectedArgSize) {
          if (i==selectedArgSize) stringToPrint<-c(stringToPrint,input$usedArguments[i])
          else stringToPrint<-c(stringToPrint,input$usedArguments[i],' + ')
        }
      }
      tempValues$customKnotsNb<- input$knotsNb[1]
      # Concatenate every fragment into the final call string.
      tempStringVect <-c(funcToCall, "(Surv(", survString, ") ~",cluster, stringToPrint, terminal, slope, formula.terminalEvent, terminalEventString,
                         correlation, recurrentAGString," , n.knots = ",n.knotsString, kappaString, " , data = ", fileNameAsString , alphaString,
                         jointGeneralValue, " , cross.validation =", cross.validation ,")")
      tempValues$totalStringToPrint<- paste(tempStringVect,sep='', collapse = '')
    }
  })
#output for program status object
output$out2 <- renderPrint({
input$initiateFit # detects when "EXECUTE" button is pressed
isolate({ # prevents code re-execution, unless "EXECUTE" button is pressed
if (is.null(resultsBlock())==TRUE) {
cat(sprintf("Press EXECUTE to procces function"))
}
else cat(sprintf("Process finished. Please check results tab"))
})
})
# output RESULTS string object
output$fitResults <- renderPrint({
input$initiateFit # detects when "EXECUTE" button is pressed
isolate({ # prevents code re-execution, unless "EXECUTE" button is pressed
if (is.null(resultsBlock())==FALSE) {
print(resultsBlock(), digits = 4)
}
else cat(sprintf("NO data file selected yet"))
})
})
# save as file output
output$downloadResultsData <- downloadHandler(
filename = function() { paste(tempValues$resultsFileName, '.txt', sep='') },
content = function(filename) {
sink(filename)
print(resultsBlock(), digits = 4)
# Stop writing to the file
sink()
}
)
  # outplut Plot with display parameters
  # On-screen hazard plot, drawn once a model has been fitted.
  output$plot2display <- renderPlot({
    if (is.null(resultsBlock())==TRUE){
      # NOTE(review): renderPlot expects a plot, so this string is never
      # shown in the UI — the panel simply stays empty until a fit exists.
      paste ("No plot can be desplayed at this moment")}
    else plotInput()},
    width = "auto", height = "auto", res = 72, quoted = FALSE
  )
# download version for outplut Plot
output$downloadPlot <- downloadHandler(
filename = "Shinyplot.png",
content = function(file) {
png(file)
plotInput()
dev.off()
})
# function that draws the Plot
plotInput <- function(){
p <-plot(resultsBlock(),type.plot="haz",event="recurrent",conf.bands=TRUE)}
# current chosen model message
output$text1 <- renderText({
cat(sprintf("You have selected", input$modelType, "model"))
})
#######################################################################################################################
#######################################################################################################################
}
)
|
fc864be6aa982220ea53d75f7f5a800a3d6d8aaf | 3d921d2dd15f258c260a60a2575ebeb56c99fbcf | /man/usz_13c_a.Rd | 9dbada4c50a7aeb8eaa0cf0799fafa158291c543 | [] | no_license | cran/breathtestcore | a337fdb3c918edbda7700b314f5c6a41bfc61693 | e83afbca373e670b02448d008bcac89c7b50efa0 | refs/heads/master | 2023-02-19T01:58:33.820793 | 2023-02-13T13:00:07 | 2023-02-13T13:00:07 | 90,961,247 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 821 | rd | usz_13c_a.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/breathtestcore.R
\docType{data}
\name{usz_13c_a}
\alias{usz_13c_a}
\title{Exotic 13C breath test data}
\usage{
data(usz_13c_a)
}
\description{
13C time series PDR data from three different groups in a randomized
(= not-crossover) design. These are unpublished data from
\href{https://www.usz.ch/fachbereich/gastroenterologie-und-hepatologie/}{Gastroenterology and Hepatology,
University Hospital Zurich}.
Data are formatted as described in \code{\link{usz_13c}}. These time series present
a challenge for algorithms.
}
\examples{
\donttest{
library(dplyr)
library(ggplot2)
data(usz_13c_a)
d = usz_13c_a \%>\%
cleanup_data() \%>\% # recommended to test for validity
nlme_fit()
plot(d)
}
}
\keyword{datasets}
|
cc7868bc38810443b025dc0ec4a57643ae8963cd | 49e160077670a4ba49a15ecd6bcd570569690c36 | /man/get_metadata.Rd | cdfb612af2e4d7d406bed4ebb0b167753f46bd17 | [
"MIT"
] | permissive | ParizadB/cbmr | 9a04bc33b9880f77a1fb852b3d5073672ff3badf | dfea121a189d6f5a07852f9d60cf5f1a9ce22693 | refs/heads/main | 2023-03-26T21:11:45.729253 | 2021-03-17T14:23:10 | 2021-03-17T14:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 587 | rd | get_metadata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Labguru_functions.R
\name{get_metadata}
\alias{get_metadata}
\title{Download metadata from Labguru}
\usage{
get_metadata(exp_id = NULL, exp_name = NULL)
}
\arguments{
\item{exp_id}{numeric experimental id, fastest way to get data}
\item{exp_name}{character of experimental name}
}
\description{
Download metadata from Labguru when given an experiment id or
an experiment name
}
\examples{
\dontrun{
labguru_set_token(
token = "abcdefg",
server = "https://sund.labguru.com"
)
get_metadata(exp_id = 1)
}
}
|
2509e4104602d921c212c49d5354fc78c045f88b | 6de313ba33ca7eeb89f94b15e83bbb15baef2960 | /man/calculate.tm.p.values.Rd | 66af3949f9d0abd826e90523371e3a0fd9597740 | [] | no_license | QuackenbushLab/MONSTER | 4f2c717102741d90fc9b3510a892c35398141ca1 | 5799b572cb3996bdcd320e0f6862dbe7e6fc112f | refs/heads/master | 2021-06-11T05:23:17.245826 | 2020-10-15T15:41:08 | 2020-10-15T15:41:08 | 68,241,149 | 3 | 2 | null | 2020-10-15T15:41:10 | 2016-09-14T20:34:32 | R | UTF-8 | R | false | true | 791 | rd | calculate.tm.p.values.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_functions.R
\name{calculate.tm.p.values}
\alias{calculate.tm.p.values}
\title{Calculate p-values for a tranformation matrix}
\usage{
calculate.tm.p.values(monsterObj, method = "z-score")
}
\arguments{
\item{monsterObj}{monsterAnalysis Object}
\item{method}{one of 'z-score' or 'non-parametric'}
}
\value{
vector of p-values for each transcription factor
}
\description{
This function calculates the significance of an observed
transition matrix given a set of null transition matrices
}
\examples{
# data(yeast)
# design <- c(rep(0,20),rep(NA,10),rep(1,20))
# monsterRes <- monster(yeast$exp.cc, design, yeast$motif, nullPerms=100, numMaxCores=4)
data(monsterRes)
calculate.tm.p.values(monsterRes)
}
|
55e23ba6086e2d66aff97f752e003940ac113a3c | a8cdedefea85a4615a14cbe20323598300b5886c | /swirl/12 k-means clustering.R | 7a675fb2dc9a3bedbf59c9b5535db25bb4dd7b9d | [] | no_license | PawFran/exploratory-data-analysis | 5031184bfa8d9dbad84877cfc7c31b5e9074eb3f | 36a74f36320607d2ae8b2ae2c3d26642054bceac | refs/heads/master | 2020-06-13T08:55:29.853884 | 2016-12-02T21:11:42 | 2016-12-02T21:11:42 | 75,422,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,020 | r | 12 k-means clustering.R | cmat
# first step
points(cx, cy, col = c('red', 'orange', 'purple'), pch = 3, cex = 2, lwd = 2)
mdist(x, y, cx, cy)
apply(distTmp, 2, which.min)
points(x, y, pch = 19, cex = 2, col = cols1[newClust])
# second
tapply(x, newClust, mean)
tapply(y, newClust, mean)
points(newCx, newCy, col = cols1, pch = 8, cex = 2, lwd = 2)
mdist(x, y, newCx, newCy)
apply(distTmp2, 2, which.min)
points(x, y, pch = 19, cex = 2, col = cols1[newClust2])
# third
tapply(x, newClust2, mean)
tapply(y, newClust2, mean)
points(finalCx, finalCy, col = cols1, pch = 9, cex = 2, lwd = 2)
# runnig kmeans algorithm
kmeans(dataFrame, centers = 3)
kmObj$iter
plot(x, y, col = kmObj$cluster, pch = 19, cex = 2)
points(kmObj$centers, col = c('black', 'red', 'green'), pch = 3, cex = 3, lwd = 3)
# now six clusters
plot(x, y, col = kmeans(dataFrame, 6)$cluster, pch = 19, cex = 2)
plot(x, y, col = kmeans(dataFrame, 6)$cluster, pch = 19, cex = 2)
plot(x, y, col = kmeans(dataFrame, 6)$cluster, pch = 19, cex = 2)
|
255e32ae7a0ab76b5d1462b1095c71b6326c5dd8 | 3ca23ecf1bdebbd51f27bab9cb1c65f553b8dafd | /estat-inf-parametros-normal.R | 6e85867c0e023e3aebc8949bc3d7a96c8098b08f | [] | no_license | arpanosso/dashboard | c99ce6045aa010c328e2941ee337c0470281ba9e | 3e9586c699bf4f77e98168ec656ff115c7169972 | refs/heads/master | 2023-06-05T01:08:39.992521 | 2021-06-21T19:02:25 | 2021-06-21T19:02:25 | 370,525,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,416 | r | estat-inf-parametros-normal.R | library(tidyverse)
library(shiny)
library(shinydashboard)
# Dashboard layout. Fixes relative to the original:
#  * the first column() of each parameter row was missing its `width`
#    argument, so the h3() tag was positionally matched to `width` and
#    shiny::column() stops with "column width must be between 1 and 12"
#    when the UI is built;
#  * the "binomial" tab reused the input IDs (m1, sd1, m2, sd2, m3, sd3)
#    and the output ID (grafico_hist) of the "normal" tab — Shiny IDs must
#    be unique across the page, so the binomial copies now carry a "_bin"
#    suffix (the server currently renders only "grafico_hist"; the binomial
#    plot slot stays empty until it gets its own render).
ui <- dashboardPage(
  header=dashboardHeader(
    title = "Estatística-UNESP"
  ),
  sidebar=dashboardSidebar(
    sidebarMenu(
      menuItem("Normal",tabName = "normal"),
      menuItem("Binomial",tabName = "binomial"),
      menuItem("Poisson", tabName = "poisson")
    )
  ),
  body=dashboardBody(
    tabItems(
      tabItem(
        tabName = "normal",
        fluidRow( # sempre usar com Colum, para não dar toque
          column(h2("Estudo dos Parâmetros da Normal"),width = 12),
          column(
            width = 4, # FIX: width was missing
            h3("Distribuição 01"),
            sliderInput("m1","Média",
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd1","Desvio-padrão",min=5,
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            h3("Distribuição 02"),
            width = 4,
            sliderInput("m2","Média",
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd2","Desvio-padrão",min=5,
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            h3("Distribuição 03"),
            width = 4,
            sliderInput("m3","Média",
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd3","Desvio-padrão",min=5,
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            width = 12,
            plotOutput("grafico_hist")
          )
        )
      ),
      tabItem(
        tabName = "binomial",
        h1("Estudo de parâmetros Binomial"),
        fluidRow( # sempre usar com Colum, para não dar toque
          column(h2("Estudo dos Parâmetros da Normal"),width = 12),
          column(
            width = 4, # FIX: width was missing
            h3("Distribuição 01"),
            sliderInput("m1_bin","Média",       # FIX: unique ID (was "m1")
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd1_bin","Desvio-padrão",min=5,  # FIX: was "sd1"
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            h3("Distribuição 02"),
            width = 4,
            sliderInput("m2_bin","Média",       # FIX: was "m2"
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd2_bin","Desvio-padrão",min=5,  # FIX: was "sd2"
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            h3("Distribuição 03"),
            width = 4,
            sliderInput("m3_bin","Média",       # FIX: was "m3"
                        min=0,
                        max=100,
                        value = 50,
                        step = 1),
            numericInput("sd3_bin","Desvio-padrão",min=5,  # FIX: was "sd3"
                         max=25,
                         value = 7,
                         step = 1)
          ),
          column(
            width = 12,
            plotOutput("grafico_hist_bin")      # FIX: was "grafico_hist"
          )
        )
      ),
      tabItem(
        tabName = "poisson",
        h1("Estudo de parâmetros Poisson")
      )
    )
  ),
  title = "Meu App"
)
server <- function(input, output, session) {
  # Draws the three normal density curves parameterised by the sliders on
  # the "normal" tab, with dashed vertical lines at each mean.
  output$grafico_hist <- renderPlot({
    base_plot <- ggplot(data.frame(x = c(0, 100)), aes(x = x))
    base_plot +
      stat_function(fun = function(x) dnorm(x, input$m1, input$sd1),
                    col = "red") +
      stat_function(fun = function(x) dnorm(x, input$m2, input$sd2),
                    col = "blue") +
      stat_function(fun = function(x) dnorm(x, input$m3, input$sd3),
                    col = "darkgreen") +
      theme_bw() +
      labs(x = "Variável aleatória (X)",
           y = "Densidade de frequência") +
      theme(
        axis.title.x = element_text(size = 16),
        axis.title.y = element_text(size = 16)
      ) +
      geom_vline(xintercept = c(input$m1, input$m2, input$m3),
                 color = "darkgray", lty = 2)
  })
}
shinyApp(ui, server)
|
01ef81bf341474039dcc817e795ae5de8114bdaf | c24fa12b935ce3594723229c83968daba133012f | /Problem set 2/Project/.Rproj.user/F8D558E1/sources/per/t/160F13D7-contents | 929c9297f0ecc7f0cb490ab854745a64eda85537 | [] | no_license | mmahin/Data-Mining-Fall-20 | 8de523d2256b3016fddba043f994b5975ee9e3e9 | 1b433943108bb17740b2cd2b964adff5e2610a24 | refs/heads/main | 2023-02-05T10:55:18.223739 | 2020-12-25T20:58:09 | 2020-12-25T20:58:09 | 323,483,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,449 | 160F13D7-contents | library(spatstat)
#clear the environment
# NOTE(review): rm(list=ls()) wipes the whole session; fine for a standalone
# script but avoid when sourcing from other code.
rm(list=ls())
#Import the data
mydata1 = read.csv("Zone_1.csv", header = TRUE)
# Merge collective houses and garages into a single mark category
mydata1$buildingType[mydata1$buildingType == "collective_house"] <- "collective_houses&garages"
mydata1$buildingType[mydata1$buildingType == "garage"] <- "collective_houses&garages"
mydata1$buildingType <- as.factor(mydata1$buildingType)
unique(mydata1$buildingType)
# Read minimum and maximum locations
min_longitude = min(mydata1$longitude)
max_longitude = max(mydata1$longitude)
min_latitude = min(mydata1$latitude)
max_latitude = max(mydata1$latitude)
par(mfrow=c(1,1))
#----- Task c: Analysis for Different Building Patterns ------
# Marked point pattern on the data bounding box; Kcross between the merged
# category and the remaining type measures their colocation
myPattern1 <- ppp(mydata1$longitude, mydata1$latitude, marks = mydata1$buildingType, c(min_longitude,max_longitude), c(min_latitude,max_latitude))
plot1 <- plot(Kcross(myPattern1, "collective_houses&garages", "single_house"), main="A: Colocation Collective Houses and garages as same category with sigle house",
xlab="Distance r",
ylab="K(r)")
#Import the data
# Same analysis with collective houses + single houses merged, vs garages
mydata2 = read.csv("Zone_1.csv", header = TRUE)
mydata2$buildingType[mydata2$buildingType == "collective_house"] <- "collective_houses&single_houses"
mydata2$buildingType[mydata2$buildingType == "single_house"] <- "collective_houses&single_houses"
mydata2$buildingType <- as.factor(mydata2$buildingType)
unique(mydata2$buildingType)
myPattern2 <- ppp(mydata2$longitude, mydata2$latitude, marks = mydata2$buildingType, c(min_longitude,max_longitude), c(min_latitude,max_latitude))
plot2 <- plot(Kcross(myPattern2, "collective_houses&single_houses", "garage"), main="B: Colocation Collective Houses and single house as same category with garage",
xlab="Distance r",
ylab="K(r)")
#Import the data
# Same analysis with garages + single houses merged, vs collective houses
mydata3 = read.csv("Zone_1.csv", header = TRUE)
mydata3$buildingType[mydata3$buildingType == "garage"] <- "garage&single_houses"
mydata3$buildingType[mydata3$buildingType == "single_house"] <- "garage&single_houses"
mydata3$buildingType <- as.factor(mydata3$buildingType)
unique(mydata3$buildingType)
myPattern3 <- ppp(mydata3$longitude, mydata3$latitude, marks = mydata3$buildingType, c(min_longitude,max_longitude), c(min_latitude,max_latitude))
plot3 <- plot(Kcross(myPattern3, "garage&single_houses", "collective_house"), main="C: Colocation single Houses and garages as same category with collectieve house",
xlab="Distance r",
ylab="K(r)")
| |
31c5013ba29dfb91d803791bdc5a72439d87ee6f | 9b995e7e160c8cd794a0ae27e6784a14b5cabca8 | /tests/testthat.R | e871b9baf8feb2f40fcc1147ecec96fb10d41bee | [] | no_license | azradey/elliptical-package | 29acbac65101aa5716f2f4a8463205de69c886f6 | ad163129e2c88a2a8d193f5ed288928f6a8737fe | refs/heads/master | 2020-03-19T06:07:21.392176 | 2018-06-04T08:13:36 | 2018-06-04T08:13:36 | 135,992,187 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package and run its test suite
library(Elliptical)
test_check("Elliptical")
|
fbbe49e05c64e18f3b568f2bac82d8a19f37d7a2 | 51bd763f0e55fdc51815b166c2e7098e8254e05a | /cachematrix.R | b2594cb6c332976a5d5d81b9c793d826195d0788 | [] | no_license | dingzonghui/ProgrammingAssignment2 | eda0c09f6d6e3226a2f505c4845f80508b0b8100 | a38c47f3d4c1252819922c83e0055913ea094da2 | refs/heads/master | 2021-01-18T08:06:16.301625 | 2015-08-22T19:41:33 | 2015-08-22T19:41:33 | 41,218,155 | 0 | 0 | null | 2015-08-22T17:43:23 | 2015-08-22T17:43:22 | null | UTF-8 | R | false | false | 1,865 | r | cachematrix.R | ## The following functions work together to create a square matrix
## and make the inverse of the matrix available in the cache
## makeCacheMatrix function creates a matrix object that can cache its inverse
## Create a matrix wrapper that can cache its inverse.
## Returns a list of accessor closures: set/get for the matrix itself,
## setMatrix/getInverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  # cached inverse; reset to NULL whenever the matrix changes
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setMatrix = function(inverse) inv <<- inverse,
    getInverse = function() inv
  )
}
## cacheSolve function computes the inverse of the matrix returned by makeCacheMatrix function
## If the inverse has already been calculated and the matrix has not changed,
## it’ll get the inverse from the cache directly without computation again.
## Return the inverse of the matrix held by a makeCacheMatrix object.
## If the inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored in the cache and returned.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if(is.null(inv)) {
    # not cached yet: compute, store, return
    inv <- solve(x$get(), ...)
    x$setMatrix(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
136c6a7368c09bebc5d2487f1da50e3b6f08fca0 | 64d5df27325f07af9c602ddf85a81ff4f0aec189 | /man/ref_textbox-class.Rd | 450d503e98d41162771d747ab959a68101dd0c88 | [] | no_license | cran/stacomiR | 0641860bef2f4b5c05490d06de2c58fe3fe30059 | 981c67ba5d18ee9a5c357192e4ba4f9e864ec039 | refs/heads/master | 2022-07-22T14:11:55.680662 | 2022-07-18T08:20:02 | 2022-07-18T08:20:02 | 95,473,811 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 517 | rd | ref_textbox-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ref_textbox.R
\docType{class}
\name{ref_textbox-class}
\alias{ref_textbox-class}
\title{ref_textbox referencial class}
\description{
allows putting a value within a glabel
}
\section{Slots}{
\describe{
\item{\code{title='character'}}{the title of the box giving the possible choices}
\item{\code{labels}}{the logical parameters choice}
\item{\code{checked}}{a vector}
}}
\author{
cedric.briand@eptb-vilaine.fr
}
|
5592967998721aee42262eb726375cdcb6f91787 | 6fd0fb62fd2713eba430e95eb45b6beb0077aadb | /16 cycle.R | fc684797bff787b018b1820b8fc6d76f5f4c95b3 | [] | no_license | kshf59/Cycle-Accident | 360c04ed7474e70eb6477192adbca26a7b3ee945 | 74cf8cbd89027bd115efe65fa02c3f7ddee07f3f | refs/heads/master | 2021-05-02T12:00:26.378403 | 2018-02-15T08:50:46 | 2018-02-15T08:50:46 | 120,734,162 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 993 | r | 16 cycle.R | library(ggmap)
library(ggplot2)
library(dplyr)
library(readr)
# Load the 2016 cycle accident data and restrict it to Seoul
X2016_cycle_accident <- read_csv("Cycle Accident/2016 cycle accident.csv")
X2016 <- arrange(X2016_cycle_accident, 스팟코드)
# NOTE(review): rows 1-188 of column 5 are overwritten with "서울특별시" (Seoul)
# before filtering on that column -- confirm the range matches the data file.
X2016[1:188,5] <- "서울특별시"
X2016 <- filter(X2016, 관할경찰서 == "서울특별시")
#1 Fetch the Seoul base map
kor <- get_map("seoul", zoom=11, maptype='roadmap')
#2 Plot the accident spots (longitude/latitude) on the map
kor.map <- ggmap(kor) + geom_point(data=X2016,
aes(x=경도, y=위도),
size=2,
alpha=0.7,
colour = "blue"
)
# Label each spot with its accident count, offset slightly above the point
kor.map + geom_text(data=X2016,
aes(x=경도, y=위도+0.005, label=발생건수),
size=2.5)
# Spots with accident count (발생건수) of 10 or more
ten <- X2016 %>% filter(발생건수 >= 10)
# Positional row removal: dropping row 2 three times removes original rows 2-4,
# then row 6 of what remains is dropped
ten <- ten[-2,]
ten <- ten[-2,]
ten <- ten[-2,]
ten <- ten[-6,]
View(ten)
|
1454f0de015e9d7d275c7b1a44840178b042c409 | bd6ecd74ff1a8d75f37a776fe68767b776e055b4 | /etc/unused_code/scripts/R/normal.r | 8dcb758190afd69676b21961793341c1c9aea691 | [] | no_license | Bilalh/Gen | 0f0eb8ed4d712dcff71b427537ad7e42fd260fb2 | 95e7a2f4588daca3aa66768688e9f77bbdcfb4d0 | refs/heads/master | 2022-01-14T07:16:38.288178 | 2016-09-21T16:10:31 | 2016-09-21T16:10:31 | 178,632,039 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 751 | r | normal.r | #!/usr/bin/env Rscript
library(MASS)
# Experiments with covariance matrices for mvrnorm; only the last Sigma is kept
Sigma <- matrix(c(10,3,3,2),2,2)
Sigma <- matrix(rep(0,9),3,3)
Sigma <- matrix(c(1,0,0, 1,0,0, 1,0,0),3,3)
Sigma
# Draw 5 samples from a 3-dimensional normal with the Sigma above
r <- mvrnorm(n=5, c(100,40,20), Sigma)
# Binomial(10, 0.5) pmf, plotted as spikes with points on top
n=10
p=.5
x=0:10
p=dbinom(x,size=n,prob=p)
plot(x,p,type="h",xlim=c(-1,11),ylim=c(0,0.5),lwd=2,col="blue",ylab="p")
points(x,p,pch=16,cex=2,col="dark red")
# Multinomial random draws; probabilities are normalized internally
rmultinom(10, size = 12, prob = c(0.1,0.2,0.8))
pr <- c(1,3,6,10) # normalization not necessary for generation
rmultinom(10, 20, prob = pr)
## all possible outcomes of Multinom(N = 3, K = 3)
X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
X
# Multinomial pmf evaluated at every outcome column of X
round(apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5))), 3)
e3b1112ed1fadda14fe5424f854761383507a275 | 743cb55fca908b0dd5ae89c42cba048b5cc8669b | /activation-analyses/R/wgcna_prepdata.R | 8008c165c83ce6997f2feb21ac04f2c17273a194 | [
"CC-BY-4.0"
] | permissive | chr1swallace/cd4-pchic | 8309901bad59950dd93ba535984431873d10af3b | ccdb757c5c3760eb914b2cb3f0e9f06ef9e24af7 | refs/heads/master | 2021-03-27T12:36:31.582058 | 2017-08-09T11:08:24 | 2017-08-09T11:08:24 | 56,878,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,354 | r | wgcna_prepdata.R | library(WGCNA)
## Prepare CD4 T-cell activation expression data for WGCNA:
## load normalized expression + sample metadata, filter probes and failed
## samples, compute expression changes from baseline, and save the result.
library(org.Hs.eg.db)
library(data.table)
x <- org.Hs.egENSEMBL
xx <- as.list(org.Hs.egENSEMBL2EG)
options(stringsAsFactors = FALSE);
enableWGCNAThreads(20)
# NOTE(review): CD4CHIC.ROOT, expression.threshold and file.expression are not
# defined here -- presumably they come from common.R; verify before running.
setwd(file.path(CD4CHIC.ROOT,"activation-analyses/R"))
source("common.R")
## get rna-seq limma data @ 4hrs
base.dir<-'/home/oliver/STATS/PROJECTS/T-CELL_PROFILE/WGCNA/'
support.dir<-paste0(base.dir,'support/')
## limma.thresh = 0.05 # SHOULD THIS BE 0.01?
## cd4.limma<-fread(paste0(support.dir,"cd4-limma.csv"),header=TRUE,stringsAsFactors=FALSE,sep=" ")
##
## list of protein_coding genes that are upregulated
##cd4.limma<-subset(cd4.limma,biotype=="protein_coding")
## cd4.down<-subset(cd4.limma, adj.P.Val<=limma.thresh & logFC>0)$id
## cd4.up<-subset(cd4.limma, adj.P.Val<=limma.thresh & logFC<0)$id
## cd4.not.detected<-subset(cd4.limma, is.na(adj.P.Val))$id
## cd4.other<-setdiff(cd4.limma$id, c(cd4.down,cd4.up,cd4.not.detected))
## setattr(cd4.limma, 'colnames', make.names(colnames(cd4.limma)))
## library(ggplot2)
## cd4.limma <- within(cd4.limma,
## {nonact.total <- nonact.1+nonact.2
## act.total <- act.1+act.2})
## library(ggplot2)
## with(cd4.limma, qplot(-logFC,-log10(adj.P.Val),col=log(Length))) + labs(x="Fold change (act/nonact)")
## with(cd4.limma, qplot(AveExpr,-log10(adj.P.Val))) + geom_smooth()
## with(cd4.limma, qplot(log(nonact.total+1),-log10(adj.P.Val))) + geom_smooth()
## with(cd4.limma, qplot(log(act.total+1),-log10(adj.P.Val))) + geom_smooth()
## with(cd4.limma, qplot(log(nonact.total+1),log(act.total+1),col=-log10(adj.P.Val)))
## load the expression data and meta data
tcell.expr.data.file<-paste0(support.dir,'/t-cell-act.profiles.RData')
(load(file=tcell.expr.data.file))
## load the meta data
tcell.expr.meta.data.file<-paste0(support.dir,'/Xaq_PTPN22_pairs_genotype_status-1.csv')
pheno<-read.csv(file=tcell.expr.meta.data.file,header=TRUE,stringsAsFactors=FALSE)
pheno$index<-with(pheno,paste(Pair,Donor,sep="."))
## get the log2 transformed data
exp.dataset<-exprs(gene.vsnorm.data)
## get a list of samples as we want to transform
samples<-pData(gene.vsnorm.data)
## pull these apart to add meta data information more complicated
## as id's don't follow a strict pattern
# Parse sample names of the form PairNN_tT[_treatment]_Dx.CEL into
# pair / time / treatment / individual columns
sample.df<-do.call("rbind",lapply(strsplit(rownames(samples),"\\_"),function(x){
sname<-paste(x,sep="_",collapse="_")
x[1]<-sub("Pair","",x[1])
x[2]<-sub("t","",x[2])
if(length(x)==4)
return(data.frame(sname=sname,pair=x[1],time=x[2],treatment=x[3],individual=sub("^D([^\\.]+.*)\\.CEL","\\1",x[4])))
if(length(x)==3)
return(data.frame(sname=sname,pair=x[1],time=x[2],treatment='T0',individual=sub("^D([^\\.]+.*)\\.CEL","\\1",x[3])))
#x[3]<-paste(x[3],x[4],sep="_",collapse="_")
return(data.frame(sname=sname,pair=x[1],time=x[2],treatment=x[3],individual=sub("^D([^\\.]+.*)\\.CEL","\\1",x[4])))
}))
##fixes D prefix inclusion
sample.df$individual<-sub("D","",sample.df$individual)
##fixed instances where t0 is not named as US
sample.df[sample.df$time=="0" & sample.df$treatment=="T0",]$treatment="US"
sample.df$index<-with(sample.df,paste(pair,individual,sep="."))
pheno.data<-merge(pheno,sample.df,by.x="index",by.y="index",all.x=TRUE)
## expression set filtering
# Drop probes whose maximum expression never reaches the threshold
genes.to.remove<-which(apply(exp.dataset,1,max)<expression.threshold)
if(length(genes.to.remove)>0){
filt.exp<-exp.dataset[-genes.to.remove,]
}else{
filt.exp<-exp.dataset
}
# Remove known failed hybridizations
failed.hybs <-paste0(c("Pair15_t0_D2","Pair15_t2_S_D1","Pair15_t2_S_D2","Pair15_t4_S_D1"),".CEL")
failed.hybs %in% colnames(exp.dataset) # sanity check (printed, not stored)
filt.exp <- filt.exp[, setdiff(colnames(exp.dataset),failed.hybs)]
## for the time being remove any probes that don't have an ensgene id
## array.annotation.file
array.annot.file<-paste0(support.dir,'HuGene-1_0-st-v1.e67.genes2GN-prbsts.tab')
map=read.table(array.annot.file,header=TRUE,stringsAsFactors=FALSE)
ens67.biotype.file<-paste0(support.dir,'e67_gene_biotyes.csv')
biot<-read.csv(ens67.biotype.file)
map<-merge(map,biot,by.x='ens.gene.id',by.y='Ensembl.Gene.ID',all.x=TRUE)
map.filt<-subset(map,!is.na(ens.gene.id) & Gene.Biotype=="protein_coding")
## remove those ensid's that map to multiple entrez id's
## for GO enrichment WGCNA need entrez id code below
## makes sure that this all works at the cost of removing
## genes
#map.filt$entrezId<-xx[map.filt$ens.gene.id]
#map.filt<-map.filt[sapply(map.filt$entrezId,length)==1,]
#map.filt$entrezId<-unlist(map.filt$entrezId)
## removed duplicated entrez id's (prob need to do this better in future)
#map.filt<-map.filt[!duplicated(map.filt$entrezId),]
filt.exp<-filt.exp[which(rownames(filt.exp) %in% map.filt$probeset.id),]
## lookup for relating pheno to colnames -- here position in matrix is location in pheno.data
## value is column in filt.exp
## expset2pheno.data<-match(pheno.data$sname,colnames(filt.exp))
## pheno.data$expsetmatch<-expset2pheno.data
# Align metadata rows with the expression matrix columns
pheno.data <- pheno.data[ match(colnames(filt.exp), pheno.data$sname), ]
with(pheno.data, ftable(time,treatment))
with(pheno.data, ftable(time,treatment)) %>% sum()
## compute change from baseline for each sample
## used for plotting resultant modules
## DE matrix
samples.t0 <- subset(pheno.data,time==0)
samples.t1 <- subset(pheno.data,time!=0 & uniqueID %in% samples.t0$uniqueID)
samples.t1 <- merge(samples.t1,samples.t0[,c("uniqueID","sname")],by="uniqueID",suffixes=c("",".0"))
all(samples.t1$sname %in% colnames(filt.exp)) # sanity check (printed, not stored)
all(samples.t1$sname.0 %in% colnames(filt.exp)) # sanity check (printed, not stored)
treated.t1 <- subset(samples.t1,treatment=="S")
# Expression change relative to the matched t=0 sample
delta.exp <- filt.exp[,samples.t1$sname] - filt.exp[,samples.t1$sname.0]
delta.treated <- filt.exp[,treated.t1$sname] - filt.exp[,treated.t1$sname.0]
## switch this in to work with changes in expression wrt baseline (t=0 unstimulated)
s.exp<-list(all=t(delta.exp),
treated=t(delta.treated))
## correct sample mix up
# PCA reveals two Pair10 t21 samples whose S/US labels are swapped; fix them
pca <- prcomp(t(filt.exp))
tmp <- pheno.data
tmp$pc1 <- pca$x[,"PC1"]
tmp$pc2 <- pca$x[,"PC2"]
ggplot(tmp,aes(x=pc1,y=pc2,col=treatment,pch=time)) + geom_point()
subset(tmp, pc2 > -10 & time==21 & treatment=="S")
subset(tmp, pc2 < -20 & time==21 & treatment=="US")
pheno.data[pheno.data$sname %in% c("Pair10_t21_S_D1.CEL","Pair10_t21_S_D2.CEL"),"treatment"] <- "US"
pheno.data[pheno.data$sname %in% c("Pair10_t21_US_D1.CEL","Pair10_t21_US_D2.CEL"),"treatment"] <- "S"
save(filt.exp,s.exp,map.filt,pheno.data,file=file.expression)
|
518b577069b58ba0afd4b24cc7b7d9f9e0255bb3 | 285541e8ae77482ac7eeb5b51ce06edeb96ef246 | /R/envelopes.r | 11af29ad86420a6939d6802c79832e27e6a4c739 | [] | no_license | myllym/GET | 2033c4f590da7cce114b588e7e39b243b543dcdf | 72988291d9c56b468c5dddfb5bc2c23f519b6dca | refs/heads/master | 2023-08-24T23:23:14.364346 | 2023-08-15T21:33:51 | 2023-08-15T21:33:51 | 68,914,145 | 12 | 5 | null | 2022-11-16T07:55:16 | 2016-09-22T11:20:34 | R | UTF-8 | R | false | false | 66,379 | r | envelopes.r | get_alternative <- function(global_envelope) {
attr(global_envelope, "alternative")
}
# It should be:
# small_significant=TRUE for 'rank', 'erl', 'cont' and 'area' -> ordering decreasing
# small_significant=FALSE for 'qdir', 'st', 'unscaled' -> ordering increasing (decreasing=FALSE)
# Critical value(s) of the ordering measure: the floor((1-alpha)*Nfunc)-th
# order statistic, taken from the "most extreme first" end of the sorted
# measures (decreasing when small values are significant).
critical <- function(distance, alpha, Nfunc, small_significant) {
  k <- floor((1 - alpha) * Nfunc)
  sort(distance, decreasing = small_significant)[k]
}
# Multiple envelopes can be plotted in certain situations.
# These functions return the names of the lower and upper bounds of the envelopes
# that exist in global_envelope objects.
# largest = FALSE: names of all envelopes
# largest = TRUE: the maximal envelope corresponding to smallest alpha
# Name(s) of the lower envelope bound column(s): plain "lo" for a single
# level, otherwise "lo.<coverage%>"; largest=TRUE picks only the widest one.
env_loname <- function(alpha, largest=FALSE) {
  if(length(alpha) <= 1) return("lo")
  if(largest) alpha <- min(alpha)
  paste0("lo.", 100*(1-alpha))
}
# Name(s) of the upper envelope bound column(s): plain "hi" for a single
# level, otherwise "hi.<coverage%>"; largest=TRUE picks only the widest one.
env_hiname <- function(alpha, largest=FALSE) {
  if(length(alpha) <= 1) return("hi")
  if(largest) alpha <- min(alpha)
  paste0("hi.", 100*(1-alpha))
}
# Assemble a 'global_envelope' object from the computed envelope components.
#
# @param type Envelope type ("rank", "erl", "cont", "area", "qdir", "st", "unscaled").
# @param curve_set The (converted) curve_set the envelope was computed from.
# @param LB,UB Lower/upper bound(s); vectors, or matrices for several levels.
# @param T_0 The central function.
# @param picked_attr Plotting attributes picked from the original curve_set.
# @param isenvelope If TRUE, return a spatstat 'fv' object; else a data.frame.
# @param Malpha Critical value(s) of the ordering measure.
# @param alpha Significance level(s).
# @param distance The functional ordering measure of every curve.
# @return An object of class 'global_envelope' (and 'global_envelope2d' for
#   two-dimensional curve sets), carrying type/alpha/M/M_alpha attributes.
# Fix: removed the unused local 'Nfunc' (computed but never referenced).
make_envelope_object <- function(type, curve_set, LB, UB, T_0,
                                 picked_attr, isenvelope,
                                 Malpha, alpha, distance) {
  if(curve_set_is1obs(curve_set)) {
    # Keep the observed function in its own 'obs' column
    df <- data.frame(curve_set_rdf(curve_set), obs=curve_set_1obs(curve_set),
                     central=T_0, lo=LB, hi=UB)
  }
  else {
    df <- data.frame(curve_set_rdf(curve_set), central=T_0, lo=LB, hi=UB)
  }
  if(isenvelope) {
    # spatstat-compatible function value table
    res <- spatstat.explore::fv(x=df, argu = picked_attr[['argu']],
                                ylab = picked_attr[['ylab']], valu = "central", fmla = ". ~ r",
                                alim = c(min(curve_set[['r']]), max(curve_set[['r']])),
                                labl = picked_attr[['labl']], desc = picked_attr[['desc']],
                                unitname = NULL, fname = picked_attr[['fname']], yexp = picked_attr[['yexp']])
    attr(res, "shade") <- c("lo", "hi")
    attr(res, "alternative") <- picked_attr[['alternative']]
  }
  else {
    res <- df
    attrnames <- names(picked_attr)
    for(n in attrnames) attr(res, n) <- picked_attr[[n]]
  }
  class(res) <- c("global_envelope", class(res))
  if(curve_set_is2d(curve_set)) class(res) <- c("global_envelope2d", class(res))
  attr(res, "method") <- "Global envelope"
  attr(res, "type") <- type
  attr(res, "alpha") <- alpha
  attr(res, "M") <- distance
  attr(res, "M_alpha") <- Malpha
  res
}
# Functionality for central regions based on a curve set
# @param ... Ignored.
# Compute a 100*coverage% central region (global envelope) of the curves in
# 'curve_set'.
#
# @param curve_set A curve_set or spatstat 'envelope' object.
# @param type Envelope type: "rank", "erl", "cont", "area", "qdir", "st" or
#   "unscaled".
# @param coverage Coverage level(s) in (0, 1]; several levels yield nested
#   envelopes in the returned object.
# @param alternative "two.sided", "less" or "greater"; one-sided alternatives
#   are valid only for the rank-based types.
# @param probs,quantile.type Quantiles used for the "qdir" envelope.
# @param central "median" (default) or "mean" central function; qdir/st/unscaled
#   always use the mean.
# @return A 'global_envelope' object (an 'fv' object when the input was a
#   spatstat envelope and a single coverage level was requested).
# Fixes: scalar condition now uses &&; seq_len() replaces 1:nr; typo "fiven"
# corrected to "given" in the warning message.
individual_central_region <- function(curve_set, type = "erl", coverage = 0.50,
                                      alternative = c("two.sided", "less", "greater"),
                                      probs = c((1-coverage[1])/2, 1-(1-coverage[1])/2),
                                      quantile.type = 7,
                                      central = "median") {
  isenvelope <- inherits(curve_set, "envelope")
  if(!is.numeric(coverage) || any(coverage <= 0 | coverage > 1)) stop("Unreasonable value of coverage.")
  # Widest envelope first, so that multiple levels come out nested
  coverage <- sort(coverage, decreasing = TRUE)
  alpha <- 1 - coverage
  if(!(type %in% c("rank", "erl", "cont", "area", "qdir", "st", "unscaled")))
    stop("No such type for global envelope.")
  alternative <- match.arg(alternative)
  # Rank-based measures: small values are extreme; deviation-based measures
  # (qdir, st, unscaled): large values are extreme.
  small_significant <- TRUE
  if(type %in% c("qdir", "st", "unscaled")) {
    small_significant <- FALSE
    if(alternative != "two.sided") {
      warning("For qdir, st and unscaled envelopes only the two.sided alternative is valid.")
      alternative <- "two.sided"
    }
  }
  check_probs(probs)
  if(!(central %in% c("mean", "median"))) {
    central <- "median"
    warning("Invalid option given for central. Using central = median.")
  }
  if(central == "median" && type %in% c("qdir", "st", "unscaled")) {
    central <- "mean"
    warning("Using the mean as the central function. qdir, st and unscaled envelopes are defined with the mean.")
  }
  # Save attributes for plotting before the curve_set is converted
  picked_attr <- pick_attributes(curve_set, alternative=alternative)
  if(isenvelope && length(alpha) > 1) {
    # Note: no fv object for multiple coverages
    isenvelope <- FALSE
  }
  curve_set <- convert_to_curveset(curve_set, allfinite=TRUE)
  # Map the envelope type to a functional ordering measure (and scaling)
  measure <- type
  scaling <- ""
  switch(type,
         qdir = {
           measure <- "max"
           scaling <- "qdir"
         },
         st = {
           measure <- "max"
           scaling <- "st"
         },
         unscaled = {
           measure <- "max"
           scaling <- "none"
         })
  distance <- forder(curve_set, measure=measure, scaling=scaling,
                     alternative=alternative, probs=probs, quantile.type=quantile.type)
  all_curves <- data_and_sim_curves(curve_set) # all the functions
  Nfunc <- length(distance) # Number of functions
  nr <- curve_set_narg(curve_set)
  # Define the central curve T_0
  T_0 <- get_T_0(curve_set, central=central)
  # Check reasonability of Nfunc vs alpha
  if(any(Nfunc*alpha < 1-.Machine$double.eps^0.5))
    stop("Number of functions s is only ", Nfunc, ", but smallest alpha is ", min(alpha),
         ". So, s*alpha is ", Nfunc*min(alpha), ".", sep="")
  # The critical value of the measure for each coverage level
  Malpha <- critical(distance, alpha, Nfunc, small_significant)
  #-- 100(1-alpha)% global envelope
  LBounds <- UBounds <- vector(mode="list", length=length(alpha))
  switch(type,
         rank = {
           # Pointwise order statistics at depth Malpha
           for(i in seq_len(nr)) {
             Hod <- sort(all_curves[,i])
             for(ai in seq_along(alpha)) {
               LBounds[[ai]][i]<- Hod[Malpha[ai]]
               UBounds[[ai]][i]<- Hod[Nfunc-Malpha[ai]+1]
             }
           }
         },
         erl =,
         cont =,
         area = {
           # Envelope = pointwise range of the curves whose measure is at
           # least the critical value
           for(ai in seq_along(alpha)) {
             j <- distance >= Malpha[ai]
             for(i in seq_len(nr)){
               lu <- range(all_curves[j,i])
               LBounds[[ai]][i]<- lu[1]
               UBounds[[ai]][i]<- lu[2]
             }
           }
         },
         qdir = { # Note: All coverage levels use same probs
           curve_set_res <- residual(curve_set, use_theo=TRUE)
           quant_m <- curve_set_quant(curve_set_res, probs=probs, type=quantile.type)
           for(ai in seq_along(alpha)) {
             LBounds[[ai]] <- T_0 - Malpha[ai]*abs(quant_m[1,])
             UBounds[[ai]] <- T_0 + Malpha[ai]*abs(quant_m[2,])
           }
         },
         st = {
           sdX <- curve_set_sd(curve_set)
           for(ai in seq_along(alpha)) {
             LBounds[[ai]] <- T_0 - Malpha[ai]*sdX
             UBounds[[ai]] <- T_0 + Malpha[ai]*sdX
           }
         },
         unscaled = {
           for(ai in seq_along(alpha)) {
             LBounds[[ai]] <- T_0 - Malpha[ai]
             UBounds[[ai]] <- T_0 + Malpha[ai]
           }
         })
  # One-sided envelopes: the irrelevant bound is dropped to +/-Inf
  switch(alternative,
         "two.sided" = {},
         "less" = { for(ai in seq_along(alpha)) UBounds[[ai]] <- Inf },
         "greater" = { for(ai in seq_along(alpha)) LBounds[[ai]] <- -Inf })
  if(length(alpha) > 1) { # Multiple envelopes
    names(LBounds) <- names(UBounds) <- paste0(100*coverage)
    LB <- do.call(cbind, LBounds)
    UB <- do.call(cbind, UBounds)
  }
  else {
    LB <- LBounds[[1]]
    UB <- UBounds[[1]]
  }
  res <- make_envelope_object(type, curve_set, LB, UB, T_0,
                              picked_attr, isenvelope,
                              Malpha, alpha, distance)
  attr(res, "call") <- match.call()
  res
}
# Functionality for global envelope tests based on a curve set (individual central region + p-values)
# Global envelope test for a curve_set containing one observed function:
# computes the central region via individual_central_region() and adds the
# Monte Carlo p-value (and, for type="rank", the p-interval) as attributes.
individual_global_envelope_test <- function(curve_set, type = "erl", alpha = 0.05,
                                            alternative = c("two.sided", "less", "greater"),
                                            ties = "erl",
                                            probs = c(0.025, 0.975), quantile.type = 7,
                                            central = "mean") {
  alternative <- match.arg(alternative)
  tmp <- convert_to_curveset(curve_set, allfinite=TRUE, verbose=FALSE)
  if(!curve_set_is1obs(tmp))
    stop("The curve_set does not contain one observed function. Testing does not make sense.\n Did you want to construct a central region of your data? See the function central_region.")
  if(!is.numeric(alpha) || any(alpha < 0 | alpha >= 1)) stop("Unreasonable value of alpha.")
  # The envelope itself; testing at level alpha = coverage 1-alpha
  res <- individual_central_region(curve_set, type=type, coverage=1-alpha,
                                   alternative=alternative,
                                   probs=probs, quantile.type=quantile.type,
                                   central=central)
  # The type of the p-value
  possible_ties <- c('midrank', 'random', 'conservative', 'liberal', 'erl')
  if(!(ties %in% possible_ties)) stop("Unreasonable ties argument!")
  # Measures for functional ordering
  distance <- attr(res, "M")
  #-- Calculate the p-values
  # For rank-based measures the sign is flipped so that large values of u are
  # extreme, matching the convention of estimate_p_value().
  switch(type,
         rank = {
           u <- -distance
           #-- p-interval
           p_low <- estimate_p_value(x=u[1], sim_vec=u[-1], ties='liberal')
           p_upp <- estimate_p_value(x=u[1], sim_vec=u[-1], ties='conservative')
           #-- unique p-value
           # ties="erl" breaks rank ties by the extreme rank length ordering
           if(ties == "erl") {
             distance_lexo <- forder(curve_set, measure="erl", alternative=alternative)
             u_lexo <- -distance_lexo
             p <- estimate_p_value(x=u_lexo[1], sim_vec=u_lexo[-1], ties="conservative")
           }
           else p <- estimate_p_value(x=u[1], sim_vec=u[-1], ties=ties)
         },
         erl = {
           u_lexo <- -distance
           p <- estimate_p_value(x=u_lexo[1], sim_vec=u_lexo[-1], ties="conservative")
         },
         cont = {
           u_cont <- -distance
           p <- estimate_p_value(x=u_cont[1], sim_vec=u_cont[-1], ties="conservative")
         },
         area = {
           u_area <- -distance
           p <- estimate_p_value(x=u_area[1], sim_vec=u_area[-1], ties="conservative")
         },
         qdir = {
           p <- estimate_p_value(x=distance[1], sim_vec=distance[-1])
         },
         st = {
           p <- estimate_p_value(x=distance[1], sim_vec=distance[-1])
         },
         unscaled = {
           p <- estimate_p_value(x=distance[1], sim_vec=distance[-1])
         })
  # Change the "method" attribute
  attr(res, "method") <- paste(attr(res, "method"), " test", sep="")
  # Add attributes related to p-values
  attr(res, "p") <- p
  if(type == "rank") {
    attr(res, "p_interval") <- c(p_low, p_upp)
    attr(res, "ties") <- ties
  }
  # Update "call" attribute
  attr(res, "call") <- match.call()
  res
}
# Functionality for combined_central_region and combined_global_envelope_test (two-step procedure)
# Two-step combined central region / global envelope test over several
# curve_sets: (1) order the curves of each set individually, (2) apply a
# one-sided ERL envelope/test to the vectors of first-stage measures, and
# (3) replace the first-stage bounds by the combined convex-hull bounds.
combined_CR_or_GET <- function(curve_sets, CR_or_GET = c("CR", "GET"), coverage, ...) {
  ntests <- length(curve_sets)
  # NOTE(review): the guard fires only for ntests == 0 although the message
  # refers to one curve_set; confirm whether ntests == 1 should also stop here.
  if(ntests < 1) stop("Only one curve_set, no combining to be done.")
  check_curve_set_dimensions(curve_sets) # Do not catch the curve_set here
  CR_or_GET <- match.arg(CR_or_GET)
  # 1) First stage: Calculate the functional orderings individually for each curve_set
  res_ls <- lapply(curve_sets, FUN = function(x) { individual_central_region(x, ...) })
  type <- attr(res_ls[[1]], "type")
  # 2) Second stage: ERL central region/test
  # Create a curve_set for the ERL test
  # Each row of k_mat holds one first-stage measure; columns index the functions
  k_ls <- lapply(res_ls, FUN = function(x) attr(x, "M"))
  k_mat <- do.call(rbind, k_ls, quote=FALSE)
  Nfunc <- ncol(k_mat)
  # Construct the one-sided ERL central region
  # Direction depends on whether small or large first-stage measures are extreme
  if(type %in% c("qdir", "st", "unscaled")) alt2 <- "greater"
  else alt2 <- "less"
  switch(CR_or_GET,
         CR = {
           curve_set_u <- create_curve_set(list(r=1:ntests, obs=k_mat))
           res_erl <- individual_central_region(curve_set_u, type="erl", coverage=coverage, alternative=alt2)
         },
         GET = {
           curve_set_u <- create_curve_set(list(r=1:ntests, obs=k_mat[,1], sim_m=k_mat[,-1]))
           res_erl <- individual_global_envelope_test(curve_set_u, type="erl", alpha=1-coverage, alternative=alt2)
         }
  )
  res_erl <- envelope_set_labs(res_erl, xlab="Function", ylab="ERL measure")
  attr(res_erl, "labels") <- names(curve_sets)
  coverage <- 1 - attr(res_erl, "alpha") # ordered
  # 3) The 100(1-alpha)% global combined ERL envelope
  distance_lexo_sorted <- sort(attr(res_erl, "M"), decreasing=TRUE)
  Malpha <- distance_lexo_sorted[floor(coverage*Nfunc)]
  LBounds <- UBounds <- vector("list", length(coverage))
  for(ai in seq_along(coverage)) {
    # Indices of the curves from which to calculate the convex hull
    curves_for_envelope_ind <- which(attr(res_erl, "M") >= Malpha[ai])
    # Curves
    curve_sets <- lapply(curve_sets, FUN=convert_to_curveset)
    all_curves_l <- lapply(curve_sets, function(x) { data_and_sim_curves(x) })
    # Curves from which to calculate the convex hull
    curves_for_envelope_l <- lapply(all_curves_l, function(x) { x[curves_for_envelope_ind,] })
    # Bounding curves
    LBounds[[ai]] <- lapply(curves_for_envelope_l, FUN = function(x) { apply(x, MARGIN=2, FUN=min) })
    UBounds[[ai]] <- lapply(curves_for_envelope_l, FUN = function(x) { apply(x, MARGIN=2, FUN=max) })
  }
  # Update the bounding curves (lo, hi) and Malpha to the first level central regions
  if(length(coverage) == 1) { # Use names 'lo' and 'hi'
    for(i in 1:ntests) {
      if(get_alternative(res_ls[[i]]) != "greater") res_ls[[i]]$lo <- LBounds[[1]][[i]]
      if(get_alternative(res_ls[[i]]) != "less") res_ls[[i]]$hi <- UBounds[[1]][[i]]
      attr(res_ls[[i]], "alpha") <- attr(res_ls[[i]], "M_alpha") <- NULL
      attr(res_ls[[i]], "method") <- paste0("1/", ntests, "th of a combined global envelope test")
    }
  }
  else { # Names according to the coverages, i.e. lo.xx, hi.xx where xx represent the levels
    for(i in 1:ntests) {
      if(get_alternative(res_ls[[i]]) != "greater")
        for(ai in seq_along(coverage))
          res_ls[[i]][paste0("lo.", 100*coverage[ai])] <- LBounds[[ai]][[i]]
      if(get_alternative(res_ls[[i]]) != "less")
        for(ai in seq_along(coverage))
          res_ls[[i]][paste0("hi.", 100*coverage[ai])] <- UBounds[[ai]][[i]]
      res_ls[[i]]$lo <- res_ls[[i]]$hi <- NULL
      attr(res_ls[[i]], "alpha") <- attr(res_ls[[i]], "M_alpha") <- NULL
      attr(res_ls[[i]], "method") <- paste0("1/", ntests, "th of a combined global envelope test")
    }
  }
  if(!is.null(names(curve_sets))) names(res_ls) <- names(curve_sets)
  # Return
  # The second-stage (ERL) result and its curve_set are kept as attributes
  attr(res_ls, "level2_ge") <- res_erl
  attr(res_ls, "level2_curve_set") <- curve_set_u
  switch(CR_or_GET,
         CR = {
           attr(res_ls, "method") <- "Combined global envelope"
         },
         GET = {
           attr(res_ls, "method") <- "Combined global test"
         })
  if(!is.null(attr(res_ls[[1]], "argu")))
    res_ls <- envelope_set_labs(res_ls, xlab=attr(res_ls[[1]], "xlab"),
                                ylab=substitute(italic(T(i)), list(i=attr(res_ls[[1]], "argu"))))
  else
    res_ls <- envelope_set_labs(res_ls, xlab=expression(italic(r)),
                                ylab=expression(italic(T(r))))
  attr(res_ls, "alternative") <- get_alternative(res_ls[[1]])
  attr(res_ls, "type") <- type
  attr(res_ls, "alpha") <- 1-coverage
  attr(res_ls, "M") <- attr(res_erl, "M")
  attr(res_ls, "M_alpha") <- attr(res_erl, "M_alpha")
  attr(res_ls, "p") <- attr(res_erl, "p")
  attr(res_ls, "nstep") <- 2
  class(res_ls) <- c("combined_global_envelope", class(res_ls))
  if(curve_set_is2d(curve_sets[[1]]))
    class(res_ls) <- c("combined_global_envelope2d", class(res_ls))
  res_ls
}
# Functionality for combined_central_region and combined_global_envelope_test (one-step procedure)
# One-step combined central region / test: concatenate all curve_sets into a
# single long curve_set, compute one envelope/test on it, and split the result
# back into the original groups of functions.
combined_CR_or_GET_1step <- function(curve_sets, CR_or_GET = c("CR", "GET"), coverage, ...) {
  curve_set <- combine_curve_sets(curve_sets, equalr=TRUE)
  switch(CR_or_GET,
         CR = {
           res <- individual_central_region(curve_set, coverage=coverage, ...)
         },
         GET = {
           res <- individual_global_envelope_test(curve_set, alpha=1-coverage, ...)
         })
  # Transform the envelope to a combined envelope
  nfuns <- length(curve_sets)
  nr <- curve_set_narg(curve_sets[[1]]) # all curve sets have the same
  # Split the envelopes to the original groups
  res_ls <- split(res, f = rep(1:nfuns, each=nr))
  # Set unreasonable attributes of individuals sets of curves to NULL
  for(i in 1:nfuns)
    attr(res_ls[[i]], "method") <- paste0("1/", nfuns, "th of a combined global envelope test")
  # p-values and measures belong only to the combined result, not to the parts
  anames <- c("p", "p_interval", "ties", "M", "M_alpha", "alpha")
  anames <- anames[anames %in% names(attributes(res_ls[[1]]))]
  for(name in anames) {
    for(i in 1:nfuns) attr(res_ls[[i]], name) <- NULL
  }
  # Carry the attributes of the joint envelope over to the list of parts
  mostattributes(res_ls) <- attributes(res)
  attr(res_ls, "row.names") <- NULL
  if(!is.null(names(curve_sets))) names(res_ls) <- names(curve_sets)
  switch(CR_or_GET,
         CR = {
           attr(res_ls, "method") <- "Combined global envelope"
         },
         GET = {
           attr(res_ls, "method") <- "Combined global test"
         })
  attr(res_ls, "nstep") <- 1
  class(res_ls) <- c("combined_global_envelope", "list")
  if(curve_set_is2d(curve_sets[[1]]))
    class(res_ls) <- c("combined_global_envelope2d", class(res_ls))
  res_ls
}
#' Print method for the class 'global_envelope'
#'
#' @param x A 'global_envelope' object.
#' @param ... Ignored.
#' @export
# Delegate all formatting to the shared print helper; extra args are ignored.
print.global_envelope <- function(x, ...) printhelper_ge_base(x)
#' Print method for the class 'combined_global_envelope'
#'
#' @param x A 'combined_global_envelope' object
#' @param ... Ignored.
#' @export
# Delegate all formatting to the shared print helper; extra args are ignored.
print.combined_global_envelope <- function(x, ...) printhelper_ge_combined(x)
#' Plot method for the class 'global_envelope'
#'
#' @details If several envelopes have been computed, they are plotted in different
#' grey scales so that the smallest envelope has the darkest color and the widest
#' envelope consists of all grey scales with the lightest color in the outskirts.
#' @param x An 'global_envelope' object
#' @param dotplot Logical. If TRUE, then instead of envelopes a dot plot is done.
#' Suitable for low dimensional test vectors.
#' Default: TRUE if the dimension is less than 10, FALSE otherwise.
#' @param sign.col The color for the observed curve when outside the global envelope
#' (significant regions). Default to "red". Setting the color to \code{NULL} corresponds
#' to no coloring. If the object contains several envelopes, the coloring is done for
#' the widest one.
#' @param labels A character vector of suitable length.
#' If \code{dotplot = TRUE}, then labels for the tests at x-axis.
#' @param digits The number of digits used for printing the p-value or p-interval
#' in the default main.
#' @param ... Ignored.
#'
#' @export
#' @seealso \code{\link{central_region}}, \code{\link{global_envelope_test}}
#' @examples
#' if(require("spatstat.explore", quietly=TRUE)) {
#' X <- unmark(spruces)
#' \donttest{nsim <- 1999 # Number of simulations}
#' \dontshow{nsim <- 19 # Number of simulations}
#' env <- envelope(X, fun="Kest", nsim=nsim,
#' savefuns=TRUE, # save the functions
#' correction="translate", # edge correction for K
#' simulate=expression(runifpoint(ex=X))) # Simulate CSR
#' res <- global_envelope_test(env, type="erl")
#'
#' # Default plot
#' plot(res)
#' # Plots can be edited, e.g.
#' # Remove legend
#' plot(res) + ggplot2::theme(legend.position="none")
#' # Change its position
#' plot(res) + ggplot2::theme(legend.position="right")
#' # Change the outside color
#' plot(res, sign.col="#5DC863FF")
#' plot(res, sign.col=NULL)
#' # Change default title and x- and y-labels
#' plot(res) + ggplot2::labs(title="95% global envelope", x="x", y="f(x)")
#'
#' # Prior to the plot, you can set your preferred ggplot theme by theme_set
#' old <- ggplot2::theme_set(ggplot2::theme_bw())
#' plot(res)
#'
#' # Do other edits, e.g. turn off expansion with the default limits
#' plot(res) + ggplot2::coord_cartesian(expand=FALSE)
#'
#' # Go back to the old theme
#' ggplot2::theme_set(old)
#'
#' # If you are working with the R package spatstat and its envelope-function,
#' # you can obtain global envelope plots in the style of spatstat using plot.fv:
#' plot.fv(res)
#' }
#' @importFrom ggplot2 labs
plot.global_envelope <- function(x, dotplot = length(x$r)<10, sign.col = "red",
                                 labels = NULL, digits = 3, ...) {
  # The argument values r must be strictly increasing for the envelope
  # plot to be meaningful; warn (but still plot) otherwise.
  r <- x[['r']]
  if(!is.null(r) && !all(diff(r) > 0))
    warning("r values non-increasing. Plot not valid.")
  if(missing(labels)) labels <- default_labels(x, labels)
  # Default main title (p-value information) and default axis labels
  main <- env_main_default(x, digits=digits)
  axlabs <- plotdefaultlabs(x)
  if(!dotplot)
    return(env_ggplot(x, main=main, xlab=axlabs$xlab, ylab=axlabs$ylab,
                      sign.col=sign.col))
  # Low-dimensional case: a dot plot instead of an envelope plot
  env_dotplot_ggplot(x, labels=labels, sign.col=sign.col) +
    labs(title=main, x=axlabs$xlab, y=axlabs$ylab)
}
#' Plot method for the class 'combined_global_envelope'
#'
#' Plotting method for the class 'combined_global_envelope', i.e. combined envelopes for
#' 1d functions.
#'
#' @description This function provides plots for combined global envelopes.
#' @param x An 'combined_global_envelope' object
#' @inheritParams plot.global_envelope
#' @param labels A character vector of suitable length.
#' If \code{dotplot = TRUE} (for the level 2 test), then labels for the tests at x-axis.
#' Otherwise labels for the separate plots.
#' @param scales See \code{\link[ggplot2]{facet_wrap}}.
#' Use \code{scales = "free"} when the scales of the functions in the global envelope
#' vary. \code{scales = "fixed"} is a good choice, when you want the same y-axis for all components.
#' A sensible default based on r-values exists.
#' @param ncol The maximum number of columns for the figures.
#' Default 2 or 3, if the length of x equals 3.
#' (Relates to the number of curve_sets that have been combined.)
#' @param level 1 or 2. In the case of two-step combined tests (with several test functions),
#' two different plots are available:
#' 1 for plotting the combined global envelopes (default and most often wanted) or
#' 2 for plotting the second level test result.
#' @export
#' @seealso \code{\link{central_region}}
plot.combined_global_envelope <- function(x, labels, scales, sign.col = "red",
                                          ncol = 2 + 1*(length(x)==3),
                                          digits = 3, level = 1, ...) {
  if(!(level %in% c(1,2))) stop("Unreasonable value for level.")
  if(missing(scales)) {
    # Use fixed scales when every component covers the same r-range,
    # and free scales otherwise.
    r1 <- range(x[[1]][['r']])
    same_range <- vapply(x, function(y) all(range(y[['r']]) == r1), logical(1))
    scales <- if(all(same_range)) "fixed" else "free"
  }
  alt <- get_alternative(x[[1]])
  main <- env_main_default(x, digits=digits, alternative=alt)
  d <- plotdefaultlabs(x)
  if(level == 2) {
    # Second-level plot: only available for two-step combined envelopes.
    if(attr(x, "nstep") != 2)
      stop("level = 2 plot not available for one-step combined global envelopes.")
    if(missing(labels)) labels <- default_labels(attr(x, "level2_ge"), labels)
    env_dotplot_ggplot(attr(x, "level2_ge"), labels=labels)
  } else {
    # Default: plot the combined global envelopes themselves.
    if(missing(labels)) labels <- default_labels(x, labels)
    env_combined_ggplot(x, main=main, xlab=d$xlab, ylab=d$ylab,
                        labels=labels, scales=scales,
                        max_ncols_of_plots=ncol, sign.col=sign.col)
  }
}
#' Central region / Global envelope
#'
#' Provides central regions or global envelopes or confidence bands
#'
#'
#' Given a \code{curve_set} (see \code{\link{create_curve_set}} for how to create such an object)
#' or an \code{envelope} object of \pkg{spatstat} or \code{fdata} object of \pkg{fda.usc},
#' the function \code{central_region} constructs a central region, i.e. a global envelope,
#' from the given set of functions (or vectors).
#'
#' Generally an envelope is a band bounded by the vectors (or functions)
#' \eqn{T_{low}}{T_lo} and \eqn{T_{hi}}{T_hi}.
#' A \eqn{100(1-\alpha)}{100(1-alpha)}\% or 100*coverage\% global envelope is a set
#' \eqn{(T_{low}, T_{hi})}{(T_lo, T_hi)} of envelope vectors
#' such that the probability that \eqn{T_i}{T_i} falls outside this envelope
#' in any of the d points of the vector \eqn{T_i}{T_i} is less or equal to \eqn{\alpha}{alpha}.
#' The global envelopes can be constructed based on different measures
#' that order the functions from the most extreme one to the least extreme one.
#' We use such orderings of the functions for which we are able to construct global envelopes
#' with intrinsic graphical interpretation.
#'
#' The type of the global envelope can be chosen with the argument \code{type} and
#' the options are given in the following.
#' Further information about the measures, on which the global envelopes are based,
#' can be found in Myllymäki and Mrkvička (2020, Section 2.).
#' \itemize{
#' \item \code{'rank'}: The global rank envelope
#' proposed by Myllymäki et al. (2017) based on the extreme rank defined as the minimum of pointwise
#' ranks.
#' \item \code{'erl'}: The global rank envelope based on the extreme rank
#' length (Myllymäki et al.,2017, Mrkvička et al., 2018).
#' This envelope is constructed as the convex hull of the functions which have extreme rank
#' length measure that is larger or equal to the critical \eqn{\alpha}{alpha} level of the
#' extreme rank length measure.
#' \item \code{'cont'}: The global rank envelope based on the continuous rank
#' (Hahn, 2015; Mrkvička et al., 2019) based on minimum of continuous pointwise ranks.
#' It is constructed as the convex hull in a similar way as the \code{'erl'} envelope.
#' \item \code{'area'}: The global rank envelope based on the area rank (Mrkvička et al., 2019)
#' which is based on area between continuous pointwise ranks and minimum pointwise ranks
#' for those argument (r) values for which pointwise ranks achieve the minimum
#' (it is a combination of erl and cont).
#' It is constructed as the convex hull in a similar way as the \code{'erl'} and \code{'cont'} envelopes.
#' \item \code{'qdir'}: The directional quantile envelope based on
#' the directional quantile maximum absolute deviation (MAD) test (Myllymäki et al., 2017, 2015),
#' which takes into account the unequal variances of the test function T(r) for
#' different distances r and is also protected against asymmetry of distribution of T(r).
#' \item \code{'st'}: The studentised envelope based on the studentised MAD
#' measure (Myllymäki et al., 2017, 2015),
#' which takes into account the unequal variances of the test function T(r) for different distances r.
#' \item \code{'unscaled'}: The unscaled envelope (Ripley, 1981),
#' which leads to envelopes with constant width. It corresponds to the classical
#' maximum deviation test without scaling. This test suffers from unequal variance
#' of T(r) over the distances r and from the asymmetry of distribution of T(r).
#' We recommend to use the other alternatives instead. This unscaled global envelope is
#' provided for reference.
#' }
#'
#' The values of the chosen measure M are determined for each curve in the \code{curve_set}, and
#' based on the chosen measure, the central region, i.e. the global envelope, is constructed
#' for the given curves.
#'
#' If a list of (suitable) objects are provided in the argument \code{curve_sets},
#' then by default (\code{nstep = 2}) the two-step combining procedure is used to
#' construct the combined global envelope as described in Myllymäki and Mrkvička (2020, Section 2.2.).
#' If \code{nstep = 1} and the lengths of the multivariate vectors in each component
#' of the list are equal, then the one-step combining procedure is used where the
#' functions are concatenated together into a one long vector (see again Myllymäki and Mrkvička, 2020, Section 2.2.).
#'
#' @references
#' Mrkvička, T., Myllymäki, M., Jilek, M. and Hahn, U. (2020) A one-way ANOVA test for functional data with graphical interpretation. Kybernetika 56(3), 432-458. doi: 10.14736/kyb-2020-3-0432
#'
#' Mrkvička, T., Myllymäki, M., Kuronen, M. and Narisetty, N. N. (2022) New methods for multiple testing in permutation inference for the general linear model. Statistics in Medicine 41(2), 276-297. doi: 10.1002/sim.9236
#'
#' Myllymäki, M., Grabarnik, P., Seijo, H. and Stoyan. D. (2015). Deviation test construction and power comparison for marked spatial point patterns. Spatial Statistics 11, 19-34. doi: 10.1016/j.spasta.2014.11.004
#'
#' Myllymäki, M., Mrkvička, T., Grabarnik, P., Seijo, H. and Hahn, U. (2017). Global envelope tests for spatial point patterns. Journal of the Royal Statistical Society: Series B (Statistical Methodology) 79, 381-404. doi: 10.1111/rssb.12172
#'
#' Myllymäki, M. and Mrkvička, T. (2020). GET: Global envelopes in R. arXiv:1911.06583 [stat.ME]
#'
#' Ripley, B.D. (1981). Spatial statistics. Wiley, New Jersey.
#'
#' @inheritParams forder
#' @param type The type of the global envelope with current options for 'rank', 'erl', 'cont', 'area',
#' 'qdir', 'st' and 'unscaled'. See details.
#' @param coverage A number between 0 and 1. The 100*coverage\% central region will be calculated.
#' A vector of values can also be provided, leading to the corresponding number of central regions.
#' @param central Either "mean" or "median". If the curve sets do not contain the component
#' \code{theo} for the theoretical central function, then the central function (used for plotting only)
#' is calculated either as the mean or median of functions provided in the curve sets.
#' For 'qdir', 'st' and 'unscaled' only the mean is allowed as an option, due to their definition.
#' @param nstep 1 or 2 for how to construct a combined global envelope if list of curve sets
#' is provided. 2 (default) for a two-step combining procedure, 1 for one-step.
#' @param ... Ignored.
#' @return Either an object of class \code{global_envelope} or a \code{combined_global_envelope} object.
#' The former class is obtained when a set of curves is provided, while the latter in the case
#' that \code{curve_sets} is a list of objects. The print and plot function are defined for the
#' returned objects (see examples).
#'
#' The \code{global_envelope} object is essentially a data frame containing columns
#' \itemize{
#' \item r = the vector of values of the argument r at which the test was made
#' \item lo = the lower envelope based on the simulated functions;
#' in case of a vector of coverage values, several 'lo' exist with names paste0("lo.", 100*coverage)
#' \item hi = the upper envelope based on the simulated functions;
#'   in case of a vector of coverage values, several 'hi' exist with names paste0("hi.", 100*coverage)
#' \item central = If the \code{curve_set} (or \code{envelope} object) contains a theoretical curve,
#' then this function is used as the central curve and returned in this component.
#' Otherwise, the central curve is the mean or median (according to the argument \code{central})
#' of the test functions T_i(r), i=2, ..., s+1. Used for visualization only.
#' }
#' and potentially additionally
#' \itemize{
#' \item obs = the data function, if there is only one data function in the given \code{curve_sets}.
#' Otherwise not existing.
#' }
#' (Most often \code{central_region} is directly applied to functional data where all curves are observed.)
#' Additionally, the returned object has some attributes, where
#' \itemize{
#' \item M = A vector of the values of the chosen measure for all the function.
#' If there is only one observed function, then M[1] gives the value of the measure for this.
#' \item M_alpha = The critical value of M corresponding to the 100(1-alpha)\% global envelope
#' (see Myllymäki and Mrkvička, 2020, Definition 1.1. IGI).
#' }
#' Further the object has some attributes for printing and plotting purposes, where
#' \code{alternative}, \code{type}, \code{ties}, \code{alpha} correspond to those in the function call
#' and \code{method} gives a name for the method.
#' Attributes of an object \code{res} can be obtained using the function
#' \code{\link[base]{attr}}, e.g. \code{attr(res, "M")} for the values of the ordering measure.
#'
#' If the given set of curves had the class \code{envelope} of \pkg{spatstat}, then the returned
#' \code{global_envelope} object has also the class \code{fv} of spatstat, whereby one can utilize
#' also the plotting functions of \pkg{spatstat}, see example in \code{\link{plot.global_envelope}}.
#' However, the \code{envelope} objects are most often used with \code{\link{global_envelope_test}}
#' and not with \code{central_region}.
#' For an \code{fv} object, also some further attributes exists as required by \code{fv} of \pkg{spatstat}.
#'
#' The \code{combined_global_envelope} is a list of \code{global_envelope} objects, where
#' the components correspond to the components of \code{curve_sets}.
#' The \code{combined_global_envelope} object constructed with \code{nstep = 2} contains,
#' in addition to some conventional ones (\code{method}, \code{alternative}, \code{type}, \code{alpha},
#' \code{M}, \code{M_alpha}, see above), the second level envelope information as the attributes
#' \itemize{
#' \item level2_ge = The second level envelope on which the envelope construction is based
#' \item level2_curve_set = The second level \code{curve_set} from which \code{level2_ge} is constructed
#' }
#'
#' In the case that the given curve sets are two-dimensional, i.e., their arguments values are two-dimensional,
#' then the returned objects have in addition to the class \code{global_envelope} or \code{combined_global_envelope},
#' the class \code{global_envelope2d} or \code{combined_global_envelope2d}, respectively. This class is assigned
#' for plotting purposes: For the 2d envelopes, also the default plots are 2d.
#' Otherwise the 1d and 2d objects are similar.
#' @export
#' @seealso \code{\link{forder}}, \code{\link{global_envelope_test}}
#' @aliases global_envelope
#' @examples
#' ## A central region of a set of functions
#' #----------------------------------------
#' if(requireNamespace("fda", quietly=TRUE)) {
#' curve_set <- create_curve_set(list(r=as.numeric(row.names(fda::growth$hgtf)),
#' obs=fda::growth$hgtf))
#' plot(curve_set) + ggplot2::ylab("height")
#' cr <- central_region(curve_set, coverage=0.50, type="erl")
#' plot(cr)
#' }
#'
#' ## Confidence bands for linear or polynomial regression
#' #------------------------------------------------------
#' # Simulate regression data according to the cubic model
#' # f(x) = 0.8x - 1.8x^2 + 1.05x^3 for x in [0,1]
#' par <- c(0,0.8,-1.8,1.05) # Parameters of the true polynomial model
#' res <- 100 # Resolution
#' x <- seq(0, 1, by=1/res); x2=x^2; x3=x^3;
#' f <- par[1] + par[2]*x + par[3]*x^2 + par[4]*x^3 # The true function
#' d <- f + rnorm(length(x), 0, 0.04) # Data
#' # Plot the true function and data
#' plot(f, type="l", ylim=range(d))
#' points(d)
#'
#' # Estimate polynomial regression model
#' reg <- lm(d ~ x + x2 + x3)
#' ftheta <- reg$fitted.values
#' resid0 <- reg$residuals
#' s0 <- sd(resid0)
#'
#' # Bootstrap regression
#' \donttest{B <- 2000 # Number of bootstrap samples}
#' \dontshow{B <- 20 # Number of bootstrap samples}
#' ftheta1 <- array(0, c(B,length(x)))
#' s1 <- array(0,B)
#' for(i in 1:B) {
#' u <- sample(resid0, size=length(resid0), replace=TRUE)
#' reg1 <- lm((ftheta+u) ~ x + x2 + x3)
#' ftheta1[i,] <- reg1$fitted.values
#' s1[i] <- sd(reg1$residuals)
#' }
#'
#' # Centering and scaling
#' meanftheta <- apply(ftheta1, 2, mean)
#' m <- array(0, c(B,length(x)))
#' for(i in 1:B) { m[i,] <- (ftheta1[i,]-meanftheta)/s1[i] }
#'
#' # Central region computation
#' boot.cset <- create_curve_set(list(r=1:length(x), obs=ftheta+s0*t(m)))
#' cr <- central_region(boot.cset, coverage=c(0.50, 0.80, 0.95), type="erl")
#'
#' # Plotting the result
#' plot(cr) + ggplot2::labs(x=expression(italic(x)), y=expression(italic(f(x)))) +
#' ggplot2::geom_point(data=data.frame(id=1:length(d), points=d),
#' ggplot2::aes(x=id, y=points)) + # data points
#' ggplot2::geom_line(data=data.frame(id=1:length(d), points=f),
#' ggplot2::aes(x=id, y=points)) # true function
central_region <- function(curve_sets, type = "erl", coverage = 0.50,
                           alternative = c("two.sided", "less", "greater"),
                           probs = c(0.25, 0.75),
                           quantile.type = 7,
                           central = "median", nstep = 2, ...) {
  if(!is_a_single_curveset(curve_sets)) {
    # A list of curve sets was given: dispatch on its length.
    n <- length(curve_sets)
    if(n == 0) stop("The given list of curve_sets is empty.")
    if(n == 1) {
      # A list holding a single curve_set: unwrap and fall through to the
      # individual central region below.
      curve_sets <- curve_sets[[1]]
    } else {
      # Several curve sets: construct a combined central region, either by
      # the two-step (default) or the one-step combining procedure.
      if(!(nstep %in% c(1,2))) stop("Invalid number of steps (nstep) for combining. Should be 1 or 2.")
      combiner <- if(nstep == 2) combined_CR_or_GET else combined_CR_or_GET_1step
      return(combiner(curve_sets, CR_or_GET="CR", type=type, coverage=coverage,
                      alternative=alternative,
                      probs=probs, quantile.type=quantile.type,
                      central=central, ...))
    }
  }
  # A single curve_set: construct the individual central region.
  individual_central_region(curve_sets, type=type, coverage=coverage,
                            alternative=alternative,
                            probs=probs, quantile.type=quantile.type,
                            central=central, ...)
}
#' Global envelope test
#'
#' Global envelope test, global envelopes and p-values
#'
#'
#' Given a \code{curve_set} (see \code{\link{create_curve_set}} for how to create such an object)
#' or an \code{envelope} object of \pkg{spatstat},
#' which contains both the data curve (or function or vector) \eqn{T_1(r)}{T_1(r)}
#' (in the component \code{obs}) and
#' the simulated curves \eqn{T_2(r),\dots,T_{s+1}(r)}{T_2(r),...,T_(s+1)(r)}
#' (in the component \code{sim_m}),
#' the function \code{global_envelope_test} performs a global envelope test.
#' The functionality of the function is rather similar to the function
#' \code{\link{central_region}}, but in addition to ordering the functions from
#' the most extreme one to the least extreme one using different measures
#' and providing the global envelopes with intrinsic
#' graphical interpretation, p-values are calculated for the test.
#' Thus, while \code{\link{central_region}} can be used to construct global
#' envelopes in a general setting, the function \code{\link{global_envelope_test}}
#' is devoted to testing as its name suggests.
#'
#' The function \code{global_envelope_test} is the main function for global envelope tests
#' (for simple hypotheses).
#' Different \code{type} of global envelope tests can be performed.
#' We use such ordering of the functions for which we are able to construct global
#' envelopes with intrinsic graphical interpretation.
#' \itemize{
#' \item \code{'rank'}: the completely non-parametric rank envelope test (Myllymäki et al., 2017)
#' based on minimum of pointwise ranks
#' \item \code{'erl'}: the completely non-parametric rank envelope test based on extreme rank lengths
#' (Myllymäki et al., 2017; Mrkvička et al., 2018) based on number of minimal pointwise ranks
#' \item \code{'cont'}: the completely non-parametric rank envelope test based on continuous rank
#' (Hahn, 2015; Mrkvička et al., 2019) based on minimum of continuous pointwise ranks
#' \item \code{'area'}: the completely non-parametric rank envelope test based on area rank
#' (Mrkvička et al., 2019) based on area between continuous pointwise ranks and minimum
#' pointwise ranks for those argument (r) values for which pointwise ranks achieve the minimum
#' (it is a combination of erl and cont)
#' \item "qdir", the directional quantile envelope test, protected against unequal variance and
#' asymmetry of T(r) for different distances r (Myllymäki et al., 2015, 2017)
#' \item "st", the studentised envelope test, protected against unequal variance of T(r) for
#' different distances r (Myllymäki et al., 2015, 2017)
#' \item "unscaled", the unscaled envelope (providing a baseline) that has a contant width and
#' that corresponds to the classical maximum deviation test (Ripley, 1981).
#' }
#' The first four types are global rank envelopes.
#' The \code{'rank'} envelope test is a completely non-parametric test,
#' which provides the 100(1-alpha)\% global envelope for the chosen test function
#' T(r) on the chosen interval of distances and associated p-values.
#' The other three are modifications of \code{'rank'} to treat the ties in
#' the extreme rank ordering on which the \code{'rank'} test is based on.
#' The last three envelopes are global scaled maximum absolute difference (MAD)
#' envelope tests. The unscaled envelope test leads to envelopes with constant width over the
#' distances r. Thus, it suffers from unequal variance of T(r) over the distances r and
#' from the asymmetry of distribution of T(r). We recommend to use the other global
#' envelope tests available. The unscaled envelope is provided as a reference.
#'
#' See Myllymäki and Mrkvička (2020, Section 2.), i.e. \code{vignette("GET")}, for more detailed description of the measures and
#' the corresponding envelopes.
#'
#' See \code{vignette("pointpatterns")} for examples of point pattern analyses.
#' @section Procedure:
#' 1) First the curves are ranked from the most extreme one to the least extreme one
#' by a measure that is specified by the argument \code{type}. The options are
#' \itemize{
#' \item 'rank': extreme ranks (Myllymäki et al., 2017)
#' \item 'erl': extreme rank lengths (Myllymäki et al., 2017; Mrkvička et al., 2018)
#' \item 'cont': continuous ranks (Hahn, 2015; Mrkvička et al., 2019)
#' \item 'area': area ranks (Mrkvička et al., 2019)
#' \item 'qdir': the directional quantile maximum absolute deviation (MAD) measure (Myllymäki et al., 2015, 2017)
#' \item 'st': the studentized MAD measure (Myllymäki et al., 2015, 2017)
#' \item 'unscaled': the unscaled MAD measure (Ripley, 1981)
#' }
#'
#' 2) Based on the measures used to rank the functions, the 100(1-alpha)\% global envelope is provided.
#' It corresponds to the 100*coverage\% central region.
#'
#' 3) P-values:
#' In the case \code{type="rank"}, based on the extreme ranks \eqn{k_i, i=1, ..., s+1}{k_i, i=1, ..., s+1},
#' the p-interval is calculated. Because the extreme ranks contain ties, there is not just
#' one p-value. The p-interval is given by the most liberal and the most conservative p-value
#' estimate. Also a single p-value is calculated.
#' By default this single p-value is the extreme rank length p-value ("erl") as specified by the argument \code{ties}.
#' In the case of other measures, a (single) p-value based on the given ordering
#' of the functions is calculated and returned in the attribute \code{p}.
#'
#' @section Number of simulations:
#' For the global \code{"rank"} envelope test, Myllymäki et al. (2017) recommended to use
#' at least 2500 simulations for testing at the significance level alpha = 0.05 for single
#' function tests, based on experiments with summary functions for point processes evaluated
#' approximately at 500 argument values.
#' In this case, the width of the p-interval associated with the extreme rank measure tended
#' to be smaller than 0.01.
#' The tests \code{'erl'}, \code{'cont'} and \code{'area'}, similarly as
#' the MAD deviation/envelope tests \code{'qdir'}, \code{'st'} and \code{'unscaled'},
#' allow in principle a lower number of simulations to be used than the test based on
#' extreme ranks (\code{'rank'}), because no ties occur for these measures.
#' If affordable, we recommend in any case some thousands of simulations for all the measures
#' to achieve a good power and repeatability of the test.
#' If the dimension of the test functions is higher, also the number of simulations should
#' preferably be higher.
#'
#' @section Tests based on several functions:
#' If a list of (suitable) objects are provided in the argument \code{curve_sets},
#' then by default (\code{nstep = 2}) the two-step combining procedure is used to
#' perform the combined global test as described in Myllymäki and Mrkvička (2020).
#' If \code{nstep = 1} and the lengths of the multivariate vectors in each component
#' of the list are equal, then the one-step combining procedure is used where the
#' functions are concatenated together into a one long vector.
#'
#' @references
#' Mrkvička, T., Myllymäki, M. and Hahn, U. (2017). Multiple Monte Carlo testing, with applications in spatial point processes. Statistics & Computing 27(5), 1239-1255. doi: 10.1007/s11222-016-9683-9
#'
#' Mrkvička, T., Myllymäki, M., Jilek, M. and Hahn, U. (2020) A one-way ANOVA test for functional data with graphical interpretation. Kybernetika 56(3), 432-458. doi: 10.14736/kyb-2020-3-0432
#'
#' Mrkvička, T., Myllymäki, M., Kuronen, M. and Narisetty, N. N. (2022) New methods for multiple testing in permutation inference for the general linear model. Statistics in Medicine 41(2), 276-297. doi: 10.1002/sim.9236
#'
#' Myllymäki, M., Grabarnik, P., Seijo, H. and Stoyan. D. (2015). Deviation test construction and power comparison for marked spatial point patterns. Spatial Statistics 11, 19-34. doi: 10.1016/j.spasta.2014.11.004
#'
#' Myllymäki, M., Mrkvička, T., Grabarnik, P., Seijo, H. and Hahn, U. (2017). Global envelope tests for spatial point patterns. Journal of the Royal Statistical Society: Series B (Statistical Methodology) 79, 381–404. doi: 10.1111/rssb.12172
#'
#' Myllymäki, M. and Mrkvička, T. (2020). GET: Global envelopes in R. arXiv:1911.06583 [stat.ME]
#'
#' Ripley, B.D. (1981). Spatial statistics. Wiley, New Jersey.
#'
#' @inheritParams central_region
#' @param curve_sets A \code{curve_set} (see \code{\link{create_curve_set}})
#' or an \code{envelope} object of \pkg{spatstat} containing a data function and simulated functions.
#' If an envelope object is given, it must contain the summary
#' functions from the simulated patterns which can be achieved by setting
#' \code{savefuns = TRUE} when calling the \code{envelope} function.
#' Alternatively, a list of \code{curve_set} or \code{envelope} objects can be given.
#' @param alpha The significance level. The 100(1-alpha)\% global envelope will be calculated.
#' If a vector of values is provided, the global envelopes are calculated for each value.
#' @param ties The method to obtain a unique p-value when \code{type = 'rank'}.
#' Possible values are 'midrank', 'random', 'conservative', 'liberal' and 'erl'.
#' For 'conservative' the resulting p-value will be the highest possible.
#' For 'liberal' the p-value will be the lowest possible.
#' For 'random' the rank of the obs within the tied values is uniformly sampled so that the resulting
#' p-value is at most the conservative option and at least the liberal option.
#' For 'midrank' the mid-rank within the tied values is taken.
#' For 'erl' the extreme rank length p-value is calculated.
#' The default is 'erl'.
#' @param ... Additional parameters to be passed to \code{\link{central_region}}.
#' @return Either an object of class "global_envelope" or "combined_global_envelope",
#' similarly as the objects returned by \code{\link{central_region}}.
#'
#' The \code{global_envelope} is essentially a data frame containing columns
#' \itemize{
#' \item the values of the argument r at which the test was made, copied from the argument \code{curve_sets} with the corresponding names
#' \item obs = values of the data function, copied from the argument \code{curve_sets}
#' (unlike for central regions, \code{obs} always exists for a global envelope test)
#' \item lo = the lower envelope; in case of a vector of alpha values, several 'lo' exist with names paste0("lo.", 100*(1-alpha))
#' \item hi = the upper envelope; in case of a vector of alpha values, several 'hi' exist with names paste0("hi.", 100*(1-alpha))
#' \item central = a central curve as specified in the argument \code{central}.
#' }
#' Moreover, the returned object has the same attributes as the \code{global_envelope} object returned by
#' \code{\link{central_region}} and in addition
#' \itemize{
#' \item p = A point estimate for the p-value (default is the mid-rank p-value).
#' }
#' and in the case that \code{type = 'rank'} also
#' \itemize{
#' \item p_interval = The p-value interval \eqn{[p_{liberal}, p_{conservative}]}{[p_liberal, p_conservative]}.
#' \item ties = As the argument \code{ties}.
#' }
#'
#' The \code{combined_global_envelope} is a list of \code{global_envelope} objects
#' containing the above mentioned columns and which all together form the global envelope.
#' It has the same attributes as described in \code{\link{central_region}}, and in addition also
#' the p-value \code{p}.
#' The 2d classes are attached as described in \code{\link{central_region}}.
#' @export
#' @seealso \code{\link{plot.global_envelope}}, \code{\link{central_region}},
#' \code{\link{GET.composite}}
#' @examples
#' # Goodness-of-fit testing for simple hypothesis
#' if(require("spatstat.explore", quietly=TRUE)) {
#' # Testing complete spatial randomness (CSR)
#' #==========================================
#' X <- unmark(spruces)
#'
#' \donttest{nsim <- 1999 # Number of simulations}
#' \dontshow{nsim <- 19 # Number of simulations}
#'
#' # Illustration of general workflow for simple hypotheses
#' #=======================================================
#' # First illustrate the general workflow for the test by this example
#' # of CSR test for a point pattern X using the empirical L-function.
#' # Define the argument values at which the functions are evaluated
#' obs.L <- Lest(X, correction="translate")
#' r <- obs.L[['r']]
#' # The test function for the data
#' obs <- obs.L[['trans']] - r
#' # Prepare simulations and calculate test functions for them at same r as 'obs'
#' sim <- matrix(nrow=length(r), ncol=nsim)
#' for(i in 1:nsim) {
#' sim.X <- runifpoint(ex=X) # simulation under CSR
#' sim[, i] <- Lest(sim.X, correction="translate", r=r)[['trans']] - r
#' }
#' # Create a curve_set containing argument values, observed and simulated functions
#' cset <- create_curve_set(list(r=r, obs=obs, sim_m=sim))
#' # Perform the test
#' res <- global_envelope_test(cset, type="erl")
#' plot(res) + ggplot2::ylab(expression(italic(hat(L)(r)-r)))
#'
#' # Simple hypothesis for a point pattern utilizing the spatstat package
#' #=====================================================================
#' # Generate nsim simulations under CSR, calculate L-function for the data and simulations
#' env <- envelope(X, fun="Lest", nsim=nsim,
#' savefuns=TRUE, # save the functions
#' correction="translate", # edge correction for L
#' transform=expression(.-r), # centering
#' simulate=expression(runifpoint(ex=X))) # Simulate CSR
#' # The rank envelope test (ERL)
#' res <- global_envelope_test(env, type="erl")
#' # Plot the result
#' plot(res)
#'
#' ## Advanced use:
#' # Choose the interval of distances [r_min, r_max] (at the same time create a curve_set from 'env')
#' cset <- crop_curves(env, r_min=1, r_max=7)
#' # Do the rank envelope test (erl)
#' res <- global_envelope_test(cset, type="erl")
#' plot(res) + ggplot2::ylab(expression(italic(L(r)-r)))
#'
#' # A combined global envelope test
#' #================================
#' # As an example test CSR of the saplings point pattern by means of
#' # L, F, G and J functions.
#' data(saplings)
#' X <- as.ppp(saplings, W=square(75))
#'
#' \donttest{nsim <- 499 # Number of simulations}
#' \dontshow{nsim <- 19 # Number of simulations}
#' # Specify distances for different test functions
#' n <- 500 # the number of r-values
#' rmin <- 0; rmax <- 20; rstep <- (rmax-rmin)/n
#' rminJ <- 0; rmaxJ <- 8; rstepJ <- (rmaxJ-rminJ)/n
#' r <- seq(0, rmax, by=rstep) # r-distances for Lest
#' rJ <- seq(0, rmaxJ, by=rstepJ) # r-distances for Fest, Gest, Jest
#' \dontshow{r <- r[1:50]; rJ <- rJ[1:50]}
#'
#' # Perform simulations of CSR and calculate the L-functions
#' env_L <- envelope(X, nsim=nsim,
#' simulate=expression(runifpoint(ex=X)),
#' fun="Lest", correction="translate",
#' transform=expression(.-r), # Take the L(r)-r function instead of L(r)
#' r=r, # Specify the distance vector
#' savefuns=TRUE, # Save the estimated functions
#' savepatterns=TRUE) # Save the simulated patterns
#' # Take the simulations from the returned object
#' simulations <- attr(env_L, "simpatterns")
#' # Then calculate the other test functions F, G, J for each simulated pattern
#' env_F <- envelope(X, nsim=nsim, simulate=simulations,
#' fun="Fest", correction="Kaplan", r=rJ,
#' savefuns=TRUE)
#' env_G <- envelope(X, nsim=nsim, simulate=simulations,
#' fun="Gest", correction="km", r=rJ,
#' savefuns=TRUE)
#' env_J <- envelope(X, nsim=nsim, simulate=simulations,
#' fun="Jest", correction="none", r=rJ,
#' savefuns=TRUE)
#'
#' # Crop the curves to the desired r-interval I
#' curve_set_L <- crop_curves(env_L, r_min=rmin, r_max=rmax)
#' curve_set_F <- crop_curves(env_F, r_min=rminJ, r_max=rmaxJ)
#' curve_set_G <- crop_curves(env_G, r_min=rminJ, r_max=rmaxJ)
#' curve_set_J <- crop_curves(env_J, r_min=rminJ, r_max=rmaxJ)
#'
#' res <- global_envelope_test(curve_sets=list(L=curve_set_L, F=curve_set_F,
#' G=curve_set_G, J=curve_set_J))
#' plot(res)
#' plot(res, labels=c("L(r)-r", "F(r)", "G(r)", "J(r)"))
#' }
#'
#' # A test based on a low dimensional random vector
#' #================================================
#' # Let us generate some example data.
#' X <- matrix(c(-1.6,1.6),1,2) # data pattern X=(X_1,X_2)
#' if(requireNamespace("mvtnorm", quietly=TRUE)) {
#' Y <- mvtnorm::rmvnorm(200,c(0,0),matrix(c(1,0.5,0.5,1),2,2)) # simulations
#' plot(Y, xlim=c(min(X[,1],Y[,1]), max(X[,1],Y[,1])), ylim=c(min(X[,2],Y[,2]), max(X[,2],Y[,2])))
#' points(X, col=2)
#'
#' # Test the null hypothesis is that X is from the distribution of Y's (or if it is an outlier).
#'
#' # Case 1. The test vector is (X_1, X_2)
#' cset1 <- create_curve_set(list(r=1:2, obs=as.vector(X), sim_m=t(Y)))
#' res1 <- global_envelope_test(cset1)
#' plot(res1)
#'
#' # Case 2. The test vector is (X_1, X_2, (X_1-mean(Y_1))*(X_2-mean(Y_2))).
#' t3 <- function(x, y) { (x[,1]-mean(y[,1]))*(x[,2]-mean(y[,2])) }
#' cset2 <- create_curve_set(list(r=1:3, obs=c(X[,1],X[,2],t3(X,Y)), sim_m=rbind(t(Y), t3(Y,Y))))
#' res2 <- global_envelope_test(cset2)
#' plot(res2)
#' }
global_envelope_test <- function(curve_sets, type = "erl", alpha = 0.05,
                                 alternative = c("two.sided", "less", "greater"),
                                 ties = "erl", probs = c(0.025, 0.975), quantile.type = 7,
                                 central = "mean", nstep = 2, ...) {
  # Dispatch: a list of several curve sets triggers a combined test (one- or
  # two-step, chosen by 'nstep'); a single curve set (bare, or wrapped in a
  # one-element list) is handled by the individual test.
  if(!is_a_single_curveset(curve_sets)) {
    n_sets <- length(curve_sets)
    if(n_sets == 0) stop("The given list of curve_sets is empty.")
    if(n_sets > 1) { # Combined test
      if(!(nstep %in% c(1,2))) stop("Invalid number of steps (nstep) for combining. Should be 1 or 2.")
      # Pick the combining procedure once, then forward the shared arguments.
      combiner <- if(nstep == 2) combined_CR_or_GET else combined_CR_or_GET_1step
      return(combiner(curve_sets, CR_or_GET="GET", type=type, coverage=1-alpha,
                      alternative=alternative,
                      probs=probs, quantile.type=quantile.type,
                      central=central, ...))
    }
    # Exactly one curve set given inside a list: unwrap it.
    curve_sets <- curve_sets[[1]]
  }
  individual_global_envelope_test(curve_sets, type=type, alpha=alpha,
                                  alternative=alternative, ties=ties,
                                  probs=probs, quantile.type=quantile.type,
                                  central=central, ...)
}
#' The rank envelope test
#'
#' The rank envelope test, p-values and global envelopes.
#' The test corresponds to the global envelope test that can be carried out by
#' \code{\link{global_envelope_test}} by specifying the \code{type} for which the options
#' \code{"rank"}, \code{"erl"}, \code{"cont"} and \code{"area"} are available. The last
#' three are modifications of the first one to treat the ties in the extreme rank ordering
#' used in \code{"rank"}. This function is kept for historical reasons.
#'
#' The \code{"rank"} envelope test is a completely non-parametric test, which provides
#' the 100(1-alpha)\% global envelope for the chosen test function T(r) on
#' the chosen interval of distances and associated p-values.
#' The other three types are solutions to break the ties in the extreme ranks
#' on which the \code{"rank"} envelope test is based on.
#'
#' Note: The method to break ties for the global \code{type = "rank"} envelope
#' (Myllymäki et al., 2017) can be done by the argument \code{ties} with default
#' to \code{ties = "erl"} corresponding to the extreme rank length breaking of ties.
#' In this case the global envelope corresponds to the extreme rank measure.
#' If instead choosing \code{type} to be \code{"erl"}, \code{"cont"} or \code{"area"},
#' then the global envelope corresponds to these measures.
#'
#' @section Number of simulations:
#' The global \code{"erl"}, \code{"cont"}, \code{"area"} envelope tests allow
#' in principle a lower number of simulations to be used than the global \code{"rank"} test
#' based on extreme ranks.
#' However, if feasible, we recommend some thousands of simulations in any case to achieve
#' a good power and repeatability of the test.
#' For the global \code{"rank"} envelope test, Myllymäki et al. (2017) recommended to use
#' at least 2500 simulations for testing at the significance level alpha = 0.05 for single
#' function tests, experimented with summary functions for point processes.
#'
#' @references
#' Myllymäki, M., Mrkvička, T., Grabarnik, P., Seijo, H. and Hahn, U. (2017). Global envelope tests for spatial point patterns. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 79: 381–404. doi: 10.1111/rssb.12172
#'
#' Mrkvička, T., Myllymäki, M. and Hahn, U. (2017). Multiple Monte Carlo testing, with applications in spatial point processes. Statistics & Computing 27 (5): 1239-1255. doi: 10.1007/s11222-016-9683-9
#'
#' Mrkvička, T., Myllymäki, M., Jilek, M. and Hahn, U. (2020) A one-way ANOVA test for functional data with graphical interpretation. Kybernetika 56 (3), 432-458. doi: 10.14736/kyb-2020-3-0432
#'
#' @param curve_set A curve_set (see \code{\link{create_curve_set}}) or an \code{envelope}
#' object of \pkg{spatstat}. If an envelope object is given, it must contain the summary
#' functions from the simulated patterns which can be achieved by setting
#' savefuns = TRUE when calling the function of \pkg{spatstat}.
#' @param type The type of the global envelope with current options for "rank", "erl", "cont" and "area".
#' If "rank", the global rank envelope accompanied by the p-interval is given (Myllymäki et al., 2017).
#' If "erl", the global rank envelope based on extreme rank lengths accompanied by the extreme rank
#' length p-value is given (Myllymäki et al., 2017, Mrkvička et al., 2018). See details and additional
#' sections thereafter.
#' @param ... Additional parameters to be passed to \code{\link{global_envelope_test}}.
#' @return An object of class \code{global_envelope} or \code{combined_global_envelope}
#' which can be printed and plotted directly. See \code{\link{global_envelope_test}} for more details.
#' @export
#' @seealso \code{\link{global_envelope_test}}
#' @examples
#' # See ?global_envelope_test for more examples
#'
#' ## Testing complete spatial randomness (CSR)
#' #-------------------------------------------
#' if(require("spatstat.explore", quietly=TRUE)) {
#' X <- unmark(spruces)
#' \donttest{nsim <- 2499 # Number of simulations}
#' \dontshow{nsim <- 19 # Number of simulations for testing}
#' # Generate nsim simulations under CSR, calculate centred L-function for the data and simulations
#' env <- envelope(X, fun="Lest", nsim=nsim, savefuns=TRUE,
#' correction="translate", transform=expression(.-r),
#' simulate=expression(runifpoint(ex=X)))
#' # The rank envelope test
#' res <- rank_envelope(env)
#' # Plot the result.
#' plot(res)
#'
#' ## Advanced use:
#' # Choose the interval of distances [r_min, r_max] (at the same time create a curve_set from 'env')
#' curve_set <- crop_curves(env, r_min=1, r_max=7)
#' # Do the rank envelope test
#' res <- rank_envelope(curve_set); plot(res)
#' }
rank_envelope <- function(curve_set, type = "rank", ...) {
  # Only the rank-based envelope types are valid here; everything else is
  # rejected before delegating to global_envelope_test().
  allowed_types <- c("rank", "erl", "cont", "area")
  if(!(type %in% allowed_types)) stop("No such type for the global rank envelope.")
  global_envelope_test(curve_set, type=type, ...)
}
#' Global scaled maximum absolute difference (MAD) envelope tests
#'
#' Performs the global scaled MAD envelope tests, either directional quantile or studentised,
#' or the unscaled MAD envelope test. These tests correspond to calling the
#' function \code{\link{global_envelope_test}} with \code{type="qdir"}, \code{type = "st"} and
#' \code{type="unscaled"}, respectively. The functions \code{qdir_envelope}, \code{st_envelope} and
#' \code{unscaled_envelope} have been kept for historical reasons;
#' preferably use \code{\link{global_envelope_test}} with the suitable \code{type} argument.
#'
#' The directional quantile envelope test (Myllymäki et al., 2015, 2017)
#' takes into account the unequal variances of the test function T(r)
#' for different distances r and is also protected against asymmetry of T(r).
#'
#' @references
#' Myllymäki, M., Grabarnik, P., Seijo, H. and Stoyan. D. (2015). Deviation test construction and power comparison for marked spatial point patterns. Spatial Statistics 11: 19-34. doi: 10.1016/j.spasta.2014.11.004
#'
#' Myllymäki, M., Mrkvička, T., Grabarnik, P., Seijo, H. and Hahn, U. (2017). Global envelope tests for spatial point patterns. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 79: 381–404. doi: 10.1111/rssb.12172
#'
#' @inheritParams rank_envelope
#' @return An object of class \code{global_envelope} or \code{combined_global_envelope}
#' which can be printed and plotted directly. See \code{\link{global_envelope_test}} for more details.
#' @export
#' @name qdir_envelope
#' @seealso \code{\link{global_envelope_test}}
#' @examples
#' # See more examples in ?global_envelope_test
#' ## Testing complete spatial randomness (CSR)
#' #-------------------------------------------
#' if(require("spatstat.explore", quietly=TRUE)) {
#' X <- spruces
#' \donttest{nsim <- 999 # Number of simulations}
#' \dontshow{nsim <- 19 # Number of simulations for testing}
#' ## Test for complete spatial randomness (CSR)
#' # Generate nsim simulations under CSR, calculate centred L-function for the data and simulations
#' env <- envelope(X, fun="Lest", nsim=nsim, savefuns=TRUE,
#' correction="translate", transform=expression(.-r),
#' simulate=expression(runifpoint(ex=X)))
#' res_qdir <- qdir_envelope(env) # The directional quantile envelope test
#' plot(res_qdir)
#'
#' ## Advanced use:
#' # Create a curve set, choosing the interval of distances [r_min, r_max]
#' curve_set <- crop_curves(env, r_min=1, r_max=8)
#' # The directional quantile envelope test
#' res_qdir <- qdir_envelope(curve_set); plot(res_qdir)
#' # The studentised envelope test
#' res_st <- st_envelope(curve_set); plot(res_st)
#' # The unscaled envelope test
#' res_unscaled <- unscaled_envelope(curve_set); plot(res_unscaled)
#' }
qdir_envelope <- function(curve_set, ...) {
  # Directional quantile MAD envelope test: delegates to
  # global_envelope_test() with 'type' fixed to "qdir".
  args <- list(...)
  if("type" %in% names(args)) {
    warning("type is hardcoded to be qdir here. No other options.")
    # Bug fix: drop the caller-supplied 'type' before forwarding. Previously it
    # was passed through '...' in addition to the hardcoded type="qdir", which
    # made R stop with 'formal argument "type" matched by multiple actual
    # arguments' instead of overriding it as the warning promises.
    args$type <- NULL
  }
  do.call(global_envelope_test, c(list(curve_set, type="qdir"), args))
}
#' Studentised envelope test
#'
#' @details The studentised envelope test (Myllymäki et al., 2015, 2017)
#' takes into account the unequal variances of the test function T(r)
#' for different distances r.
#'
#' @export
#' @rdname qdir_envelope
st_envelope <- function(curve_set, ...) {
  # Studentised MAD envelope test: delegates to global_envelope_test()
  # with 'type' fixed to "st".
  args <- list(...)
  if("type" %in% names(args)) {
    warning("type is hardcoded to be st here. No other options.")
    # Bug fix: drop the caller-supplied 'type' before forwarding; otherwise it
    # is passed through '...' alongside the hardcoded type="st" and R stops
    # with 'formal argument "type" matched by multiple actual arguments'.
    args$type <- NULL
  }
  do.call(global_envelope_test, c(list(curve_set, type="st"), args))
}
#' Unscaled envelope test
#'
#' @details The unscaled envelope test (Ripley, 1981) corresponds to the classical maximum
#' deviation test without scaling, and leads to envelopes with constant width over the distances r.
#' Thus, it suffers from unequal variance of T(r) over the distances r and from the asymmetry of
#' distribution of T(r). We recommend to use the other global envelope tests available,
#' see \code{\link{global_envelope_test}} for full list of alternatives.
#'
#' @references
#' Ripley, B.D. (1981). Spatial statistics. Wiley, New Jersey.
#' @export
#' @rdname qdir_envelope
unscaled_envelope <- function(curve_set, ...) {
  # Unscaled (classical maximum deviation) envelope test: delegates to
  # global_envelope_test() with 'type' fixed to "unscaled".
  args <- list(...)
  if("type" %in% names(args)) {
    warning("type is hardcoded to be unscaled here. No other options.")
    # Bug fix: drop the caller-supplied 'type' before forwarding; otherwise it
    # is passed through '...' alongside the hardcoded type="unscaled" and R
    # stops with 'formal argument "type" matched by multiple actual arguments'.
    args$type <- NULL
  }
  do.call(global_envelope_test, c(list(curve_set, type="unscaled"), args))
}
|
da59b6f7e9c032e62368365daa43da35c604bf60 | a59b0019cd455e5c8c59263d5248b388eb235257 | /man/model_concurvity.Rd | 56e4ecce309aefbea404e547c281e60a15a2304d | [
"MIT"
] | permissive | dill/gratia | 4df529f5e636a0139f5c355b52a2924bebf7aca4 | 26c3ece0e6a6298ab002b02019b0ea482d21dace | refs/heads/master | 2023-04-08T18:35:18.730888 | 2023-03-20T12:52:33 | 2023-03-20T12:52:33 | 160,169,115 | 0 | 0 | NOASSERTION | 2018-12-03T09:54:30 | 2018-12-03T09:54:30 | null | UTF-8 | R | false | true | 1,320 | rd | model_concurvity.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/concurvity.R
\name{model_concurvity}
\alias{model_concurvity}
\alias{model_concurvity.gam}
\alias{concrvity}
\title{Concurvity of an estimated GAM}
\usage{
model_concurvity(model, ...)
\method{model_concurvity}{gam}(
model,
terms = everything(),
type = c("all", "estimate", "observed", "worst"),
pairwise = FALSE,
...
)
concrvity(
model,
terms = everything(),
type = c("all", "estimate", "observed", "worst"),
pairwise = FALSE,
...
)
}
\arguments{
\item{model}{a fitted GAM. Currently only objects of class \code{"gam"} are
supported}
\item{...}{arguments passed to other methods.}
\item{terms}{currently ignored}
\item{type}{character;}
\item{pairwise}{logical; extract pairwise concurvity of model terms?}
}
\description{
Concurvity of an estimated GAM
}
\examples{
## simulate data with concurvity...
library("tibble")
load_mgcv()
set.seed(8)
n <- 200
df <- tibble(t = sort(runif(n)),
x = gw_f2(t) + rnorm(n) * 3,
y = sin(4 * pi * t) + exp(x / 20) + rnorm(n) * 0.3)
## fit model
m <- gam(y ~ s(t, k = 15) + s(x, k = 15), data = df, method = "REML")
## overall concurvity
o_conc <- concrvity(m)
draw(o_conc)
## pairwise concurvity
p_conc <- concrvity(m, pairwise = TRUE)
draw(p_conc)
}
|
f949ee60b385d7fe065e7cdf50620dfa1f4601ca | e915e0575109709510caa64081fd8292e2dcc6cc | /Plot6.R | 68d8f94121555cb9bfa627db74611d391a3c887a | [] | no_license | ATidmore/ExData_CourseProject2 | 9c5f2a50ede72f63771b2edc599fd44969690f44 | ec2b80855ec80b3142a3d5e978c3b88d80e241df | refs/heads/master | 2020-12-24T14:01:20.527812 | 2015-06-18T03:00:24 | 2015-06-18T03:00:24 | 37,633,694 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 896 | r | Plot6.R | setwd("~/Training/CourseraDataScience/4Exploratory/Week3/exdata-data-NEI_data")
# Course-project script: compare motor-vehicle PM2.5 emissions over time in
# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
# Inputs (read from the current working directory):
#   summarySCC_PM25.rds            - NEI emissions records
#   Source_Classification_Code.rds - SCC source-code lookup table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# To put the images in a better directory...
# NOTE(review): hard-coded setwd() makes this script machine-specific; the
# readRDS() calls above already ran against the previous working directory.
setwd("~/Training/CourseraDataScience/4Exploratory/Week3/")
library(ggplot2)
# Compare Baltimore City (fips = 24510) with LA (fips = 06037) motor-vehicle emissions
merge_ds <- merge(NEI, SCC, by = "SCC") # merge on SCC column
q6ds <- merge_ds[grepl("vehicle", merge_ds$Short.Name, ignore.case = TRUE), ] # keep only vehicle-related sources
q6ds <- q6ds[q6ds$fips %in% c("06037" , "24510"),] # Baltimore City and LA only
agg_ds <- aggregate(Emissions ~ year + fips, q6ds, sum) # total emissions per year per county
g <- ggplot(agg_ds, aes(year, Emissions, color = fips))
g <- g + geom_line(stat = "identity") + ggtitle("Baltimore City vs LA Vehicle Emissions Over Time")
# Render the plot to a 480x480 PNG in the (new) working directory.
png("Plot6.png", width=480, height=480)
print(g)
dev.off()
|
24eb53049eac149c517785e14bad8d75c32ee801 | 277dbb992966a549176e2b7f526715574b421440 | /R_training/MyCode/2주_Web Basic/day2/3. five.R | 3dbe0de723d863b4363bbba6be12819d4da729ae | [] | no_license | BaeYS-marketing/R | 58bc7f448d7486510218035a3e09d1dd562bca4b | 03b500cb428eded36d7c65bd8b2ee3437a7f5ef1 | refs/heads/master | 2020-12-11T04:30:28.034460 | 2020-01-17T08:47:38 | 2020-01-17T08:47:38 | 227,819,378 | 0 | 0 | null | 2019-12-13T12:06:33 | 2019-12-13T10:56:18 | C++ | UTF-8 | R | false | false | 1,967 | r | 3. five.R | naver.url <- "https://movie.naver.com/movie/point/af/list.nhn?page=1"
# Parse the first listing page of Naver movie ratings (URL defined above).
url<-read_html(naver.url)
# Content extraction order:
'1. URL setting
2. read_html
3. html_node
4. html_text'
#
'1.크롤링하고자 하는 웹사이트의 문자열을 파악해야 한다(쿼리 문자열 포함)
주소창 확인
2. 웹페이지의 컨텐트가 어떤 문자셋으로 작성되었는지 파악
<meta>
3.추출하려는 컨텐츠를 포함하고 있는 태그를 찾는다
개발자도구의 기능을 활용한다
4.찾은태그의 CSS Selector,또는 Xpath를 판단한다
5. Rvest라는 패키지를 사용한다.'
# NOTE(review): the two quoted blocks above are bare string literals used as
# notes; they are evaluated and discarded at run time. The Korean one lists
# the crawling steps: inspect the target URL (incl. query string), find the
# page's charset via <meta>, locate the tag holding the content with the
# browser dev tools, derive its CSS selector/XPath, then extract with rvest.
# One specific table row (the 8th) vs. all rows of the review table:
c1 <- html_nodes(url,"#old_content > table > tbody > tr:nth-child(8) > td.title > a.movie")
c1
c2 <-html_nodes(url,"#old_content > table > tbody > tr> td.title > a.movie")
c2
# html_node() (singular) would extract only the first match
html_text(c1)
html_text(c2)
?html_text
#
# Re-read the listing page without a page number; the page is CP949-encoded.
naver.url <- "https://movie.naver.com/movie/point/af/list.nhn"
url <- read_html(naver.url, encoding = 'CP949')
url
# Movie titles
nodes <- html_nodes(url, '.movie')
title <- html_text(nodes)
title
# Movie reviews: extract, then strip tabs, line breaks, and boilerplate text
nodes <- html_nodes(url, '.title')
review <- html_text(nodes, trim=TRUE)
review <- gsub('\t','',review)
review <- gsub('[\r\n]','',review)
review <- gsub('신고','',review)
review
page <- data.frame(title,review)
write.csv(page,'movie_reviews1.csv')
# Multiple pages: crawl pages 1..100 and stack the cleaned reviews
site <- "https://movie.naver.com/movie/point/af/list.nhn?page="
movie.review <-NULL
for(i in 1:100){
  url <- paste(site,i,sep='')
  text <- read_html(url,encoding ='CP949')
  nodes <- html_nodes(text,'.title')
  review <- html_text(nodes, trim=TRUE)
  review <- gsub('\t','',review)
  review <- gsub('[\r\n]','',review)
  review <- gsub('신고','',review)
  # NOTE(review): 'title' is not re-scraped inside the loop, so every page is
  # paired with the titles from the single-page section above; if lengths
  # differ, data.frame() will recycle or error. Extract '.movie' per page too.
  page <- data.frame(title,review)
  movie.review <- rbind(movie.review,page) # NOTE(review): rbind in a loop grows quadratically
}
write.csv(movie.review,'movie_reviews2.csv')
getwd()
|
2182942207bc2e72998cc710adcf2d060b3fa967 | 3e7203c420d8eb75dbf4c011eb26eb88b23c7372 | /Principle_Component_Analysis.R | a3b91c04fe8e70b6c3b5fb6e2d3f839d09df7840 | [] | no_license | nimeshrajput5211/Unsupervised-Clustering-Technique | c322a84e5c51bd10a0bd56137eb049b110bdcee4 | 6ab6b113d464af8b85d12179c488d2c29d4f5663 | refs/heads/master | 2021-05-11T09:51:23.121113 | 2018-01-19T07:03:14 | 2018-01-19T07:03:14 | 118,089,498 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,622 | r | Principle_Component_Analysis.R | rm(list=ls(all=T))
# NOTE(review): machine-specific working directory; expects wine.csv there.
setwd("C:\\Users\\NY 5211\\Downloads\\INSOFE\\Module_2\\10-09-2017\\Sridhar Pappu")
library(scales)
# Chemical analysis of Wine
# These data are the results of a chemical analysis of wines grown
# in the same region in Italy but derived from three different cultivars.
# The analysis determined the quantities of 13 constituents found in each
# of the three types of wines.
WineData <- read.csv("wine.csv")
str(WineData)
# PCA is done only on the predictors (first column is the class label)
wine.predictors <- WineData[,-1]
# Since the predictors are of completely different magnitudes,
# we need to scale them before the analysis.
scaled.Predictors <- scale(wine.predictors)
scaled.Predictors
# Compute principal components
pca.out = princomp(scaled.Predictors)
# princomp(wine.predictors, cor=TRUE) would scale automatically
# (uses the correlation matrix instead of the covariance matrix)
names(pca.out)
summary(pca.out)
plot(pca.out)
# Choosing ~80% explained variance keeps only the first 5 components.
compressed_features = pca.out$scores[,1:5]
compressed_features
library(nnet)
# Multinomial logistic regression on the 5 PC scores...
multout.pca <- multinom(WineData$WineClass ~ compressed_features)
summary(multout.pca)
# ...gives us an AIC value of 24 (run-specific)
# versus the full scaled predictor set:
multout.full <- multinom(WineClass ~ scaled.Predictors, data=WineData)
summary(multout.full) # gives us an AIC of 56 (run-specific)
# Visualizing the spread in the dataset using only the first 2 components.
# NOTE(review): install_github() reinstalls the package on every run; better
# to install once interactively and only library() here.
library(devtools)
install_github("vqv/ggbiplot")
library(ggbiplot)
g <- ggbiplot(pca.out, obs.scale = 1, var.scale = 1,
              groups = WineData$WineClass, ellipse = TRUE, circle = TRUE)
g + scale_color_discrete(name = '')
|
726738ee11270babdef89b2ee14a2d4eda874a87 | baa94d60dc360a14cc626d8bd4e43455b2f370ac | /Old Scripts/Create Condor Input.R | da8fe7e02cf5f44c3ec17cd14d7452f0a4475b35 | [] | no_license | tbish-pleccy/SaniAntsBetaDiv | 39b2a5d5e42036b236fb58c3191c45788c72e22f | edd90f6cf82735403ce013757995c849139aa65a | refs/heads/master | 2021-01-23T03:53:07.975498 | 2014-06-04T15:34:44 | 2014-06-04T15:34:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,135 | r | Create Condor Input.R | # Save Observed Communities
# Build a lookup of (community name, output index): each community paired with
# 1000 randomisations -> 14000 rows total. Relies on ch.comm.list and
# ch.rand.traits.list already existing in the workspace (created elsewhere).
save.frame <- data.frame(rep(names(ch.comm.list), each = 1000), seq(1, 14000, 1))
names(save.frame) <- c("comm.name", "index")
head(save.frame)
observed.communities.location <-
  "C:/Users/Tom Bishop/Documents/Research Projects/PhD/2 Second Paper/Paper 2 Restart September/Data/Observed Communities Convex Hull"
# Write each observed community matrix to CommK.csv, K = index - 1
# (zero-based naming, presumably for the Condor jobs -- TODO confirm).
for (i in 1:nrow(save.frame)){
  select <- save.frame[i, ]
  write.csv(as.matrix(ch.comm.list[[select$comm.name]]), row.names = TRUE,
            paste(observed.communities.location, "/","Comm", select$index - 1, ".csv", sep = ""))
}
# Save Randomised Traits
# rel.ind = randomisation index within each community (1..1000, repeated for
# the 14 communities); used as the third-dimension slice below.
save.frame$rel.ind <- rep(seq(1, 1000, 1), 14)
random.traits.location <-
  "C:/Users/Tom Bishop/Documents/Research Projects/PhD/2 Second Paper/Paper 2 Restart September/Data/Randomised Traits Convex Hull"
# Write one randomised trait matrix per row, named TraitsK.csv to pair with CommK.csv.
for (i in 1:nrow(save.frame)){
  select <- save.frame[i, ]
  randomisedtraits <- as.matrix(ch.rand.traits.list[[select$comm.name]][,,select$rel.ind])
  write.csv(randomisedtraits, file =
              paste(random.traits.location, "/", "Traits", select$index - 1,
                    ".csv", sep = ""))
}
save(fun.dissim) |
ae68e69e39df0b17cae2ccb610840954739ebb96 | 9b375cff041d4ccc76045b2787716f74d3147896 | /vignettes/mfe.R | 1013016576f67ba63212d3a5e7dfcaa1e192ed75 | [] | no_license | cmjt/eeda | 57ff7b8ba07af38158205cae43ea8e11af2d90c1 | 68bd086062f8e5e944efca1260d13f22a830ca65 | refs/heads/master | 2022-12-04T04:05:44.775347 | 2020-08-24T04:24:47 | 2020-08-24T04:24:47 | 258,033,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 561 | r | mfe.R | ## ---- libs
# Vignette source; the '## ---- label' lines below look like knitr chunk
# markers (knitr::read_chunk) -- TODO confirm; do not rename without updating
# the corresponding .Rmd.
library(eeda)
## ---- quiet
# Read the API key from the first column of keys.txt (kept out of the repo).
key <- read.delim("keys.txt",header = FALSE)$V1
## ---- key
eeda_auth(key = key)
## ---- data
data(mfe_names)
head(mfe_names,10)
## search
# Find MfE datasets whose name mentions "invert" (invertebrates).
idx <- grep("invert",mfe_names[,1])
mfe_names[idx,]
## ---- get
## as spdf
invert <- get_mfe_data(id = "52713")
## as sf
invert_sf <- get_mfe_data(id = "52713", sf = TRUE)
## ---- base
## ---- sfplot
library(ggplot2)
# Map the sites, coloured by SiteMedian (coerced to numeric for the gradient).
ggplot(invert_sf) + geom_sf(aes(color = as.numeric(SiteMedian)), size = 2) +
  scale_color_gradient("MCI median") +
  theme_void()
## ---- eeda
|
2abf2e021029a005efeaec6372252c37b303fac5 | 6ea363f2fa9200669aabf0b1814b8ce9f87809b2 | /WorkSpace/R Programming/R-Looping/RepeatFor.R | c3a04323f77d4487bdf4fb85b9fc514b22726992 | [] | no_license | chinna510/Projects | e1d81aa14f929337a80747924e224a6335f622c6 | bb84e5d2a59915286444edb59c92e7196ec3869b | refs/heads/master | 2021-08-23T08:12:28.324784 | 2017-12-04T06:34:16 | 2017-12-04T06:34:16 | 112,998,864 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 129 | r | RepeatFor.R | x <- c(1,2,3,4,5,2,34,5)
# Demo of R's repeat loop: prints the counter 1..500; when count reaches 501
# the guard fires, prints 501 one final time, and break exits the loop.
count=1
repeat{
print(count)
count=count+1
if(count >500){
print(count)
break
}
}
|
3173ae7ed62b8d0cc7108535c119710a948bd215 | 2e37595eab50d34c0f6c4016ff547c1547c42e6b | /PQL_Lancet81.R | 61e6b198a0b928c58daa1fdd7f8d35e76ec89472 | [] | no_license | joshua-nugent/PQL_Quad | 1bce26a37cf9677a3454d81e94f0961142762597 | ef29890eeed3a3867e428446e834cf6332cb35f8 | refs/heads/master | 2022-04-27T20:45:01.386317 | 2022-03-31T20:25:31 | 2022-03-31T20:25:31 | 149,775,077 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,313 | r | PQL_Lancet81.R | library(lme4)
library(MASS)
library(tidyverse)
library(parallel)
RNGkind("L'Ecuyer-CMRG")
set.seed(42)
replication_index <- 0
#sas_check <- check_estimates(n, p, b_0, b_1, sigbsq)
#sas_csv <- read.csv("data_for_sas_2.csv")
#y <- sas_csv$y
#x1 <- sas_csv$x1
#id <- sas_csv$id
#glmmresPQL <- glmmPQL(y ~ x1, random = ~ 1 | id , family = binomial(link = "logit"), niter=5)
check_estimates <- function(n, p, b_0, b_1, sigbsq){
  # Simulate one two-arm cluster-randomized trial from a random-intercept
  # logistic model and estimate the fixed effects two ways:
  #   * penalized quasi-likelihood    (MASS::glmmPQL)
  #   * 10-point adaptive quadrature  (lme4::glmer, nAGQ = 10)
  #
  # Args:
  #   n      number of clusters (first half treated: x1 = 1)
  #   p      observations per cluster
  #   b_0    true intercept (control-arm log-odds)
  #   b_1    true treatment effect (log odds ratio)
  #   sigbsq true variance of the cluster-level random intercept
  #
  # Returns: a named numeric vector of the inputs, both sets of fixed-effect
  # estimates, their bias (difference from and ratio to truth), and the
  # standard errors of the b_1 estimates.
  #
  # Bug fix: the original used 'replication_index <- replication_index + 1',
  # which only incremented a local copy, so every replication printed "1".
  # Superassignment updates the counter defined at the top level, giving a
  # running progress count across replicate() calls.
  replication_index <<- replication_index + 1
  print('replication number:')
  print(replication_index)
  # vector of p copies of each ID for 'long' data
  id <- rep(1:n, each = p)
  # vector of 1111...0000 for treatment arms (first half of clusters treated)
  x1 <- as.numeric(id < (n+1)/2)
  # Cluster-level random intercepts, replicated p times for each ID
  randint <- rep(rnorm(n, 0, sqrt(sigbsq)), each = p)
  # Linear predictor under the specified model
  linpred <- b_0 + b_1*x1 + randint
  # Inverse logit: per-observation success probabilities
  expit <- exp(linpred) / (1 + exp(linpred))
  # Bernoulli draws (logical vector; binomial families accept TRUE/FALSE)
  y <- runif(p*n) < expit
  # for the SAS check we did
  # data <- cbind(id, x1, randint, linpred, expit, y)
  # write.csv(data, paste0("data_for_sas_", i,".csv"), row.names=F)
  glmmresPQL <- glmmPQL(y ~ x1, random = ~ 1 | id , family = binomial(link = "logit"), niter = 100)
  glmmres10 <- glmer(y ~ x1 + (1|id), nAGQ = 10, family = binomial(link = "logit"))
  betaPQL <- fixef(glmmresPQL)
  beta10 <- fixef(glmmres10)
  # get covariance matrix...
  vcovPQL <- vcov(glmmresPQL, useScale = FALSE)
  vcov10 <- vcov(glmmres10, useScale = FALSE)
  # ...and use diagonal entries to get SE
  sePQL <- sqrt(diag(vcovPQL))
  se10 <- sqrt(diag(vcov10))
  value_labels <- c("n", "p", "b_0", "b_1", "sigbsq",
                    "PQL_b_0", "nAGQ_10_b_0",
                    "b_0_PQL_bias_diff", "b_0_10_bias_diff",
                    "b_0_PQL_bias_ratio", "b_0_10_bias_ratio",
                    "PQL_b_1", "nAGQ_10_b_1",
                    "b_1_PQL_bias_diff", "b_1_10_bias_diff",
                    "b_1_PQL_bias_ratio", "b_1_10_bias_ratio",
                    "b_1_PQL_SE", "b_1_10_SE"
  )
  estimates <-c(n, p, b_0, b_1, sigbsq,
                betaPQL[1], beta10[1],
                betaPQL[1] - b_0, beta10[1] - b_0,
                betaPQL[1] / b_0, beta10[1] / b_0,
                betaPQL[2], beta10[2],
                betaPQL[2] - b_1, beta10[2] - b_1,
                betaPQL[2] / b_1, beta10[2] / b_1,
                sePQL[2], se10[2]
  )
  names(estimates) <- value_labels
  return(estimates)
}
###############################################################
#
# Here we actually start running the simulation
#
###############################################################
# Scenario 1 parameters, from Lancet - vol 385, April 18, 2015 "Lancet 81":
# Incident Suicide attempts (Table 2, p. 1540)
# YAM vs. controls at 12 months
n <- 85 # number of clusters: 85
p <- 50 # people/observations per cluster 1987+2256=4243 people / 45+40=85 schools = 50 per cluster
b_0 <- -4.2 # intercept/baseline of control group: 1.51% = .0151 = OR of .015 = b_0 of -4.2
b_1 <- -0.8 # effect of treatment: OR of .45 = -.8
sigbsq <- 1 # random-intercept variance
reps <- 1000 # number of simulated trials
replication_index <- 0 # progress counter read/updated by check_estimates()
# Each replicate returns a named vector; t() + as_tibble gives one row per rep.
data_attempts <- as_tibble(t(replicate(reps, check_estimates(n, p, b_0, b_1, sigbsq))))
mean(data_attempts$PQL_b_1)
mean(data_attempts$b_1_PQL_bias_ratio)
mean(data_attempts$b_1_PQL_bias_diff)
# Crazy estimates? Remove max or min values by uncommenting:
# clean_data <- data_attempts[-which.min(data_attempts$PQL_b_1), ]
# clean_data <- data_attempts[-which.max(data_attempts$PQL_b_1), ]
# then repeat the two lines below until crazy values are gone:
# clean_data <- clean_data[-which.min(clean_data$PQL_b_1), ]
# clean_data <- clean_data[-which.max(clean_data$PQL_b_1), ]
# NOTE(review): the next three lines error unless the clean_data lines above
# were run manually first -- clean_data is otherwise undefined.
mean(clean_data$PQL_b_1)
mean(clean_data$b_1_PQL_bias_ratio)
mean(clean_data$b_1_PQL_bias_diff)
# Scenario 2 parameters, same paper ("Lancet 81"):
# Incident severe suicidal ideation: YAM vs. controls at 12 months
# Table 3, p. 1541
n <- 85 # number of clusters: 85
p <- 47 # people/observations per cluster 1991+1962=3953 people / 85 schools = 47 per cluster
b_0 <- -4.27 # intercept/baseline of control group: 1.37% = .0137 = OR of .014 = b_0 of -4.27
b_1 <- -0.69 # effect of treatment: OR of .5 = -.69
sigbsq <- 1
reps <- 100
data_ideation <- as_tibble(t(replicate(reps, check_estimates(n, p, b_0, b_1, sigbsq))))
mean(data_ideation$PQL_b_1)
mean(data_ideation$b_1_PQL_bias_ratio)
mean(data_ideation$b_1_PQL_bias_diff)
# # Example loops to scan over values (one csv written per parameter value):
# for (i in c(-2, -1.5, -1, -.5, 0, .5, 1, 1.5, 2)){
#   data <- as_tibble(t(replicate(reps, check_estimates(n, p, b_0, i, sigbsq))))
#   write.csv(data, paste0("data_for_b_1_", i,".csv"), row.names=F)
# }
# for (i in c(-1.5, -1, -.5, 0, 1, 1.5, 2)){
#   data <- as_tibble(t(replicate(reps, check_estimates(n, p, i, b_1, sigbsq))))
#   write.csv(data, paste0("data_for_b_0_", i,".csv"), row.names=F)
# }
# for (i in c(.5, 1, 1.5, 2, 3)){
#   data <- as_tibble(t(replicate(reps, check_estimates(n, p, b_0, b_1, i))))
#   write.csv(data, paste0("data_for_sigbsq_", i,".csv"), row.names=F)
# }
# Organizational structure as model list might grow,
# to hopefully avoid repeated code: column names holding bias summaries.
diff_col_names_b_0 <- c(
  "b_0_PQL_bias_diff",
  "b_0_10_bias_ratio"
)
diff_col_names_b_1 <- c(
  "b_1_PQL_bias_diff",
  "b_1_10_bias_ratio"
)
# read in data, make a table with bias
#b_1_bias_results <- tibble(b_1 = 0, b_1_PQL_bias = 0, b_1_Laplace_bias = 0, b_1_4_bias = 0, b_1_10_bias = 0, b_1_25_bias = 0)
b_1_bias_results <- tibble(b_1 = double(), b_1_PQL_bias = double(), b_1_Laplace_bias = double(), b_1_4_bias = double(), b_1_10_bias = double(), b_1_25_bias = double())
index <- 0
#add_row(b_1_bias_results)
for (i in c(-2, -1.5, -1, -.5, 0, .5, 1, 1.5, 2)){
index <- index + 1
add_row(b_1_bias_results)
opened_file <- read.csv(paste0("data_for_b_1_", i,".csv"))
b_1 <- i
b_1_bias_results[index,1] <- b_1
for (j in diff_col_names_b_1){
m <- mean(opened_file[[j]])
b_1_bias_results[index,j] <- m
}
}
b_1_bias_plot <- ggplot(data = b_1_bias_results) +
geom_abline(slope = 0, intercept = 0) +
geom_point(mapping = aes(x = b_1, y = b_1_PQL_bias), color = "red") +
geom_point(mapping = aes(x = b_1, y = b_1_25_bias), color = "green")
b_1_bias_plot
##########################
############################# Below this is code junkyard
########################
############# integrate this into above system
# NOTE(review): the loops below read a tibble named `data`, which is only
# created by the commented-out scan loops further up -- running this section
# standalone will fail (or pick up utils::data); verify before reuse.
# Look at bias / variance for beta1
for (i in diff_col_names_b_1){
cat(sprintf("mean for b_1 estimate error for %s", i))
cat(sprintf(" is: \n"))
m <- mean(data[[i]])
print(m)
}
for (i in diff_col_names_b_1){
cat(sprintf("std dev for b_1 estimate error for %s", i))
cat(sprintf(" is: \n"))
# NOTE(review): `sd <- sd(...)` shadows stats::sd with a numeric; R still
# resolves the call position to the function, but the name is confusing.
sd <- sd(data[[i]])
print(sd)
}
for (i in diff_col_names_b_1){
histogram <- hist(data[[i]], main = paste("Histogram of b_1 estimate error for", i))
}
# Same as above, but for beta0 term
for (i in diff_col_names_b_0){
cat(sprintf("mean for intercept error of %s", i))
cat(sprintf(" is: \n"))
m <- mean(data[[i]])
print(m)
}
for (i in diff_col_names_b_0){
cat(sprintf("std dev for intercept error of %s", i))
cat(sprintf(" is: \n"))
sd <- sd(data[[i]])
print(sd)
}
for (i in diff_col_names_b_0){
histogram <- hist(data[[i]], main = paste("Histogram of intercept error of", i))
}
|
def1b082a8ff3cc58e522649150f5bfa15e5ab99 | b965ad876fa4ef938064ec49446a427601496c1a | /man/m_to_in.Rd | f6e00e01098fb91fe1aecec88155cb99d741fccf | [
"MIT"
] | permissive | indenkun/yd2m | a880ac26e3c56e921b0a9eac2a75fdbb9fbaca0e | b7f5f76cd931b72cbe893953b45ea16c66bdaeea | refs/heads/master | 2023-01-02T15:39:29.014940 | 2020-10-27T14:50:55 | 2020-10-27T14:50:55 | 278,355,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 417 | rd | m_to_in.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/length.R
\name{m_to_in}
\alias{m_to_in}
\title{Converts the input value from meters to inches}
\usage{
m_to_in(x)
}
\arguments{
\item{x}{The value of the length of the meters unit system}
}
\value{
The value of the length of the inches unit system
}
\description{
Converts the input value from meters to inches.
}
\examples{
m_to_in(1)
}
|
b9da92143505e53aa885b9509ea9941ab5b8beb4 | 90a1de2d8df6013c8475e88220b28acfe747eade | /STAT452_Statistical_Learning/452Lecture_Rcode/L04_RegExtension/L4 - Extensions of variables.R | 8c07e4d6e348f142593366c9316c3fc5ea19197c | [] | no_license | yiyangd/SFU | 4a5d39016c8b0978cc6c437232f65ea4f3690151 | 399800b48a929031b689e40d72f7d51fb95b0e93 | refs/heads/master | 2023-07-06T07:48:20.712383 | 2021-08-07T02:58:20 | 2021-08-07T02:58:20 | 166,151,796 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,193 | r | L4 - Extensions of variables.R | #############################################################
# Demonstrating various different forms of explanatory variables.
# 1. Categorical/Factor variables
# 2. Transformations
# 3. Interactions and other functions
#
#############################################################
#############################################################
# 1. Categorical variables
#
# Below we create some data where X1 is categorical with 4
# levels and X2 is numerical.
# The mean of Y is related to the 4 levels of X1 but not X2
# We explore the variables and see what regressions look like
#############################################################
# Creating some data, saving to data frame, "dat"
set.seed(3894270)
X1 = rep(x=c("A", "B", "C", "D"), each=5)
X2 = round(rnorm(20),2)
mean.y = rep(c(3,1,5,1), each=5)
epsilon = rnorm(20)
y = round(mean.y + epsilon, 2)
dat = data.frame(y,X1,X2)
cbind(dat[1:5,],dat[6:10,],dat[11:15,],dat[16:20,])
# Here is how to see the information in the variable X1
# NOTE(review): under R >= 4.0 data.frame() defaults to stringsAsFactors=FALSE,
# so X1 is character and levels() returns NULL -- confirm the intended R version.
class(dat$X1)
levels(dat$X1)
# Fit a model in R using X1.
# See how it writes the parameter estimates.
mod.cat = lm(y ~ X1, data=dat)
summary(mod.cat)
# Add the numerical variable to see how that works.
# Could add the interaction, X1:X2
mod.cat2 = lm(y ~ X1 + X2, data=dat)
summary(mod.cat2)
# Now showing how to create dummy (indicator) variables
# because some functions only work with numerical vars
library(caret)
dv1 = dummyVars("~.", data=dat)
dv2 = predict(dv1, newdata=dat)
dat.dv = data.frame(dv2)
# Note that it creates ALL Q indicator variables, and does not drop any
head(dat.dv)
# See what happens when all the indicators are in the model together
mod.cat.dv = lm(y ~ ., data=dat.dv)
summary(mod.cat.dv)
#############################################################
# 2. Transformations
#
# The main thing to know here is what functions and other math
# operations are possible in R. Google is your friend.
# Some common ones
# Powers: X^2, X^3, etc.
# Logs: Natural: log() Base 10: log10()
# Square roots: sqrt() or X^(1/2) (USE PARENTHESES AROUND POWER)!!!)
# For other roots, use powers
# Inverse: 1/X or X^-1
#
# You can either do calculations within lm or create the variables
# separately and include them in R. If you do it as a new variable
# be sure to include it in the data frame. If you do calculations
# within lm(formula=), put them in I(...). The I() function tells
# R basically to treat the contents as a calculation.
#############################################################
# Probably shouldn't be taking a log here, but showing it for demo.
# Code below produces missing values (NaN) due to negative X2
dat$logx2 = log(dat$X2)
head(dat)
lm(y~logx2, data=dat)
# Or can use log() directly in formula:
lm(y~log(X2), data=dat)
# Same ideas work for math operations
# NOTE(review): this uses the global X2, not dat$X2 (same values here,
# but fragile if dat is modified independently).
dat$invx2 = 1/X2
head(dat)
# Math operations within formula need to be done carefully
# Adding inverse of x within lm
# Without I() it fits the wrong model (formula "/" is not division)
mod.noI = lm(y~ 1/X2, data=dat)
summary(mod.noI)
# With I() does it right
mod.Internal = lm(y~ I(1/X2), data=dat)
summary(mod.Internal)
#############################################################
# 3. Interactions and other functions
#
# Crossproducts are usually handled by adding them to the formula
# in lm(). The form is X1:X2.
# Other functions are easy: just create them as variables and
# add them to the model
#############################################################
# Creating another variable for our interactions and functions
dat$X3 = runif(n=20, min=10, max=20)
head(dat)
# Adding X2:X3 crossproduct
# First show model without crossproduct
mod.23 = lm(y ~ X2 + X3, data=dat)
summary(mod.23)
# Look at a plot: prediction surface on a grid over (X2, X3)
x1. <- seq(from=-3, to=3, by=.1)
xy1 <- data.frame(expand.grid(X2=seq(from=-3, to=3, by=.1),
X3=seq(from=10, to=20, by=.2)))
pred <- predict(mod.23 ,newdata=xy1)
surface = matrix(pred, nrow=length(x1.))
library(rgl)
open3d()
persp3d(x = seq(from=-3, to=3, by=.1), y = seq(from=10, to=20, by=.2),
z = surface, col = "orange", xlab="X2", ylab="X3",
zlab="Predicted Y")
points3d(dat$y ~ dat$X2 + dat$X3, col="blue")
# Adding X2:X3 crossproduct
# Now add crossproduct (surface is no longer planar)
mod.cp = lm(y ~ X2 + X3 + X2:X3, data=dat)
summary(mod.cp)
# Look at a plot
x1. <- seq(from=-3, to=3, by=.1)
xy1 <- data.frame(expand.grid(X2=seq(from=-3, to=3, by=.1),
X3=seq(from=10, to=20, by=.2)))
pred <- predict(mod.cp ,newdata=xy1)
surface = matrix(pred, nrow=length(x1.))
library(rgl)
open3d()
persp3d(x = seq(from=-3, to=3, by=.1), y = seq(from=10, to=20, by=.2),
z = surface, col = "orange", xlab="X2", ylab="X3",
zlab="Predicted Y")
points3d(dat$y ~ dat$X2 + dat$X3, col="blue")
# Now making a new variable as function of two variables, X3/(X2+X3)
dat$X4 = dat$X3 / (dat$X3+dat$X2)
#Adding these into model just treats them as regular variables
mod.func = lm(y~X1 + X2 + X3 + X4, data=dat)
summary(mod.func)
|
### Robust correlation matrix via leave-one-out influence weighting.
###
### M:  data matrix (rows = events/observations, columns = factors).
### ct: correlation threshold used to select which columns are considered
###     "related" to a pair when building the outlier weights.
### Returns a p x p correlation matrix: pairs with |r| < 0.25 keep the plain
### Pearson value; stronger pairs are re-estimated with cov.wt(), where events
### that shift the leave-one-out correlations a lot get down-weighted.
CRobCor <- function (M, ct = 0.5) {
  p <- ncol(M)
  n <- nrow(M)
  # Pearson correlation on the full data
  Mcor <- cov2cor(cov(M, use = "all.obs"))
  # DELTA[i, j, ev] = |cor(without event ev) - cor(full)|; a p x p x n array
  # (for large n this becomes expensive to compute and store).
  DELTA <- array(0, dim = c(p, p, n),
                 dimnames = list(colnames(M), colnames(M), rownames(M)))
  for (ev in seq_len(n)) {
    # scaling does not change the correlation, only kept for stability
    marg_M <- scale(M[-ev, ])
    marg_Mcor <- cov2cor(cov(marg_M, use = "all.obs"))
    DELTA[, , ev] <- abs(marg_Mcor - Mcor)  # marginal Delta correlation
  }
  # Overall "influence" of each event; computed for inspection only
  # (NOTE: not used in the weighting below).
  INFL <- apply(DELTA, 3, sum) / (p^2 - p)
  MRobCor <- Mcor
  # BUG FIX: use seq_len(p - 1) instead of 1:(p - 1), which produced the
  # invalid sequence c(1, 0) (and an out-of-bounds error) when p == 1.
  for (p_i in seq_len(p - 1)) {
    for (p_j in (p_i + 1):p) {
      # weak pairs keep the plain Pearson correlation
      if (abs(Mcor[p_i, p_j]) < 0.25) next
      # relax the threshold when many columns correlate with p_i or p_j
      n_i <- length(which(abs(Mcor[p_i, ]) > ct))
      n_j <- length(which(abs(Mcor[p_j, ]) > ct))
      cta <- ct - 0.15 / (0.5 * n_i + 0.5 * n_j)
      idx_i <- which(abs(Mcor[p_i, ]) > cta)
      idx_j <- which(abs(Mcor[p_j, ]) > cta)
      # per-event weights: accumulated correlation shifts around (p_i, p_j)
      WT <- apply(DELTA[p_i, c(p_j, idx_i), ], 2, sum) / length(idx_i)
      WT <- WT + apply(DELTA[p_j, c(p_i, idx_j), ], 2, sum) / length(idx_j)
      # weighted correlation: influential events get small weights 1/WT^0.8
      CRij <- cov.wt(M[, c(p_i, p_j)], (1 / WT^0.8), cor = TRUE)$cor
      MRobCor[p_i, p_j] <- CRij[1, 2]
      MRobCor[p_j, p_i] <- CRij[2, 1]
    }
  }
  return(MRobCor)
}
|
a32f2f0759c259da17a8dcf1fea8ffa22a917c4f | 37794cfdab196879e67c3826bae27d44dc86d7f7 | /Math/Poly.System.Asymmetric.S3.R | 23639056cc13c420cb5bcaed7feafb0db029cd14 | [] | no_license | discoleo/R | 0bbd53a54af392ef53a6e24af85cec4f21133d17 | e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff | refs/heads/master | 2023-09-05T00:43:32.381031 | 2023-08-31T23:03:27 | 2023-08-31T23:03:27 | 213,750,865 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 28,356 | r | Poly.System.Asymmetric.S3.R | ########################
###
### Leonard Mada
### [the one and only]
###
### Polynomial Systems:
### Asymmetric S3: Simple / Basic
###
### draft v.0.1e
##########################
### Asymmetric Systems ###
### Simple Asymmetric Systems
### Basic Types
# - some simple Models;
####################
###############
### History ###
###############
### draft v.0.1e:
# - some experiments on Simple Asymmetric systems;
### draft v.0.1d:
# - Mixt Ht system:
# Eq 2: x*y^2 + y*z^2 + z*x^2 = R2;
### draft v.0.1c - v.0.1c-ext:
# - Eq 2 variants:
# x^2 + y^2 + z^2 = R2;
# - added A-type extension;
### draft v.0.1b - v.0.1b-fix:
# - entanglement with roots of unity:
# Order 1: x + m*y + m^2*z = 0;
# Order 2: x^2 + m*y^2 + m^2*z^2 = 0;
# - improved versions; [v.0.1b-fix]
### draft v.0.1a:
# - initial draft;
####################
####################
### helper functions
library(polynom)
library(pracma)
# the functions are in the file:
# Polynomials.Helper.R
# - e.g. round0(), round0.p(),
# solve.EnAll(), solveEn();
########################
###############
### Order 2 ###
###############
# x^2 + b1*x + b2*S = R1
# y^2 + b1*y + b2*S = R2
# z^2 + b1*z + b2*S = R3
### Solution
### Sum =>
x^2 + y^2 + z^2 + (b1 + 3*b2)*S - (R1 + R2 + R3) # = 0
S^2 + (b1 + 3*b2)*S - 2*E2 - (R1 + R2 + R3) # = 0
# 2*E2 = S^2 + (b1 + 3*b2)*S - (R1 + R2 + R3)
### Prod(R[i] - b2*S)
(x^2 + b1*x)*(y^2 + b1*y)*(z^2 + b1*z) + (b2*S - R1)*(b2*S - R2)*(b2*S - R3) # = 0
E3^2 + b1*E3*E2 + b1^2*E3*S + b1^3*E3 + (b2*S - R1)*(b2*S - R2)*(b2*S - R3)
E3^2 + b1*E3*E2 + b1^2*E3*S + b1^3*E3 +
+ (b2^3*S^3 - b2^2*(R1+R2+R3)*S^2 + b2*(R1*R2+R1*R3+R2*R3)*S - R1*R2*R3)
### Prod =>
(x^2 + b1*x + b2*S)*(y^2 + b1*y + b2*S)*(z^2 + b1*z + b2*S) - R1*R2*R3 # = 0
b1*E2*E3 + E3*b1^2*S + E3*b1^3 + E3^2 + b1*b2*(E2*S - 3*E3)*S + b1^2*b2*E2*S +
+ b2*(E2^2 - 2*E3*S)*S + b2^2*(S^2 - 2*E2)*S^2 + b2^3*S^3 + b1*b2^2*S^3 - R1*R2*R3
b1*E2*E3 - 2*b2*E3*S^2 + b1^2*E3*S - 3*b1*b2*E3*S + b1^3*E3 + E3^2 +
+ b1*b2*E2*S^2 - 2*b2^2*E2*S^2 + b1^2*b2*E2*S + b2*E2^2*S +
+ b2^2*S^4 + b2^3*S^3 + b1*b2^2*S^3 - R1*R2*R3
### Rel:
SubstE3 = - 4*R1*R2*S*b2 - 4*R1*R3*S*b2 - 4*R1*S*b1^2*b2 - 8*R1*S^2*b1*b2 + 4*R1*S^2*b2^2 - 4*R1*S^3*b2 + 2*R1^2*S*b2 - 4*R2*R3*S*b2 - 4*R2*S*b1^2*b2 - 8*R2*S^2*b1*b2 + 4*R2*S^2*b2^2 - 4*R2*S^3*b2 + 2*R2^2*S*b2 - 4*R3*S*b1^2*b2 - 8*R3*S^2*b1*b2 + 4*R3*S^2*b2^2 - 4*R3*S^3*b2 + 2*R3^2*S*b2 + 12*S^2*b1^2*b2^2 + 4*S^2*b1^3*b2 + 24*S^3*b1*b2^2 + 10*S^3*b1^2*b2 - 6*S^3*b2^3 + 8*S^4*b1*b2 + 12*S^4*b2^2 + 2*S^5*b2;
DivE3 = - 24*S*b1*b2 - 16*S^2*b2;
E3 = - SubstE3 / DivE3;
### Eq: actual P[8]
# - seems NO win over direct solution;
# - embedded in S are a set of 8 real solutions and a larger set of false solutions:
# the consequences of this are not yet fully understood;
(- 84*R1*R2*R3*b1^2 + 4*R1*R2*R3^2 + 8*R1*R2*b1^4 + 4*R1*R2^2*R3 + 10*R1*R2^2*b1^2 - 4*R1*R2^3 + 8*R1*R3*b1^4 + 10*R1*R3^2*b1^2 - 4*R1*R3^3 - 24*R1*b1^6 + 4*R1^2*R2*R3 + 10*R1^2*R2*b1^2 + 6*R1^2*R2^2 + 10*R1^2*R3*b1^2 + 6*R1^2*R3^2 + 28*R1^2*b1^4 - 4*R1^3*R2 - 4*R1^3*R3 - 10*R1^3*b1^2 + R1^4 + 8*R2*R3*b1^4 + 10*R2*R3^2*b1^2 - 4*R2*R3^3 - 24*R2*b1^6 + 10*R2^2*R3*b1^2 + 6*R2^2*R3^2 + 28*R2^2*b1^4 - 4*R2^3*R3 - 10*R2^3*b1^2 + R2^4 - 24*R3*b1^6 + 28*R3^2*b1^4 - 10*R3^3*b1^2 + R3^4)*S^2 +
(- 120*R1*R2*R3*b1 - 24*R1*R2*R3*b2 + 44*R1*R2*b1^2*b2 + 36*R1*R2*b1^3 + 12*R1*R2^2*b1 - 4*R1*R2^2*b2 + 44*R1*R3*b1^2*b2 + 36*R1*R3*b1^3 + 12*R1*R3^2*b1 - 4*R1*R3^2*b2 - 72*R1*b1^4*b2 - 120*R1*b1^5 + 12*R1^2*R2*b1 - 4*R1^2*R2*b2 + 12*R1^2*R3*b1 - 4*R1^2*R3*b2 + 10*R1^2*b1^2*b2 + 78*R1^2*b1^3 - 12*R1^3*b1 + 4*R1^3*b2 + 44*R2*R3*b1^2*b2 + 36*R2*R3*b1^3 + 12*R2*R3^2*b1 - 4*R2*R3^2*b2 - 72*R2*b1^4*b2 - 120*R2*b1^5 + 12*R2^2*R3*b1 - 4*R2^2*R3*b2 + 10*R2^2*b1^2*b2 + 78*R2^2*b1^3 - 12*R2^3*b1 + 4*R2^3*b2 - 72*R3*b1^4*b2 - 120*R3*b1^5 + 10*R3^2*b1^2*b2 + 78*R3^2*b1^3 - 12*R3^3*b1 + 4*R3^3*b2 + 72*b1^6*b2 + 24*b1^7)*S^3 +
(- 40*R1*R2*R3 + 72*R1*R2*b1*b2 + 48*R1*R2*b1^2 + 20*R1*R2*b2^2 + 4*R1*R2^2 + 72*R1*R3*b1*b2 + 48*R1*R3*b1^2 + 20*R1*R3*b2^2 + 4*R1*R3^2 - 54*R1*b1^2*b2^2 - 228*R1*b1^3*b2 - 238*R1*b1^4 + 4*R1^2*R2 + 4*R1^2*R3 + 12*R1^2*b1*b2 + 80*R1^2*b1^2 - 2*R1^2*b2^2 - 4*R1^3 + 72*R2*R3*b1*b2 + 48*R2*R3*b1^2 + 20*R2*R3*b2^2 + 4*R2*R3^2 - 54*R2*b1^2*b2^2 - 228*R2*b1^3*b2 - 238*R2*b1^4 + 4*R2^2*R3 + 12*R2^2*b1*b2 + 80*R2^2*b1^2 - 2*R2^2*b2^2 - 4*R2^3 - 54*R3*b1^2*b2^2 - 228*R3*b1^3*b2 - 238*R3*b1^4 + 12*R3^2*b1*b2 + 80*R3^2*b1^2 - 2*R3^2*b2^2 - 4*R3^3 + 108*b1^4*b2^2 + 360*b1^5*b2 + 116*b1^6)*S^4 +
(24*R1*R2*b1 + 24*R1*R2*b2 + 24*R1*R3*b1 + 24*R1*R3*b2 - 84*R1*b1*b2^2 - 256*R1*b1^2*b2 - 240*R1*b1^3 - 12*R1*b2^3 + 36*R1^2*b1 + 4*R1^2*b2 + 24*R2*R3*b1 + 24*R2*R3*b2 - 84*R2*b1*b2^2 - 256*R2*b1^2*b2 - 240*R2*b1^3 - 12*R2*b2^3 + 36*R2^2*b1 + 4*R2^2*b2 - 84*R3*b1*b2^2 - 256*R3*b1^2*b2 - 240*R3*b1^3 - 12*R3*b2^3 + 36*R3^2*b1 + 4*R3^2*b2 + 54*b1^2*b2^3 + 342*b1^3*b2^2 + 714*b1^4*b2 + 234*b1^5)*S^5 +
(4*R1*R2 + 4*R1*R3 - 120*R1*b1*b2 - 130*R1*b1^2 - 28*R1*b2^2 + 6*R1^2 + 4*R2*R3 - 120*R2*b1*b2 - 130*R2*b1^2 - 28*R2*b2^2 + 6*R2^2 - 120*R3*b1*b2 - 130*R3*b1^2 - 28*R3*b2^2 + 6*R3^2 + 84*b1*b2^3 + 384*b1^2*b2^2 + 720*b1^3*b2 + 255*b1^4 + 9*b2^4)*S^6 +
(- 36*R1*b1 - 20*R1*b2 - 36*R2*b1 - 20*R2*b2 - 36*R3*b1 - 20*R3*b2 + 180*b1*b2^2 + 390*b1^2*b2 + 162*b1^3 + 28*b2^3)*S^7 +
(- 4*R1 - 4*R2 - 4*R3 + 108*b1*b2 + 60*b1^2 + 30*b2^2)*S^8 +
(12*b1 + 12*b2)*S^9 +
(1)*S^10
### Example:
R = c(1,2,3)
b = c(2,3)
R1 = R[1]; R2 = R[2]; R3 = R[3];
b1 = b[1]; b2 = b[2];
#
coeff = c(
(- 84*R1*R2*R3*b1^2 + 4*R1*R2*R3^2 + 8*R1*R2*b1^4 + 4*R1*R2^2*R3 + 10*R1*R2^2*b1^2 - 4*R1*R2^3 +
8*R1*R3*b1^4 + 10*R1*R3^2*b1^2 - 4*R1*R3^3 - 24*R1*b1^6 + 4*R1^2*R2*R3 + 10*R1^2*R2*b1^2 +
6*R1^2*R2^2 + 10*R1^2*R3*b1^2 + 6*R1^2*R3^2 + 28*R1^2*b1^4 - 4*R1^3*R2 - 4*R1^3*R3 - 10*R1^3*b1^2 +
R1^4 + 8*R2*R3*b1^4 + 10*R2*R3^2*b1^2 - 4*R2*R3^3 - 24*R2*b1^6 + 10*R2^2*R3*b1^2 + 6*R2^2*R3^2 +
28*R2^2*b1^4 - 4*R2^3*R3 - 10*R2^3*b1^2 + R2^4 - 24*R3*b1^6 + 28*R3^2*b1^4 - 10*R3^3*b1^2 + R3^4),
(- 120*R1*R2*R3*b1 - 24*R1*R2*R3*b2 + 44*R1*R2*b1^2*b2 + 36*R1*R2*b1^3 + 12*R1*R2^2*b1 - 4*R1*R2^2*b2 +
44*R1*R3*b1^2*b2 + 36*R1*R3*b1^3 + 12*R1*R3^2*b1 - 4*R1*R3^2*b2 - 72*R1*b1^4*b2 - 120*R1*b1^5 +
12*R1^2*R2*b1 - 4*R1^2*R2*b2 + 12*R1^2*R3*b1 - 4*R1^2*R3*b2 + 10*R1^2*b1^2*b2 + 78*R1^2*b1^3 +
- 12*R1^3*b1 + 4*R1^3*b2 + 44*R2*R3*b1^2*b2 + 36*R2*R3*b1^3 + 12*R2*R3^2*b1 - 4*R2*R3^2*b2 +
- 72*R2*b1^4*b2 - 120*R2*b1^5 + 12*R2^2*R3*b1 - 4*R2^2*R3*b2 + 10*R2^2*b1^2*b2 + 78*R2^2*b1^3 +
- 12*R2^3*b1 + 4*R2^3*b2 - 72*R3*b1^4*b2 - 120*R3*b1^5 + 10*R3^2*b1^2*b2 + 78*R3^2*b1^3 +
- 12*R3^3*b1 + 4*R3^3*b2 + 72*b1^6*b2 + 24*b1^7), # *S +
(- 40*R1*R2*R3 + 72*R1*R2*b1*b2 + 48*R1*R2*b1^2 + 20*R1*R2*b2^2 + 4*R1*R2^2 + 72*R1*R3*b1*b2 +
48*R1*R3*b1^2 + 20*R1*R3*b2^2 + 4*R1*R3^2 - 54*R1*b1^2*b2^2 - 228*R1*b1^3*b2 - 238*R1*b1^4 +
4*R1^2*R2 + 4*R1^2*R3 + 12*R1^2*b1*b2 + 80*R1^2*b1^2 - 2*R1^2*b2^2 - 4*R1^3 + 72*R2*R3*b1*b2 +
48*R2*R3*b1^2 + 20*R2*R3*b2^2 + 4*R2*R3^2 - 54*R2*b1^2*b2^2 - 228*R2*b1^3*b2 - 238*R2*b1^4 +
4*R2^2*R3 + 12*R2^2*b1*b2 + 80*R2^2*b1^2 - 2*R2^2*b2^2 - 4*R2^3 - 54*R3*b1^2*b2^2 - 228*R3*b1^3*b2 +
- 238*R3*b1^4 + 12*R3^2*b1*b2 + 80*R3^2*b1^2 - 2*R3^2*b2^2 - 4*R3^3 + 108*b1^4*b2^2 +
360*b1^5*b2 + 116*b1^6), # *S^2 +
(24*R1*R2*b1 + 24*R1*R2*b2 + 24*R1*R3*b1 + 24*R1*R3*b2 - 84*R1*b1*b2^2 - 256*R1*b1^2*b2 - 240*R1*b1^3 +
- 12*R1*b2^3 + 36*R1^2*b1 + 4*R1^2*b2 + 24*R2*R3*b1 + 24*R2*R3*b2 - 84*R2*b1*b2^2 - 256*R2*b1^2*b2 +
- 240*R2*b1^3 - 12*R2*b2^3 + 36*R2^2*b1 + 4*R2^2*b2 - 84*R3*b1*b2^2 - 256*R3*b1^2*b2 - 240*R3*b1^3 +
- 12*R3*b2^3 + 36*R3^2*b1 + 4*R3^2*b2 + 54*b1^2*b2^3 + 342*b1^3*b2^2 + 714*b1^4*b2 + 234*b1^5), # *S^3 +
(6*R1^2 + 6*R2^2 + 6*R3^2 + 4*R1*R2 + 4*R1*R3 + 4*R2*R3 - 120*b1*b2*(R1 + R2 + R3) +
- 130*b1^2*(R1 + R2 + R3) - 28*b2^2*(R1 + R2 + R3) +
84*b1*b2^3 + 384*b1^2*b2^2 + 720*b1^3*b2 + 255*b1^4 + 9*b2^4), # *S^4
(-(36*b1 + 20*b2)*(R1 + R2 + R3) + 180*b1*b2^2 + 390*b1^2*b2 +
162*b1^3 + 28*b2^3), # *S^5
(- 4*(R1 + R2 + R3) + 108*b1*b2 + 60*b1^2 + 30*b2^2), # *S^6
(12*b1 + 12*b2), 1 # S^8
)
S = roots(rev(coeff))
print(S)
E2 = (S^2 + (b1 + 3*b2)*S - (R1 + R2 + R3)) / 2;
SubstE3 = - 4*R1*R2*S*b2 - 4*R1*R3*S*b2 - 4*R1*S*b1^2*b2 - 8*R1*S^2*b1*b2 + 4*R1*S^2*b2^2 - 4*R1*S^3*b2 + 2*R1^2*S*b2 - 4*R2*R3*S*b2 - 4*R2*S*b1^2*b2 - 8*R2*S^2*b1*b2 + 4*R2*S^2*b2^2 - 4*R2*S^3*b2 + 2*R2^2*S*b2 - 4*R3*S*b1^2*b2 - 8*R3*S^2*b1*b2 + 4*R3*S^2*b2^2 - 4*R3*S^3*b2 + 2*R3^2*S*b2 + 12*S^2*b1^2*b2^2 + 4*S^2*b1^3*b2 + 24*S^3*b1*b2^2 + 10*S^3*b1^2*b2 - 6*S^3*b2^3 + 8*S^4*b1*b2 + 12*S^4*b2^2 + 2*S^5*b2;
DivE3 = - 24*S*b1*b2 - 16*S^2*b2;
E3 = - SubstE3 / DivE3;
### TODO: robust & all roots;
### x
# - unstable & fails!
# x = sapply(seq_along(S), function(id) roots(c(1, -S[id], E2[id], -E3[id])))
# len = length(S)
# S = matrix(S, ncol=len, nrow=3, byrow=T)
# - fails to compute many of the roots;
x = sapply(seq_along(S), function(id) roots(c(1, b1, b2*S[id] - R1)))
### y, z
y = sapply(seq_along(S), function(id) roots(c(1, b1, b2*S[id] - R2)))
z = sapply(seq_along(S), function(id) roots(c(1, b1, b2*S[id] - R3)))
len = length(S)
S = matrix(S, ncol=len, nrow=2, byrow=T)
# x = matrix(x, ncol=len, nrow=2, byrow=T)
### Alternative:
x = sapply(seq_along(S), function(id) roots(c(1, -S[id], E2[id], -E3[id])))
sol = solve.EnAll(x, n=3);
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
x^2 + b1*x + b2*(x+y+z) # - R1
y^2 + b1*y + b2*(x+y+z) # - R2
z^2 + b1*z + b2*(x+y+z) # - R3
isTrue1 = round0(x^2 + b1*x + b2*(x+y+z) - R1) == 0
isTrue2 = round0(y^2 + b1*y + b2*(x+y+z) - R2) == 0
isTrue3 = round0(z^2 + b1*z + b2*(x+y+z) - R3) == 0
isTrue = apply(cbind(isTrue1, isTrue2, isTrue3), 1, all)
(x+y+z)[isTrue] # every S-root;
sol[isTrue,]
poly.calc(sol[isTrue,1])
poly.calc(sol[isTrue,2])
poly.calc(sol[isTrue,3])
### Debug
b1 = -2; b2 = 3;
x = sqrt(2); y = sqrt(3); z = sqrt(5);
S = x + y + z; E2 = x*y + x*z + y*z; E3 = x*y*z;
R1 = x^2 + b1*x + b2*S;
R2 = y^2 + b1*y + b2*S;
R3 = z^2 + b1*z + b2*S;
### Classic Polynomial:
(- 4*R1*R2*b1*b2^3 - 4*R1*R3*b1*b2^3 + 4*R1*b1^2*b2^4 + 2*R1*b1^3*b2^3 - 2*R1^2*R2*b2^2 - 2*R1^2*R3*b2^2 + 8*R1^2*b1*b2^3 + 5*R1^2*b1^2*b2^2 + 4*R1^3*b1*b2 + 4*R1^3*b2^2 + R1^4 - 2*R2*R3*b2^4 - 2*R2*b1^2*b2^4 + R2^2*b2^4 - 2*R3*b1^2*b2^4 + R3^2*b2^4) +
(4*R1*R2*b1*b2^2 + 4*R1*R2*b2^3 + 4*R1*R3*b1*b2^2 + 4*R1*R3*b2^3 - 8*R1*b1*b2^4 - 26*R1*b1^2*b2^3 - 10*R1*b1^3*b2^2 - 24*R1^2*b1*b2^2 - 12*R1^2*b1^2*b2 - 8*R1^2*b2^3 - 4*R1^3*b1 - 4*R1^3*b2 + 4*R2*b1*b2^4 + 4*R2*b1^2*b2^3 + 4*R3*b1*b2^4 + 4*R3*b1^2*b2^3 - 6*b1^3*b2^4 - 2*b1^4*b2^3)*x^1 +
(4*R1*R2*b2^2 + 4*R1*R3*b2^2 + 12*R1*b1*b2^3 + 26*R1*b1^2*b2^2 + 12*R1*b1^3*b2 + 4*R1*b2^4 + 6*R1^2*b1^2 - 6*R1^2*b2^2 - 4*R1^3 - 2*R2*b1^2*b2^2 - 2*R2*b2^4 - 2*R3*b1^2*b2^2 - 2*R3*b2^4 + 9*b1^2*b2^4 + 16*b1^3*b2^3 + 5*b1^4*b2^2)*x^2 +
(36*R1*b1*b2^2 + 12*R1*b1^2*b2 - 4*R1*b1^3 + 12*R1*b2^3 + 12*R1^2*b1 + 12*R1^2*b2 - 4*R2*b1*b2^2 - 4*R2*b2^3 - 4*R3*b1*b2^2 - 4*R3*b2^3 + 6*b1^2*b2^3 - 6*b1^3*b2^2 - 4*b1^4*b2)*x^3 +
(- 12*R1*b1*b2 - 12*R1*b1^2 + 6*R1^2 - 2*R2*b2^2 - 2*R3*b2^2 - 16*b1*b2^3 - 25*b1^2*b2^2 - 8*b1^3*b2 + b1^4 - 3*b2^4)*x^4 +
(- 12*R1*b1 - 12*R1*b2 - 12*b1*b2^2 + 4*b1^3 - 4*b2^3)*x^5 +
(- 4*R1 + (b1 + b2)*(6*b1 + 2*b2))*x^6 +
4*(b1 + b2)*x^7 +
(1)*x^8
##########################
##########################
###################
### Curiosities ###
###################
### Omega-Entanglements
# - entanglement with roots of unity:
# m^3 = 1;
# - Eqs 2 & 3: can be anything symmetric;
###############
### Order 1 ###
###############
# x + m*y + m^2*z = 0
# x*y + x*z + y*z = R2
# x*y*z = R3
### Solution:
### Eq 1 =>
(x + m*y + m^2*z)*(m^2*x + m*y + z) # = 0
m^2*(x^2 + y^2 + z^2) + (m+1)*(x*y + x*z + y*z) # = 0
m^2*(S^2 - 2*E2) + (m+1)*E2 # = 0
m^2*S^2 + (m+1 - 2*m^2)*E2 # = 0
m^2*S^2 - 3*m^2*E2 # = 0
S^2 - 3*E2 # = 0
### Eq:
# S^2 = 3*E2
### Solver:
### Solves the omega-entangled system (m^3 = 1, m != 1):
###   x + m*y + m^2*z = 0;  x*y + x*z + y*z = R[1];  x*y*z = R[2].
### Eq 1 collapses to S^2 = 3*E2 (see the derivation above).
### Returns a matrix with one (x, y, z) candidate solution per row.
solve.omega.P1 = function(R, debug=TRUE) {
	S = sqrt(3*R[1])
	S = c(S, -S);
	if(debug) print(S);
	# x: the 3 roots of t^3 - S*t^2 + E2*t - E3, one column per value of S;
	x = sapply(S, function(S) roots(c(1, -S, R[1], -R[2])))
	m = unity(3, all=FALSE);
	# BUG FIX: x is a 3 x length(S) matrix while S was a plain vector;
	# "S - x" previously recycled S down the columns, pairing most x-values
	# with the wrong S; expand S to the layout of x first
	# (cf. expand.m() in the later solve.omega.P2 variant).
	S = matrix(S, nrow=nrow(x), ncol=ncol(x), byrow=TRUE);
	# linear system in (y, z):  y + z = S - x;  m*y + m^2*z = - x;
	yz.s = S - x;
	yz.ms = - x;
	y = (m^2*yz.s - yz.ms) / (m^2 - m);
	z = (yz.s - y)
	sol = cbind(x=as.vector(x), y=as.vector(y), z=as.vector(z))
	return(sol);
}
### Residual check for the omega-entangled systems.
### sol:   matrix of solutions, columns (x, y, z);
### pow:   power used in the omega-weighted Eq 1;
### type:  variant of Eq 2: "E2" (x*y+x*z+y*z), "Pow2" (x^2+y^2+z^2)
###        or "Mixt" (x*y^2 + y*z^2 + z*x^2);
### b.ext: A-type extension coefficients added to Eqs 2 & 3.
### Returns the rounded residual matrix (err1 should be 0; err2/err3 should
### equal the corresponding R-values of the system).
test.omega.P1 = function(sol, R=0, pow=1, b.ext=c(0,0), type="E2") {
	omega = unity(3, all=FALSE);
	x = sol[,1]; y = sol[,2]; z = sol[,3];
	# map the type-string onto an index: 1 = E2, 2 = Pow2, 3 = Mixt;
	type = match(type, c("E2", "Pow2", "Mixt"));
	# Eq 1: the omega-weighted power sum;
	err1 = x^pow + omega*y^pow + omega^2*z^pow;
	# Eq 2: by variant;
	err2 = if(type == 1) {
		x*y + x*z + y*z # - R2
	} else if(type == 2) {
		x^2 + y^2 + z^2 # - R2
	} else if(type == 3) {
		x*y^2 + y*z^2 + z*x^2 # - R2
	} else NA;
	# Eq 3:
	err3 = x*y*z # - R3
	### Extension terms:
	if(length(b.ext) < 2) b.ext = c(b.ext, 0);
	if(any(b.ext != 0)) {
		S = x + y + z;
		err2 = err2 + b.ext[1]*S;
		err3 = err3 + b.ext[2]*S;
	}
	round0(cbind(err1, err2, err3));
}
### Examples
R = c(1, -1)
sol = solve.omega.P1(R)
### Test
# err1 column should be 0 for every row (up to round0 tolerance);
test.omega.P1(sol, R);
x = sol[,1]
# monic polynomial having the x-components as roots;
round0.p(poly.calc(sol[ ,1]))
###############
###############
### Order 2 ###
###############
# x^2 + m*y^2 + m^2*z^2 = 0
# x*y + x*z + y*z = R2
# x*y*z = R3
### Solution:
### Eq 1 =>
(x^2 + m*y^2 + m^2*z^2)*(m^2*x^2 + m*y^2 + z^2) # = 0
m^2*(x^4 + y^4 + z^4) + (m+1)*((x*y)^2 + (x*z)^2 + (y*z)^2) # = 0
m^2*(S^4 - 4*E2*S^2 + 4*E3*S + 2*E2^2) + (m+1)*(E2^2 - 2*E3*S) # = 0
m^2*S^4 - 4*E2*m^2*S^2 + 4*m^2*E3*S + 2*m^2*E2^2 + (m+1)*E2^2 - 2*(m+1)*E3*S # = 0
m^2*S^4 - 4*E2*m^2*S^2 - 2*(m+1 - 2*m^2)*E3*S + (2*m^2 + m + 1)*E2^2 # = 0
m^2*S^4 - 4*E2*m^2*S^2 + 6*m^2*E3*S + m^2*E2^2 # = 0
### Eq:
S^4 - 4*E2*S^2 + 6*E3*S + E2^2 # = 0
### Solver:
### Solves: x^2 + m*y^2 + m^2*z^2 = 0, E2 = R[1], E3 = R[2]
### (m = primitive cubic root of unity; see the derivation above).
solve.omega.P2 = function(R) {
# S-polynomial: S^4 - 4*E2*S^2 + 6*E3*S + E2^2 = 0;
coeff = c(1, 0, - 4*R[1], 6*R[2], R[1]^2)
S = roots(coeff)
# x: 3 candidate roots (one column) per value of S;
x = sapply(S, function(S) roots(c(1, -S, R[1], -R[2])))
R2 = R[1]; # !!
m = unity(3, all=F);
# TODO: robust
# sol = solve.EnAll(x, n=3, max.perm=1)
# NOTE(review): x is a 3 x length(S) matrix while S is a plain vector, so
# "S - x" recycles S down the columns, pairing most x-values with the wrong
# S (cf. expand.m() used in the later variant); the isOK filter below
# discards inconsistent candidates, which only partly masks this -- verify.
yz.s = S - x;
yz = R2 - x*yz.s;
yz.ms = - x^2;
# y^2 from the omega-split of (y^2 + z^2) and (m*y^2 + m^2*z^2);
y2 = ((yz.s^2 - 2*yz)*m^2 - yz.ms) / (m^2 - m);
y = sqrt(y2 + 0i);
# both signs of the square root are candidate solutions;
y = c(y, -y); yz.s = c(yz.s, yz.s); x = c(x, x)
z = (yz.s - y);
sol = cbind(x=as.vector(x), y=as.vector(y), z=as.vector(z))
# keep only candidates with a consistent product y*z;
isOK = round0(sol[,2]*sol[,3] - as.vector(c(yz, yz))) == 0;
sol = sol[isOK,] # still needed;
return(sol);
}
### Examples
R = c(1, -1)
sol = solve.omega.P2(R)
### Test
# pow=2 checks the omega-weighted sum of squares (Eq 1);
test.omega.P1(sol, R, pow=2);
x = sol[,1]
# polynomial over ALL solution components;
round0.p(poly.calc(sol))
##################
#############
### Variants:
# x + m*y + m^2*z = 0
# x^2 + y^2 + z^2 = R2
# x*y*z = R3
### Solution:
### Eq 1 =>
# S^2 = 3*E2
### Eq 2 =>
S^2 - 2*E2 - R2 # = 0
E2 - R2 # = 0
# E2 = R2
# - the same as the simple Order 1 version;
#############
### Variants:
### Order 2
# x^2 + m*y^2 + m^2*z^2 = 0
# x^2 + y^2 + z^2 = R2
# x*y*z = R3
### Solution:
### Eq 1 =>
S^4 - 4*E2*S^2 + 6*E3*S + E2^2 # = 0
### Eq 2 =>
S^2 - 2*E2 - R2 # = 0
# 2*E2 = S^2 - R2
### =>
4*S^4 - 16*E2*S^2 + 24*E3*S + 4*E2^2 # = 0
4*S^4 - 8*(S^2 - R2)*S^2 + 24*E3*S + (S^2 - R2)^2 # = 0
3*S^4 - 6*R2*S^2 - 24*R3*S - R2^2 # = 0
### Solver:
### Solves the Pow2 variant: x^2 + m*y^2 + m^2*z^2 = 0;
### x^2 + y^2 + z^2 = R[1] (+ ext); x*y*z = R[2] (+ ext).
### b.ext = c(b21, b31): A-type extension terms b21*S and b31*S in Eqs 2 & 3.
solve.omega.P2 = function(R, b.ext=c(0, 0)) {
# S-polynomial: 3*S^4 - 6*R2*S^2 - 24*R3*S - R2^2 = 0 (see derivation above);
coeff = c(3, 0, - 6*R[1], - 24*R[2], - R[1]^2)
if(length(b.ext) < 2) b.ext = c(b.ext, 0)
if(any(b.ext != 0)) {
# correction of the S-polynomial for the extension terms;
coeff = coeff + c(0, 6*b.ext[1], 24*b.ext[2] - b.ext[1]^2, 2*R[1]*b.ext[1], 0)
}
S = roots(coeff)
# effective right-hand sides after moving the extension terms over;
R2 = R[1] - b.ext[1]*S; R3 = R[2] - b.ext[2]*S; # !!
E2 = (S^2 - R2) / 2; E3 = R3;
# x: 3 candidate roots (one column) per value of S;
x = sapply(seq_along(S), function(id) roots(c(1, -S[id], E2[id], -E3[id])))
m = unity(3, all=F);
len = length(S);
# expand a per-S vector to the 3 x len layout of x (avoids wrong recycling);
# (the helper's parameter m shadows the root of unity inside the helper only)
expand.m = function(m) matrix(m, ncol=len, nrow=3, byrow=T);
S = expand.m(S); R2 = expand.m(R2);
E2 = expand.m(E2); E3 = expand.m(E3);
# sol = solve.EnAll(x, n=3, max.perm=1)
# robust
# split (y, z) using: y + z = S - x and y^2 + z^2 = R2 - x^2;
yz.s = S - x; yz.sq = R2 - x^2; yz.ms = -x^2;
yz = (yz.s^2 - yz.sq) / 2;
y2 = (yz.sq*m^2 - yz.ms) / (m^2 - m);
z2 = yz.sq - y2;
# omega-weighted combination used to extract y linearly (see derivation);
yz.ms_m = - (m^2*(S^3 - 3*E2*S + 3*E3) + x*y2 + m*x*z2);
# linear extraction of y avoids the sign ambiguity of a square root;
y = (yz.s * (x^2 + yz) - yz.ms_m) / ((1-m)*(x^2 + yz));
z = (yz.s - y);
sol = cbind(x=as.vector(x), y=as.vector(y), z=as.vector(z))
return(sol);
}
### Examples
R = c(1, -1)
sol = solve.omega.P2(R)
### Test
# type="Pow2": Eq 2 is the plain sum of squares;
test.omega.P1(sol, R, pow=2, type="Pow2");
x = sol[,1]
# polynomial over a subset of x-roots (scaled to integer coefficients);
round0.p(poly.calc(x[c(1:4, 6,9)])) * 3
#########
### Ex 2: with A-type extension terms;
R = c(1, -1)
b.ext = c(1, 1)
sol = solve.omega.P2(R, b.ext=b.ext)
### Test
test.omega.P1(sol, R, pow=2, b.ext=b.ext, type="Pow2");
x = sol[,1]
round0.p(poly.calc(x)) * 9
#############
### Variants: Mixt
### Order 2
# x^2 + m*y^2 + m^2*z^2 = 0
# x*y^2 + y*z^2 + z*x^2 = R2
# x*y*z = R3
### Solution:
### Eq 1 =>
S^4 + E2^2 - 4*E2*S^2 + 6*E3*S # = 0
S^4 + E2^2 - 4*E2*S^2 + 6*R3*S # = 0
### Eq 2 =>
E3*S^3 - R2*E2*S - 6*E2*E3*S + E2^3 + 9*E3^2 + 3*R2*E3 + R2^2 # = 0
R3*S^3 - R2*E2*S - 6*E2*R3*S + E2^3 + R2^2 + 3*R2*R3 + 9*R3^2 # = 0
### Eq:
(729*R2*R3^5 + 486*R2^2*R3^4 + 189*R2^3*R3^3 + 54*R2^4*R3^2 + 9*R2^5*R3 + R2^6 + 729*R3^6) +
(162*R2*R3^4 - 513*R2^2*R3^3 - 252*R2^3*R3^2 - 63*R2^4*R3 + 2187*R3^5)*S^3 +
(- 6651*R2*R3^3 - 4086*R2^2*R3^2 - 633*R2^3*R3 - 27*R2^4 - 3969*R3^4)*S^6 +
(13608*R2*R3^2 + 8037*R2^2*R3 + 454*R2^3 + 847*R3^3)*S^9 +
(- 6291*R2*R3 - 3447*R2^2 - 5781*R3^2)*S^12 +
(900*R2 + 921*R3)*S^15 +
(- 64)*S^18
### E2:
E2Subst = 3*R2*R3 + R2^2 - 23*R3*S^3 + 9*R3^2 - 4*S^6;
E2div = R2*S + 12*R3*S - 15*S^4;
### Solver:
### Solves the Mixt variant: x^2 + m*y^2 + m^2*z^2 = 0;
### x*y^2 + y*z^2 + z*x^2 = R[1]; x*y*z = R[2].
### Returns (x, y, z) rows together with the corresponding S = x + y + z.
solve.omegaMixt.P2 = function(R, debug=TRUE) {
	R2 = R[1]; R3 = R[2]; # !!
	# non-zero coefficients of the degree-18 S-polynomial (descending powers);
	# only powers S^(3*k) occur, so 2 zeros sit between consecutive entries;
	cS = c(-64,
		900*R2 + 921*R3,
		- 6291*R2*R3 - 3447*R2^2 - 5781*R3^2,
		13608*R2*R3^2 + 8037*R2^2*R3 + 454*R2^3 + 847*R3^3,
		- 6651*R2*R3^3 - 4086*R2^2*R3^2 - 633*R2^3*R3 - 27*R2^4 - 3969*R3^4,
		162*R2*R3^4 - 513*R2^2*R3^3 - 252*R2^3*R3^2 - 63*R2^4*R3 + 2187*R3^5,
		729*R2*R3^5 + 486*R2^2*R3^4 + 189*R2^3*R3^3 + 54*R2^4*R3^2 + 9*R2^5*R3 + R2^6 + 729*R3^6)
	coeff = numeric(19)
	coeff[seq(1, 19, by=3)] = cS
	S = roots(coeff)
	if(debug) print(S);
	# E2 by elimination between the two S-equations (see derivation above);
	E2 = (3*R2*R3 + R2^2 - 23*R3*S^3 + 9*R3^2 - 4*S^6) /
		(R2*S + 12*R3*S - 15*S^4)
	# x: the 3 roots of t^3 - S*t^2 + E2*t - R3, one column per value of S;
	x = sapply(seq_along(S), function(id) roots(c(1, -S[id], E2[id], -R3)))
	### TODO: robust
	sol = solve.EnAll(x, n=3, max.perm=1)
	return(cbind(sol, S=as.vector(S)))
}
### Examples
R = c(1, -1)
sol = solve.omegaMixt.P2(R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
test.omega.P1(sol, R, pow=2, type="Mixt")
# polynomial over the S-values of a subset of solutions (scaled by 64);
round0.p(poly.calc(sol[13:18,4])) * 64
# TODO: factorize!
#######################
#######################
#######################
### Mixt Asymmetric ###
#######################
##########################
### Asymmetric Reduced ###
##########################
### Simple: Half Symmetric & Reduced
# x + b*y + b*z = 0
# x*y + x*z + y*z = R2
# x*y*z = R3
### Solution:
# - classic solution: x^3 + b*R2*x - b*R3 = 0;
# - non-classic solution: allows easy extensions;
### Eq 1 =>
b^2*S^3 + b*(b - 1)^2*E2*S - (b - 1)^3*E3 # = 0
### Solver:
### Solves the half-symmetric reduced system:
###   x + b*y + b*z = 0;  E2 = R[1] (+ ext);  E3 = R[2] (+ ext).
### b.ext = c(b21, b31): A-type extension terms b21*S, b31*S in Eqs 2 & 3.
### Returns a matrix with one (x, y, z) solution per row.
solve.AsymPart.S3P1 = function(R, b, b.ext = c(0, 0), debug=TRUE) {
	b1 = b[1]; bd = b1 - 1;
	R2 = R[1]; R3 = R[2];
	# cubic in S: b1^2*S^3 + b1*(b1-1)^2*E2*S - (b1-1)^3*E3 = 0;
	coeff = c(b1^2, 0, b1*bd^2*R2, - bd^3*R3)
	if(any(b.ext != 0)) {
		# correction for the extension terms;
		coeff = coeff + c(0, - b1*bd^2*b.ext[1], bd^3*b.ext[2], 0)
	}
	S = roots(coeff)
	if(debug) print(S)
	# effective E2, E3 after moving the extension terms over;
	E2 = R2 - S*b.ext[1];
	E3 = R3 - S*b.ext[2];
	# Eq 1 combined with y + z = S - x gives x*(b1 - 1) = b1*S;
	x = b1*S / bd;
	# y, z: the 2 roots of t^2 - (S - x)*t + E3/x = 0;
	s.yz = S - x;
	d.yz = sqrt(s.yz^2 - 4*E3/x + 0i)
	y = (s.yz + d.yz) / 2;
	z = s.yz - y;
	cbind(x=x, y=y, z=z)
}
### Examples:
R = c(2, -1)
b = 2
b.ext = c(-1, 1)
sol = solve.AsymPart.S3P1(R, b=b, b.ext=b.ext)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
S = x+y+z;
# Eq 1: should print zeros;
x + b[1]*y + b[1]*z
# Eqs 2 & 3 (with extensions): should print R[1] and R[2] respectively;
x*y + x*z + y*z + b.ext[1]*S # - R2
x*y*z + b.ext[2]*S # - R3
### Classic Polynomial: x => P3; (y,z) => P6;
poly.calc(c(y,z))
##########################
### Simple: Half Symmetric & Reduced
# x^2 + b*y^2 + b*z^2 = 0
# x*y + x*z + y*z = R2
# x*y*z = R3
### Solution:
# - non-classic solution: allows easy extensions;
### Eq 1 =>
b^2*(S^2 - 2*E2)^3 + b*(b - 1)^2*(E2^2 - 2*E3*S)*(S^2 - 2*E2) - (b - 1)^3*E3^2 # = 0
b^2*(S^2 - 2*E2)^3 - 2*b*(b - 1)^2*E3*S^3 + b*(b - 1)^2*E2^2*S^2 + 4*b*(b - 1)^2*E2*E3*S +
- 2*b*(b - 1)^2*E2^3 - (b - 1)^3*E3^2 # = 0
b^2*S^6 - 6*b^2*E2*S^4 - 2*b*(b - 1)^2*E3*S^3 + b*(b^2 + 10*b + 1)*E2^2*S^2 + 4*b*(b - 1)^2*E2*E3*S +
- 2*b*(b - 1)^2*E2^3 - 8*b^2*E2^3 - (b - 1)^3*E3^2 # = 0
### Solver:
### Solves: x^2 + b*y^2 + b*z^2 = 0; E2 = R[1]; E3 = R[2].
### b.ext: A-type extension -- NOT implemented yet (see TODO below).
solve.AsymPart.S3P2 = function(R, b, b.ext = c(0, 0), debug=TRUE) {
bd = b[1] - 1;
R2 = R[1]; R3 = R[2];
# degree-6 polynomial in S (see the derivation above);
coeff = c(b[1]^2, 0, - 6*b[1]^2*R2, - 2*b[1]*bd^2*R3,
b[1]*(b[1]^2 + 10*b[1] + 1)*R2^2, 4*b[1]*bd^2*R2*R3,
- 2*b[1]*bd^2*R2^3 - 8*b[1]^2*R2^3 - bd^3*R3^2)
if(any(b.ext != 0)) {
# TODO:
# NOTE(review): placeholder only -- adding c(0, 0) to the length-7 coeff
# vector is numerically a no-op and triggers a recycling warning; the real
# extension correction still needs deriving (cf. solve.AsymPart.S3P1).
coeff = coeff + c(0, 0)
}
S = roots(coeff)
if(debug) {print(coeff); print(S);}
len = length(S)
# effective E2, E3 (identity when b.ext == c(0, 0));
E2 = R2 - S*b.ext[1];
E3 = R3 - S*b.ext[2];
# x = sapply(seq(len), function(id) roots(coeff(1, -S[id], E2[id], -E3[id])))
# from Eq 1: x^2 = b*(S^2 - 2*E2) / (b - 1);
x2 = b[1]*(S^2 - 2*E2) / bd;
yz.s2 = S^2 - 2*E2 - x2;
# linear extraction of x (avoids the square-root sign ambiguity);
x = (x2*yz.s2 + x2*E2 + E3*S) / (E2*S - E3);
yz.s = S - x;
yz = E3 / x;
# y, z: the 2 roots of t^2 - yz.s*t + yz = 0;
yz.d = sqrt(yz.s^2 - 4*yz + 0i)
y = (yz.s + yz.d) / 2;
z = yz.s - y;
sol = cbind(x=as.vector(x), y=as.vector(y), z=as.vector(z), S=S)
return(sol)
}
### Examples:
R = c(2, -1)
b = 2
b.ext = c(0, 0)
sol = solve.AsymPart.S3P2(R, b=b, b.ext=b.ext)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
S = x+y+z;
x^2 + b[1]*y^2 + b[1]*z^2
x*y + x*z + y*z + b.ext[1]*S # - R2
x*y*z + b.ext[2]*S # - R3
#########
### Ex 2: S = 0 & P[5] for S
R = c(-1, 6)
b = 2
b.ext = c(0, 0)
sol = solve.AsymPart.S3P2(R, b=b, b.ext=b.ext)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
S = x+y+z;
x^2 + b[1]*y^2 + b[1]*z^2
x*y + x*z + y*z + b.ext[1]*S # - R2
x*y*z + b.ext[2]*S # - R3
round0.p(poly.calc(x))
round0.p(poly.calc(c(y, z)) / round0.p(poly.calc(c(y[1], z[1]))))
#######################
#######################
### Asymmetric
### x + b1*y + b2*z = 0
### x*y + x*z + y*z = R2
### x*y*z = R3
### Solution:
### Eq 1:
(x + b1*y + b2*z)*(b1*x + b2*y + z)*(b2*x + y + b1*z)*
(x + b2*y + b1*z)*(b2*x + b1*y + z)*(b1*x + y + b2*z) # = 0
b1^2*b2^2*(x^6 + y^6 + z^6) +
+ (b1^3*b2^2 + b1^3*b2 + b1^2*b2^3 + b1*b2^3 + b1^2*b2 + b1*b2^2) *
(x^5*y + x^5*z + y^5*x + y^5*z + z^5*x + z^5*y) +
+ (b1*b2 + b1*b2^2 + b1*b2^3 + b1*b2^4 + b1^2*b2 + 3*b1^2*b2^2 + b1^2*b2^3 + b1^3 + b1^3*b2 + b1^3*b2^2 +
+ b1^3*b2^3 + b1^4*b2 + b2^3) * (x^4*y^2 + x^4*z^2 + y^4*x^2 + y^4*z^2 + z^4*x^2 + z^4*y^2) +
+ (2*b1*b2 + 2*b1*b2^2 + 2*b1*b2^3 + 2*b1*b2^4 + b1^2 + 2*b1^2*b2 + 6*b1^2*b2^2 + 2*b1^2*b2^3 +
+ b1^2*b2^4 + 2*b1^3*b2 + 2*b1^3*b2^2 + b1^4 + 2*b1^4*b2 + b1^4*b2^2 + b2^2 + b2^4) *
x*y*z*(x^3 + y^3 + z^3) +
+ (2*b1*b2^2 + 2*b1*b2^3 + b1^2 + 2*b1^2*b2 + 2*b1^2*b2^2 + 2*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3*b2 +
+ 2*b1^3*b2^2 + b1^4 + b1^4*b2^2 + b2^2 + b2^4) * (x^3*y^3 + x^3*z^3 + y^3*z^3) +
+ (b1 + 2*b1*b2 + 5*b1*b2^2 + 5*b1*b2^3 + 2*b1*b2^4 + b1*b2^5 + b1^2 + 5*b1^2*b2 + 6*b1^2*b2^2 +
+ 5*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3 + 5*b1^3*b2 + 5*b1^3*b2^2 + 2*b1^3*b2^3 + b1^4 + 2*b1^4*b2 +
+ b1^4*b2^2 + b1^5 + b1^5*b2 + b2 + b2^2 + 2*b2^3 + b2^4 + b2^5) *
x*y*z*(x^2*y + x^2*z + y^2*x + y^2*z + z^2*x + z^2*y) +
+ (1 + 6*b1*b2 + 6*b1*b2^2 + 6*b1*b2^3 + 6*b1*b2^4 + 3*b1^2 + 6*b1^2*b2 + 9*b1^2*b2^2 + 6*b1^2*b2^3 +
+ 3*b1^2*b2^4 + 2*b1^3 + 6*b1^3*b2 + 6*b1^3*b2^2 + 2*b1^3*b2^3 + 3*b1^4 + 6*b1^4*b2 + 3*b1^4*b2^2 +
+ b1^6 + 3*b2^2 + 2*b2^3 + 3*b2^4 + b2^6) * (x*y*z)^2
b1^2*b2^2*(- 2*E2^3 + 3*E3^2 - 12*E2*E3*S + 9*E2^2*S^2 + 6*E3*S^3 - 6*E2*S^4 + S^6) +
+ (b1^3*b2^2 + b1^3*b2 + b1^2*b2^3 + b1*b2^3 + b1^2*b2 + b1*b2^2) *
((S^5 - 5*E2*S^3 + 5*E3*S^2 + 5*E2^2*S - 5*E2*E3)*S +
- (- 2*E2^3 + 3*E3^2 - 12*E2*E3*S + 9*E2^2*S^2 + 6*E3*S^3 - 6*E2*S^4 + S^6)) +
+ (b1*b2 + b1*b2^2 + b1*b2^3 + b1*b2^4 + b1^2*b2 + 3*b1^2*b2^2 + b1^2*b2^3 + b1^3 + b1^3*b2 + b1^3*b2^2 +
+ b1^3*b2^3 + b1^4*b2 + b2^3) * (E2^2*S^2 - 2*E3*S^3 - 2*E2^3 + 4*E2*E3*S - 3*E3^2) +
+ (2*b1*b2 + 2*b1*b2^2 + 2*b1*b2^3 + 2*b1*b2^4 + b1^2 + 2*b1^2*b2 + 6*b1^2*b2^2 + 2*b1^2*b2^3 +
+ b1^2*b2^4 + 2*b1^3*b2 + 2*b1^3*b2^2 + b1^4 + 2*b1^4*b2 + b1^4*b2^2 + b2^2 + b2^4) *
E3*(S^3 - 3*E2*S + 3*E3) +
+ (2*b1*b2^2 + 2*b1*b2^3 + b1^2 + 2*b1^2*b2 + 2*b1^2*b2^2 + 2*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3*b2 +
+ 2*b1^3*b2^2 + b1^4 + b1^4*b2^2 + b2^2 + b2^4) * (E2^3 - 3*E3*E2*S + 3*E3^2) +
+ (b1 + 2*b1*b2 + 5*b1*b2^2 + 5*b1*b2^3 + 2*b1*b2^4 + b1*b2^5 + b1^2 + 5*b1^2*b2 + 6*b1^2*b2^2 +
+ 5*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3 + 5*b1^3*b2 + 5*b1^3*b2^2 + 2*b1^3*b2^3 + b1^4 + 2*b1^4*b2 +
+ b1^4*b2^2 + b1^5 + b1^5*b2 + b2 + b2^2 + 2*b2^3 + b2^4 + b2^5) *
E3*(E2*S - 3*E3) +
+ (1 + 6*b1*b2 + 6*b1*b2^2 + 6*b1*b2^3 + 6*b1*b2^4 + 3*b1^2 + 6*b1^2*b2 + 9*b1^2*b2^2 + 6*b1^2*b2^3 +
+ 3*b1^2*b2^4 + 2*b1^3 + 6*b1^3*b2 + 6*b1^3*b2^2 + 2*b1^3*b2^3 + 3*b1^4 + 6*b1^4*b2 + 3*b1^4*b2^2 +
+ b1^6 + 3*b2^2 + 2*b2^3 + 3*b2^4 + b2^6) * E3^2
b1^2*b2^2*(- 2*E2^3 + 3*E3^2 - 12*E2*E3*S + 9*E2^2*S^2 + 6*E3*S^3 - 6*E2*S^4 + S^6) +
+ (b1^3*b2^2 + b1^3*b2 + b1^2*b2^3 + b1*b2^3 + b1^2*b2 + b1*b2^2) *
((S^5 - 5*E2*S^3 + 5*E3*S^2 + 5*E2^2*S - 5*E2*E3)*S +
- (- 2*E2^3 + 3*E3^2 - 12*E2*E3*S + 9*E2^2*S^2 + 6*E3*S^3 - 6*E2*S^4 + S^6)) +
+ (b1*b2 + b1*b2^2 + b1*b2^3 + b1*b2^4 + b1^2*b2 + 3*b1^2*b2^2 + b1^2*b2^3 + b1^3 + b1^3*b2 + b1^3*b2^2 +
+ b1^3*b2^3 + b1^4*b2 + b2^3) * (E2^2*S^2 - 2*E3*S^3 - 2*E2^3 + 4*E2*E3*S - 3*E3^2) +
+ (2*b1*b2 + 2*b1*b2^2 + 2*b1*b2^3 + 2*b1*b2^4 + b1^2 + 2*b1^2*b2 + 6*b1^2*b2^2 + 2*b1^2*b2^3 +
+ b1^2*b2^4 + 2*b1^3*b2 + 2*b1^3*b2^2 + b1^4 + 2*b1^4*b2 + b1^4*b2^2 + b2^2 + b2^4) *
E3*(S^3 - 3*E2*S + 3*E3) +
+ (2*b1*b2^2 + 2*b1*b2^3 + b1^2 + 2*b1^2*b2 + 2*b1^2*b2^2 + 2*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3*b2 +
+ 2*b1^3*b2^2 + b1^4 + b1^4*b2^2 + b2^2 + b2^4) * (E2^3 - 3*E3*E2*S + 3*E3^2) +
+ (b1 + 2*b1*b2 + 5*b1*b2^2 + 5*b1*b2^3 + 2*b1*b2^4 + b1*b2^5 + b1^2 + 5*b1^2*b2 + 6*b1^2*b2^2 +
+ 5*b1^2*b2^3 + b1^2*b2^4 + 2*b1^3 + 5*b1^3*b2 + 5*b1^3*b2^2 + 2*b1^3*b2^3 + b1^4 + 2*b1^4*b2 +
+ b1^4*b2^2 + b1^5 + b1^5*b2 + b2 + b2^2 + 2*b2^3 + b2^4 + b2^5) *
E3*(E2*S - 3*E3) +
+ (1 + 6*b1*b2 + 6*b1*b2^2 + 6*b1*b2^3 + 6*b1*b2^4 + 3*b1^2 + 6*b1^2*b2 + 9*b1^2*b2^2 + 6*b1^2*b2^3 +
+ 3*b1^2*b2^4 + 2*b1^3 + 6*b1^3*b2 + 6*b1^3*b2^2 + 2*b1^3*b2^3 + 3*b1^4 + 6*b1^4*b2 + 3*b1^4*b2^2 +
+ b1^6 + 3*b2^2 + 2*b2^3 + 3*b2^4 + b2^6) * E3^2
# TODO: reduce the expansion above to a single polynomial in S.
# NOTE(review): the two E2/E3 stanzas immediately above appear twice verbatim;
# the duplicate should be removed or replaced by the next reduction step.
### Eq 1: alternative approach
(x + b1*y + b2*z)*(b1*x + b2*y + z)*(b2*x + y + b1*z) # = 0
b1*b2*(x^3 + y^3 + z^3) + (b1^3 + b2^3 + 3*b1*b2 + 1)*x*y*z +
+ (b1^2 + b1*b2^2 + b2)*(x*y^2 + y*z^2 + x^2*z) +
+ (b1^2*b2 + b2^2 + b1)*(y^2*z + x*z^2 + x^2*y)
b1*b2*(S^3 - 3*E2*S + 3*E3) + (b1^3 + b2^3 + 3*b1*b2 + 1)*E3 +
+ (b1^2 + b1*b2^2 + b2)*(x*y^2 + y*z^2 + x^2*z + y^2*z + x*z^2 + x^2*y) +
- (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1)*(y^2*z + x*z^2 + x^2*y)
b1*b2*(S^3 - 3*E2*S + 3*E3) + (b1^3 + b2^3 + 3*b1*b2 + 1)*E3 +
+ (b1^2 + b1*b2^2 + b2)*(E2*S - 3*E3) +
- (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1)*(y^2*z + x*z^2 + x^2*y)
# Eq 1-bis
b1*b2*S^3 + (b1^3 + b2^3 - 3*b1^2 - 3*b1*b2^2 + 6*b1*b2 - 3*b2 + 1)*E3 +
+ (b1^2 + b1*b2^2 - 3*b1*b2 + b2)*E2*S +
- (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1)*(y^2*z + x*z^2 + x^2*y)
# (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1) == (b2 - b1)*(b2 - 1)*(b1 - 1)
### =>
(b1*b2*S^3 + (b1^3 + b2^3 - 3*b1^2 - 3*b1*b2^2 + 6*b1*b2 - 3*b2 + 1)*E3 +
+ (b1^2 + b1*b2^2 - 3*b1*b2 + b2)*E2*S)*(x*y^2 + y*z^2 + x^2*z) +
- (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1)*
(y^2*z + x*z^2 + x^2*y)*(x*y^2 + y*z^2 + x^2*z)
# Eq 2-bis
(b1*b2*S^3 + (b1^2 + b1*b2^2 - 3*b1*b2 + b2)*E2*S +
+ (b1^3 + b2^3 - 3*b1^2 - 3*b1*b2^2 + 6*b1*b2 - 3*b2 + 1)*E3) *
(x*y^2 + y*z^2 + x^2*z) +
- (b1^2 + b1*b2^2 + b2 - b1^2*b2 - b2^2 - b1)*
(E3*S^3 + E2^3 - 6*E3*E2*S + 9*E3^2)
### Sum => ...
|
f00dc7398ea698cffa2bd312b0708e0ecd23b9c3 | b1a12b171097fcb0b2a6f7a10e0ab7afdf41aac1 | /man/dataSet.Rd | 2a7d0727aa3cf1d8b21d15c77735459771cb3394 | [] | no_license | myndworkz/rAmCharts | 7e1d66002cbca9ef63e1d2af6b4e49a1ac7cd3c3 | 6ea352cab2c9bc5f647447e5e7d902d9cbec0931 | refs/heads/master | 2021-01-14T13:06:28.947936 | 2015-07-29T12:34:13 | 2015-07-29T12:34:13 | 39,955,321 | 1 | 0 | null | 2015-07-30T14:37:43 | 2015-07-30T14:37:43 | null | UTF-8 | R | false | false | 502 | rd | dataSet.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/DataSet.R
\name{dataSet}
\alias{dataSet}
\title{Constructor}
\usage{
dataSet(compared = FALSE, dataProvider, fieldMappings, stockEvents, ...)
}
\arguments{
\item{...}{Properties of DataSet.
See \url{http://docs.amcharts.com/3/javascriptcharts/DataSet}}
}
\value{
A \code{\linkS4class{DataSet}} object
}
\description{
Constructor for \code{\linkS4class{DataSet}} objects.
}
\examples{
dataSet( categoryField = "categoryField" )
}
|
c1753ba8eb3a25bb9df222d9f754d2f7450aa43f | 5150cf610a34c6c5be9b598277db1834d8fb16b4 | /man/create_capture_WYT.Rd | de987148a97c6f41c16c90062340982596c5324d | [] | no_license | SPI-Birds/pipelines | f3ab78668e526a47bd298b0f7f4127e274a4dfd0 | cb4bd41bc26d991fa54e520bb15b54333696b4cb | refs/heads/master | 2023-08-16T18:15:29.835023 | 2023-08-09T09:51:56 | 2023-08-09T09:51:56 | 153,275,927 | 0 | 3 | null | 2022-12-04T14:48:00 | 2018-10-16T11:42:17 | R | UTF-8 | R | false | true | 744 | rd | create_capture_WYT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_WYT.R
\name{create_capture_WYT}
\alias{create_capture_WYT}
\title{Create capture data table for Wytham Woods}
\usage{
create_capture_WYT(db, Brood_data, species_filter)
}
\arguments{
\item{db}{Location of primary data from Wytham Woods.}
\item{Brood_data}{Brood data generated by \code{\link{create_brood_WYT}}.}
\item{species_filter}{Species of interest. The 6 letter codes of all the species of
interest as listed in the
\href{https://github.com/SPI-Birds/documentation/blob/master/standard_protocol/SPI_Birds_Protocol_v1.0.0.pdf}{standard
protocol}.}
}
\value{
A data frame with Capture data
}
\description{
Create capture data table for Wytham Woods
}
|
f9d0b43a63b69351e592408a055616c9ec51e21f | cd82d5071502b71c821557e9c597015c80f3ace7 | /RStudio/src/01-lectura_y_limpieza.R | db3a1ed42a7f2704e1c7eb86db1d7d02b4f23e26 | [] | no_license | haro-ca/demo_IDEs | ffc701dc1b4d700805c902f519d2eac89fef3f49 | 873b91087d054271c762727bd52f83f773276175 | refs/heads/main | 2023-03-08T07:54:19.861200 | 2021-02-24T05:26:59 | 2021-02-24T05:26:59 | 341,787,837 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 121 | r | 01-lectura_y_limpieza.R | library(tidyverse)
# Export only the Premium-cut diamonds to a clean CSV file.
write_csv(filter(diamonds, cut == "Premium"), "data/diamonds_clean.csv")
|
9d7b904ca1c06d910668a56cbc714dc7de61cb51 | 3733b8c50f53b88eabeeb61d3fa324c840cf6ac6 | /R/autoplot.R | 4560747df8f5892d204ecd8a74c5599b090cbe5d | [] | no_license | RobertWSmith/forecastR | d7465ce9248a6d4971569545f80737cddb6cc629 | ce44c8636399ad8532eac8b058d6cc2366bd766b | refs/heads/master | 2021-01-12T16:33:54.413357 | 2016-10-31T16:24:40 | 2016-10-31T16:24:40 | 71,411,749 | 0 | 0 | null | 2016-10-31T16:20:36 | 2016-10-20T00:46:59 | R | UTF-8 | R | false | false | 1,249 | r | autoplot.R | ## autoplot.R
#' Automatic plotting for time series objects
#'
#' @param object a \code{ts} or \code{mts} object
#' @param title character. If provided, is passed as an argument to
#' \code{\link[ggplot2]{ggtitle}}
#' @param ... other arguments
#'
#' @importFrom ggplot2 autoplot ggplot geom_line ggtitle aes_string
#' @importFrom broom tidy
#'
#' @export
#'
#' @examples
#' library(ggplot2)
#' library(forecastR)
#'
#' ap <- ts.split(AirPassengers, as.list = FALSE)
#' ##autoplot(ap, title = "Air Passengers Subsetting")
# autoplot.ts <- function(object, title = NULL, ...)
# {
# obj <- .tidy.ts(object, ...)
# gg <- ggplot2::ggplot(obj, ggplot2::aes_string(x = "time", y = "value",
# color = "variable")) +
# ggplot2::geom_line()
#
# if (frequency(object) == 12L)
# {
# obj$date <- as.Date(ISOdate(as.integer(obj$time), obj$cycle, 1))
# gg <- ggplot2::ggplot(obj, ggplot2::aes_string(x = "date", y = "value",
# color = "variable")) +
# ggplot2::geom_line()
# }
#
# if (!is.null(title))
# gg <- gg + ggplot2::ggtitle(title)
# print(gg)
# return(gg)
# }
#
# autoplot.mts <- autoplot.ts.split <- autoplot.ts
|
4de3acc6cbf245c1dcc6212582d26a3da1133323 | 46bafa4a6d95021848bd979497b2f7ae6d3de543 | /MakePhenologyMaps.R | 6a01e557ffcae8c792499db4a74b0ba35036e0ab | [] | no_license | fanyanghenu/PhenologyLi | ac5d3f12a37c9cd8225f3d4b7e88aeb36364ba19 | a6510829b4e7c91553b22c50e1b4314f869d78dd | refs/heads/master | 2021-10-09T23:33:43.753967 | 2019-01-03T11:09:04 | 2019-01-03T11:09:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,346 | r | MakePhenologyMaps.R | # **************************************************
# MAKE PHENOLOGY MAPS
PhenologyMap <- function(df){
  # Phenology map for one species: value over day-of-year, colored by
  # phenological stage, one panel per turf, titled with the species name.
  base_plot <- ggplot(df, aes(x = doy, y = value, color = pheno.stage))
  layered <- base_plot +
    geom_line() +
    geom_point() +
    facet_wrap(~ turfID)
  layered +
    theme_minimal() +
    ggtitle(unique(df$species))
}
## plot maps
# Build one phenology map per species for 2016: reshape the stage counts
# (bud/flower/seed/ripe) to long format, keep positive counts only, and store
# one ggplot per species in the `pheno.maps` list column.
# NOTE(review): assumes a data frame `pheno` exists in the workspace.
phenoMaps2016 <- pheno %>%
  filter(year == "2016") %>%
  select(turfID, species, date, doy, origSite, destSite, block, treatment, bud, flower, seed, ripe) %>%
  gather(key = pheno.stage, value = value, bud, flower, seed, ripe) %>% # make variable pheno.stage
  filter(value > 0) %>%
  group_by(species) %>%
  do(pheno.maps = PhenologyMap(.))
# Write every species map to its own page of a single PDF.
pdf(file = "Phenologymaps2016.pdf")
phenoMaps2016$pheno.maps
dev.off()
# Same pipeline as above for the 2017 season.
phenoMaps2017 <- pheno %>%
  filter(year == "2017") %>%
  select(turfID, species, date, doy, origSite, destSite, block, treatment, bud, flower, seed, ripe) %>%
  gather(key = pheno.stage, value = value, bud, flower, seed, ripe) %>% # make variable pheno.stage
  filter(value > 0) %>%
  group_by(species) %>%
  do(pheno.maps = PhenologyMap(.))
## Now open up a pdf file and write all the plots out as separate pages
## the output pdf will be located in the getwd() directory named 'Rplots.pdf'
##
pdf(file = "Phenologymaps2017.pdf")
phenoMaps2017$pheno.maps
dev.off()
# ************************************************** |
92fb16a43c77a37bdfda501d2cd55b0d996120fc | a1674464522c2868112925fcafcfa8817eedde42 | /full_script.R | 34973cb24d579e1984e26e4f13f1dd84480c7422 | [] | no_license | timelfrinkmob/hack | 007046196055bd3e991a7ccca12f5447e1fa9f1f | 06e0203cdac12bed0543c23f5ec6161c366b87fd | refs/heads/master | 2021-01-21T23:29:04.569905 | 2017-06-25T12:48:36 | 2017-06-25T12:48:36 | 95,248,058 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,204 | r | full_script.R | library(readr)
library(stringr)
library(magrittr)
# --- Configuration -----------------------------------------------------------
# Path to the crawler jar and the shell command prefix used to invoke it.
jar <- "news-crowler/target/sourcer-0.0.1-SNAPSHOT.jar"
java_command_base <- paste("java -jar", jar)

# Determine the input file: take the command-line arguments, dropping
# interpreter flags and macOS framework paths.
cmd_args <- commandArgs()
cmd_args <- cmd_args[!str_detect(string = cmd_args, pattern = "^-|^/Library")]
# NOTE(review): the CLI argument is overridden with a hard-coded file name
# here (apparently leftover from development); drop this line to honour the
# actual command-line argument.
cmd_args <- "urls"

# Skip the first 5 header lines of the input file; every remaining row is a
# URL. tail(-5) returns an empty vector for files with <= 5 lines, whereas the
# previous `.[6:length(.)]` produced NAs / reversed indices in that case.
data <- readLines(cmd_args) %>% tail(-5)

#all rows are assumed to be URLs
for(url in data){
  # Hand the current URL to the crawler via a scratch file.
  file.create("tmp")
  write(x = url, file = "tmp")
  system(paste(java_command_base, "tmp"))

  # The crawler writes a pipe-delimited CSV next to the scratch file.
  content_csv <- read.csv(file = "tmp.csv", header = T, sep = "|", stringsAsFactors = F)
  title <- content_csv$Title

  # Look up related links for the article title.
  write(x = title, file = "tmp")
  system(command = "Rscript url-finder/starterScript.R tmp")
  related_links <- paste0("tmp", title, "_output") %>% str_replace_all(pattern = " |/", replacement = "_")

  # Crawl the related links; the crawler appends ".csv" to its input name.
  system(paste(java_command_base, related_links))
  related_content_csv <- paste0(related_links, ".csv")

  # FIX: readr::write_delim() takes `delim =`, not `sep =`; the old call did
  # not produce a pipe-delimited file.
  write_delim(x = content_csv, path = "tmp", delim = "|")

  # related_content <- read_delim(related_content_csv, "|", escape_double = FALSE, trim_ws = TRUE)
  # system(command = paste("Rscript sentiment_correlations.R", "tmp.csv", related_content_csv))
  # sentiments <- read_csv("tmp_correlations")
  # system(command = paste("Rscript sentence_correlations.R", "tmp.csv", related_content_csv))
  # sentences <- read_csv("tmp_sentence_correlations")

  # Build a per-URL results directory name from the URL path.
  path <- str_replace(url, pattern = ".+\\.[a-z]+/", replacement = "") %>% paste0("res_", .) %>%
    str_replace_all(pattern = "/", replacement = "_") %>% str_replace_all(pattern = "\"", replacement = "_")
  dir.create(path = path)

  # file.copy(from = "tmp_correlations", to = paste0(path, "/", "related_sentiment_correlations"))
  # file.copy(from = "tmp_sentence_correlations", to = paste0(path, "/", "related_sentence_correlations"))
  file.copy("tmp.csv", paste0(path, "/", "original_raw"))
  file.copy(related_content_csv, paste0(path, "/", "related_raw"))

  # Clean up scratch files before the next URL.
  file.remove("tmp")
  file.remove("tmp_output")
  file.remove("tmp.csv")
  file.remove(related_links)
  file.remove(paste0(related_links, ".csv"))
  # file.remove("tmp_sentence_correlations")
  # file.remove("tmp_correlations")
}
|
7eab4c92c8bf0b74a3c35f544cfe71037c472047 | 85a12881f5e0f7f2e23c543d2cf2ff3dd1d520c6 | /pmRppm.R | ca7f7a48baebbbc7284cf83773c1fbad8cf9f5d3 | [] | no_license | katewolfer/pmR | a48c2ce851cbfd705a482eab9007771745bd25f4 | 7e19d6257d76889e430a79536f7a26cf3c9fe718 | refs/heads/main | 2023-08-18T16:27:17.285764 | 2021-10-20T16:33:39 | 2021-10-20T16:33:39 | 204,450,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,394 | r | pmRppm.R | ##################################################
## ppm function for calculating mass accuracy ##
## Kate Wolfer, Institute of Hepatology, London ##
## 06 December 2018 ##
##################################################
## INPUT
## targetValues: a vector of target mass (m/z) values representing a list of MS features of interest
## observedValues: a vector of the observed/measured m/z values
## OUTPUT
## appendedTable: dataframe of target m/z values, lower mass bound, upper mass bound
## measured mass value, calculated ppm value
# Mass-accuracy (ppm) calculation for a list of target m/z values.
#
# Arguments:
#   ppm            - mass tolerance in parts per million used for the window
#   targetValues   - numeric vector of target (theoretical) m/z values
#   observedValues - numeric vector of measured m/z values, same length
#
# Returns a data frame with columns targetValues, lowerMass, upperMass,
# observedValues, calculated_ppm, where calculated_ppm is the signed mass
# error (observed - target) / target * 1e6.
#
# FIXES vs. the original version:
#  * calculated_ppm was referenced in the returned data frame but never
#    computed (guaranteed "object not found" error); it is now calculated.
#  * a loop over undefined globals `importMasses`/`matchList` built a table
#    (`filterTable`) that was never used and crashed the function; it has
#    been removed.
#  * the mass window is computed vectorized instead of element by element.
ppmCalculation <- function(ppm, targetValues, observedValues) {
  # Symmetric mass window of +/- ppm around each target value.
  massRange <- targetValues * (ppm / 1000000)
  lowerMass <- targetValues - massRange
  upperMass <- targetValues + massRange
  # Signed mass error of each observation, in ppm.
  calculated_ppm <- (observedValues - targetValues) / targetValues * 1000000
  appendedTable <- data.frame(targetValues, lowerMass, upperMass, observedValues, calculated_ppm)
  return(appendedTable)
}
30989ae79b84f959e262327ec8ccffed3c2a23d6 | fbd40ec4546e66a6ea920d564796b94f84ca1340 | /RM_Miss_TypeIerror.R | a84c19bb796677ce944c8d3799416edf31d30cd4 | [] | no_license | KerstinRubarth/RM_Miss | b214095ee2758d5cc7c036d6c40bea761f561f3c | 6869b6b088dc24f511536f935e87c4e2663c9da6 | refs/heads/main | 2023-03-08T09:23:31.739133 | 2021-02-11T14:39:46 | 2021-02-11T14:39:46 | 338,023,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,877 | r | RM_Miss_TypeIerror.R | #---Simulate Repeated Measures Designs with Missing Values-------#
library(mvtnorm)
library(MASS)
library(Matrix)
library(psych)
library(multcomp)
library(mice)
#----Necessary: Empriical Distribution function adjusted for missings-------#
Fihat=function(x,xii,lambdaakt,lambdaiakt){
#--count function---#
Fihatk=0
Fihatk0=which(lambdaakt==0)
Fihatk1=which(lambdaakt==1)
Fihatk[Fihatk0] = 0
Fihatk[Fihatk1] = (x>xii[Fihatk1])+1/2*(x==xii[Fihatk1])
1/lambdaiakt*sum(Fihatk)
}
# Domhof Procedure
Domhof <- function(y, lambda){
n <- dim(y)[1]
d <- dim(y)[2]
y_NA <- y
for(j in 1:d){
for(i in 1:n){
if(lambda[i,j] == 0) y_NA[i,j] = NA
}
}
# Ranks
R <- matrix(rank(y_NA, na.last = "keep"), ncol = d)
N <- sum(lambda)
# Relative effects
p <- (R-0.5)/N
p <- colMeans(p, na.rm = T)
# Covariance matrix
lambda_d <- colSums(lambda)
# Covariance matrix formula
V <- matrix(rep(0,d*d), ncol = d)
for(i in 1:d){
for(j in 1:d){
if(i == j) V[i,j] <- n/lambda_d[i]*var(R[,i], na.rm = T)/N^2
#V[i,j] <- (lambda_d[i]-1)*var(R[,i], na.rm = T)*n/(N^2*lambda_d[i]*(lambda_d[i]-1))
if(i != j){
V[i,j] <- n/(N^2*(lambda_d[i]-1)*(lambda_d[j]-1) + n*sum(lambda[,i] == lambda[,j]&lambda[,i]==1)-1)*cov(R[,i], R[,j], use = "pairwise.complete.obs")*(sum(lambda[,i] == lambda[,j]&lambda[,i]==1)-1)
# V[i,j] <- 1/(n*d*((lambda_d[i]-1)*(lambda_d[j]-1) + n*d*sum(lambda[,i] == lambda[,j]& lambda[,i] ==1)-1))*cov(R[,i], R[,j], use = "pairwise.complete.obs")*(sum(lambda[,i] == lambda[,j]& lambda[,i] ==1)-1)
}
}
}
#if(sum(is.na(V)) > 0){
# nsim <- nsim -1
# next
#}
if(sum(is.na(V)) > 0){
erg_WTS_dom <- NA
erg_ATS_dom <- NA
}else{
C <- diag(1,d) - matrix(rep(1/d, d*d), ncol = d)
# WTS
WTS = n*t(p)%*%t(C)%*%ginv(C%*%V%*%t(C))%*%C%*%p
critWTS <- qchisq(0.95, df = rankMatrix(C))
erg_WTS_dom <- (WTS > critWTS)
# ATS
M <- t(C)%*%ginv(C%*%t(C))%*%C
ATS <-n * (tr(M%*%V))/(tr(M%*%V%*%M%*%V)) * t(p)%*%M%*%p
df_chisq_ATS <- tr(M%*%V)^2/tr(M%*%V%*%M%*%V)
crit_ATS <- qchisq(0.95, df = df_chisq_ATS)
erg_ATS_dom <- (ATS > crit_ATS)
}
result_dom <- c(erg_WTS_dom, erg_ATS_dom)
return(result_dom)
}
# Add alternative degrees of freedom calculation here
# Original Test Procedures
# Rank-based test procedures for repeated measures with missing values
# (the paper's proposed "all available cases" approach).
#
# Arguments:
#   y      - n x d numeric matrix of repeated measurements
#   lambda - n x d 0/1 indicator matrix; 1 = observed, 0 = missing
#
# Returns c(WTS, ATS, MCTP, ATS2): logical rejection indicators at level 0.05
# (all NA when the estimated covariance is degenerate). Uses Fihat() defined
# above, plus ginv (MASS), rankMatrix (Matrix), isCorrelation (psych) and
# pmvt (mvtnorm/multcomp) from the packages loaded at the top of this file.
Original <- function(y, lambda){
  n <- dim(y)[1]
  d <- dim(y)[2]
  # Number of missing observations for each timepoint
  lambdai=colSums(lambda)
  # Helping matrixes/vectors: (i1, j1) enumerates all ordered pairs of
  # time points; ihelp1 marks the diagonal pairs i == j.
  aa=expand.grid(1:d,1:d)
  i1=aa[,2]
  j1=aa[,1]
  ihelp1=which(i1==j1)
  # Calculate all pairwise F_i(X_jk): column j of `pairwise` holds the
  # adjusted EDF of time point j1[j] evaluated at the observations of i1[j];
  # unobserved cells stay 0.
  pairwise=matrix(0,nrow=n,ncol=d*d)
  ncp=d*d
  for(i in 1:n){
    for(j in 1:ncp) {
      if(lambda[i,i1[j]]==0){pairwise[i,j] =0}
      if(lambda[i,i1[j]]==1){pairwise[i,j]=Fihat(y[i,i1[j]],y[,j1[j]],lambda[,j1[j]],lambdai[j1[j]]) }
    }
  }
  # Pairwise relative effects p_ij
  lambdaivec=lambdai[i1]
  pij = colSums(pairwise)/lambdaivec
  #--------------------------Compute now the Fi(Xik)------------------------------#
  FiXi=cbind(pairwise[,ihelp1])
  # Expectation of F_i(X_ik) is 1/2 at observed cells.
  FiXiE = lambda*matrix(rep(1/2,d),nrow=n,ncol=d,byrow=TRUE)
  #---------------------------Compute now G(Xik)----------------------------------#
  # G = unweighted mean of the d EDFs, evaluated at each observation.
  GXik = matrix(0,nrow=n,ncol=d)
  s=1:d
  # (s is currently unused.)
  for(i in 1:d){
    ihelp=which(i1==i)
    GXik[,i] = 1/d*(rowSums(pairwise[,ihelp]))
  }
  # vector of relative effects
  phatvec = colSums(GXik)/lambdai
  # Expectation of G(Xik)
  GXikE = lambda*matrix(phatvec,nrow=n,ncol=d,byrow=TRUE)
  #----------------------Compute now all pairwise--------------------------------#
  # Helping matrices
  mathelp=matrix(0,nrow=n,ncol=d)
  mathelpE=matrix(0,nrow=n,ncol=d)
  # Help terms for last sum (s != i)
  for(j in 1:d){
    sakt=which(j1==j & i1!=j)
    mathelp[,j]=1/d*(rowSums(pairwise[,sakt]/matrix(rep(lambdai[i1[sakt]],n), nrow = n, byrow = T)))
    mathelpE[,j] = (rowSums(matrix(pij[sakt]/lambdai[i1[sakt]],nrow=n,ncol=(d-1),byrow=TRUE)*lambda[,i1[sakt]]))
  }
  # Psi_ik and Expectation of Psi_ik (influence-function terms used for the
  # empirical covariance estimate below).
  Psik = n*(1/matrix(rep(lambdai,n), nrow = n, byrow = T)*(GXik-1/d*FiXi))-n*mathelp
  PsikE = n*(1/matrix(rep(lambdai,n), nrow = n, byrow = T)*(GXikE - 1/d*FiXiE))-1/d*n*mathelpE
  # Covariance matrix
  Psikvec = Psik - PsikE
  Vhat= t(Psikvec)%*%Psikvec/((n-1))
  # Contrast matrix (centering matrix: tests equality of relative effects)
  C <- diag(d)-1/d
  nc <- nrow(C)
  Cphatvec=C%*%phatvec
  CVC = C%*%Vhat%*%t(C)
  # Degenerate data set: report NA for every statistic so the caller's
  # na.rm = TRUE aggregation simply skips this run.
  if(sum(Vhat==0) > 0 | sum(is.na(CVC))>0 |isCorrelation(cov2cor(CVC)) == F) {
    erg_WTS <- NA
    erg_ATS <- NA
    erg_ATS2 <- NA
    erg_MCTP <- NA
  } else{
    #----------------------Testing--------------------------------#
    # Check wether dataset is degenerate
    #if(sum(lambdai==0) > 0 | sum(diag(CVC)<=0) > 0 |isCorrelation(cov2cor(C%*%Vhat%*%t(C))) == F ){
    #if(sum(diag(CVC)<=0) > 0 ){
    #  #nsim <- nsim -1
    #  break
    #}
    # WTS: Wald-type statistic, chi-square with df = rank(C).
    WTS = n*t(phatvec)%*%t(C)%*%ginv(C%*%Vhat%*%t(C))%*%C%*%phatvec
    critWTS <- qchisq(0.95, df = rankMatrix(C))
    erg_WTS= (WTS > critWTS)
    # ATS: ANOVA-type statistic with Box-type df, chi-square approximation.
    trTV=sum(c(diag(CVC)))
    trTV2=trTV^2
    trTVTV=sum(c(diag(CVC%*%CVC)))
    dfF=trTV2/trTVTV
    ATS=n*t(Cphatvec)%*%Cphatvec/trTV*dfF
    crit_ATS=qchisq(0.95,dfF)
    erg_ATS=(ATS>crit_ATS)
    # MCTP: max-type multiple contrast test via the multivariate t distribution.
    Test <- sqrt(n)*C%*%phatvec/sqrt(c(diag(C%*%Vhat%*%t(C))))
    T0 <- max(abs(Test))
    test1 = 1- pmvt(lower = -T0,upper = T0, delta=rep(0,nc), corr = cov2cor(C%*%Vhat%*%t(C)), df = n-1)[1]
    erg_MCTP = (0.05 > test1)
    # ATS with new degree of freedom estimation (F approximation).
    ATS2 <- n*t(Cphatvec)%*%Cphatvec/trTV
    crit_ATS2 <- qf(0.95, df1 = dfF, df2 = (n-1)*dfF)
    erg_ATS2 <- (ATS2 > crit_ATS2)
  }
  result <- c(erg_WTS, erg_ATS, erg_MCTP, erg_ATS2)
  return(result)
}
#-----------------------------Simulation Program----------------------------#
#mySimu<-function(n,%missing,rho,d,nsim)
# Monte-Carlo simulation of type-I error rates for repeated-measures designs
# with missing values.
#
# Arguments:
#   n     - number of subjects per simulated data set
#   r     - probability of a missing observation (used by MM == "MCAR" only)
#   dist  - marginal distribution: "Normal" or "LogNormal"
#   sigma - covariance-matrix setting, one of "1".."6" (d = 3 or 4 time points)
#   MM    - missingness mechanism: "MCAR", or MAR via "SIG" (variance groups)
#           or "MED" (median groups); "SIG"/"MED" require a d = 4 setting
#           because they generate missings in columns 2/4 from columns 1/3
#   nsim  - number of simulation runs
#
# Side effects: prints the aggregated rejection rates and appends them to
# "revised_results.txt".
#
# FIX vs. the original version: in the MM == "SIG" branch the "first group"
# masking of column 4 computed y[...] * rbinom(...) but never assigned it
# back (missing `<-`), so column 4 never received missings for that group;
# compare the parallel MM == "MED" branch. This is now assigned.
mySimu <- function(n, r, dist, sigma, MM, nsim){
  # Validate inputs early; the old code failed later with obscure errors.
  if(!sigma %in% as.character(1:6)) stop("unknown 'sigma' setting: ", sigma)
  if(!dist %in% c("Normal", "LogNormal")) stop("unknown 'dist': ", dist)
  if(!MM %in% c("MCAR", "SIG", "MED")) stop("unknown missingness mechanism: ", MM)
  # Covariance matrix
  if(sigma == "1") V0 <- matrix(c(1, 0, 0, 0, 1, 0, 0, 0, 1), ncol=3)
  if(sigma == "2") V0 <- matrix(c(3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3), ncol=4)
  if(sigma == "3") V0 <- matrix(c(1, 0, 0, 0, 3, 0, 0, 0, 6), ncol=3)
  if(sigma == "4") V0 <- matrix(c(1, 0.5, 0.7, 0.5, 5, 0.8, 0.7, 0.8, 9), ncol=3)
  if(sigma == "5") V0 <- matrix(c(1, 0, 0, 0, 0, 3, 0, 0, 0, 0, 6, 0, 0, 0, 0, 12), ncol=4)
  if(sigma == "6") V0 <- matrix(c(1, 0.2, 0.4, 0.6, 0.2, 3, 0.7, 0.5, 0.4, 0.7, 6, 0.6, 0.6, 0.5, 0.6, 12), ncol=4)
  # Number of repeated measures
  d <- ncol(V0)
  if(MM %in% c("SIG", "MED") && d < 4) stop("MM = 'SIG'/'MED' requires a 4-dimensional sigma setting")
  # Result vectors, preallocated; runs skipped as degenerate stay NA and are
  # dropped by the na.rm = TRUE means below (same effect as growing vectors).
  erg_ATS <- rep(NA, nsim)
  erg_ATS2 <- rep(NA, nsim)
  erg_WTS <- rep(NA, nsim)
  erg_MCTP <- rep(NA, nsim)
  erg_WTS_med <- rep(NA, nsim)
  erg_ATS_med <- rep(NA, nsim)
  erg_MCTP_med <- rep(NA, nsim)
  erg_WTS_cca <- rep(NA, nsim)
  erg_ATS_cca <- rep(NA, nsim)
  erg_MCTP_cca <- rep(NA, nsim)
  erg_dom_WTS <- rep(NA, nsim)
  erg_dom_ATS <- rep(NA, nsim)
  for(isim in 1:nsim){
    #### CREATE DATA SET #####
    # Draw multivariate normal (or exponentiated = log-normal) data.
    if(dist == "Normal") {
      y <- rmvnorm(n, mean = rep(0,d), sigma = V0)
    }
    if(dist == "LogNormal"){
      y <- exp(rmvnorm(n = n, sigma = V0))
    }
    ### Create missings under MCAR or MAR ###
    # MAR option 1: missingness in columns 2/4 depends on the variance of
    # columns 1/3. A value is set to 0 (= flagged missing below) when the
    # Bernoulli draw is 0.
    # NOTE(review): sig1/sig2 are *variances* used as cutoffs on the raw data
    # scale; this looks like it may have been intended as standard deviations
    # — left unchanged to preserve the published simulation setting.
    if(MM == "SIG"){
      # Calculate Variances over all observations in "observed Groups"
      sig1 <- var(as.vector(y[,1]))
      sig2 <- var(as.vector(y[,3]))
      # First Group (values below -sigma): 10% missing
      y[y[,1] < -sig1,2] <- y[y[,1] < -sig1,2]*rbinom(length(y[y[,1] < -sig1,2]),1, 0.9)
      # FIX: this result was previously computed but never assigned back.
      y[y[,3] < -sig2,4] <- y[y[,3] < -sig2,4]*rbinom(length(y[y[,3] < -sig2,4]),1, 0.9)
      # Second Group (central values): 30% missing
      y[(-sig1 < y[,1] & y[,1] < sig1),2] <- y[(-sig1 < y[,1] & y[,1] < sig1),2] * rbinom(length(y[(-sig1 < y[,1] & y[,1] < sig1),2]), 1, 0.7)
      y[(-sig2 < y[,3] & y[,3] < sig2),4] <- y[(-sig2 < y[,3] & y[,3] < sig2),4] * rbinom(length(y[(-sig2 < y[,3] & y[,3] < sig2),4]), 1, 0.7)
      # Third Group (values above sigma): 10% missing
      y[y[,1] > sig1,2] <- y[y[,1] > sig1,2]*rbinom(length(y[y[,1] > sig1,2]),1, 0.9)
      y[y[,3] > sig2,4] <- y[y[,3] > sig2,4]*rbinom(length(y[y[,3] > sig2,4]),1, 0.9)
    }
    # MAR option 2: missingness depends on the medians of columns 1/3.
    if(MM == "MED"){
      # Calculate Medians over all observations in "Observed Groups"
      med1 <- median(y[,1])
      med2 <- median(y[,3])
      # First Group (below the median): 10% missing
      y[y[,1] < med1,2] <- y[y[,1] < med1,2] * rbinom(length(y[y[,1] < med1,2]), 1, 0.9)
      y[y[,3] < med2,4] <- y[y[,3] < med2,4] * rbinom(length(y[y[,3] < med2,4]), 1, 0.9)
      # Second Group (above the median): 30% missing
      y[y[,1] > med1,2] <- y[y[,1] > med1,2] * rbinom(length(y[y[,1] > med1,2]), 1, 0.7)
      y[y[,3] > med2,4] <- y[y[,3] > med2,4] * rbinom(length(y[y[,3] > med2,4]), 1, 0.7)
    }
    # For the MAR mechanisms, values forced to exactly 0 mark missing cells.
    if(MM == "SIG" | MM == "MED"){
      lambda <- matrix(rep(1, n*d), ncol = d)
      for(i in 1:n){
        for(j in 1:d){
          if(y[i,j] == 0){
            lambda[i,j] <- 0
          }
        }
      }
    }
    # MCAR: every cell is independently missing with probability r.
    if(MM == "MCAR"){
      lambda <- matrix(rbinom(n*d,1,1-r),nrow=n,ncol=d)
    }
    # Skip degenerate data sets (a time point with 0 or 1 observed values).
    lambdai <- colSums(lambda)
    if(sum(lambdai==0) > 0 | sum(lambdai == 1) > 0) next;
    # Round data (introduces ties, deliberately).
    y <- round(y)
    #### ALL AVAILABLE CASE ANALYSIS ####
    # Proposed procedure
    original <- Original(y,lambda)
    erg_WTS[isim] <- original[1]
    erg_ATS[isim] <- original[2]
    erg_MCTP[isim] <- original[3]
    erg_ATS2[isim] <- original[4]
    # Domhof procedure
    domhof <- Domhof(y,lambda)
    erg_dom_WTS[isim] <- domhof[1]
    erg_dom_ATS[isim] <- domhof[2]
    #### MEDIAN IMPUTATION ####
    # Replace every missing cell by the column median of the observed values,
    # then run the proposed procedure on the completed data.
    y_NA <- y
    y_imp <- y
    for(j in 1:d){
      for(i in 1:n){
        if(lambda[i,j] == 0) y_NA[i,j] = NA
      }
    }
    med_NA <- function(x){median(x, na.rm=T)}
    med <- apply(y_NA, 2, med_NA)
    for(j in 1:d){
      for(i in 1:n){
        if(is.na(y_NA[i,j]) == T) y_imp[i,j] = med[j]
      }
    }
    lambda_imp <- matrix(rep(1,n*d), ncol = d)
    # (renamed from `median` to avoid shadowing stats::median)
    med_fit <- Original(y_imp, lambda_imp)
    erg_WTS_med[isim] <- med_fit[1]
    erg_ATS_med[isim] <- med_fit[2]
    erg_MCTP_med[isim] <- med_fit[3]
    #### Complete Case Analysis ####
    # Keep only subjects without any missing value; needs more than d rows.
    y_cca <- y_NA[complete.cases(y_NA),]
    lambda_cca <- lambda[complete.cases(y_NA),]
    if(is.vector(lambda_cca) == FALSE & is.null(lambda_cca) == FALSE){
      lambda_cca_i <- dim(lambda_cca)[1]
      if(lambda_cca_i > d){
        cca <- Original(y_cca, lambda_cca)
        erg_WTS_cca[isim] <- cca[1]
        erg_ATS_cca[isim] <- cca[2]
        erg_MCTP_cca[isim] <- cca[3]
      }
    }
  }
  # Empirical rejection rates over the non-degenerate runs.
  result <- data.frame(nsim = nsim, n = n, d = d, p_miss = r, Dist = dist, Sigma = sigma, MM = MM, WTS = mean(erg_WTS, na.rm = T),
                       ATS = mean(erg_ATS, na.rm = T), ATS2 = mean(erg_ATS2, na.rm = T), MCTP = mean(erg_MCTP, na.rm = T), WTSmed = mean(erg_WTS_med, na.rm = T),
                       ATSmed = mean(erg_ATS_med, na.rm = T), MCTPmed = mean(erg_MCTP_med, na.rm = T),
                       WTS_cca = mean(erg_WTS_cca, na.rm =T), ATS_cca = mean(erg_ATS_cca, na.rm = T), MCTP_cca = mean(erg_MCTP_cca, na.rm = T),
                       WTS_domhof = mean(erg_dom_WTS, na.rm =T), ATS_domhof = mean(erg_dom_ATS, na.rm = T))
  print(result)
  write.table(result, "revised_results.txt", row.names = F, col.names = F, quote = F, append = T)
}
# Simulation grid: distributions, sample sizes, missing-value proportions,
# covariance settings (sig4 = the 4-dimensional settings required by the
# MAR mechanisms), and the two MAR mechanisms.
Dist <- c("Normal", "LogNormal")
n <- c(10,20,30)
p_miss <- c(0,0.1,0.2,0.3)
sig <- c("1","2","3","4","5","6")
MissMech <- c("SIG", "MED")
sig4 <- c("2", "5", "6")
set.seed(1234)
### Simulation only for MCAR
for(h in 1:length(sig)){
  for(hh in 1:length(Dist)){
    for(hhh in 1:length(n)){
      for(hhhh in 1:length(p_miss)){
        mySimu(n = n[hhh], r = p_miss[hhhh], dist = Dist[hh], sigma = sig[h], MM = "MCAR", nsim = 10000)
      }
    }
  }
}
# Simulation for MAR (both versions)
# (r is irrelevant for MAR but still iterated over, so each MAR setting is
# effectively run length(p_miss) times.)
for(h in 1:length(MissMech)){
  for(hh in 1:length(sig4)){
    for(hhh in 1:length(Dist)){
      for(hhhh in 1:length(n)){
        for(hhhhh in 1:length(p_miss))
          mySimu(n = n[hhhh], r = p_miss[hhhhh], dist = Dist[hhh], sigma = sig4[hh], MM = MissMech[h], nsim = 10000)
      }
    }
  }
}
# NOTE(review): the call below looks like a leftover smoke test; it re-runs a
# single setting (appending to the results file) every time the script is
# sourced — consider removing it.
mySimu(10,0.2,"Normal", "2", "MCAR", 1000)
|
f52bed30e519f34d24031527a703e0c525617ed1 | d6adccd4f4fecdfddc8d1f8c96295841046eac80 | /R/login.R | cba3d9057d916992ba544dc8ea890d93dd7a09e6 | [
"Apache-2.0"
] | permissive | Sage-Bionetworks/challengerutils | 6fdd86951695bb7f1eb439e209612bc6592d0e64 | fa80010e0be6c1560083e5d81517ee5e22cbe05a | refs/heads/master | 2022-11-15T01:53:00.815647 | 2020-06-24T03:41:53 | 2020-06-24T03:41:53 | 263,805,485 | 6 | 3 | Apache-2.0 | 2020-06-24T03:41:55 | 2020-05-14T03:28:45 | R | UTF-8 | R | false | false | 423 | r | login.R | #' Logs into Synapse.
#'
#' Credentials may be omitted when a Synapse \code{.synapseConfig} file is
#' available; the Python \code{synapseclient} then resolves them itself.
#'
#' @param username a Synapse username (optional, not required if .synapseConfig available)
#' @param password a Synapse password (optional, not required if .synapseConfig available)
#' @return The result of the Python client's \code{login()} call. As a side
#'   effect, a logged-in Synapse client object is published as \code{.syn}
#'   (via \code{<<-}) for use by other functions.
#' @examples
#' library(challengerutils)
#' syn_login()
#' @import reticulate
#' @export
syn_login <- function(username = NULL, password = NULL){
  # `<<-` publishes the client under the name `.syn` outside this function;
  # `synapseclient` is presumably a reticulate-imported Python module — TODO
  # confirm where it is defined.
  .syn <<- synapseclient$Synapse()
  .syn$login(username, password)
}
|
3004697750b0336b6fb98d7f1f22aa418f5eb1ab | fe8f6bdc7ee13a888c3a31d9313b06510bf401cd | /Downloads/RobKAT-master-3/man/IBSKernel.Rd | e6ffc7941d9546d9a057352ab8044916fb791f8d | [] | no_license | karamartinez00/RobKAT | 8240184ebab3ce9cedf5898a73bec1f88ed69f4a | beaa97d3fc199ca2eb3fca471581cde9e44cfa7a | refs/heads/master | 2020-08-08T04:01:15.813646 | 2019-10-08T17:36:17 | 2019-10-08T17:36:17 | 213,705,444 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 442 | rd | IBSKernel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{IBSKernel}
\alias{IBSKernel}
\title{Compute the IBS kernel}
\usage{
IBSKernel(z)
}
\arguments{
\item{z}{A SNP matrix of size \code{n x M}. Here \code{n} is the
        number of subjects and \code{M} is the number of SNPs. Each
        entry of \code{z} should be 0, 1 or 2, denoting the minor
        allele count.}
}
\description{
Compute the IBS kernel
}
|
bc8b1cb0fb597faca725c8dde0fac01d06c918b7 | 1d0f0b5befb697617d1dee599f522b2189563d57 | /plot6.R | 6ef148abbbec03d6cbb7f049e8a3629f5c105205 | [] | no_license | akumar2005/DataAnalysisProject | e1119c75143def14e36f68e0956c227752fb8e76 | 9a4e4d23b247824d95fc7b710d9d630341714fc4 | refs/heads/master | 2021-01-01T17:28:46.134588 | 2015-01-25T20:50:47 | 2015-01-25T20:50:47 | 29,830,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,135 | r | plot6.R | # Load NEI and SCC data frames
# FIX: ggplot2 provides qplot(); it was used below without being attached.
library(ggplot2)

# Load NEI (PM2.5 emissions) and SCC (source classification) data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Compare emissions from motor vehicle sources in Baltimore City with
# emissions from motor vehicle sources in Los Angeles County, California.
# Which city has seen greater changes over time in motor vehicle emissions?
# Subset of emission data for Baltimore City (fips 24510) and Los Angeles
# County (fips 06037), motor vehicles = ON-ROAD sources.
BC <- NEI[NEI$fips=="24510" & NEI$type=="ON-ROAD",]
LA <- NEI[NEI$fips=="06037" & NEI$type=="ON-ROAD",]
BC$city<-"Baltimore City"
LA$city<-"Los Angeles City"
# Combine the data from the two cities.
both<-rbind(BC,LA)
# Aggregate of emission data per year for the two cities.
both.aggr<-aggregate(Emissions~year+city,both,sum)
# Plot total vehicle emissions as a function of year for the two cities.
png("plot6.png",width=480,height=480)
p<-qplot(year,Emissions, data=both.aggr, col=city)
print(p)
# FIX: close the PNG device so plot6.png is flushed to disk (dev.off() was
# missing, leaving the device open and the file incomplete).
dev.off()
# Ans: Considerably lower levels of vehicle emissions for Baltimore.
# Baltimore: Decline from 1999 to 2002. Very slow decrease from 2002-2008.
# LA: steady increase till 2005 and then decreases.
# |
dae66aecaf647f230ffd6cee7186d26b29d834ce | 1e42b9829b85bc37d112ec5b8efa1682264297b2 | /R/processing_time_activity.R | f51a53369f9d13a94ad607ff60bdad40612082d6 | [] | no_license | strategist922/edeaR | ca83bf91f58e685bc9333f4db3bfea3d8c019343 | ad96118cccfdc90a7bed94f5aef2ee0cfab3aac8 | refs/heads/master | 2021-07-05T04:30:35.286640 | 2017-09-27T12:25:04 | 2017-09-27T12:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,170 | r | processing_time_activity.R |
# Per-activity processing-time statistics for an event log.
#
# The processing time of one activity instance is the span between its
# earliest and its latest recorded timestamp, expressed in `units`.
#
# Args:
#   eventlog: an edeaR/bupaR event log object (validated by stop_eventlog).
#   units:    time unit passed to as.double() on a difftime, e.g. "mins".
#
# Returns: one row per activity with its relative frequency and summary
#   statistics (min/q1/median/mean/q3/max/sd/IQR/total) of processing times,
#   sorted by decreasing relative frequency. The per-instance durations are
#   attached as attribute "raw".
processing_time_activity <- function(eventlog,
									 units) {
	stop_eventlog(eventlog)  # abort early if input is not an event log

	# Rename the timestamp column to a fixed name so it can be referenced
	# literally inside the dplyr pipeline below.
	colnames(eventlog)[colnames(eventlog) == timestamp(eventlog)] <- "timestamp_classifier"
	event_classifier <- activity_id(eventlog)
	activity_instance_classifier <- activity_instance_id(eventlog)

	# One row per activity instance: start (s), end (e) and duration.
	# NOTE(review): group_by_() (string-based NSE) is deprecated in current
	# dplyr; kept as-is to preserve behavior.
	r <- eventlog %>%
		group_by_(event_classifier, activity_instance_classifier) %>%
		summarize(s = min(timestamp_classifier), e = max(timestamp_classifier)) %>%
		mutate(processing_time = as.double(e - s, units = units))

	raw <- r  # keep the unaggregated per-instance durations

	# summarize() above peeled off the innermost group (the activity
	# instance), so this second summarize aggregates per activity.
	r <- r %>%
		summarize(relative_frequency = n(),
				  min = min(processing_time),
				  q1 = quantile(processing_time, probs = c(0.25)),
				  median = median(processing_time),
				  mean = mean(processing_time),
				  q3 = quantile(processing_time, probs = c(0.75)),
				  max = max(processing_time),
				  st_dev = sd(processing_time),
				  iqr = quantile(processing_time, probs = c(0.75)) - quantile(processing_time,probs = c(0.25)),
				  tot = sum(processing_time)) %>%
		mutate(relative_frequency = relative_frequency/sum(relative_frequency)) %>%
		arrange(desc(relative_frequency))

	attr(r, "raw") <- raw
	return(r)
}
|
c9a0e015871f4297091048f2832e112d005a6d03 | e7cc77bfee2a849dde93ac5ac7ed2d6318725d1b | /code/data_pre-process.R | 240f7b6098b045235303718d99c726bd48b32319 | [] | no_license | y220/Stat_605_Group_Project | 8c377e7cfc150ef8f71e00d88bb510a4b74970b3 | 5c703d6ffcea7b919311418916088f9e5115bfd0 | refs/heads/master | 2023-01-28T15:41:42.358925 | 2020-12-11T22:54:40 | 2020-12-11T22:54:40 | 310,103,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 875 | r | data_pre-process.R | rm(list = ls())
# Pre-process Sofia air-quality sensor data: collapse raw per-reading files
# to one monthly mean per station (lat/lon pair) and write cleaned CSVs.

# Optional CLI (currently disabled): read the data file name from Rscript args.
# args = (commandArgs(trailingOnly=TRUE))
# if(length(args) == 1){
# data = args[1]
# } else {
# cat('usage: Rscript sofia.R <data>\n', file=stderr())
# stop()
# }
# SDS011 particulate-matter readings (July 2017).
sds <- read.csv('data/2017-07_sds011sof.csv')
# Truncate ISO timestamps ("YYYY-MM-DDThh:mm:ss") to the date part.
sds$timestamp<-sub("T.*$", "", sds$timestamp)
# Mean P1/P2 per station over the month.
particles<-aggregate(cbind(P1, P2) ~ lat + lon, data = sds, FUN = mean, na.rm = TRUE)
# BME280 climate readings.
# NOTE(review): this reads the 2017-11 (November) file, yet the result is
# written to climate1707.csv below -- confirm whether the input should be
# data/2017-07_bme280sof.csv or the output renamed to ...1711.csv.
bme<-read.csv("data/2017-11_bme280sof.csv")
# Truncate ISO timestamps to the date part.
bme$timestamp<-sub("T.*$", "", bme$timestamp)
# Mean pressure/temperature/humidity per station over the month.
climate<-aggregate(cbind(pressure, temperature, humidity) ~ lat + lon, data = bme, FUN = mean, na.rm = TRUE)
# Write the cleaned per-station monthly means.
write.csv(particles, "clean_data/particles1707.csv", row.names = FALSE)
write.csv(climate, "clean_data/climate1707.csv", row.names = FALSE)
|
1ce7f7c8df6cc824523e02effae9a022bf14f2e1 | 70db47eee4797d278ea0b442c143501178c53ef0 | /R/multi_heat.R | 7a395a84a176e17d06f9c567bac17aea5f4e4293 | [
"MIT"
] | permissive | jdreyf/ezlimmaplot | f5f4136e48e9aebb4ecf79ad135524be876c752c | 393055383916fc36b5b13b0ff6b6bb5d5dc9cedc | refs/heads/master | 2023-08-17T15:28:36.764451 | 2023-08-14T15:15:11 | 2023-08-14T15:15:11 | 130,758,410 | 1 | 6 | MIT | 2020-04-20T15:18:19 | 2018-04-23T21:22:54 | R | UTF-8 | R | false | false | 2,389 | r | multi_heat.R | #' Plot one heatmap per comparison
#'
#' Plot one heatmap per comparison with \code{ezheat}, where features in \code{object} are reordered per comparison
#' using \code{tab}.
#'
#' @inheritParams ezheat
#' @inheritParams ezvenn
#' @details \code{rownames(tab)} and \code{rownames(object)} should overlap, and some \code{colnames(tab)}
#' should end in \code{.p}, so they can be identified.
#' @export
multi_heat <- function(tab, object, pheno.df=NULL, labrows=rownames(object), labcols=colnames(object),
main="Log2 Expression", name="heats", sc="ctr", clip=NA, color.v=NULL,
unique.rows=FALSE, only.labrows=FALSE, ntop=50, stat.tab = NULL,
cutoff = 0.05, reorder_rows=FALSE, reorder_cols=FALSE, fontsize_row=10, fontsize_col=10,
na.lab=c("---", ""), plot=TRUE, width=NA, height=NA, verbose=FALSE){
if (length(labrows)==1) labrows <- rep(x=labrows, nrow(object))
stopifnot(length(labrows)==nrow(object), names(labrows)==rownames(object))
names(labrows) <- rownames(object)
p.cols <- grep(paste0("\\.p$"), colnames(tab), value=TRUE)
contr.names <- sub(paste0("\\.(p)$"), "", p.cols)
stopifnot(length(intersect(rownames(tab), rownames(object))) > 1, length(p.cols) > 0)
rows.int <- intersect(rownames(object), rownames(tab))
tab <- tab[rows.int,, drop=FALSE]
object <- object[rows.int,, drop=FALSE]
labrows <- labrows[rows.int]
if (!is.na(name)) {
grDevices::pdf(paste0(name, ".pdf"))
on.exit(grDevices::dev.off())
}
ret.lst <- list()
for (contr in contr.names){
main.tmp <- paste(main, contr)
p.col <- paste0(contr, ".p")
rows.tmp <- rownames(tab)[order(tab[,p.col])]
object.tmp <- object[rows.tmp,]
labrows.tmp <- labrows[rows.tmp]
ret.lst[[contr]] <- ezheat(object=object.tmp, labrows=labrows.tmp, pheno.df=pheno.df, main=main.tmp, sc=sc, clip=clip,
color.v=color.v, unique.rows=unique.rows, only.labrows=only.labrows, ntop=ntop,
stat.tab = stat.tab, cutoff = cutoff, labcols=labcols, reorder_rows=reorder_rows,
reorder_cols=reorder_cols, fontsize_row=fontsize_row, fontsize_col=fontsize_col,
na.lab=na.lab, plot=plot, width=width, height=height, verbose=verbose, name=NA)
}
return(invisible(ret.lst))
}
|
0e0300547075626f1bdede1ff22f0d0b687e3219 | 652abde6569f3aab251a174786678f7a2e3c5202 | /code/correlation_fun.R | 020ee997604eaff1972aa124321c9fa86b3d121c | [] | no_license | XianwuXue-NOAA/bivariate_EMOS | 48095e391809dbae063f0b530e281e0537d73e64 | 1e57bc97d81c2564538cba6e83c649948c590792 | refs/heads/master | 2020-05-22T12:33:34.130248 | 2019-04-17T11:38:48 | 2019-04-17T11:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,938 | r | correlation_fun.R |
# Directional correlation between two series, binned by wind direction.
#
# Each point is assigned to one of eight 45-degree wind-direction sectors
# (direction derived from -x1/-y1 via atan2); points inside the calm-wind
# `radius` are discarded. Within each sector the Pearson correlation of
# (x2, y2) is computed, keeping the original conventions:
#   * fewer than 2 points in a sector          -> correlation 0
#   * zero variance in x2 or y2 within sector  -> correlation 1
#
# Fixes relative to the previous version:
#   * an empty sector no longer crashes (indexing NULL with [, 1] errored);
#   * a sector whose covariance happened to be exactly 0 is no longer forced
#     to correlation 1 (only zero *variance* is treated as degenerate).
#
# Args:
#   x1, y1: components used to derive the wind-direction sector.
#   x2, y2: values whose per-sector correlation is reported.
#   radius: points with x1^2 + y1^2 <= radius^2 are ignored.
#   ...:    further arguments passed on to plot().
#
# Returns: a 2 x 8 matrix with rows "cor" (per-sector correlation) and "l"
#   (per-sector point count), columns ordered by sector midpoints
#   22.5, 67.5, ..., 337.5 degrees. Also plots correlation vs. direction.
correlation_fun <- function(x1, x2, y1, y2, radius, ...){
  # Keep only points outside the calm-wind radius.
  keep <- (x1^2 + y1^2) > radius^2
  # Wind direction in degrees, mapped into [0, 360).
  phi <- atan2(-x1[keep], -y1[keep]) / (2 * pi) * 360
  phi[phi < 0] <- phi[phi < 0] + 360
  # Sector index 1..8 for [0,45), [45,90), ..., [315,360).
  sector <- findInterval(phi, seq(0, 360, by = 45))

  u <- x2[keep]
  v <- y2[keep]

  # Correlation and count for one sector, with the edge-case conventions
  # documented above.
  sector_stats <- function(su, sv) {
    n <- length(su)
    if (n < 2) return(c(0, n))                          # too few points
    if (var(su) == 0 || var(sv) == 0) return(c(1, n))   # degenerate sector
    c(cor(su, sv), n)
  }

  stats <- vapply(seq_len(8),
                  function(b) sector_stats(u[sector == b], v[sector == b]),
                  numeric(2))
  y <- rbind(cor = stats[1, ], l = stats[2, ])

  # Sector midpoints in degrees for the x-axis.
  theta <- seq(22.5, 337.5, by = 45)
  plot(theta, y[1, ], type = "b", xlab = "Wind Direction in Degrees",
       ylab = "Correlation", xlim = c(0, 360), ylim = c(-1, 1), ...)
  text(x = theta, y = y[1, ], labels = y[2, ], font = 2, cex = 1.5, pos = 2)
  return(y)
}
|
daccd8f7bc77009762566bde4c21ed7ed19f3028 | 4573415414b0c12d8be0930ab4ee3a0434dcdbe8 | /man/CreateDosageDataAcrossStudies.Rd | 6cd0a4666cc41a82ce63e49e53776dd91401a9be | [] | no_license | bbanbury/bb_gecco | fb3b277ce1f34c24599d0c07f535ef0de5bf5488 | 81740295d4908ae94500cbf7dcc97e6e23843e2f | refs/heads/master | 2020-05-20T15:40:39.095515 | 2015-05-26T15:08:32 | 2015-05-26T15:08:32 | 33,628,901 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,041 | rd | CreateDosageDataAcrossStudies.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/SearchFunctions.R
\name{CreateDosageDataAcrossStudies}
\alias{CreateDosageDataAcrossStudies}
\title{Create a Dosage Dataset from HapMap}
\usage{
CreateDosageDataAcrossStudies(rs_positions, saveToRdata = NULL)
}
\arguments{
\item{rs_positions}{An object of the class "snp_location_info" that comes out of the FindSNPpositions* functions}
\item{saveToRdata}{Optional file name to save Rdata to. Will not save if left NULL.}
}
\description{
Create dataset with Dosage Information for one or many hapmap studies
}
\details{
This function uses the information in rs_positions (class snp_location_info) to gather a
dosage dataset. There can be one study or many studies. Returns a dataset with individuals
as rows and rs_positions as columns.
}
\examples{
tmp <- FindSNPposition_hapmap(c("rs2736100", "rs401681", "rs10069690"), studies="101ccfr")
CreateDosageDataAcrossStudies(tmp)
}
\seealso{
\link{FindSNPposition_hapmap} \link{CreateDosageDataPerStudy}
}
|
9903dfc05546debf25d8878152b4fd9299636a5d | a3b4006dd6b77ff0818c751d9c6ae6d32c8a0377 | /talks/2017-05-10_readxl-rstudio-webinar.R | f7e3a2adca2583366be9c5b3bb4315f429c95118 | [
"BSD-2-Clause",
"MIT"
] | permissive | tidyverse/readxl | 98a61dbb111848100783d5c977becee6cf3cd749 | 3aa8c2ddf9f1d8921f2a8b42ae0bdfa69a22ed9b | refs/heads/main | 2023-07-20T16:12:41.510109 | 2023-07-07T02:58:27 | 2023-07-07T02:58:27 | 32,161,666 | 432 | 130 | NOASSERTION | 2023-02-08T23:07:26 | 2015-03-13T14:50:20 | C++ | UTF-8 | R | false | false | 3,163 | r | 2017-05-10_readxl-rstudio-webinar.R | ## webinar live coding
## RStudio webinar (2017-05-10): readxl live-coding demo.
## Each section mirrors a segment of the talk; run interactively, top to bottom.
library(readxl)
## access examples-- -----------------------------------------------
## list all examples
readxl_example()
## get path to a specific example
readxl_example("datasets.xlsx")
datasets <- readxl_example("datasets.xlsx")
read_excel(datasets)
## IDE support ------------------------------------------------------
## in Files pane
## navigate to folder holding xls/xlsx
## click on one!
## choose Import Dataset...
## demo with deaths.xlsx
## admire the
## * File/Url field
## * Data preview, complete with column types
## * Import options
##   - skip = 4, n_max = 10
##   - range = A5:F15
##   - range = other!A5:F15
## * Preview code, copy it to clipboard, execute it
## alternative workflow:
## copy this URL to the clipboard:
## https://github.com/tidyverse/readxl/blob/master/inst/extdata/deaths.xlsx?raw=true
## File > Import Dataset > paste the URL and Update
## Nice touch: code includes commands necessary to download
## Data rectangle ------------------------------------------------------
## using read_excel() "by hand"
read_excel(
  readxl_example("deaths.xlsx"),
  range = "arts!A5:F15"
)
read_excel(
  readxl_example("deaths.xlsx"),
  sheet = "other",
  range = cell_rows(5:15)
)
## The Sheet Geometry vignette has all the details:
## http://readxl.tidyverse.org/articles/sheet-geometry.html
browseURL("http://readxl.tidyverse.org/articles/sheet-geometry.html")
## Column typing -------------------------------------------------------
## mix specific types with guessing
read_excel(
  readxl_example("deaths.xlsx"),
  range = "arts!A5:C15",
  col_types = c("guess", "skip", "numeric")
)
## recycling happens
read_excel(
  readxl_example("datasets.xlsx"),
  col_types = "text"
)
## "list" col_type prevents all coercion
(df <- read_excel(
  readxl_example("clippy.xlsx"),
  col_types = c("text", "list")
))
tibble::deframe(df)
## The Cell and Column Types vignette has all the details:
## http://readxl.tidyverse.org/articles/cell-and-column-types.html
browseURL("http://readxl.tidyverse.org/articles/cell-and-column-types.html")
## Workflows -------------------------------------------------------
library(tidyverse)
## store a csv snapshot at the moment of import
## (write_csv() returns its input invisibly, so iris_xl holds the tibble)
iris_xl <- readxl_example("datasets.xlsx") %>%
  read_excel(sheet = "iris") %>%
  write_csv("iris-raw.csv")
iris_xl
dir(pattern = "iris")
read_csv("iris-raw.csv")
## load all the worksheets in a workbook at once!
path <- readxl_example("datasets.xlsx")
excel_sheets(path)
path %>%
  excel_sheets() %>%
  set_names() %>%
  map(read_excel, path = path)
## load all the worksheets in a workbook into one BIG BEAUTIFUL
## data frame!
## NOTE(review): map_df() is superseded in current purrr; map() + list_rbind()
## is the modern equivalent. Kept as delivered in the webinar.
path <- readxl_example("deaths.xlsx")
deaths <- path %>%
  excel_sheets() %>%
  set_names() %>%
  map_df(~ read_excel(path = path, sheet = .x, range = "A5:F15"), .id = "sheet")
deaths
## use a similar workflow to iterate over multiple files in folder
## The readxl Workflows article has all the details:
## http://readxl.tidyverse.org/articles/articles/readxl-workflows.html
browseURL("http://readxl.tidyverse.org/articles/articles/readxl-workflows.html")
## bye bye now :)
|
0f5d360b264fb1c437636739b10a927afd899bc4 | 02dd5dd6187597cc615b794cc987086fff2e17d9 | /hw04/app/app.R | c16d047528a721e2725856a5f00dd2da7ee38bcd | [] | no_license | brennan6/stat133-hws-fall17 | e8584a3ee6e4cd2ea72f20ad089ce3bbbaf5b05f | 0299d5b67dfb8d27d3317ae8309d71aef61de0a3 | refs/heads/master | 2021-08-23T07:24:57.194915 | 2017-12-04T04:00:45 | 2017-12-04T04:00:45 | 103,568,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,898 | r | app.R | #
#
# Shiny "Grade Visualizer" app: a barchart of grade frequencies, histograms
# of individual score columns, and scatterplots between score columns.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(ggplot2)
# NOTE(review): absolute paths tie this app to one machine -- consider paths
# relative to the app directory instead.
source("/Users/matthewbrennan/stat133/stat133-hws-fall17/hw04/code/functions.R")
dat <- read.csv("/Users/matthewbrennan/stat133/stat133-hws-fall17/hw04/data/cleandata/cleanscores.csv")
# Order grades from best to worst so tables and plots display sensibly.
dat$Grade = factor(dat$Grade,
                   levels = c('A+', 'A', 'A-',
                              'B+', 'B', 'B-',
                              'C+', 'C', 'C-',
                              'D', 'F'))
# Grade counts plus proportion/frequency columns used by the barchart tab.
proportions = dat %>% group_by(Grade) %>% summarise(count = n())
binded = proportions
binded['Prop'] = round(as.numeric(binded$count)/sum(as.numeric(binded$count)), 2)
binded['Freq'] = as.numeric(binded$count)
# Column names selectable on the histogram/scatterplot tabs (all but Grade).
vector = names(dat)[names(dat)!= 'Grade']
# UI: the sidebar controls swap with the active tab (conditionalPanel keyed
# on input.tabselected); the main panel holds the three output tabs.
ui = fluidPage(
  titlePanel(title = 'Grade Visualizer'),
  sidebarLayout(
    sidebarPanel(
      # Tab 1 (Barchart): grade frequency table.
      conditionalPanel(condition = "input.tabselected == 1",
                       tableOutput('table')),
      # Tab 2 (Histogram): variable picker and bin-width slider.
      conditionalPanel(condition = "input.tabselected==2",
                       selectizeInput('assignment', 'X-axis variable', vector,
                                      'HW1'),
                       sliderInput('values', 'Bin Width', min = 1, max = 10, value = 10)),
      # Tab 3 (Scatterplot): x/y pickers, point-size slider, optional trend line.
      conditionalPanel(condition = "input.tabselected == 3",
                       selectizeInput('x', 'X-axis variable', vector, 'Test1'),
                       selectizeInput('y', 'Y-axis variable', vector, 'Overall'),
                       sliderInput('sliders', 'Opacity', min = 0, max = 1, value = 0.5),
                       radioButtons('radios', 'Show line', list('none', 'lm', 'loess'), "")
      )),
    mainPanel(
      tabsetPanel(
        tabPanel('Barchart', value = 1, plotOutput('bar')),
        tabPanel('Histogram', value = 2,
                 fluidRow(column(11,plotOutput('histogram')),
                          column(12, verbatimTextOutput('summary')))),
        tabPanel('Scatterplot', value = 3, plotOutput('scatter'),
                 h3('Correlation'), textOutput('correlation')),
        id = ("tabselected")
      )
    )
  )
)
# Server: renders every output referenced by the UI above.
server = function(input, output){
  # Histogram of the chosen score column; bin width comes from the slider.
  # NOTE(review): breaks = seq(-5, 105, input$values) can fail for widths
  # that do not cover the full data range (hist() requires breaks to span
  # all values) -- confirm the intended slider values.
  output$histogram = renderPlot({
    col = input$assignment
    hist(dat[ , col], xlab = col, ylab = 'count', breaks=seq(-5,105,input$values), col = 'gray',
         border = 'white', include.lowest = TRUE, main = '')
    axis(side=1,at=seq(-10,110,input$values),labels=seq(-10,110,input$values))
  })
  # Text summary of the chosen column (helpers come from code/functions.R).
  output$summary = renderPrint({
    col = select(dat, input$assignment)[[1]]
    print_stats(summary_stats(col))
  })
  # Grade frequency/proportion table for the Barchart tab.
  output$table = renderTable({
    proportions = dat %>% group_by(Grade) %>% summarise(count = n())
    binded = proportions
    binded['Freq'] = as.numeric(binded$count)
    binded$Freq <- as.integer(binded$Freq)
    binded['Prop'] = round(as.numeric(binded$count)/sum(as.numeric(binded$count)), 2)
    binded = binded[-c(2)]  # drop the raw count column before display
  })
  # Barchart of grade frequencies (uses `binded` computed at app start-up).
  output$bar = renderPlot({
    ggplot(binded, aes(Grade, Freq)) + geom_bar(stat = 'identity', fill = 'steelblue') +
      labs(x = "Grade", y = "frequency")
  })
  # Scatterplot with optional linear or loess trend line.
  # NOTE(review): the 'Opacity' slider is passed to cex, which controls
  # point *size*, not transparency -- confirm which was intended.
  output$scatter = renderPlot({
    plot(dat[ , input$x], dat[ , input$y],
         xlim = c(-1, 101), ylim = c(-1,101), cex = input$sliders, panel.first = grid(col = 'blue'),
         xlab = input$x, ylab = input$y, pch = 1)
    if (input$radios == 'lm'){
      abline(lm(dat[ , input$y] ~ dat[ , input$x]), col = 'red', lwd = 3)}
    else if (input$radios == 'loess'){
      data = lowess(dat[ , input$x] , y = dat[ , input$y])
      lines(data, lwd = 3, col = 'blue')}
  })
  # Pearson correlation between the two selected columns.
  output$correlation = renderText({
    print(cor(dat[ , input$x], dat[, input$y]))
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
23a6be5e80ee638323df70f7cddb33190a9d693b | c72a114d8365ead4ccd39affacc476bef334b367 | /R/explore_schools.R | 904f19fb25b50f805bc4fe8902370e2eae3c596a | [] | no_license | avila/SOEPgeodist | 2fc581b4c6b20adb96c90e644d3b45c934642123 | 18d4634f936c590cc4b449ab33da3aba16997dc4 | refs/heads/master | 2023-08-27T21:51:19.848892 | 2021-11-06T16:26:50 | 2021-11-06T16:26:50 | 415,457,487 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,316 | r | explore_schools.R | ### This script explores the school data. By the end of the script it plots the
### school data points on a map for every year.
## library -----
library(haven)
library(dplyr)
library(sf)
library(ggplot2)
library(tidyr)
library(glue)
library(purrr)
## read data ----
# School master data; explicit col_types keep codes (e.g. plz) as character.
schools <- readxl::read_excel(
  "data/schulen_komplett.xlsx",
  col_types = c(art = "text",
                art_reduziert = "text",
                bundesland = "text",
                jahr = "numeric",
                lat = "numeric",
                lng = "numeric",
                loc_hash = "text",
                name = "text",
                ort = "text",
                plz = "text",
                priv_schule_typ = "text",
                strasse = "text",
                traeger = "text"
  )
) %>%
  mutate(
    # truncated school type -> for shorter legend labels
    art_trunc = stringr::str_trunc(as.character(art), 13, "center"),
    # combined type/provider category used throughout the plots below
    schultyp = paste(art_reduziert, traeger, sep = "_")
  )
# Earlier variant reading a Stata example file instead of the Excel sheet:
# schools <- haven::read_dta("data/schooldata_example.dta") %>%
#   mutate(
#     ## mutate into factor to be able to plot later.
#     across(where(haven::is.labelled),
#            ~haven::as_factor(.x)
#     ),
#
#     # shorten `art` for concise labels in legend
#     art_trunc = stringr::str_trunc(as.character(art), 13, "center")
#   )
## very basic summary ----
# Cross-tabulate each categorical column against year (printed to console).
schools %>% select(
  bundesland, art, art_reduziert
) %>% map(~table(.x, schools$jahr))
## calculate number of schools per type (art_reduziert) and bundesland.
## and for easier visualization, reshape into wide (one column per year).
sch_table_red <- schools %>%
  arrange(jahr, bundesland) %>%
  # -> sort by year (increasing), then bundesland.
  select(jahr, bundesland, art_reduziert) %>%
  group_by(jahr, bundesland, art_reduziert) %>%
  summarise(n = n(), .groups = "drop") %>%
  tidyr::pivot_wider(names_from = jahr, values_from = n)
# Same counts, but by the full school-type column `art`.
sch_table_komp <- schools %>%
  arrange(jahr, bundesland) %>%
  # -> sort by year (increasing), then bundesland.
  select(jahr, bundesland, art) %>%
  group_by(jahr, bundesland, art) %>%
  summarise(n = n(), .groups = "drop") %>%
  tidyr::pivot_wider(names_from = jahr, values_from = n)
# And by the combined type/provider category `schultyp`.
sch_table_typ <- schools %>%
  arrange(jahr, bundesland) %>%
  # -> sort by year (increasing), then bundesland.
  select(jahr, bundesland, schultyp) %>%
  group_by(jahr, bundesland, schultyp) %>%
  summarise(n = n(), .groups = "drop") %>%
  tidyr::pivot_wider(names_from = jahr, values_from = n)
# Export all three wide tables, one worksheet each.
openxlsx::write.xlsx(
  x = list(
    "art" = sch_table_komp,
    "art_reduziert" = sch_table_red,
    "schultyp" = sch_table_typ
  ),
  file = "output/tables/school_data_tables.xlsx",
  overwrite = TRUE
)
# Only open the data viewer in an interactive session.
if (interactive()) {
  View(sch_table_red)
}
## plot timeseries ----
# Yearly school counts by type, one facet per bundesland.
schools %>%
  group_by(jahr, bundesland, schultyp) %>%
  summarise(n=n()) %>%
  ggplot(aes(x = jahr, y = n, color = schultyp)) +
  geom_line() +
  geom_point(size=.5) +
  facet_wrap(~bundesland) +
  labs(title = "Schools by type / bundesland / year")
# ggsave() saves the most recently displayed plot (the one above).
ggsave(filename = glue::glue("output/figs/art_reduziert/time_series_art_reduziert.png"),
       width = 8, height = 6, dpi = 150)
## Schools range year
# First and last year with data, per bundesland (printed to console).
schools %>%
  group_by(bundesland) %>%
  summarise(
    ymin = min(jahr),
    ymax = max(jahr)
  )
## plot maps ------------
### read shapefile -----
# NUTS level-1 boundaries (EPSG:4326), filtered to Germany.
germany_shp <- st_read("misc/vign/NUTS_RG_03M_2021_4326_LEVL_1.shp") %>%
  filter(CNTR_CODE=="DE")
### some checks
st_crs(germany_shp)
st_geometry_type(germany_shp)
st_bbox(germany_shp)
## plot for all years ----
export_plot <- TRUE  # set FALSE to preview maps without writing PNGs
year_min <- range(schools$jahr)[1]
year_max <- range(schools$jahr)[2]
#year_max <- year_min + 1
# One map of school locations per year, colored by school type.
for (year in seq(year_min, year_max)) {
  type <- "schultyp"  # column used for the point color (tidy-eval via sym below)
  cat(glue::glue("year: {year}, type: {type}"))
  type_i <- dplyr::sym(type)
  p <- schools %>%
    filter(jahr==year) %>%
    ggplot() +
    geom_point(aes(x=lng, y=lat, col=!!type_i), alpha=.7, size = .5) +
    geom_sf(data = germany_shp, color = "white", fill = NA, size=.5) +
    labs(title = "Schools locations", subtitle = glue::glue("Year: {year}")) +
    theme(axis.title=element_blank())
  print(p)
  if (export_plot) {
    cat(" exporting plot...")
    ggsave(glue::glue("output/figs/{type_i}/map_schools_{year}.png"), height = 9,
           width = 8, dpi = 150)
  }
  cat(" Done!\n")
}
|
4f733edf22da5e2b49a2dc41de5f728479efb6d7 | f268adce81916fea4811e44b8cd35947b2c3fdff | /Cheung 2022/Hagger18.R | f5b99e621b431f4afb9f9842abffb70967ed454e | [] | no_license | mikewlcheung/code-in-articles | dd0e9202ad2777304dc2bb2186b69a367f0e682c | 0469adaa14ee6f4a947f932cb366ba2fa8b28867 | refs/heads/master | 2022-06-13T19:08:18.611034 | 2022-06-01T07:21:28 | 2022-06-01T07:21:28 | 45,301,280 | 10 | 5 | null | null | null | null | UTF-8 | R | false | false | 82,962 | r | Hagger18.R | Hagger18 <-
list(data = list(`1` = structure(c(1, 0.34000000000000002, NA,
0.38, 0.28999999999999998, NA, 0.059999999999999998, 0.34999999999999998,
NA, 0.34000000000000002, 1, NA, 0.29999999999999999, 0.26000000000000001,
NA, 0.17000000000000001, 0.27000000000000002, NA, NA, NA, 1,
NA, NA, NA, NA, NA, NA, 0.38, 0.29999999999999999, NA, 1, 0.29999999999999999,
NA, 0.059999999999999998, 0.17999999999999999, NA, 0.28999999999999998,
0.26000000000000001, NA, 0.29999999999999999, 1, NA, 0.10000000000000001,
0.35999999999999999, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.059999999999999998,
0.17000000000000001, NA, 0.059999999999999998, 0.10000000000000001,
NA, 1, 0.20999999999999999, NA, 0.34999999999999998, 0.27000000000000002,
NA, 0.17999999999999999, 0.35999999999999999, NA, 0.20999999999999999,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `2` = structure(c(1, NA, NA, 0.42999999999999999,
NA, 0.080000000000000002, 0.58999999999999997, 0.40999999999999998,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, NA, 0.42999999999999999, NA, NA, 1, NA, -0.13, 0.11,
0.20999999999999999, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.080000000000000002,
NA, NA, -0.13, NA, 1, 0.41999999999999998, -0.14000000000000001,
NA, 0.58999999999999997, NA, NA, 0.11, NA, 0.41999999999999998,
1, 0.31, NA, 0.40999999999999998, NA, NA, 0.20999999999999999,
NA, -0.14000000000000001, 0.31, 1, NA, NA, NA, NA, NA, NA, NA,
NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `3` = structure(c(1,
NA, 0.65600000000000003, 0.498, NA, 0.27300000000000002, 0.70199999999999996,
0.65000000000000002, 0.65000000000000002, NA, 1, NA, NA, NA,
NA, NA, NA, NA, 0.65600000000000003, NA, 1, 0.56100000000000005,
NA, 0.38900000000000001, 0.58899999999999997, 0.65000000000000002,
0.498, 0.498, NA, 0.56100000000000005, 1, NA, 0.186, 0.36799999999999999,
0.36799999999999999, 0.36799999999999999, NA, NA, NA, NA, 1,
NA, NA, NA, NA, 0.27300000000000002, NA, 0.38900000000000001,
0.186, NA, 1, 0.38, 0.26400000000000001, 0.26400000000000001,
0.70199999999999996, NA, 0.58899999999999997, 0.36799999999999999,
NA, 0.38, 1, 0.59099999999999997, 0.59099999999999997, 0.65000000000000002,
NA, 0.65000000000000002, 0.36799999999999999, NA, 0.26400000000000001,
0.59099999999999997, 1, 0.70899999999999996, 0.65000000000000002,
NA, 0.498, 0.36799999999999999, NA, 0.26400000000000001, 0.59099999999999997,
0.70899999999999996, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `4` = structure(c(1,
0.45000000000000001, 0.45000000000000001, 0.34999999999999998,
0.36799999999999999, 0.01, 0.20000000000000001, 0.26000000000000001,
NA, 0.45000000000000001, 1, 0.78000000000000003, 0.40999999999999998,
0.28599999999999998, 0.23000000000000001, 0.10000000000000001,
0.20999999999999999, NA, 0.45000000000000001, 0.78000000000000003,
1, 0.41999999999999998, 0.30499999999999999, 0.10000000000000001,
0.23000000000000001, 0.20000000000000001, NA, 0.34999999999999998,
0.40999999999999998, 0.41999999999999998, 1, 0.40000000000000002,
-0.01, 0.22, 0.26000000000000001, NA, 0.36799999999999999, 0.28599999999999998,
0.30499999999999999, 0.40000000000000002, 1, 0.029999999999999999,
0.25, 0.14499999999999999, NA, 0.01, 0.23000000000000001, 0.10000000000000001,
-0.01, 0.029999999999999999, 1, 0.42999999999999999, 0.050000000000000003,
NA, 0.20000000000000001, 0.10000000000000001, 0.23000000000000001,
0.22, 0.25, 0.42999999999999999, 1, 0.13, NA, 0.26000000000000001,
0.20999999999999999, 0.20000000000000001, 0.26000000000000001,
0.14499999999999999, 0.050000000000000003, 0.13, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `5` = structure(c(1, NA, 0.67000000000000004, 0.54000000000000004,
0.080000000000000002, 0.13, 0.48999999999999999, 0.17000000000000001,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, 0.67000000000000004, NA,
1, 0.70999999999999996, 0.23999999999999999, 0.28000000000000003,
0.29999999999999999, 0.22, NA, 0.54000000000000004, NA, 0.70999999999999996,
1, 0.34000000000000002, 0.14000000000000001, 0.23999999999999999,
0.11, NA, 0.080000000000000002, NA, 0.23999999999999999, 0.34000000000000002,
1, 0.14000000000000001, 0.23999999999999999, 0.029999999999999999,
NA, 0.13, NA, 0.28000000000000003, 0.14000000000000001, 0.14000000000000001,
1, 0.32000000000000001, 0.089999999999999997, NA, 0.48999999999999999,
NA, 0.29999999999999999, 0.23999999999999999, 0.23999999999999999,
0.32000000000000001, 1, 0.089999999999999997, NA, 0.17000000000000001,
NA, 0.22, 0.11, 0.029999999999999999, 0.089999999999999997, 0.089999999999999997,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `6` = structure(c(1, 0.44, 0.19, 0.31, NA, NA, 0.68999999999999995,
0.23000000000000001, NA, 0.44, 1, 0.39000000000000001, 0.45000000000000001,
NA, NA, 0.46999999999999997, 0.12, NA, 0.19, 0.39000000000000001,
1, 0.5, NA, NA, 0.20999999999999999, 0.089999999999999997, NA,
0.31, 0.45000000000000001, 0.5, 1, NA, NA, 0.38, 0.070000000000000007,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.68999999999999995, 0.46999999999999997, 0.20999999999999999,
0.38, NA, NA, 1, 0.23000000000000001, NA, 0.23000000000000001,
0.12, 0.089999999999999997, 0.070000000000000007, NA, NA, 0.23000000000000001,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `7` = structure(c(1, 0.47999999999999998, 0.29999999999999999,
0.33000000000000002, NA, NA, 0.68999999999999995, 0.40999999999999998,
NA, 0.47999999999999998, 1, 0.25, 0.25, NA, NA, 0.46999999999999997,
0.28000000000000003, NA, 0.29999999999999999, 0.25, 1, 0.41999999999999998,
NA, NA, 0.28000000000000003, 0.089999999999999997, NA, 0.33000000000000002,
0.25, 0.41999999999999998, 1, NA, NA, 0.29999999999999999, 0.16,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.68999999999999995, 0.46999999999999997, 0.28000000000000003,
0.29999999999999999, NA, NA, 1, 0.34999999999999998, NA, 0.40999999999999998,
0.28000000000000003, 0.089999999999999997, 0.16, NA, NA, 0.34999999999999998,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `8` = structure(c(1, 0.47999999999999998, 0.23999999999999999,
0.14000000000000001, NA, NA, NA, 0.46999999999999997, NA, 0.47999999999999998,
1, 0.28999999999999998, 0.17999999999999999, NA, NA, NA, 0.20999999999999999,
NA, 0.23999999999999999, 0.28999999999999998, 1, 0.37, NA, NA,
NA, 0.17000000000000001, NA, 0.14000000000000001, 0.17999999999999999,
0.37, 1, NA, NA, NA, 0.02, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, 0.46999999999999997, 0.20999999999999999, 0.17000000000000001,
0.02, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `9` = structure(c(1, 0.42999999999999999,
0.20999999999999999, 0.28999999999999998, NA, NA, NA, 0.41999999999999998,
NA, 0.42999999999999999, 1, 0.46000000000000002, 0.20000000000000001,
NA, NA, NA, 0.10000000000000001, NA, 0.20999999999999999, 0.46000000000000002,
1, 0.32000000000000001, NA, NA, NA, -0.02, NA, 0.28999999999999998,
0.20000000000000001, 0.32000000000000001, 1, NA, NA, NA, 0.059999999999999998,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.41999999999999998,
0.10000000000000001, -0.02, 0.059999999999999998, NA, NA, NA,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `10` = structure(c(1, 0.47999999999999998, 0.23999999999999999,
0.27000000000000002, NA, NA, 0.66000000000000003, 0.25, NA, 0.47999999999999998,
1, 0.65000000000000002, 0.48999999999999999, NA, NA, 0.48999999999999999,
0.19, NA, 0.23999999999999999, 0.65000000000000002, 1, 0.47999999999999998,
NA, NA, 0.27000000000000002, 0.11, NA, 0.27000000000000002, 0.48999999999999999,
0.47999999999999998, 1, NA, NA, 0.35999999999999999, 0.17000000000000001,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.66000000000000003, 0.48999999999999999, 0.27000000000000002,
0.35999999999999999, NA, NA, 1, 0.32000000000000001, NA, 0.25,
0.19, 0.11, 0.17000000000000001, NA, NA, 0.32000000000000001,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `11` = structure(c(1, 0.60999999999999999, 0.45000000000000001,
0.26000000000000001, NA, NA, 0.68999999999999995, 0.46999999999999997,
NA, 0.60999999999999999, 1, 0.46999999999999997, 0.20000000000000001,
NA, NA, 0.47999999999999998, 0.32000000000000001, NA, 0.45000000000000001,
0.46999999999999997, 1, 0.38, NA, NA, 0.20000000000000001, 0.14999999999999999,
NA, 0.26000000000000001, 0.20000000000000001, 0.38, 1, NA, NA,
0.16, 0.089999999999999997, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.68999999999999995, 0.47999999999999998,
0.20000000000000001, 0.16, NA, NA, 1, 0.39000000000000001, NA,
0.46999999999999997, 0.32000000000000001, 0.14999999999999999,
0.089999999999999997, NA, NA, 0.39000000000000001, 1, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `12` = structure(c(1, 0.52000000000000002, 0.16,
0.14000000000000001, NA, NA, 0.64000000000000001, 0.53000000000000003,
NA, 0.52000000000000002, 1, 0.33000000000000002, 0.11, NA, NA,
0.5, 0.40000000000000002, NA, 0.16, 0.33000000000000002, 1, 0.27000000000000002,
NA, NA, 0.20999999999999999, 0.22, NA, 0.14000000000000001, 0.11,
0.27000000000000002, 1, NA, NA, 0.14000000000000001, 0.059999999999999998,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.64000000000000001, 0.5, 0.20999999999999999, 0.14000000000000001,
NA, NA, 1, 0.44, NA, 0.53000000000000003, 0.40000000000000002,
0.22, 0.059999999999999998, NA, NA, 0.44, 1, NA, NA, NA, NA,
NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `13` = structure(c(1,
0.55000000000000004, 0.27000000000000002, 0.28000000000000003,
NA, NA, 0.57999999999999996, 0.53000000000000003, NA, 0.55000000000000004,
1, 0.55000000000000004, 0.20999999999999999, NA, NA, 0.5, 0.34999999999999998,
NA, 0.27000000000000002, 0.55000000000000004, 1, 0.27000000000000002,
NA, NA, 0.27000000000000002, 0.20000000000000001, NA, 0.28000000000000003,
0.20999999999999999, 0.27000000000000002, 1, NA, NA, 0.20999999999999999,
0.17000000000000001, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, NA, 0.57999999999999996, 0.5, 0.27000000000000002,
0.20999999999999999, NA, NA, 1, 0.38, NA, 0.53000000000000003,
0.34999999999999998, 0.20000000000000001, 0.17000000000000001,
NA, NA, 0.38, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `14` = structure(c(1, 0.60999999999999999,
0.40000000000000002, 0.29999999999999999, NA, NA, 0.51000000000000001,
0.42999999999999999, NA, 0.60999999999999999, 1, 0.40999999999999998,
0.31, NA, NA, 0.37, 0.32000000000000001, NA, 0.40000000000000002,
0.40999999999999998, 1, 0.34000000000000002, NA, NA, 0.28000000000000003,
0.17000000000000001, NA, 0.29999999999999999, 0.31, 0.34000000000000002,
1, NA, NA, 0.10000000000000001, 0.13, NA, NA, NA, NA, NA, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.51000000000000001,
0.37, 0.28000000000000003, 0.10000000000000001, NA, NA, 1, 0.40000000000000002,
NA, 0.42999999999999999, 0.32000000000000001, 0.17000000000000001,
0.13, NA, NA, 0.40000000000000002, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `15` = structure(c(1,
0.46999999999999997, 0.34000000000000002, 0.31, NA, NA, 0.44,
0.14000000000000001, NA, 0.46999999999999997, 1, 0.37, 0.31,
NA, NA, 0.41999999999999998, 0.059999999999999998, NA, 0.34000000000000002,
0.37, 1, 0.32000000000000001, NA, NA, 0.23999999999999999, 0.01,
NA, 0.31, 0.31, 0.32000000000000001, 1, NA, NA, 0.14000000000000001,
0.10000000000000001, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, NA, 0.44, 0.41999999999999998, 0.23999999999999999,
0.14000000000000001, NA, NA, 1, 0.14000000000000001, NA, 0.14000000000000001,
0.059999999999999998, 0.01, 0.10000000000000001, NA, NA, 0.14000000000000001,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `16` = structure(c(1, NA, 0.51500000000000001, 0.41499999999999998,
NA, NA, 0.71499999999999997, 0.505, NA, NA, 1, NA, NA, NA, NA,
NA, NA, NA, 0.51500000000000001, NA, 1, 0.32500000000000001,
NA, NA, 0.52000000000000002, 0.29499999999999998, NA, 0.41499999999999998,
NA, 0.32500000000000001, 1, NA, NA, 0.32500000000000001, 0.255,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.71499999999999997, NA, 0.52000000000000002, 0.32500000000000001,
NA, NA, 1, 0.48999999999999999, NA, 0.505, NA, 0.29499999999999998,
0.255, NA, NA, 0.48999999999999999, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `17` = structure(c(1,
0.60999999999999999, NA, 0.27000000000000002, 0.20999999999999999,
0.42999999999999999, NA, 0.45000000000000001, NA, 0.60999999999999999,
1, NA, 0.23000000000000001, 0.16, 0.41999999999999998, NA, 0.35999999999999999,
NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.27000000000000002, 0.23000000000000001,
NA, 1, 0.17999999999999999, 0.32000000000000001, NA, 0.33000000000000002,
NA, 0.20999999999999999, 0.16, NA, 0.17999999999999999, 1, 0.28999999999999998,
NA, 0.44, NA, 0.42999999999999999, 0.41999999999999998, NA, 0.32000000000000001,
0.28999999999999998, 1, NA, 0.5, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, 0.45000000000000001, 0.35999999999999999, NA, 0.33000000000000002,
0.44, 0.5, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `18` = structure(c(1, 0.34000000000000002,
0.41999999999999998, 0.35699999999999998, NA, NA, 0.57599999999999996,
0.34300000000000003, NA, 0.34000000000000002, 1, 0.53200000000000003,
0.44500000000000001, NA, NA, 0.48799999999999999, 0.184, NA,
0.41999999999999998, 0.53200000000000003, 1, 0.38300000000000001,
NA, NA, 0.45900000000000002, 0.151, NA, 0.35699999999999998,
0.44500000000000001, 0.38300000000000001, 1, NA, NA, 0.43099999999999999,
0.14599999999999999, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, NA, 0.57599999999999996, 0.48799999999999999,
0.45900000000000002, 0.43099999999999999, NA, NA, 1, 0.28699999999999998,
NA, 0.34300000000000003, 0.184, 0.151, 0.14599999999999999, NA,
NA, 0.28699999999999998, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `19` = structure(c(1, NA,
NA, 0.55000000000000004, 0.56000000000000005, NA, NA, 0.83999999999999997,
0.93999999999999995, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, NA, NA, 0.55000000000000004, NA, NA, 1, 0.41999999999999998,
NA, NA, 0.45000000000000001, 0.56000000000000005, 0.56000000000000005,
NA, NA, 0.41999999999999998, 1, NA, NA, 0.55000000000000004,
0.58999999999999997, NA, NA, NA, NA, NA, 1, NA, NA, 0.67000000000000004,
NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.83999999999999997, NA, NA,
0.45000000000000001, 0.55000000000000004, NA, NA, 1, 0.87, 0.93999999999999995,
NA, NA, 0.56000000000000005, 0.58999999999999997, 0.67000000000000004,
NA, 0.87, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `20` = structure(c(1,
0.68000000000000005, 0.57099999999999995, 0.099000000000000005,
0.014, 0.060999999999999999, 0.376, 0.53400000000000003, 0.54000000000000004,
0.68000000000000005, 1, 0.66900000000000004, 0.10199999999999999,
0.081000000000000003, 0.023, 0.44500000000000001, 0.41899999999999998,
0.58699999999999997, 0.57099999999999995, 0.66900000000000004,
1, 0.313, 0.26000000000000001, 0.16300000000000001, 0.17999999999999999,
0.376, 0.501, 0.099000000000000005, 0.10199999999999999, 0.313,
1, 0.39900000000000002, 0.42699999999999999, 0.223, 0.063, 0.058999999999999997,
0.014, 0.081000000000000003, 0.26000000000000001, 0.39900000000000002,
1, 0.34000000000000002, 0.094, 0.044999999999999998, -0.22900000000000001,
0.060999999999999999, 0.023, 0.16300000000000001, 0.42699999999999999,
0.34000000000000002, 1, 0.51300000000000001, -0.13800000000000001,
-0.24399999999999999, 0.376, 0.44500000000000001, 0.17999999999999999,
0.223, 0.094, 0.51300000000000001, 1, -0.249, 0.057000000000000002,
0.53400000000000003, 0.41899999999999998, 0.376, 0.063, 0.044999999999999998,
-0.13800000000000001, -0.249, 1, 0.76800000000000002, 0.54000000000000004,
0.58699999999999997, 0.501, 0.058999999999999997, -0.22900000000000001,
-0.24399999999999999, 0.057000000000000002, 0.76800000000000002,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `21` = structure(c(1, 0.84999999999999998,
0.56999999999999995, 0.084000000000000005, 0.35699999999999998,
0.29499999999999998, 0.65100000000000002, -0.066000000000000003,
0.56499999999999995, 0.84999999999999998, 1, 0.35799999999999998,
0.086999999999999994, 0.371, 0.55700000000000005, 0.70399999999999996,
0.126, 0.126, 0.56999999999999995, 0.35799999999999998, 1, 0.26700000000000002,
0.32800000000000001, 0.40000000000000002, 0.317, 0.51600000000000001,
0.71799999999999997, 0.084000000000000005, 0.086999999999999994,
0.26700000000000002, 1, -0.067000000000000004, 0.51400000000000001,
0.10100000000000001, 0.083000000000000004, 0.246, 0.35699999999999998,
0.371, 0.32800000000000001, -0.067000000000000004, 1, 0.067000000000000004,
0.49099999999999999, 0.253, 0.36299999999999999, 0.29499999999999998,
0.55700000000000005, 0.40000000000000002, 0.51400000000000001,
0.067000000000000004, 1, 0.27400000000000002, -0.34000000000000002,
0.13200000000000001, 0.65100000000000002, 0.70399999999999996,
0.317, 0.10100000000000001, 0.49099999999999999, 0.27400000000000002,
1, -0.34000000000000002, 0.19700000000000001, -0.066000000000000003,
0.126, 0.51600000000000001, 0.083000000000000004, 0.253, -0.34000000000000002,
-0.34000000000000002, 1, 0.42199999999999999, 0.56499999999999995,
0.126, 0.71799999999999997, 0.246, 0.36299999999999999, 0.13200000000000001,
0.19700000000000001, 0.42199999999999999, 1), .Dim = c(9L, 9L
), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"))), `22` = structure(c(1, 0.51000000000000001, 0.42999999999999999,
0.41999999999999998, 0.52000000000000002, 0.57999999999999996,
NA, 0.55000000000000004, 0.32000000000000001, 0.51000000000000001,
1, 0.26000000000000001, 0.17000000000000001, 0.33000000000000002,
0.45000000000000001, NA, 0.26000000000000001, 0.22, 0.42999999999999999,
0.26000000000000001, 1, 0.37, 0.28999999999999998, 0.31, NA,
0.17000000000000001, -0.29999999999999999, 0.41999999999999998,
0.17000000000000001, 0.37, 1, 0.66000000000000003, 0.27000000000000002,
NA, 0.32000000000000001, 0.22, 0.52000000000000002, 0.33000000000000002,
0.28999999999999998, 0.66000000000000003, 1, 0.44, NA, 0.41999999999999998,
0.31, 0.57999999999999996, 0.45000000000000001, 0.31, 0.27000000000000002,
0.44, 1, NA, 0.52000000000000002, 0.28000000000000003, NA, NA,
NA, NA, NA, NA, 1, NA, NA, 0.55000000000000004, 0.26000000000000001,
0.17000000000000001, 0.32000000000000001, 0.41999999999999998,
0.52000000000000002, NA, 1, 0.54000000000000004, 0.32000000000000001,
0.22, -0.29999999999999999, 0.22, 0.31, 0.28000000000000003,
NA, 0.54000000000000004, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `23` = structure(c(1, 0.58999999999999997, -0.13500000000000001,
0.14099999999999999, 0.16500000000000001, 0.14000000000000001,
0.61399999999999999, 0.182, NA, 0.58999999999999997, 1, 0.16300000000000001,
0.14399999999999999, 0.070999999999999994, 0.097000000000000003,
0.64200000000000002, 0.081000000000000003, NA, -0.13500000000000001,
0.16300000000000001, 1, -0.039, 0.045999999999999999, 0.114,
-0.0089999999999999993, -0.025000000000000001, NA, 0.14099999999999999,
0.14399999999999999, -0.039, 1, 0.52200000000000002, 0.034000000000000002,
0.191, 0.034000000000000002, NA, 0.16500000000000001, 0.070999999999999994,
0.045999999999999999, 0.52200000000000002, 1, -0.0070000000000000001,
0.074999999999999997, 0.075999999999999998, NA, 0.14000000000000001,
0.097000000000000003, 0.114, 0.034000000000000002, -0.0070000000000000001,
1, 0.20100000000000001, 0.10000000000000001, NA, 0.61399999999999999,
0.64200000000000002, -0.0089999999999999993, 0.191, 0.074999999999999997,
0.20100000000000001, 1, 0.105, NA, 0.182, 0.081000000000000003,
-0.025000000000000001, 0.034000000000000002, 0.075999999999999998,
0.10000000000000001, 0.105, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"))), `24` = structure(c(1,
0.62, 0.39000000000000001, 0.28999999999999998, NA, NA, NA, 0.29999999999999999,
NA, 0.62, 1, 0.58999999999999997, 0.23999999999999999, NA, NA,
NA, 0.26000000000000001, NA, 0.39000000000000001, 0.58999999999999997,
1, 0.34000000000000002, NA, NA, NA, 0.14000000000000001, NA,
0.28999999999999998, 0.23999999999999999, 0.34000000000000002,
1, NA, NA, NA, 0.12, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA,
NA, 0.29999999999999999, 0.26000000000000001, 0.14000000000000001,
0.12, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `25` = structure(c(1, 0.62, 0.23000000000000001,
0.22, NA, NA, NA, 0.53000000000000003, 0.78000000000000003, 0.62,
1, 0.46000000000000002, 0.17999999999999999, NA, NA, NA, 0.35999999999999999,
0.65000000000000002, 0.23000000000000001, 0.46000000000000002,
1, 0.19, NA, NA, NA, 0.14999999999999999, 0.22, 0.22, 0.17999999999999999,
0.19, 1, NA, NA, NA, 0.059999999999999998, 0.19, NA, NA, NA,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA,
NA, NA, NA, NA, NA, 1, NA, NA, 0.53000000000000003, 0.35999999999999999,
0.14999999999999999, 0.059999999999999998, NA, NA, NA, 1, 0.52000000000000002,
0.78000000000000003, 0.65000000000000002, 0.22, 0.19, NA, NA,
NA, 0.52000000000000002, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `26` = structure(c(1, 0.63, 0.23000000000000001,
0.23999999999999999, NA, NA, NA, 0.53000000000000003, 0.54000000000000004,
0.63, 1, 0.44, 0.17000000000000001, NA, NA, NA, 0.35999999999999999,
0.34000000000000002, 0.23000000000000001, 0.44, 1, 0.20000000000000001,
NA, NA, NA, 0.14999999999999999, 0.11, 0.23999999999999999, 0.17000000000000001,
0.20000000000000001, 1, NA, NA, NA, 0.080000000000000002, 0.14000000000000001,
NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.53000000000000003,
0.35999999999999999, 0.14999999999999999, 0.080000000000000002,
NA, NA, NA, 1, 0.54000000000000004, 0.54000000000000004, 0.34000000000000002,
0.11, 0.14000000000000001, NA, NA, NA, 0.54000000000000004, 1
), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `27` = structure(c(1, NA,
0.56999999999999995, 0.40000000000000002, 0.46999999999999997,
NA, 0.64000000000000001, 0.72999999999999998, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, 0.56999999999999995, NA, 1, 0.45000000000000001,
0.31, NA, 0.51000000000000001, 0.47999999999999998, NA, 0.40000000000000002,
NA, 0.45000000000000001, 1, 0.28999999999999998, NA, 0.34999999999999998,
0.37, NA, 0.46999999999999997, NA, 0.31, 0.28999999999999998,
1, NA, 0.34999999999999998, 0.5, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, 0.64000000000000001, NA, 0.51000000000000001, 0.34999999999999998,
0.34999999999999998, NA, 1, 0.58999999999999997, NA, 0.72999999999999998,
NA, 0.47999999999999998, 0.37, 0.5, NA, 0.58999999999999997,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `28` = structure(c(1, 0.85999999999999999, 0.68999999999999995,
0.5, 0.47999999999999998, 0.14999999999999999, 0.75, 0.69999999999999996,
NA, 0.85999999999999999, 1, 0.66000000000000003, 0.56999999999999995,
0.56000000000000005, 0.10000000000000001, 0.76000000000000001,
0.66000000000000003, NA, 0.68999999999999995, 0.66000000000000003,
1, 0.57999999999999996, 0.47999999999999998, 0.11, 0.52000000000000002,
0.46000000000000002, NA, 0.5, 0.56999999999999995, 0.57999999999999996,
1, 0.65000000000000002, 0.12, 0.46999999999999997, 0.35999999999999999,
NA, 0.47999999999999998, 0.56000000000000005, 0.47999999999999998,
0.65000000000000002, 1, 0.080000000000000002, 0.44, 0.41999999999999998,
NA, 0.14999999999999999, 0.10000000000000001, 0.11, 0.12, 0.080000000000000002,
1, 0.01, 0.11, NA, 0.75, 0.76000000000000001, 0.52000000000000002,
0.46999999999999997, 0.44, 0.01, 1, 0.57999999999999996, NA,
0.69999999999999996, 0.66000000000000003, 0.46000000000000002,
0.35999999999999999, 0.41999999999999998, 0.11, 0.57999999999999996,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `29` = structure(c(1, 0.55000000000000004, 0.55000000000000004,
0.40999999999999998, 0.42999999999999999, 0.28000000000000003,
0.58999999999999997, 0.64000000000000001, 0.69999999999999996,
0.55000000000000004, 1, 0.42999999999999999, 0.39000000000000001,
0.31, 0.23999999999999999, 0.32000000000000001, 0.39000000000000001,
0.46000000000000002, 0.55000000000000004, 0.42999999999999999,
1, 0.47999999999999998, 0.31, 0.22, 0.45000000000000001, 0.42999999999999999,
0.41999999999999998, 0.40999999999999998, 0.39000000000000001,
0.47999999999999998, 1, 0.35999999999999999, 0.28000000000000003,
0.40000000000000002, 0.34000000000000002, 0.34999999999999998,
0.42999999999999999, 0.31, 0.31, 0.35999999999999999, 1, 0.22,
0.37, 0.37, 0.42999999999999999, 0.28000000000000003, 0.23999999999999999,
0.22, 0.28000000000000003, 0.22, 1, 0.41999999999999998, 0.28000000000000003,
0.25, 0.58999999999999997, 0.32000000000000001, 0.45000000000000001,
0.40000000000000002, 0.37, 0.41999999999999998, 1, 0.55000000000000004,
0.51000000000000001, 0.64000000000000001, 0.39000000000000001,
0.42999999999999999, 0.34000000000000002, 0.37, 0.28000000000000003,
0.55000000000000004, 1, 0.63, 0.69999999999999996, 0.46000000000000002,
0.41999999999999998, 0.34999999999999998, 0.42999999999999999,
0.25, 0.51000000000000001, 0.63, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `30` = structure(c(1, 0.54000000000000004, 0.38100000000000001,
0.41399999999999998, NA, NA, 0.48299999999999998, 0.58899999999999997,
0.53900000000000003, 0.54000000000000004, 1, 0.625, 0.53000000000000003,
NA, NA, 0.44900000000000001, 0.34499999999999997, 0.35199999999999998,
0.38100000000000001, 0.625, 1, 0.64300000000000002, NA, NA, 0.47399999999999998,
0.22500000000000001, 0.24199999999999999, 0.41399999999999998,
0.53000000000000003, 0.64300000000000002, 1, NA, NA, 0.46999999999999997,
0.22500000000000001, 0.27100000000000002, NA, NA, NA, NA, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.48299999999999998,
0.44900000000000001, 0.47399999999999998, 0.46999999999999997,
NA, NA, 1, 0.29899999999999999, 0.31, 0.58899999999999997, 0.34499999999999997,
0.22500000000000001, 0.22500000000000001, NA, NA, 0.29899999999999999,
1, 0.65100000000000002, 0.53900000000000003, 0.35199999999999998,
0.24199999999999999, 0.27100000000000002, NA, NA, 0.31, 0.65100000000000002,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `31` = structure(c(1, NA,
0.57199999999999995, 0.625, NA, 0.53300000000000003, 0.76000000000000001,
0.309, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, 0.57199999999999995,
NA, 1, 0.433, NA, 0.55400000000000005, 0.69299999999999995, 0.36899999999999999,
NA, 0.625, NA, 0.433, 1, NA, 0.41299999999999998, 0.55700000000000005,
0.36399999999999999, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.53300000000000003,
NA, 0.55400000000000005, 0.41299999999999998, NA, 1, 0.56599999999999995,
0.33800000000000002, NA, 0.76000000000000001, NA, 0.69299999999999995,
0.55700000000000005, NA, 0.56599999999999995, 1, 0.441, NA, 0.309,
NA, 0.36899999999999999, 0.36399999999999999, NA, 0.33800000000000002,
0.441, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `32` = structure(c(1, 0.25, NA, 0.17999999999999999,
NA, 0.37, 0.82999999999999996, NA, 0.40000000000000002, 0.25,
1, NA, 0.20000000000000001, NA, 0.14000000000000001, 0.34000000000000002,
NA, 0.23999999999999999, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.17999999999999999,
0.20000000000000001, NA, 1, NA, 0.040000000000000001, -0.040000000000000001,
NA, 0.11, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.37, 0.14000000000000001,
NA, 0.040000000000000001, NA, 1, 0.29999999999999999, NA, 0.48999999999999999,
0.82999999999999996, 0.34000000000000002, NA, -0.040000000000000001,
NA, 0.29999999999999999, 1, NA, 0.059999999999999998, NA, NA,
NA, NA, NA, NA, NA, 1, NA, 0.40000000000000002, 0.23999999999999999,
NA, 0.11, NA, 0.48999999999999999, 0.059999999999999998, NA,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `33` = structure(c(1, 0.60999999999999999,
0.748, 0.46800000000000003, 0.379, 0.373, 0.38600000000000001,
0.74199999999999999, 0.79800000000000004, 0.60999999999999999,
1, 0.69999999999999996, 0.33400000000000002, 0.30499999999999999,
0.223, 0.26400000000000001, 0.47599999999999998, 0.45400000000000001,
0.748, 0.69999999999999996, 1, 0.437, 0.29799999999999999, 0.246,
0.248, 0.60199999999999998, 0.64600000000000002, 0.46800000000000003,
0.33400000000000002, 0.437, 1, 0.55400000000000005, 0.16300000000000001,
0.129, 0.379, 0.40699999999999997, 0.379, 0.30499999999999999,
0.29799999999999999, 0.55400000000000005, 1, 0.16900000000000001,
0.16500000000000001, 0.313, 0.30499999999999999, 0.373, 0.223,
0.246, 0.16300000000000001, 0.16900000000000001, 1, 0.69699999999999995,
0.26600000000000001, 0.29699999999999999, 0.38600000000000001,
0.26400000000000001, 0.248, 0.129, 0.16500000000000001, 0.69699999999999995,
1, 0.27400000000000002, 0.28399999999999997, 0.74199999999999999,
0.47599999999999998, 0.60199999999999998, 0.379, 0.313, 0.26600000000000001,
0.27400000000000002, 1, 0.75600000000000001, 0.79800000000000004,
0.45400000000000001, 0.64600000000000002, 0.40699999999999997,
0.30499999999999999, 0.29699999999999999, 0.28399999999999997,
0.75600000000000001, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `34` = structure(c(1,
0.68000000000000005, 0.58499999999999996, 0.46700000000000003,
0.39900000000000002, 0.65800000000000003, 0.74199999999999999,
0.73199999999999998, 0.752, 0.68000000000000005, 1, 0.64800000000000002,
0.40100000000000002, 0.33800000000000002, 0.50600000000000001,
0.55400000000000005, 0.54200000000000004, 0.53800000000000003,
0.58499999999999996, 0.64800000000000002, 1, 0.378, 0.186, 0.38100000000000001,
0.45700000000000002, 0.42799999999999999, 0.40000000000000002,
0.46700000000000003, 0.40100000000000002, 0.378, 1, 0.51500000000000001,
0.34899999999999998, 0.39900000000000002, 0.33200000000000002,
0.33600000000000002, 0.39900000000000002, 0.33800000000000002,
0.186, 0.51500000000000001, 1, 0.23599999999999999, 0.28899999999999998,
0.29899999999999999, 0.33000000000000002, 0.65800000000000003,
0.50600000000000001, 0.38100000000000001, 0.34899999999999998,
0.23599999999999999, 1, 0.79700000000000004, 0.52700000000000002,
0.56200000000000006, 0.74199999999999999, 0.55400000000000005,
0.45700000000000002, 0.39900000000000002, 0.28899999999999998,
0.79700000000000004, 1, 0.56999999999999995, 0.60799999999999998,
0.73199999999999998, 0.54200000000000004, 0.42799999999999999,
0.33200000000000002, 0.29899999999999999, 0.52700000000000002,
0.56999999999999995, 1, 0.78000000000000003, 0.752, 0.53800000000000003,
0.40000000000000002, 0.33600000000000002, 0.33000000000000002,
0.56200000000000006, 0.60799999999999998, 0.78000000000000003,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `35` = structure(c(1, 0.48999999999999999,
0.23999999999999999, 0.33000000000000002, 0.27000000000000002,
NA, 0.32000000000000001, 0.02, 0.22, 0.48999999999999999, 1,
0.34000000000000002, 0.22, 0.059999999999999998, 0.23999999999999999,
NA, 0.029999999999999999, 0.16, 0.23999999999999999, 0.34000000000000002,
1, 0.17000000000000001, -0.029999999999999999, 0.14000000000000001,
NA, 0.080000000000000002, 0.13, 0.33000000000000002, 0.22, 0.17000000000000001,
1, 0.23000000000000001, 0.080000000000000002, NA, -0.02, 0.12,
0.27000000000000002, 0.059999999999999998, -0.029999999999999999,
0.23000000000000001, 1, -0.080000000000000002, NA, 0.089999999999999997,
0.070000000000000007, NA, 0.23999999999999999, 0.14000000000000001,
0.080000000000000002, -0.080000000000000002, 1, NA, NA, -0.050000000000000003,
0.32000000000000001, NA, NA, NA, NA, NA, 1, 0.080000000000000002,
NA, 0.02, 0.029999999999999999, 0.080000000000000002, -0.02,
0.089999999999999997, NA, 0.080000000000000002, 1, 0.01, 0.22,
0.16, 0.13, 0.12, 0.070000000000000007, -0.050000000000000003,
NA, 0.01, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `36` = structure(c(1,
0.53000000000000003, 0.40000000000000002, 0.14999999999999999,
0.28999999999999998, NA, 0.39000000000000001, 0.29999999999999999,
0.20000000000000001, 0.53000000000000003, 1, 0.46999999999999997,
0.25, 0.17000000000000001, 0.19, NA, 0.14999999999999999, 0.14000000000000001,
0.40000000000000002, 0.46999999999999997, 1, 0.25, 0.20000000000000001,
0.17999999999999999, NA, 0.14999999999999999, 0.20000000000000001,
0.14999999999999999, 0.25, 0.25, 1, 0.13, 0.050000000000000003,
NA, 0.11, 0.050000000000000003, 0.28999999999999998, 0.17000000000000001,
0.20000000000000001, 0.13, 1, 0.20999999999999999, NA, 0.080000000000000002,
0.12, NA, 0.19, 0.17999999999999999, 0.050000000000000003, 0.20999999999999999,
1, NA, NA, 0.050000000000000003, 0.39000000000000001, NA, NA,
NA, NA, NA, 1, 0.23000000000000001, NA, 0.29999999999999999,
0.14999999999999999, 0.14999999999999999, 0.11, 0.080000000000000002,
NA, 0.23000000000000001, 1, 0.089999999999999997, 0.20000000000000001,
0.14000000000000001, 0.20000000000000001, 0.050000000000000003,
0.12, 0.050000000000000003, NA, 0.089999999999999997, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `37` = structure(c(1, NA, NA, 0.27000000000000002,
0.30199999999999999, 0.16600000000000001, 0.44600000000000001,
0.45100000000000001, 0.47699999999999998, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.27000000000000002,
NA, NA, 1, 0.252, 0.060999999999999999, 0.052999999999999999,
-0.17000000000000001, 0.031, 0.30199999999999999, NA, NA, 0.252,
1, 0.10000000000000001, 0.24099999999999999, 0.27500000000000002,
0.39200000000000002, 0.16600000000000001, NA, NA, 0.060999999999999999,
0.10000000000000001, 1, 0.24099999999999999, 0.114, 0.10100000000000001,
0.44600000000000001, NA, NA, 0.052999999999999999, 0.24099999999999999,
0.24099999999999999, 1, 0.32600000000000001, 0.33500000000000002,
0.45100000000000001, NA, NA, -0.17000000000000001, 0.27500000000000002,
0.114, 0.32600000000000001, 1, 0.746, 0.47699999999999998, NA,
NA, 0.031, 0.39200000000000002, 0.10100000000000001, 0.33500000000000002,
0.746, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `38` = structure(c(1,
0.51000000000000001, 0.52000000000000002, 0.40000000000000002,
0.29999999999999999, NA, 0.56000000000000005, 0.40999999999999998,
NA, 0.51000000000000001, 1, 0.68000000000000005, 0.52000000000000002,
0.34999999999999998, NA, 0.58999999999999997, 0.34000000000000002,
NA, 0.52000000000000002, 0.68000000000000005, 1, 0.60999999999999999,
0.22, NA, 0.66000000000000003, 0.34000000000000002, NA, 0.40000000000000002,
0.52000000000000002, 0.60999999999999999, 1, 0.40000000000000002,
NA, 0.48999999999999999, 0.23999999999999999, NA, 0.29999999999999999,
0.34999999999999998, 0.22, 0.40000000000000002, 1, NA, 0.29999999999999999,
0.10000000000000001, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.56000000000000005,
0.58999999999999997, 0.66000000000000003, 0.48999999999999999,
0.29999999999999999, NA, 1, 0.37, NA, 0.40999999999999998, 0.34000000000000002,
0.34000000000000002, 0.23999999999999999, 0.10000000000000001,
NA, 0.37, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `39` = structure(c(1, 0.66000000000000003,
NA, NA, NA, 0.47999999999999998, 0.69999999999999996, 0.55000000000000004,
NA, 0.66000000000000003, 1, NA, NA, NA, 0.40999999999999998,
0.56999999999999995, 0.40999999999999998, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA,
NA, NA, 1, NA, NA, NA, NA, 0.47999999999999998, 0.40999999999999998,
NA, NA, NA, 1, 0.68999999999999995, 0.32000000000000001, NA,
0.69999999999999996, 0.56999999999999995, NA, NA, NA, 0.68999999999999995,
1, 0.54000000000000004, NA, 0.55000000000000004, 0.40999999999999998,
NA, NA, NA, 0.32000000000000001, 0.54000000000000004, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `40` = structure(c(1, 0.57999999999999996, 0.315,
0.30499999999999999, NA, 0.29799999999999999, 0.59499999999999997,
0.51500000000000001, NA, 0.57999999999999996, 1, 0.27000000000000002,
0.27500000000000002, NA, 0.23000000000000001, 0.505, 0.34999999999999998,
NA, 0.315, 0.27000000000000002, 1, 0.27000000000000002, NA, 0.14999999999999999,
0.188, 0.20000000000000001, NA, 0.30499999999999999, 0.27500000000000002,
0.27000000000000002, 1, NA, 0.108, 0.23000000000000001, 0.16500000000000001,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.29799999999999999, 0.23000000000000001,
0.14999999999999999, 0.108, NA, 1, 0.45300000000000001, 0.19,
NA, 0.59499999999999997, 0.505, 0.188, 0.23000000000000001, NA,
0.45300000000000001, 1, 0.375, NA, 0.51500000000000001, 0.34999999999999998,
0.20000000000000001, 0.16500000000000001, NA, 0.19, 0.375, 1,
NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `41` = structure(c(1, 0.81000000000000005, 0.53000000000000003,
0.54000000000000004, 0.17000000000000001, 0.72999999999999998,
0.54000000000000004, 0.23999999999999999, NA, 0.81000000000000005,
1, 0.46000000000000002, 0.52000000000000002, 0.23000000000000001,
0.63, 0.62, 0.39000000000000001, NA, 0.53000000000000003, 0.46000000000000002,
1, 0.27000000000000002, 0.02, 0.37, 0.37, 0.14000000000000001,
NA, 0.54000000000000004, 0.52000000000000002, 0.27000000000000002,
1, 0.40999999999999998, 0.37, 0.40999999999999998, 0.20000000000000001,
NA, 0.17000000000000001, 0.23000000000000001, 0.02, 0.40999999999999998,
1, 0.14000000000000001, 0.26000000000000001, 0.14000000000000001,
NA, 0.72999999999999998, 0.63, 0.37, 0.37, 0.14000000000000001,
1, 0.53000000000000003, 0.10000000000000001, NA, 0.54000000000000004,
0.62, 0.37, 0.40999999999999998, 0.26000000000000001, 0.53000000000000003,
1, 0.23000000000000001, NA, 0.23999999999999999, 0.39000000000000001,
0.14000000000000001, 0.20000000000000001, 0.14000000000000001,
0.10000000000000001, 0.23000000000000001, 1, NA, NA, NA, NA,
NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `42` = structure(c(1,
0.59999999999999998, 0.46000000000000002, 0.26000000000000001,
0.02, 0.22, 0.42999999999999999, 0.23000000000000001, NA, 0.59999999999999998,
1, 0.20999999999999999, 0.16, -0.02, 0.10000000000000001, 0.46999999999999997,
0.28000000000000003, NA, 0.46000000000000002, 0.20999999999999999,
1, 0.28000000000000003, 0.080000000000000002, 0.070000000000000007,
0.16, 0.040000000000000001, NA, 0.26000000000000001, 0.16, 0.28000000000000003,
1, 0.5, 0.27000000000000002, 0.33000000000000002, 0.059999999999999998,
NA, 0.02, -0.02, 0.080000000000000002, 0.5, 1, 0.27000000000000002,
0.14999999999999999, 0.02, NA, 0.22, 0.10000000000000001, 0.070000000000000007,
0.27000000000000002, 0.27000000000000002, 1, 0.22, 0.14000000000000001,
NA, 0.42999999999999999, 0.46999999999999997, 0.16, 0.33000000000000002,
0.14999999999999999, 0.22, 1, 0.26000000000000001, NA, 0.23000000000000001,
0.28000000000000003, 0.040000000000000001, 0.059999999999999998,
0.02, 0.14000000000000001, 0.26000000000000001, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `43` = structure(c(1, 0.54000000000000004, 0.48999999999999999,
0.499, 0.53700000000000003, 0.19400000000000001, 0.54000000000000004,
0.71699999999999997, NA, 0.54000000000000004, 1, 0.443, 0.36499999999999999,
0.40600000000000003, 0.126, 0.48199999999999998, 0.497, NA, 0.48999999999999999,
0.443, 1, 0.46000000000000002, 0.32500000000000001, 0.14099999999999999,
0.435, 0.373, NA, 0.499, 0.36499999999999999, 0.46000000000000002,
1, 0.51000000000000001, 0.14499999999999999, 0.27700000000000002,
0.42799999999999999, NA, 0.53700000000000003, 0.40600000000000003,
0.32500000000000001, 0.51000000000000001, 1, 0.14000000000000001,
0.33000000000000002, 0.52900000000000003, NA, 0.19400000000000001,
0.126, 0.14099999999999999, 0.14499999999999999, 0.14000000000000001,
1, 0.20000000000000001, 0.159, NA, 0.54000000000000004, 0.48199999999999998,
0.435, 0.27700000000000002, 0.33000000000000002, 0.20000000000000001,
1, 0.41399999999999998, NA, 0.71699999999999997, 0.497, 0.373,
0.42799999999999999, 0.52900000000000003, 0.159, 0.41399999999999998,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `44` = structure(c(1, 0.56000000000000005, 0.34000000000000002,
0.46000000000000002, NA, NA, 0.55000000000000004, 0.27000000000000002,
0.34000000000000002, 0.56000000000000005, 1, 0.53000000000000003,
0.40000000000000002, NA, NA, 0.40000000000000002, 0.29999999999999999,
0.28999999999999998, 0.34000000000000002, 0.53000000000000003,
1, 0.28000000000000003, NA, NA, 0.23999999999999999, 0.16, 0.11,
0.46000000000000002, 0.40000000000000002, 0.28000000000000003,
1, NA, NA, 0.48999999999999999, 0.14000000000000001, 0.16, NA,
NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA,
NA, 0.55000000000000004, 0.40000000000000002, 0.23999999999999999,
0.48999999999999999, NA, NA, 1, 0.25, 0.31, 0.27000000000000002,
0.29999999999999999, 0.16, 0.14000000000000001, NA, NA, 0.25,
1, 0.53000000000000003, 0.34000000000000002, 0.28999999999999998,
0.11, 0.16, NA, NA, 0.31, 0.53000000000000003, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `45` = structure(c(1, 0.47999999999999998,
0.34999999999999998, 0.23999999999999999, 0.26000000000000001,
NA, 0.53000000000000003, 0.48999999999999999, NA, 0.47999999999999998,
1, 0.41999999999999998, 0.23999999999999999, 0.17000000000000001,
NA, 0.23999999999999999, 0.31, NA, 0.34999999999999998, 0.41999999999999998,
1, 0.34000000000000002, -0.029999999999999999, NA, 0.19, 0.089999999999999997,
NA, 0.23999999999999999, 0.23999999999999999, 0.34000000000000002,
1, 0.23000000000000001, NA, 0.11, 0.14999999999999999, NA, 0.26000000000000001,
0.17000000000000001, -0.029999999999999999, 0.23000000000000001,
1, NA, 0.14000000000000001, 0.23000000000000001, NA, NA, NA,
NA, NA, NA, 1, NA, NA, NA, 0.53000000000000003, 0.23999999999999999,
0.19, 0.11, 0.14000000000000001, NA, 1, 0.40999999999999998,
NA, 0.48999999999999999, 0.31, 0.089999999999999997, 0.14999999999999999,
0.23000000000000001, NA, 0.40999999999999998, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `46` = structure(c(1, 0.62, NA, 0.51300000000000001,
0.623, 0.57999999999999996, NA, 0.66300000000000003, NA, 0.62,
1, NA, 0.58999999999999997, 0.56999999999999995, 0.56299999999999994,
NA, 0.46500000000000002, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA,
0.51300000000000001, 0.58999999999999997, NA, 1, 0.54800000000000004,
0.49299999999999999, NA, 0.41499999999999998, NA, 0.623, 0.56999999999999995,
NA, 0.54800000000000004, 1, NA, NA, 0.46300000000000002, NA,
0.57999999999999996, 0.56299999999999994, NA, 0.49299999999999999,
NA, 1, NA, 0.38800000000000001, NA, NA, NA, NA, NA, NA, NA, 1,
NA, NA, 0.66300000000000003, 0.46500000000000002, NA, 0.41499999999999998,
0.46300000000000002, 0.38800000000000001, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `47` = structure(c(1, 0.67000000000000004, NA, 0.56999999999999995,
0.44, NA, 0.63, 0.46000000000000002, NA, 0.67000000000000004,
1, NA, 0.57999999999999996, 0.40999999999999998, NA, 0.65000000000000002,
0.41999999999999998, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.56999999999999995,
0.57999999999999996, NA, 1, 0.41999999999999998, NA, 0.5, 0.33000000000000002,
NA, 0.44, 0.40999999999999998, NA, 0.41999999999999998, 1, NA,
0.38, 0.39000000000000001, NA, NA, NA, NA, NA, NA, 1, NA, NA,
NA, 0.63, 0.65000000000000002, NA, 0.5, 0.38, NA, 1, 0.41999999999999998,
NA, 0.46000000000000002, 0.41999999999999998, NA, 0.33000000000000002,
0.39000000000000001, NA, 0.41999999999999998, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `48` = structure(c(1, 0.67000000000000004, NA, 0.52900000000000003,
0.30399999999999999, NA, 0.64400000000000002, 0.378, 0.48499999999999999,
0.67000000000000004, 1, NA, 0.58899999999999997, 0.29499999999999998,
NA, 0.70599999999999996, 0.42799999999999999, 0.51900000000000002,
NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.52900000000000003, 0.58899999999999997,
NA, 1, NA, NA, 0.54100000000000004, 0.39400000000000002, 0.51200000000000001,
0.30399999999999999, 0.29499999999999998, NA, NA, 1, NA, 0.38,
NA, 0.34100000000000003, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.64400000000000002,
0.70599999999999996, NA, 0.54100000000000004, 0.38, NA, 1, 0.48599999999999999,
0.58999999999999997, 0.378, 0.42799999999999999, NA, 0.39400000000000002,
NA, NA, 0.48599999999999999, 1, 0.54500000000000004, 0.48499999999999999,
0.51900000000000002, NA, 0.51200000000000001, 0.34100000000000003,
NA, 0.58999999999999997, 0.54500000000000004, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `49` = structure(c(1, NA, NA, 0.64000000000000001,
0.48999999999999999, NA, 0.52000000000000002, 0.22, 0.48999999999999999,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA,
NA, NA, 0.64000000000000001, NA, NA, 1, 0.62, NA, 0.56000000000000005,
0.20999999999999999, 0.5, 0.48999999999999999, NA, NA, 0.62,
1, NA, -0.37, NA, 0.46000000000000002, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.52000000000000002, NA, NA, 0.56000000000000005,
-0.37, NA, 1, 0.17000000000000001, 0.48999999999999999, 0.22,
NA, NA, 0.20999999999999999, NA, NA, 0.17000000000000001, 1,
0.35999999999999999, 0.48999999999999999, NA, NA, 0.5, 0.46000000000000002,
NA, 0.48999999999999999, 0.35999999999999999, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `50` = structure(c(1, 0.46999999999999997,
NA, 0.029999999999999999, NA, 0.02, 0.44, 0.42999999999999999,
NA, 0.46999999999999997, 1, NA, 0.16, NA, 0.050000000000000003,
0.41999999999999998, 0.46000000000000002, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, 0.029999999999999999, 0.16, NA, 1, NA, 0.11,
0.55000000000000004, 0.11, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, 0.02, 0.050000000000000003, NA, 0.11, NA, 1, 0.42999999999999999,
0.23999999999999999, NA, 0.44, 0.41999999999999998, NA, 0.55000000000000004,
NA, 0.42999999999999999, 1, 0.56000000000000005, NA, 0.42999999999999999,
0.46000000000000002, NA, 0.11, NA, 0.23999999999999999, 0.56000000000000005,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `51` = structure(c(1, 0.81999999999999995, NA, 0.44,
NA, 0.02, 0.64000000000000001, 0.58999999999999997, 0.69999999999999996,
0.81999999999999995, 1, NA, 0.59999999999999998, NA, 0.01, 0.47999999999999998,
0.51000000000000001, 0.63, NA, NA, 1, NA, NA, NA, NA, NA, NA,
0.44, 0.59999999999999998, NA, 1, NA, 0.01, 0.23000000000000001,
0.20999999999999999, 0.33000000000000002, NA, NA, NA, NA, 1,
NA, NA, NA, NA, 0.02, 0.01, NA, 0.01, NA, 1, 0.17999999999999999,
-0.050000000000000003, 0.14999999999999999, 0.64000000000000001,
0.47999999999999998, NA, 0.23000000000000001, NA, 0.17999999999999999,
1, 0.37, 0.44, 0.58999999999999997, 0.51000000000000001, NA,
0.20999999999999999, NA, -0.050000000000000003, 0.37, 1, 0.59999999999999998,
0.69999999999999996, 0.63, NA, 0.33000000000000002, NA, 0.14999999999999999,
0.44, 0.59999999999999998, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `52` = structure(c(1, 0.73999999999999999, NA, 0.41999999999999998,
NA, 0.01, 0.28999999999999998, 0.40000000000000002, 0.40999999999999998,
0.73999999999999999, 1, NA, 0.5, NA, -0.01, 0.17000000000000001,
0.28999999999999998, 0.5, NA, NA, 1, NA, NA, NA, NA, NA, NA,
0.41999999999999998, 0.5, NA, 1, NA, -0.11, 0.26000000000000001,
0.20999999999999999, 0.27000000000000002, NA, NA, NA, NA, 1,
NA, NA, NA, NA, 0.01, -0.01, NA, -0.11, NA, 1, 0.20000000000000001,
-0.19, 0.029999999999999999, 0.28999999999999998, 0.17000000000000001,
NA, 0.26000000000000001, NA, 0.20000000000000001, 1, 0.26000000000000001,
0.23999999999999999, 0.40000000000000002, 0.28999999999999998,
NA, 0.20999999999999999, NA, -0.19, 0.26000000000000001, 1, 0.60999999999999999,
0.40999999999999998, 0.5, NA, 0.27000000000000002, NA, 0.029999999999999999,
0.23999999999999999, 0.60999999999999999, 1), .Dim = c(9L, 9L
), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"))), `53` = structure(c(1, 0.82999999999999996, NA,
0.42999999999999999, NA, -0.22, 0.69999999999999996, 0.41999999999999998,
0.63, 0.82999999999999996, 1, NA, 0.53000000000000003, NA, -0.11,
0.65000000000000002, 0.35999999999999999, 0.53000000000000003,
NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.42999999999999999, 0.53000000000000003,
NA, 1, NA, 0, 0.39000000000000001, 0.17000000000000001, 0.26000000000000001,
NA, NA, NA, NA, 1, NA, NA, NA, NA, -0.22, -0.11, NA, 0, NA, 1,
-0.11, -0.12, -0.27000000000000002, 0.69999999999999996, 0.65000000000000002,
NA, 0.39000000000000001, NA, -0.11, 1, 0.41999999999999998, 0.46000000000000002,
0.41999999999999998, 0.35999999999999999, NA, 0.17000000000000001,
NA, -0.12, 0.41999999999999998, 1, 0.41999999999999998, 0.63,
0.53000000000000003, NA, 0.26000000000000001, NA, -0.27000000000000002,
0.46000000000000002, 0.41999999999999998, 1), .Dim = c(9L, 9L
), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"))), `54` = structure(c(1, 0.56000000000000005, NA,
0.56000000000000005, NA, 0.23999999999999999, 0.80000000000000004,
0.71999999999999997, 0.46999999999999997, 0.56000000000000005,
1, NA, NA, NA, NA, NA, 0.42999999999999999, NA, NA, NA, 1, 0.56000000000000005,
NA, 0.46999999999999997, 0.44, NA, 0.31, 0.56000000000000005,
NA, 0.56000000000000005, 1, NA, 0.25, 0.51000000000000001, 0.34999999999999998,
0.23999999999999999, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.23999999999999999,
NA, 0.46999999999999997, 0.25, NA, 1, 0.32000000000000001, 0.17000000000000001,
0.070000000000000007, 0.80000000000000004, NA, 0.44, 0.51000000000000001,
NA, 0.32000000000000001, 1, 0.51000000000000001, 0.32000000000000001,
0.71999999999999997, 0.42999999999999999, NA, 0.34999999999999998,
NA, 0.17000000000000001, 0.51000000000000001, 1, 0.46000000000000002,
0.46999999999999997, NA, 0.31, 0.23999999999999999, NA, 0.070000000000000007,
0.32000000000000001, 0.46000000000000002, 1), .Dim = c(9L, 9L
), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"))), `55` = structure(c(1, 0.33000000000000002, NA,
0.10000000000000001, NA, 0.20999999999999999, 0.44, 0.83999999999999997,
NA, 0.33000000000000002, 1, NA, 0.19, NA, 0.040000000000000001,
0.32000000000000001, 0.19, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, 0.10000000000000001, 0.19, NA, 1, NA, -0.01, -0.01, 0.01,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.20999999999999999, 0.040000000000000001,
NA, -0.01, NA, 1, 0.37, 0.22, NA, 0.44, 0.32000000000000001,
NA, -0.01, NA, 0.37, 1, 0.47999999999999998, NA, 0.83999999999999997,
0.19, NA, 0.01, NA, 0.22, 0.47999999999999998, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `56` = structure(c(1, 0.40000000000000002, 0.34000000000000002,
0.22, NA, NA, 0.39000000000000001, 0.54000000000000004, NA, 0.40000000000000002,
1, 0.46500000000000002, 0.17499999999999999, NA, NA, 0.33500000000000002,
0.315, NA, 0.34000000000000002, 0.46500000000000002, 1, NA, NA,
NA, NA, 0.19500000000000001, NA, 0.22, 0.17499999999999999, NA,
1, NA, NA, 0.14999999999999999, 0.13500000000000001, NA, NA,
NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA,
NA, 0.39000000000000001, 0.33500000000000002, NA, 0.14999999999999999,
NA, NA, 1, 0.35999999999999999, NA, 0.54000000000000004, 0.315,
0.19500000000000001, 0.13500000000000001, NA, NA, 0.35999999999999999,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `57` = structure(c(1, NA, NA, 0.495, 0.41999999999999998,
0.23000000000000001, NA, 0.28000000000000003, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.495,
NA, NA, 1, 0.32000000000000001, 0.39500000000000002, NA, 0.23000000000000001,
NA, 0.41999999999999998, NA, NA, 0.32000000000000001, 1, 0.17999999999999999,
NA, 0.16, NA, 0.23000000000000001, NA, NA, 0.39500000000000002,
0.17999999999999999, 1, NA, 0.070000000000000007, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, 0.28000000000000003, NA, NA, 0.23000000000000001,
0.16, 0.070000000000000007, NA, 1, NA, NA, NA, NA, NA, NA, NA,
NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `58` = structure(c(1,
0.64000000000000001, NA, 0.26100000000000001, 0.20699999999999999,
NA, NA, 0.29299999999999998, NA, 0.64000000000000001, 1, NA,
0.192, 0.30599999999999999, NA, NA, 0.35599999999999998, NA,
NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.26100000000000001, 0.192,
NA, 1, 0.41799999999999998, NA, NA, -0.0089999999999999993, NA,
0.20699999999999999, 0.30599999999999999, NA, 0.41799999999999998,
1, NA, NA, 0.056000000000000001, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.29299999999999998,
0.35599999999999998, NA, -0.0089999999999999993, 0.056000000000000001,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `59` = structure(c(1, 0.55000000000000004,
NA, 0.39800000000000002, NA, 0.14999999999999999, 0.67900000000000005,
0.54700000000000004, NA, 0.55000000000000004, 1, NA, 0.36199999999999999,
NA, 0.125, 0.45200000000000001, 0.312, NA, NA, NA, 1, NA, NA,
NA, NA, NA, NA, 0.39800000000000002, 0.36199999999999999, NA,
1, NA, 0.075999999999999998, 0.34100000000000003, 0.32700000000000001,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.14999999999999999, 0.125,
NA, 0.075999999999999998, NA, 1, 0.496, 0.191, NA, 0.67900000000000005,
0.45200000000000001, NA, 0.34100000000000003, NA, 0.496, 1, 0.48799999999999999,
NA, 0.54700000000000004, 0.312, NA, 0.32700000000000001, NA,
0.191, 0.48799999999999999, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"))), `60` = structure(c(1,
0.76000000000000001, NA, 0.34000000000000002, NA, 0.11600000000000001,
0.59599999999999997, 0.36399999999999999, NA, 0.76000000000000001,
1, NA, 0.34499999999999997, NA, 0.34399999999999997, 0.68000000000000005,
0.318, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.34000000000000002,
0.34499999999999997, NA, 1, NA, -0.0089999999999999993, 0.17000000000000001,
0.114, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.11600000000000001,
0.34399999999999997, NA, -0.0089999999999999993, NA, 1, 0.49199999999999999,
0.193, NA, 0.59599999999999997, 0.68000000000000005, NA, 0.17000000000000001,
NA, 0.49199999999999999, 1, 0.36599999999999999, NA, 0.36399999999999999,
0.318, NA, 0.114, NA, 0.193, 0.36599999999999999, 1, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `61` = structure(c(1, 0.40000000000000002, 0.22,
0.25, NA, 0.31, NA, 0.41999999999999998, 0.34000000000000002,
0.40000000000000002, 1, 0.29999999999999999, 0.12, NA, NA, 0.38,
0.22, 0.16, 0.22, 0.29999999999999999, 1, 0.17000000000000001,
NA, NA, 0.28000000000000003, 0.17000000000000001, 0.089999999999999997,
0.25, 0.12, 0.17000000000000001, 1, NA, NA, 0.17999999999999999,
0.11, -0.059999999999999998, NA, NA, NA, NA, 1, NA, NA, NA, NA,
0.31, NA, NA, NA, NA, 1, NA, 0.38, NA, NA, 0.38, 0.28000000000000003,
0.17999999999999999, NA, NA, 1, NA, 0.46000000000000002, 0.41999999999999998,
0.22, 0.17000000000000001, 0.11, NA, 0.38, NA, 1, 0.46999999999999997,
0.34000000000000002, 0.16, 0.089999999999999997, -0.059999999999999998,
NA, NA, 0.46000000000000002, 0.46999999999999997, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `62` = structure(c(1, 0.31, 0.23000000000000001,
0.22, NA, NA, 0.41999999999999998, 0.44, NA, 0.31, 1, 0.39000000000000001,
0.25, NA, NA, 0.28999999999999998, 0.25, NA, 0.23000000000000001,
0.39000000000000001, 1, 0.33000000000000002, NA, NA, 0.17999999999999999,
0.080000000000000002, NA, 0.22, 0.25, 0.33000000000000002, 1,
NA, NA, 0.14000000000000001, 0.089999999999999997, NA, NA, NA,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA,
0.41999999999999998, 0.28999999999999998, 0.17999999999999999,
0.14000000000000001, NA, NA, 1, 0.23000000000000001, NA, 0.44,
0.25, 0.080000000000000002, 0.089999999999999997, NA, NA, 0.23000000000000001,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `63` = structure(c(1, NA, NA, NA, NA, NA, NA, NA,
NA, NA, 1, 0.51000000000000001, 0.32000000000000001, NA, 0.33000000000000002,
NA, 0.41999999999999998, NA, NA, 0.51000000000000001, 1, 0.28999999999999998,
NA, 0.46999999999999997, NA, 0.38, NA, NA, 0.32000000000000001,
0.28999999999999998, 1, NA, 0.26000000000000001, NA, 0.28999999999999998,
NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, 0.33000000000000002,
0.46999999999999997, 0.26000000000000001, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.41999999999999998, 0.38,
0.28999999999999998, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA,
NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `64` = structure(c(1,
NA, NA, NA, NA, NA, NA, NA, 0.629, NA, 1, 0.33000000000000002,
0.14999999999999999, NA, NA, 0.51000000000000001, NA, 0.40999999999999998,
NA, 0.33000000000000002, 1, 0.22, NA, NA, 0.35999999999999999,
NA, 0.28000000000000003, NA, 0.14999999999999999, 0.22, 1, NA,
NA, 0.16, NA, 0.19, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1, NA, NA, NA, NA, 0.51000000000000001, 0.35999999999999999,
0.16, NA, NA, 1, NA, 0.59999999999999998, NA, NA, NA, NA, NA,
NA, NA, 1, 0.54400000000000004, 0.629, 0.40999999999999998, 0.28000000000000003,
0.19, NA, NA, 0.59999999999999998, 0.54400000000000004, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `65` = structure(c(1, 0.40999999999999998,
0.33000000000000002, 0.23000000000000001, NA, 0.23000000000000001,
NA, 0.63, NA, 0.40999999999999998, 1, 0.5, 0.16, NA, 0.19, NA,
0.34000000000000002, NA, 0.33000000000000002, 0.5, 1, 0.34999999999999998,
NA, 0.17000000000000001, NA, 0.23999999999999999, NA, 0.23000000000000001,
0.16, 0.34999999999999998, 1, NA, 0.19, NA, 0.19, NA, NA, NA,
NA, NA, 1, NA, NA, NA, NA, 0.23000000000000001, 0.19, 0.17000000000000001,
0.19, NA, 1, NA, 0.23999999999999999, NA, NA, NA, NA, NA, NA,
NA, 1, NA, NA, 0.63, 0.34000000000000002, 0.23999999999999999,
0.19, NA, 0.23999999999999999, NA, 1, NA, NA, NA, NA, NA, NA,
NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA",
"IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `66` = structure(c(1,
0.69999999999999996, 0.67000000000000004, 0.55000000000000004,
NA, 0.35999999999999999, NA, 0.46999999999999997, NA, 0.69999999999999996,
1, 0.68999999999999995, 0.54000000000000004, NA, 0.11, NA, 0.37,
NA, 0.67000000000000004, 0.68999999999999995, 1, 0.70999999999999996,
NA, 0.12, NA, 0.32000000000000001, NA, 0.55000000000000004, 0.54000000000000004,
0.70999999999999996, 1, NA, 0.22, NA, 0.28000000000000003, NA,
NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.35999999999999999, 0.11,
0.12, 0.22, NA, 1, NA, 0.23999999999999999, NA, NA, NA, NA, NA,
NA, NA, 1, NA, NA, 0.46999999999999997, 0.37, 0.32000000000000001,
0.28000000000000003, NA, 0.23999999999999999, NA, 1, NA, NA,
NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `67` = structure(c(1, 0.55000000000000004, 0.26000000000000001,
0.33000000000000002, NA, 0.45000000000000001, NA, 0.44, 0.58999999999999997,
0.55000000000000004, 1, 0.32000000000000001, 0.31, NA, 0.19,
NA, 0.31, 0.5, 0.26000000000000001, 0.32000000000000001, 1, 0.34999999999999998,
NA, 0.14999999999999999, NA, 0.11, 0.10000000000000001, 0.33000000000000002,
0.31, 0.34999999999999998, 1, NA, 0.19, NA, 0.17000000000000001,
0.27000000000000002, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.45000000000000001,
0.19, 0.14999999999999999, 0.19, NA, 1, NA, 0.26000000000000001,
0.32000000000000001, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.44,
0.31, 0.11, 0.17000000000000001, NA, 0.26000000000000001, NA,
1, 0.55000000000000004, 0.58999999999999997, 0.5, 0.10000000000000001,
0.27000000000000002, NA, 0.32000000000000001, NA, 0.55000000000000004,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `68` = structure(c(1, 0.46000000000000002,
0.31, 0.31, NA, 0.45000000000000001, NA, NA, NA, 0.46000000000000002,
1, 0.26000000000000001, 0.14999999999999999, NA, 0.28000000000000003,
NA, NA, NA, 0.31, 0.26000000000000001, 1, 0.28999999999999998,
NA, 0.28000000000000003, NA, NA, NA, 0.31, 0.14999999999999999,
0.28999999999999998, 1, NA, 0.23000000000000001, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.45000000000000001, 0.28000000000000003,
0.28000000000000003, 0.23000000000000001, NA, 1, NA, NA, NA,
NA, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `69` = structure(c(1, 0.71999999999999997, 0.55000000000000004,
0.44, NA, 0.48999999999999999, NA, 0.59999999999999998, NA, 0.71999999999999997,
1, NA, NA, NA, NA, NA, 0.52000000000000002, NA, 0.55000000000000004,
NA, 1, NA, NA, NA, NA, 0.27000000000000002, NA, 0.44, NA, NA,
1, NA, NA, NA, 0.40999999999999998, NA, NA, NA, NA, NA, 1, NA,
NA, NA, NA, 0.48999999999999999, NA, NA, NA, NA, 1, NA, 0.42999999999999999,
NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.59999999999999998, 0.52000000000000002,
0.27000000000000002, 0.40999999999999998, NA, 0.42999999999999999,
NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L
), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap",
"Beh", "PB"))), `70` = structure(c(1, 0.62, 0.40999999999999998,
0.40000000000000002, NA, 0.40000000000000002, NA, 0.59999999999999998,
NA, 0.62, 1, NA, NA, NA, NA, NA, 0.33000000000000002, NA, 0.40999999999999998,
NA, 1, NA, NA, NA, NA, 0.28999999999999998, NA, 0.40000000000000002,
NA, NA, 1, NA, NA, NA, 0.39000000000000001, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, 0.40000000000000002, NA, NA, NA, NA, 1, NA,
0.29999999999999999, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.59999999999999998,
0.33000000000000002, 0.28999999999999998, 0.39000000000000001,
NA, 0.29999999999999999, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA,
NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA",
"IN", "DN", "Aut", "Cap", "Beh", "PB"))), `71` = structure(c(1,
0.82999999999999996, 0.46999999999999997, 0.57999999999999996,
NA, 0.51000000000000001, NA, 0.52000000000000002, NA, 0.82999999999999996,
1, NA, NA, NA, NA, NA, 0.45000000000000001, NA, 0.46999999999999997,
NA, 1, NA, NA, NA, NA, 0.19, NA, 0.57999999999999996, NA, NA,
1, NA, NA, NA, 0.19, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, 0.51000000000000001,
NA, NA, NA, NA, 1, NA, 0.23999999999999999, NA, NA, NA, NA, NA,
NA, NA, 1, NA, NA, 0.52000000000000002, 0.45000000000000001,
0.19, 0.19, NA, 0.23999999999999999, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `72` = structure(c(1,
0.57999999999999996, 0.28999999999999998, 0.47999999999999998,
NA, 0.35999999999999999, NA, 0.52000000000000002, NA, 0.57999999999999996,
1, NA, NA, NA, NA, NA, 0.44, NA, 0.28999999999999998, NA, 1,
NA, NA, NA, NA, 0.20000000000000001, NA, 0.47999999999999998,
NA, NA, 1, NA, NA, NA, 0.28000000000000003, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, 0.35999999999999999, NA, NA, NA, NA, 1, NA,
0.19, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, 0.52000000000000002,
0.44, 0.20000000000000001, 0.28000000000000003, NA, 0.19, NA,
1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `73` = structure(c(1, 0.46000000000000002, 0.28999999999999998,
0.20000000000000001, 0.34999999999999998, NA, 0.14999999999999999,
0.11, 0.48999999999999999, 0.46000000000000002, 1, 0.31, 0.14000000000000001,
0.14000000000000001, NA, -0.050000000000000003, 0.12, 0.29999999999999999,
0.28999999999999998, 0.31, 1, 0.35999999999999999, 0.12, NA,
0.040000000000000001, 0.11, 0.16, 0.20000000000000001, 0.14000000000000001,
0.35999999999999999, 1, 0.31, NA, -0.20000000000000001, 0.080000000000000002,
0.11, 0.34999999999999998, 0.14000000000000001, 0.12, 0.31, 1,
NA, -0.02, 0.089999999999999997, 0.02, NA, NA, NA, NA, NA, 1,
NA, NA, NA, 0.14999999999999999, -0.050000000000000003, 0.040000000000000001,
-0.20000000000000001, -0.02, NA, 1, 0.53000000000000003, -0.13,
0.11, 0.12, 0.11, 0.080000000000000002, 0.089999999999999997,
NA, 0.53000000000000003, 1, 0.050000000000000003, 0.48999999999999999,
0.29999999999999999, 0.16, 0.11, 0.02, NA, -0.13, 0.050000000000000003,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `74` = structure(c(1, 0.56999999999999995,
NA, 0.40000000000000002, 0.40000000000000002, NA, NA, 0.56999999999999995,
0.60999999999999999, 0.56999999999999995, 1, NA, NA, NA, NA,
NA, 0.40000000000000002, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA,
0.40000000000000002, NA, NA, 1, 0.27000000000000002, NA, NA,
0.33000000000000002, 0.26000000000000001, 0.40000000000000002,
NA, NA, 0.27000000000000002, 1, NA, NA, 0.28000000000000003,
0.33000000000000002, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA,
NA, NA, NA, NA, 1, NA, NA, 0.56999999999999995, 0.40000000000000002,
NA, 0.33000000000000002, 0.28000000000000003, NA, NA, 1, 0.70999999999999996,
0.60999999999999999, NA, NA, 0.26000000000000001, 0.33000000000000002,
NA, NA, 0.70999999999999996, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `75` = structure(c(1, NA, NA, 0.36499999999999999,
NA, 0.64000000000000001, 0.83599999999999997, 0.41399999999999998,
NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA, NA, NA,
NA, NA, NA, 0.36499999999999999, NA, NA, 1, NA, 0.38300000000000001,
0.40200000000000002, 0.070000000000000007, NA, NA, NA, NA, NA,
1, NA, NA, NA, NA, 0.64000000000000001, NA, NA, 0.38300000000000001,
NA, 1, 0.70799999999999996, 0.26100000000000001, NA, 0.83599999999999997,
NA, NA, 0.40200000000000002, NA, 0.70799999999999996, 1, 0.40500000000000003,
NA, 0.41399999999999998, NA, NA, 0.070000000000000007, NA, 0.26100000000000001,
0.40500000000000003, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1
), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `76` = structure(c(1, 0.33000000000000002,
0.16400000000000001, 0.29899999999999999, NA, NA, 0.56699999999999995,
0.14899999999999999, 0.224, 0.33000000000000002, 1, 0.52500000000000002,
0.47299999999999998, NA, NA, 0.28899999999999998, 0.10199999999999999,
0.087999999999999995, 0.16400000000000001, 0.52500000000000002,
1, 0.374, NA, NA, 0.313, 0.107, 0.128, 0.29899999999999999, 0.47299999999999998,
0.374, 1, NA, NA, 0.41099999999999998, 0.040000000000000001,
0.159, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA,
1, NA, NA, NA, 0.56699999999999995, 0.28899999999999998, 0.313,
0.41099999999999998, NA, NA, 1, 0.14499999999999999, 0.187, 0.14899999999999999,
0.10199999999999999, 0.107, 0.040000000000000001, NA, NA, 0.14499999999999999,
1, 0.19900000000000001, 0.224, 0.087999999999999995, 0.128, 0.159,
NA, NA, 0.187, 0.19900000000000001, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `77` = structure(c(1, 0.51000000000000001, 0.42899999999999999,
0.434, NA, NA, 0.66700000000000004, 0.26100000000000001, 0.315,
0.51000000000000001, 1, 0.58099999999999996, 0.53300000000000003,
NA, NA, 0.48499999999999999, 0.128, 0.23100000000000001, 0.42899999999999999,
0.58099999999999996, 1, 0.59799999999999998, NA, NA, 0.56200000000000006,
0.095000000000000001, 0.13700000000000001, 0.434, 0.53300000000000003,
0.59799999999999998, 1, NA, NA, 0.56699999999999995, 0.129, 0.193,
NA, NA, NA, NA, 1, NA, NA, NA, NA, NA, NA, NA, NA, NA, 1, NA,
NA, NA, 0.66700000000000004, 0.48499999999999999, 0.56200000000000006,
0.56699999999999995, NA, NA, 1, 0.22700000000000001, 0.27700000000000002,
0.26100000000000001, 0.128, 0.095000000000000001, 0.129, NA,
NA, 0.22700000000000001, 1, 0.51700000000000002, 0.315, 0.23100000000000001,
0.13700000000000001, 0.193, NA, NA, 0.27700000000000002, 0.51700000000000002,
1), .Dim = c(9L, 9L), .Dimnames = list(c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN",
"DN", "Aut", "Cap", "Beh", "PB"))), `78` = structure(c(1, 0.42999999999999999,
NA, 0.17999999999999999, NA, 0.14000000000000001, 0.48999999999999999,
0.60999999999999999, 0.58999999999999997, 0.42999999999999999,
1, NA, NA, NA, NA, NA, 0.35999999999999999, NA, NA, NA, 1, NA,
NA, NA, NA, NA, NA, 0.17999999999999999, NA, NA, 1, NA, 0.10000000000000001,
0.059999999999999998, 0.12, 0.01, NA, NA, NA, NA, 1, NA, NA,
NA, NA, 0.14000000000000001, NA, NA, 0.10000000000000001, NA,
1, 0.55000000000000004, -0.13, 0.22, 0.48999999999999999, NA,
NA, 0.059999999999999998, NA, 0.55000000000000004, 1, 0.40000000000000002,
0.42999999999999999, 0.60999999999999999, 0.35999999999999999,
NA, 0.12, NA, -0.13, 0.40000000000000002, 1, 0.37, 0.58999999999999997,
NA, NA, 0.01, NA, 0.22, 0.42999999999999999, 0.37, 1), .Dim = c(9L,
9L), .Dimnames = list(c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"), c("Int", "EA", "IA", "IN", "DN", "Aut",
"Cap", "Beh", "PB"))), `79` = structure(c(1, 0.66000000000000003,
NA, 0.46999999999999997, 0.47999999999999998, NA, 0.62, NA, 0.66000000000000003,
0.66000000000000003, 1, NA, 0.34000000000000002, 0.34000000000000002,
NA, 0.72999999999999998, NA, 0.51000000000000001, NA, NA, 1,
NA, NA, NA, NA, NA, NA, 0.46999999999999997, 0.34000000000000002,
NA, 1, 0.37, NA, 0.29999999999999999, NA, 0.38, 0.47999999999999998,
0.34000000000000002, NA, 0.37, 1, NA, 0.33000000000000002, NA,
0.40000000000000002, NA, NA, NA, NA, NA, 1, NA, NA, NA, 0.62,
0.72999999999999998, NA, 0.29999999999999999, 0.33000000000000002,
NA, 1, NA, 0.56999999999999995, NA, NA, NA, NA, NA, NA, NA, 1,
NA, 0.66000000000000003, 0.51000000000000001, NA, 0.38, 0.40000000000000002,
NA, 0.56999999999999995, NA, 1), .Dim = c(9L, 9L), .Dimnames = list(
c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"
), c("Int", "EA", "IA", "IN", "DN", "Aut", "Cap", "Beh",
"PB"))), `80` = structure(c(1, 0.59999999999999998, NA, 0.5,
NA, 0.40000000000000002, 0.56999999999999995, 0.64000000000000001,
0.78000000000000003, 0.59999999999999998, 1, NA, 0.38, 0.154,
0.27000000000000002, 0.48999999999999999, 0.46999999999999997,
0.56000000000000005, NA, NA, 1, NA, NA, NA, NA, NA, NA, 0.5,
0.38, NA, 1, 0.184, 0.17000000000000001, 0.38, 0.39000000000000001,
0.47999999999999998, NA, 0.154, NA, 0.184, 1, 0.01, -0.080000000000000002,
0.27200000000000002, 0.29399999999999998, 0.40000000000000002,
0.27000000000000002, NA, 0.17000000000000001, 0.01, 1, 0.20999999999999999,
0.34999999999999998, 0.34999999999999998, 0.56999999999999995,
0.48999999999999999, NA, 0.38, -0.080000000000000002, 0.20999999999999999,
1, 0.53000000000000003, 0.64000000000000001, 0.64000000000000001,
0.46999999999999997, NA, 0.39000000000000001, 0.27200000000000002,
0.34999999999999998, 0.53000000000000003, 1, 0.81000000000000005,
0.78000000000000003, 0.56000000000000005, NA, 0.47999999999999998,
0.29399999999999998, 0.34999999999999998, 0.64000000000000001,
0.81000000000000005, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"))), `81` = structure(c(1,
NA, 0.81000000000000005, 0.27000000000000002, NA, -0.37, 0.34999999999999998,
0.52000000000000002, 0.64000000000000001, NA, 1, NA, NA, NA,
NA, NA, NA, NA, 0.81000000000000005, NA, 1, 0.23999999999999999,
NA, -0.23000000000000001, 0.26000000000000001, 0.53000000000000003,
0.51000000000000001, 0.27000000000000002, NA, 0.23999999999999999,
1, NA, -0.20999999999999999, 0.02, -0.01, 0.26000000000000001,
NA, NA, NA, NA, 1, NA, NA, NA, NA, -0.37, NA, -0.23000000000000001,
-0.20999999999999999, NA, 1, 0.01, -0.29999999999999999, 0.51000000000000001,
0.34999999999999998, NA, 0.26000000000000001, 0.02, NA, 0.01,
1, 0.20000000000000001, 0.27000000000000002, 0.52000000000000002,
NA, 0.53000000000000003, -0.01, NA, -0.29999999999999999, 0.20000000000000001,
1, 0.45000000000000001, 0.64000000000000001, NA, 0.51000000000000001,
0.26000000000000001, NA, 0.51000000000000001, 0.27000000000000002,
0.45000000000000001, 1), .Dim = c(9L, 9L), .Dimnames = list(c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB"), c("Int",
"EA", "IA", "IN", "DN", "Aut", "Cap", "Beh", "PB")))), n = c(514,
94, 413, 1096, 133, 241, 200, 176, 237, 195, 154, 176, 237, 273,
280, 205, 231, 1108, 118, 41, 32, 146, 155, 109, 192, 413, 401,
120, 1403, 133, 152, 55, 523, 596, 174, 272, 85, 397, 146, 110,
71, 113, 390, 365, 397, 136, 95, 620, 743, 46, 109, 79, 273,
95, 199, 286, 244, 234, 144, 143, 236, 174, 230, 305, 585, 358,
153, 241, 111, 102, 99, 100, 103, 225, 278, 139, 146, 54, 1032,
225, 62), beh_freq_high = c(0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 1, 1))
|
423bf3ba92f7d99691a96daed8393c4ad023e55a | fdcd69bbf8fa90f1e5998068147cbd7a93886598 | /man/callSimilarityComputation.Rd | d30979f2bb09d7caf8715d62e6289dcf00fb8e10 | [] | no_license | souhilabsl/Rchic | bf60ec437d5ba6caf49447692cda4fa229fc2d5c | 252948b9e034b9a508938d94c976ef02533a4735 | refs/heads/master | 2021-01-21T18:38:25.225411 | 2014-04-17T13:58:54 | 2014-04-17T13:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 638 | rd | callSimilarityComputation.Rd | \name{callSimilarityComputation}
\alias{callSimilarityComputation}
\title{Calls the C++ similarity computation.}
\usage{
callSimilarityComputation(similarity_matrix, list.occurrences.variables,
verbose)
}
\arguments{
\item{similarity_matrix}{matrix of similarities of the
variables.}
\item{list.selected.item}{subset of variables to apply
the computation to.}
\item{list.occurrences.variables}{list of the occurrences
of the variables.}
\item{verbose}{gives more details}
}
\description{
Interface to call the C++ similarity computation.
}
\author{
Rapha\"{e}l Couturier
\email{raphael.couturier@univ-fcomte.fr}
}
|
d07c4923b9a2326c4a4ff21f4c9b767882a663ae | f924a68aacb955a8f21617c2d44d1a886d5e7eea | /workout01/code/make-shot-charts-script.R | f262f112d626af5977d0f20d2cd77cc0d3da2722 | [] | no_license | stat133-sp19/hw-stat133-hinka32 | 79aa748806d4b9f2528050cc490528d5cf8d7985 | eb7fcb68a6ea3fa64099e8359b14bcbb82e7160b | refs/heads/master | 2020-04-28T08:25:19.638528 | 2019-05-03T23:37:03 | 2019-05-03T23:37:03 | 175,125,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,938 | r | make-shot-charts-script.R | #################################
#title: Shot Charts Script
#description: make scatterplots of shots for each player, and one for all players
#inputs: data frames of each player as defined in "make-shots-data-script.R"
#outputs: pdf files of each scatterplot, png file for gsw_shot_charts
#################################
library(ggplot2)
library(jpeg)
library(grid)

# Basic scatterplot without the court background (kept for exploratory use).
iguodala_scatterplot <- ggplot(data = iguodala) +
  geom_point(aes(x = x, y = y, color = shot_made_flag))

# Court image drawn behind every shot chart.
court_file <- "../images/nba-court.jpg"
court_image <- rasterGrob(
  readJPEG(court_file),
  width = unit(1, "npc"),
  height = unit(1, "npc")
)

# Build one shot chart: court background, shot locations colored by the
# made/missed flag, and a vertical extent matching the court image.
make_shot_chart <- function(shots, title) {
  ggplot(data = shots) +
    annotation_custom(court_image, -250, 250, -50, 420) +
    geom_point(aes(x = x, y = y, color = shot_made_flag)) +
    ylim(-50, 420) +
    ggtitle(title) +
    theme_minimal()
}

# Write a chart to a PDF file and close the device again.
# Note: inside a function the plot must be print()ed explicitly --
# auto-printing only happens at the top level.
save_chart_pdf <- function(chart, file, width = 6.5, height = 5) {
  pdf(file = file, width = width, height = height)
  print(chart)
  dev.off()
}

# One chart per player; the named objects are kept so they can still be
# used interactively, exactly as before.
iguodala_shot_chart <- make_shot_chart(iguodala, 'Shot Chart: Iguodala (2016 Season)')
green_shot_chart <- make_shot_chart(green, 'Shot Chart: Draymond Green (2016 Season)')
durant_shot_chart <- make_shot_chart(durant, 'Shot Chart: Kevin Durant (2016 Season)')
thompson_shot_chart <- make_shot_chart(thompson, 'Shot Chart: Klay Thompson (2016 Season)')
curry_shot_chart <- make_shot_chart(curry, 'Shot Chart: Stephen Curry (2016 Season)')

save_chart_pdf(iguodala_shot_chart, "../images/andre-iguodala-shot-chart.pdf")
save_chart_pdf(green_shot_chart, "../images/draymond-green-shot-chart.pdf")
save_chart_pdf(durant_shot_chart, "../images/kevin-durant-shot-chart.pdf")
save_chart_pdf(thompson_shot_chart, "../images/klay-thompson-shot-chart.pdf")
save_chart_pdf(curry_shot_chart, "../images/stephen-curry-shot-chart.pdf")

# Faceted chart with all five players on a shared court background.
gsw_shot_charts <- make_shot_chart(all_players, 'Shot Chart: GSW (2016 Season)') +
  facet_wrap(~ name) +
  theme(legend.position = 'top')

save_chart_pdf(gsw_shot_charts, "../images/gsw_shot_charts.pdf", width = 8, height = 7)

png(filename = "../images/gsw_shot_charts.png", width = 8, height = 7, units = 'in', res = 72)
print(gsw_shot_charts)
dev.off()
|
3f74b7a7333dea67d6ee1dd0c09fb8c1f41be76b | bca1166b487ead5957c695d1b2aa1073e64888a6 | /man/get_data_sec_money_market_funds.Rd | d2e449bcd66e17afcee8452eafb87abe6aa421a0 | [
"MIT"
] | permissive | fxcebx/fundManageR | d4a02397e9e7c5d37fccfc9e221d80b2398d4be8 | cb11ad0cd648d4d484eec67cf093f35b2d163cda | refs/heads/master | 2021-01-13T13:55:47.905682 | 2017-01-12T19:55:26 | 2017-01-12T19:55:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 749 | rd | get_data_sec_money_market_funds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sec_functions.R
\name{get_data_sec_money_market_funds}
\alias{get_data_sec_money_market_funds}
\title{Get SEC registered money market funds}
\usage{
get_data_sec_money_market_funds(only_most_recent = TRUE, years = NULL,
months = NULL, nest_data = TRUE, return_message = TRUE)
}
\arguments{
\item{only_most_recent}{return only most recent year}
\item{years}{years to include}
\item{months}{months to include}
\item{nest_data}{whether to return the data in nested form}
\item{return_message}{return a message}
}
\description{
Get SEC registered money market funds
}
\examples{
get_data_sec_money_market_funds(only_most_recent = TRUE, nest_data = FALSE)
get_data_sec_money_market_funds(only_most_recent = FALSE, years = 2016)
}
|
becf157ff38bc7f861d17e795e7b20a36f3c0c16 | f999f2e9bab7a4bb5b5b21a56e33d33643422636 | /cachematrix.R | db2c5bbfc9ea7ad1591f19f34dc5505ab43fb232 | [] | no_license | DaBlahr/ProgrammingAssignment2 | b8203ddfcc76b243b80c786ec24448d474944810 | 6aadd241aed97409ba2747b2f4decae39ed47010 | refs/heads/master | 2020-12-25T03:40:06.986409 | 2015-09-22T12:31:26 | 2015-09-22T12:31:26 | 42,931,490 | 0 | 0 | null | 2015-09-22T12:09:47 | 2015-09-22T12:09:46 | null | UTF-8 | R | false | false | 1,110 | r | cachematrix.R | ## A method to wrap a matrix so that its inverse may be conveniently cached.
## First use makeCacheMatrix to turn a base matrix into its wrapped form,
## then use cacheSolve to get its inverse. If the inverse has already
## been calculated, time will be saved by grabbing the cache.
## makeCacheMatrix: build a "cache matrix" around a plain matrix `x`.
## The returned value is a list of closures sharing one environment:
##   set / get           -- replace / read the wrapped matrix
##   setcache / getcache -- store / read the memoised inverse
## Replacing the matrix via set() discards any stale cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # memoised inverse; NULL until cacheSolve() stores one
  list(
    set = function(y) {
      x <<- y
      inverse <<- NULL
    },
    get = function() x,
    setcache = function(newcache) inverse <<- newcache,
    getcache = function() inverse
  )
}
## cacheSolve: used on an x of the type returned by makeCacheMatrix,
## this returns the inverse of the basic matrix it wraps.
## On the first call the inverse is computed with solve() and stored via
## x$setcache(); subsequent calls return the cached value directly.
##
## x   - cache-matrix object created by makeCacheMatrix
## ... - extra arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
  inverse <- x$getcache()
  if(!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  mat <- x$get()
  ## Bug fix: forward `...` to solve(); previously the extra arguments
  ## accepted by cacheSolve() were silently dropped.
  inverse <- solve(mat, ...)
  x$setcache(inverse)
  inverse
}
ff3a357cf9bec7ef866c80dbb11eb82a8cad2bb6 | 66e0dcecbb0ed7b45488ebe699136c9a8627175b | /R/Analysis/mods_with_covars_test.R | 4230a9091796e32ef5d5afc5237222cf9896863a | [] | no_license | and-jonas/thermodyn | 9ebc60ff18e1f2469f1429bef8a02f0264256458 | 1b61a4086d45a4695eb3b12669139340f7568bc2 | refs/heads/master | 2020-12-20T21:06:06.373243 | 2020-05-08T15:15:35 | 2020-05-08T15:15:35 | 236,210,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,694 | r | mods_with_covars_test.R |
# ---- Session setup ---------------------------------------------------------
# NOTE(review): rm(list = ls()) and setwd() tie this script to an interactive
# session and clobber the caller's workspace / working directory; a
# project-based workflow would be more robust.
rm(list = ls())
# Network library holding SpATS, asreml, desplot, etc.
.libPaths("T:R3UserLibs")
library(SpATS)
# Presumably unloaded so plyr's verbs do not mask the dplyr versions
# attached with the tidyverse below -- TODO confirm plyr is attached here.
detach("package:plyr")
library(tidyverse)
library(asreml)
library(desplot)
workdir <- "O:/Projects/KP0011/2/"
setwd(workdir)
#functions
# Project helpers used below (f_spats, get_h2, extract_covars_from_nested,
# get_BLUE_spats, get_spat_corr_spats, get_spatial, do_cor_test, ...) are
# presumably defined in these two utility files.
source("thermodyn/R/Utils/002_thermo_utils.R")
source("thermodyn/R/Utils/003_h2_BLUEs_utils.R")
#============================================================================================================== -
# ---- Load data and reshape to long format ----------------------------------
# dat <- readRDS("Data/data_ready/5_data_dyntraits.rds") ## use only selected subset
dat <- readRDS("Data/data_all_plots/5_data_dyntraits.rds") ## use all available data
# Experimental design information, one row per plot.
design <- dat %>% dplyr::select(Plot_ID, design) %>% unnest(c(design)) %>% unique()
# reshape
# Names for the unnested trait columns: the dynamics parameters plus the
# same names with an "_fc" suffix (traits derived from corrected NRCT).
newnames <- c(names(dat$dyntraits[[1]]), paste0(names(dat$dyntraits[[1]]), "_fc"))
data0 <- dat %>%
  extract_covars_from_nested(from = "covariates", vars = c("heading_GDDAS", "Cnp_onsen_gom_GDDAS_fitted")) %>%
  dplyr::select(Plot_ID, design, heading_GDDAS, Cnp_onsen_gom_GDDAS_fitted, dyntraits, dyntraits_fc) %>%
  unnest(c(design, dyntraits, dyntraits_fc), names_repair = "unique")
# NOTE(review): renaming by column position (20:25) is fragile -- it breaks
# silently if the unnested design block gains or loses a column.
names(data0)[20:25] <- newnames
# One row per plot x trait; note gather() is superseded by pivot_longer().
data0 <- data0%>%
  gather(., dyntrait, value, slp_ct:sc_midstg_fc) %>%
  unique()
#============================================================================================================== -
#perform spatial correction
#for dyntraits and dyntraits_fc
# For every trait x year combination, fit a SpATS model via the f_spats
# helper and collect: within-year repeatability (w2), the fitted model
# object, genotype BLUEs, spatially corrected plot values, the fitted
# spatial surface, and a desplot visualisation of that surface.
# possibly() turns a failed fit into NA instead of aborting the pipeline.
corrected_all <- data0 %>%
  group_by(dyntrait, harvest_year) %>% group_nest() %>%
  # filter(grepl("_fc", dyntrait)) %>%
  # calculate within-year repeatablity
  mutate(w2 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                         fixed = "~ check + heading_GDDAS + Cnp_onsen_gom_GDDAS_fitted", genotype.as.random = TRUE, genotype = "Gen_Name",
                         .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_))) %>%
  # extract BLUEs and spatially corrected plot values
  # (here genotype is fixed and heading date is the only covariate)
  mutate(obj = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                          fixed = "~ heading_GDDAS", genotype.as.random = FALSE, genotype = "Gen_Name",
                          .f = possibly(f_spats, otherwise = NA_real_))) %>%
  mutate(plot_obj = purrr::map(.x = obj, .f = plot)) %>%
  mutate(BLUE = purrr::map(.x = obj,
                           .f = possibly(get_BLUE_spats, otherwise = NA_real_))) %>%
  mutate(spat_corr = purrr::map(.x = obj, response = "value", element_return = "full",
                                .f = possibly(get_spat_corr_spats, otherwise = NA_real_))) %>%
  #get spatial component
  mutate(sp = purrr::map(.x = obj,
                         .f = possibly(get_spatial, otherwise = NA_real_))) %>%
  #plot the spatial trend
  mutate(plot = purrr::map(.x = sp,
                           form = formula(spatial ~ RangeLot + RowLot | Lot),
                           .f = possibly(desplot::desplot, otherwise = NA_real_)))
# Interactive checks: trait names and the spatial-trend plots.
corrected_all$dyntrait
corrected_all$plot
#============================================================================================================== -
#extract corrected dyntraits (calculated from raw NRCT)
# Wide table of spatially corrected plot values, then nested per plot.
dyntraits_corr <- corrected_all %>%
  dplyr::select(dyntrait, spat_corr) %>% unnest(spat_corr) %>%
  spread(., dyntrait, spat_corr) %>%
  dplyr::select(Plot_ID, sc_midstg, sc_onsen, slp_ct) %>%
  group_by(Plot_ID) %>% group_nest(.key = "dyntraits_corr")
#extract corrected dyntraits (calculated from spatially corrected NRCT)
dyntraits_fc_corr <- corrected_all %>%
  dplyr::select(dyntrait, spat_corr) %>% unnest(spat_corr) %>%
  spread(., dyntrait, spat_corr) %>%
  dplyr::select(Plot_ID, sc_midstg_fc, sc_onsen_fc, slp_ct_fc) %>%
  #REVERT to standard names
  rename_at(vars(contains("_fc")), .funs = list(~gsub("_fc", "", .))) %>%
  group_by(Plot_ID) %>% group_nest(.key = "dyntraits_fc_corr")
# Merge both corrected versions back onto the full data set.
out <- dyntraits_corr %>% full_join(dyntraits_fc_corr) %>%
  full_join(dat,.)
# Per-year correlation between heading date and the NRCT decline slope.
# NOTE(review): the name `cor` masks base::cor for the rest of the session.
cor <- out %>%
  extract_covars_from_nested("covariates", "heading_GDDAS") %>%
  extract_covars_from_nested("dyntraits_fc_corr", "slp_ct") %>%
  extract_covars_from_nested("design", "harvest_year") %>%
  dplyr::select(Plot_ID, harvest_year, heading_GDDAS, slp_ct) %>% unique() %>%
  dplyr::select(-Plot_ID) %>% group_by(harvest_year) %>% group_nest() %>%
  mutate(cor = purrr::map_dbl(data, do_cor_test, x = "heading_GDDAS", y = "slp_ct"))
saveRDS(out, "Data/data_all_plots/6_data_dyntraits_corr.rds")
#============================================================================================================== -
# compare dyntraits extracted from raw NRCT and from spatially corrected NRCT
# Column names such as slp_ct...18 come from names_repair = "unique" in
# unnest(); they address the duplicated slp_ct columns by position --
# TODO confirm these positions still match after any upstream column change.
ding <- out %>% dplyr::select(Plot_ID, design, dyntraits, dyntraits_fc) %>% unique() %>%
  unnest(c(design, dyntraits, dyntraits_fc), names_repair = "unique")
p1 <- ggplot(ding) +
  geom_point(aes(x = slp_ct...18, y =slp_ct...21, color = as.factor(Lot))) +
  xlab("slp_ct_fr") + ylab("slp_ct_fc")
# compare dyntraits extracted from spatially corrected NRCT and dyntraits extracted from raw NRCT with subsequent spatial correction
ding <- out %>% dplyr::select(Plot_ID, design, dyntraits_fc, dyntraits_corr) %>% unique() %>%
  unnest(c(design, dyntraits_fc, dyntraits_corr), names_repair = "unique")
p2 <- ggplot(ding) +
  geom_point(aes(x = slp_ct...18, y =slp_ct...23, color = as.factor(Lot))) +
  xlab("slp_ct_fc") + ylab("slp_ct_fr_corr")
# compare dyntraits extracted from spatially corrected NRCT and dyntraits extracted from spatially corrected NRCT with subsequent repeated spatial correction
ding <- out %>% dplyr::select(Plot_ID, design, dyntraits_fc, dyntraits_fc_corr) %>% unique() %>%
  unnest(c(design, dyntraits_fc, dyntraits_fc_corr), names_repair = "unique")
p3 <- ggplot(ding) +
  geom_point(aes(x = slp_ct...18, y =slp_ct...23, color = as.factor(Lot))) +
  xlab("slp_ct_fc") + ylab("slp_ct_fc_corr")
#arrange plots and save
png("Figures/2year/dynpars_cor_corrmethods.png", width = 7, height = 5, units = 'in', res = 300)
gridExtra::grid.arrange(grobs=list(p1, p2, p3), ncol=2)
dev.off()
#====================================================================================== -
#TWO STAGE ----
#heritability from best linear unbiased estimators
# Stage 1 produced per-year BLUEs (SpATS above); stage 2 fits an
# across-year mixed model per trait and derives heritability, once with
# the standard estimator and once with the Cullis estimator.
h2_BLUE <- corrected_all %>%
  dplyr::select(dyntrait, harvest_year, BLUE) %>% unnest(BLUE) %>%
  # add design
  # full_join(design, ., by = c("Gen_Name", "harvest_year")) %>%
  mutate(Gen_Name = as.factor(Gen_Name),
         harvest_year = as.factor(harvest_year)) %>%
  group_by(dyntrait) %>% group_nest() %>%
  mutate(h2_2stage = purrr::map_dbl(data, possibly(get_h2_asreml2, otherwise = NA_real_), #after bug-fix, replace by map_dbl
                                    fixed = "BLUE ~ harvest_year",
                                    random = "~Gen_Name",
                                    residual = "~NULL",
                                    cullis = FALSE)) %>%
  mutate(h2_2stage_c = purrr::map_dbl(data, possibly(get_h2_asreml2, otherwise = NA_real_), #after bug-fix, replace by map_dbl
                                      fixed = "BLUE ~ harvest_year",
                                      random = "~Gen_Name",
                                      residual = "~NULL",
                                      cullis = TRUE))
#====================================================================================== -
#ONE STAGE ----
#heritability from spatially corrected plot values
#scorings and agronomic traits
# Single across-year mixed model per trait, fitted directly on the
# spatially corrected plot values with year-specific residual variances
# (Cullis heritability).
h2_spatcorr <- corrected_all %>%
  dplyr::select(dyntrait, harvest_year, spat_corr) %>% unnest(spat_corr) %>%
  mutate(Gen_Name = as.factor(Gen_Name),
         harvest_year = as.factor(harvest_year)) %>%
  group_by(dyntrait) %>% group_nest() %>%
  mutate(h2_1stage_c = purrr::map_dbl(data, possibly(get_h2_asreml1, otherwise = NA_real_),
                                      fixed = "spat_corr ~ 1",
                                      random = "~ harvest_year + Gen_Name + Gen_Name:harvest_year + Rep:at(harvest_year)",
                                      residual = "~dsum(~id(units) | harvest_year)",
                                      cullis = TRUE))
# Ad-hoc diagnostics for the sixth trait: count of missing corrected
# values and their per-year distribution.
# NOTE(review): `data` also masks the base function of the same name.
data <- h2_spatcorr$data[[6]]
data[is.na(data$spat_corr),] %>% nrow()
ggplot(data) +
  geom_histogram(aes(x = spat_corr)) +
  facet_wrap(~harvest_year, scales = "free")
#====================================================================================== -
## THESE ARE FINAL RESULTS
#====================================================================================== -
#perform spatial correction
#for dyntraits and dyntraits_fc
# Final run ("_fc" traits only): within-year repeatability under
# increasingly rich fixed-effect corrections --
#   w2_0  : no covariates
#   w2_1  : check genotypes only
#   w2_21 : check + heading date
#   w2_22 : check + fitted onset of senescence
#   w2_3  : check + heading date + onset of senescence
corrected_all <- data0 %>%
  group_by(dyntrait, harvest_year) %>% group_nest() %>%
  filter(grepl("_fc", dyntrait)) %>%
  # calculate within-year repeatablity
  mutate(w2_0 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                           fixed = "~ NULL", genotype.as.random = TRUE, genotype = "Gen_Name",
                           .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_))) %>%
  mutate(w2_1 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                           fixed = "~ check", genotype.as.random = TRUE, genotype = "Gen_Name",
                           .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_))) %>%
  mutate(w2_21 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                            fixed = "~ check + heading_GDDAS", genotype.as.random = TRUE, genotype = "Gen_Name",
                            .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_))) %>%
  mutate(w2_22 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                            fixed = "~ check + Cnp_onsen_gom_GDDAS_fitted", genotype.as.random = TRUE, genotype = "Gen_Name",
                            .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_))) %>%
  mutate(w2_3 = purrr::map(.x = data, response = "value", random = "~ Xf + Yf",
                           fixed = "~ check + heading_GDDAS + Cnp_onsen_gom_GDDAS_fitted", genotype.as.random = TRUE, genotype = "Gen_Name",
                           .f = possibly(f_spats, otherwise = NA_real_)) %>%
           purrr::map_dbl(.x = .,
                          .f = possibly(get_h2, otherwise = NA_real_)))
# Keep only the repeatability columns (drop the nested data).
W2 <- corrected_all %>% dplyr::select(-data)
|
96ae14fbc9ca2f3f6b2a7b5824654a07192ee1e6 | e903fc789e92c027ae2c1e035da0aa4a08fe82f4 | /cachematrix.R | c7c918c4612325e5911299a7c54efd398d28a779 | [] | no_license | mirror416/ProgrammingAssignment2 | 3e21864b54c318f0c16ac26f0d32fe7d0ed0bbb9 | 27b276787d26d726a7b3b80b485aae34288a7a6b | refs/heads/master | 2021-02-04T03:30:32.894258 | 2020-03-04T21:22:07 | 2020-03-04T21:22:07 | 243,611,040 | 0 | 0 | null | 2020-02-27T20:33:13 | 2020-02-27T20:33:11 | null | UTF-8 | R | false | false | 1,506 | r | cachematrix.R | ## There are two functions in this file. They try to cache and retrieve the
## inverse of a matrix to save time in computation. The function structure is based
## on the given example and modified according to the different application purpose.
## The makeCacheMatrix function aims to create a special matrix object to be able to
## cache the inverse of that matrix. It creates a list of functions to set and get the
## matrix and also set and get its inverse. The "<<-" operator makes it possible to
## use the function in different matrix (just like different environment) as it could
## assign a value to an object that is not in the current environment.
## Wrap a matrix together with a memoised inverse. The returned list of
## closures (set, get, setinver, getinver) shares one environment, so the
## cached inverse survives between calls; set() invalidates it.
makeCacheMatrix <- function(x = matrix()) {
  inver <- NULL  # cached inverse; empty until setinver() is called

  store <- function(y) {
    x <<- y
    inver <<- NULL  # a new matrix makes the old inverse stale
  }
  fetch <- function() x
  remember_inverse <- function(inverse) inver <<- inverse
  recall_inverse <- function() inver

  list(set = store,
       get = fetch,
       setinver = remember_inverse,
       getinver = recall_inverse)
}
## This cacheSolve function is to get the inverse of the matrix created by the first
## function. It will first try to find the inverse in the cache and return it if it
## is already calculated. If not, it will calculate the inverse with the solve function
## and store it in the cache.
## Return the inverse of the special matrix created by makeCacheMatrix.
## A cache hit is announced with a message and returned immediately; on a
## miss the inverse is computed via solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  hit <- x$getinver()
  if (is.null(hit)) {
    # Cache miss: compute the inverse, remember it, return it.
    hit <- solve(x$get(), ...)
    x$setinver(hit)
    return(hit)
  }
  message("getting stored data")
  hit
}
|
88af2e7a9ee79131bacfd059208a4b1e2b498f46 | 88845c28ef247fe06d194fcc784ded352655992a | /cachematrix.R | 98aecb17fef33469d8cbea1204adc5ec1b0ff26a | [] | no_license | tonyofleon/ProgrammingAssignment2 | 0a04540dbb1e650103075d3dc4c7161763c76c9f | acbabeb9d876ec11005c681b4edccb13df09d8b7 | refs/heads/master | 2021-01-16T22:35:24.388715 | 2015-03-21T21:54:05 | 2015-03-21T21:54:05 | 32,622,966 | 0 | 0 | null | 2015-03-21T06:48:13 | 2015-03-21T06:48:13 | null | UTF-8 | R | false | false | 1,635 | r | cachematrix.R | ##The first function called makeCacheMatrix, initialize the
##functions list set, get, setinv and getinv.
##In the "set" function, there is a validation to check if the
##matrix that is being set is the same one as the previous one,
##in case it is the same one, the matrix is not set and the inv is not
##set to NULL.
makeCacheMatrix <- function(x = matrix()) {
  ## The inv variable holds the cached inverse; it starts out empty.
  ## Bug fix: this must be a LOCAL assignment (`<-`), not `<<-`. With
  ## `<<-` no local `inv` is ever created -- the value lands in an
  ## enclosing (ultimately the global) environment, so every object made
  ## by makeCacheMatrix would share and clobber one single cache.
  inv <- NULL
  ## Only accept a new matrix when it differs from the stored one; an
  ## identical matrix keeps the existing cached inverse.
  set <- function(y){
    if (identical(x,y) == FALSE) {
      x <<- y
      inv <<- NULL
    }
  }
  get <- function() x
  setinv <- function(solve) inv <<- solve
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of the cache-matrix object `x`.
## A previously stored inverse is announced ("getting cached data") and
## reused; otherwise the inverse is computed with solve(), stored via
## setinv() for later calls, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    ## Cache miss: compute, remember, and return the fresh inverse.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
b32afbb9a89c97527b9ad0410708b61cf9457c91 | a49944a6f19aec360eac7c4e0909078cc792fea4 | /man/galeShapley.validate.Rd | 55600ea883b402eb2ba2fb306b3be80ddb3c3f6f | [] | no_license | jtilly/matchingR | bec428e9eb22ceec35fefdc40d244d46a4972761 | 8ca48209e37e89daf7661092788a7f20470be7e6 | refs/heads/master | 2023-02-17T02:14:22.311444 | 2023-02-11T08:14:36 | 2023-02-11T08:14:36 | 35,117,499 | 55 | 27 | null | 2023-02-11T08:14:38 | 2015-05-05T18:43:07 | R | UTF-8 | R | false | true | 3,391 | rd | galeShapley.validate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/galeshapley.R
\name{galeShapley.validate}
\alias{galeShapley.validate}
\title{Input validation of preferences}
\usage{
galeShapley.validate(
proposerUtils = NULL,
reviewerUtils = NULL,
proposerPref = NULL,
reviewerPref = NULL
)
}
\arguments{
\item{proposerUtils}{is a matrix with cardinal utilities of the proposing
side of the market. If there are \code{n} proposers and \code{m} reviewers,
then this matrix will be of dimension \code{m} by \code{n}. The
\code{i,j}th element refers to the payoff that proposer \code{j} receives
from being matched to reviewer \code{i}.}
\item{reviewerUtils}{is a matrix with cardinal utilities of the courted side
of the market. If there are \code{n} proposers and \code{m} reviewers, then
this matrix will be of dimension \code{n} by \code{m}. The \code{i,j}th
element refers to the payoff that reviewer \code{j} receives from being
matched to proposer \code{i}.}
\item{proposerPref}{is a matrix with the preference order of the proposing
side of the market (only required when \code{proposerUtils} is not
provided). If there are \code{n} proposers and \code{m} reviewers in the
market, then this matrix will be of dimension \code{m} by \code{n}. The
\code{i,j}th element refers to proposer \code{j}'s \code{i}th most favorite
reviewer. Preference orders can either be specified using R-indexing
(starting at 1) or C++ indexing (starting at 0).}
\item{reviewerPref}{is a matrix with the preference order of the courted side
of the market (only required when \code{reviewerUtils} is not provided). If
there are \code{n} proposers and \code{m} reviewers in the market, then
this matrix will be of dimension \code{n} by \code{m}. The \code{i,j}th
element refers to reviewer \code{j}'s \code{i}th most favorite proposer.
Preference orders can either be specified using R-indexing (starting at 1)
or C++ indexing (starting at 0).}
}
\value{
a list containing \code{proposerUtils}, \code{reviewerUtils}, and
\code{proposerPref} (\code{reviewerPref} is not returned, as it is no longer
needed once translated into \code{reviewerUtils}).
}
\description{
This function parses and validates the arguments that are passed on to the
Gale-Shapley Algorithm. In particular, it checks if user-defined preference
orders are complete and returns an error otherwise. If user-defined orderings
are given in terms of R indices (starting at 1), then these are transformed
into C++ indices (starting at zero).
}
\examples{
# market size
nmen <- 5
nwomen <- 4
# generate cardinal utilities
uM <- matrix(runif(nmen * nwomen), nrow = nwomen, ncol = nmen)
uW <- matrix(runif(nwomen * nmen), nrow = nmen, ncol = nwomen)
# turn cardinal utilities into ordinal preferences
prefM <- sortIndex(uM)
prefW <- sortIndex(uW)
# validate cardinal preferences
preferences <- galeShapley.validate(uM, uW)
preferences
# validate ordinal preferences
preferences <- galeShapley.validate(proposerPref = prefM, reviewerPref = prefW)
preferences
# validate ordinal preferences when these are in R style indexing
# (instead of C++ style indexing)
preferences <- galeShapley.validate(proposerPref = prefM + 1, reviewerPref = prefW + 1)
preferences
# validate preferences when proposer-side is cardinal and reviewer-side is ordinal
preferences <- galeShapley.validate(proposerUtils = uM, reviewerPref = prefW)
preferences
}
|
1c2ed5cd2bd7acd3eaed7291a013fb87fa9d7c0c | 7783c060692c982b9769e51da0f0bddcdbde4a7d | /homework2/server.R | 2a588779ed721eb3914e4e73bc7f30cd68a4ca43 | [] | no_license | osuarezmunist/msan622 | d78c454f8b586f430ac077303d7545583bb4ddad | 0aba53f3331352ea8b022ace6f33d6b23c683560 | refs/heads/master | 2021-01-17T17:54:34.436414 | 2014-05-16T04:29:14 | 2014-05-16T04:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,509 | r | server.R | library(shiny)
library(scales)
data("movies", package = "ggplot2")
getPlot <- function(highlight,mpaaRB,minb,maxb) {
genre <- rep(NA, nrow(movies))
count <- rowSums(movies[, 18:24])
genre[which(count > 1)] = "Mixed"
genre[which(count < 1)] = "None"
genre[which(count == 1 & movies$Action == 1)] = "Action"
genre[which(count == 1 & movies$Animation == 1)] = "Animation"
genre[which(count == 1 & movies$Comedy == 1)] = "Comedy"
genre[which(count == 1 & movies$Drama == 1)] = "Drama"
genre[which(count == 1 & movies$Documentary == 1)] = "Documentary"
genre[which(count == 1 & movies$Romance == 1)] = "Romance"
genre[which(count == 1 & movies$Short == 1)] = "Short"
movies$genre <-genre
movies <- subset(movies, budget>0)
# filter by budget amounts
if (minb+maxb>0) {
moviesB <-subset(movies, budget >= minb*1000000 & budget <=maxb*1000000)
} else {
moviesB <-movies
}
# gray out if not selected
palette <- brewer_pal(type = "qual", palette = "Set1")(9)
# Need to re-code with switch; may perform more than change.
if (!1 %in% highlight ) {
palette[1] <- "#EEEEEE"
}
if (!2 %in% highlight ) {
palette[2] <- "#EEEEEE"
}
if (!3 %in% highlight ) {
palette[3] <- "#EEEEEE"
}
if (!4 %in% highlight ) {
palette[4] <- "#EEEEEE"
}
if (!5 %in% highlight ) {
palette[5] <- "#EEEEEE"
}
if (!6 %in% highlight ) {
palette[6] <- "#EEEEEE"
}
if (!7 %in% highlight ) {
palette[7] <- "#EEEEEE"
}
if (!8 %in% highlight ) {
palette[8] <- "#EEEEEE"
}
if (!9 %in% highlight ) {
palette[9] <- "#EEEEEE"
}
# Only show selected
if (mpaaRB != "All") {
moviesRB <- subset(moviesB,mpaa==mpaaRB)
}
else {
moviesRB <- moviesB
}
# increase alpha level as number of dots decreases
prop <- nrow(movies)/nrow(moviesRB)
if (prop>5) {prop <- 5}
p <- ggplot(moviesRB,aes(x=budget,y=rating,color=genre))+geom_point(alpha = .2*prop)
p <- p +scale_color_manual("Genre",values = palette)
p <- p +scale_x_continuous(labels = dollar)+scale_y_continuous(limits=c(0,10))
p <- p +xlab("Budget")+ylab("Rating")
}
# server.R
# Shiny server definition: re-renders the movie scatter plot whenever the
# genre checkboxes, MPAA rating selector, or budget range inputs change.
shinyServer(
  function(input, output) {
    # Dead code left over from an earlier (iris-based) example; unused.
    #getHighlight <- reactive({
    # result <- levels(movies$Species)
    #return(result[which(result %in% input$highlight)])
    #})
    # getPlot() is defined above in this file; print() forces ggplot rendering.
    output$plot1 <- renderPlot({
      print(getPlot(input$checkGroup,input$mpaa,input$minbudget,input$maxbudget))
    })
  }
)
|
0117372d0b1f7141784998fffc6650d4a01ea882 | 5cb3e1a594977e5055adf8cec33b0dad782ea51d | /covid_data1.R | a7bb2e6e60808f4b44f67d8b0e07d08c8aed6677 | [
"MIT"
] | permissive | maisk/covid19-greece-stats | 81d56305145494179f9c23403d03bf3ca83bc23b | db108110a2588c0fe70b3c08c706668f30f5641a | refs/heads/main | 2023-03-01T06:36:56.910677 | 2021-02-10T23:20:13 | 2021-02-10T23:20:13 | 330,237,484 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,942 | r | covid_data1.R | ##########################################################################################
# LOAD
##########################################################################################
# Build a tidy country-level COVID-19 time series from the JHU CSSE daily
# cumulative CSVs (confirmed / deaths / recovered) and save everything to disk.
#library(plotly)
library(plyr)
#library(lubridate)
#library(drc)
# NOTE(review): clearing the workspace/devices from inside a script is a side
# effect on the caller's session; appropriate for interactive use only.
rm(list=ls(all=TRUE))
graphics.off()
cat("\014")
#environment_options()
# check.names=FALSE keeps the date column headers (e.g. "1/22/20") intact.
confirmed<-read.csv("./data/covid/time_series_covid19_confirmed_global.csv",stringsAsFactors=FALSE,check.names=FALSE)
deaths<-read.csv("./data/covid/time_series_covid19_deaths_global.csv",stringsAsFactors=FALSE,check.names=FALSE)
recovered<-read.csv("./data/covid/time_series_covid19_recovered_global.csv",stringsAsFactors=FALSE,check.names=FALSE)
country_code<-read.csv("./data/countrycode.csv",stringsAsFactors=FALSE,check.names=FALSE)
# Coordinates are not needed downstream; drop them from all three tables.
confirmed$Lat<-confirmed$Long<-deaths$Lat<-deaths$Long<-recovered$Lat<-recovered$Long<-NULL
##########################################################################################
# RESHAPE AND MERGE
##########################################################################################
# Wide -> long: one row per province/country/date holding the cumulative count.
covid_c<-reshape2::melt(confirmed,id.vars=c("Province/State","Country/Region"),variable.name="date",value.name="confirmed")
covid_d<-reshape2::melt(deaths,id.vars=c("Province/State","Country/Region"),variable.name="date",value.name="deaths")
covid_r<-reshape2::melt(recovered,id.vars=c("Province/State","Country/Region"),variable.name="date",value.name="recovered")
# Full outer join of the three measures on province/country/date.
# NOTE(review): the trailing comma in merge(...) leaves an empty argument that
# R tolerates here (it matches a formal with a default) -- confirm intended.
covid_all<-Reduce(function(x,y) merge(x,y,all=TRUE,), list(covid_c, covid_d, covid_r))
names(covid_all)<-tolower(make.names(names(covid_all)))
covid_all$date<-as.Date(as.character(covid_all$date),"%m/%d/%y")
# Collapse provinces to country totals, then attach ISO country codes.
covid_all_country<-plyr::ddply(covid_all,.(country.region,date),plyr::numcolwise(sum,na.rm=TRUE))
covid_all_country<-merge(country_code,covid_all_country,by.x="country",by.y="country.region",all=TRUE)
save(covid_c,covid_d,covid_r,confirmed,deaths,recovered,covid_all_country,covid_all,country_code,file='covid_data.Rdata')
|
00b1d1164df9f2abedcf60d022984997e9c6bc44 | a1c150f97c1453523a7407b9ff354b4c6224fb5f | /logi_reg.R | a2fa538d4e00fd734d6648bddee35000d318ef92 | [] | no_license | ishank26/edx_analytics_edge | 9efac2b8840ea1c7d7d5ed7e80af1bcc121651bf | 7748afcc3f2f66bab3ef1efd5dde7abe694cd754 | refs/heads/master | 2020-12-25T10:59:56.692719 | 2016-06-03T12:13:35 | 2016-06-03T12:13:35 | 60,344,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,437 | r | logi_reg.R | setwd("\\home\\work\\analytics_edge")
library(mice)
library(Rcpp)
# Logistic regression on lending data; 'not.fully.paid' is the binary outcome.
data<-read.csv(file= "loans.csv")
head(data)
summary(data)
# Inspect the missing-data pattern before imputing.
md.pattern(data)
set.seed(144)
# Impute every predictor except the outcome so imputation cannot leak it.
vars.for.imputation = setdiff(names(data), "not.fully.paid")
imputed = complete(mice(data[vars.for.imputation]))
data[vars.for.imputation] = imputed
library(caTools)
# Stratified 70/30 train/test split on the outcome.
spl=sample.split(data$not.fully.paid, 0.7)
train = subset(data,spl==TRUE)
test = subset(data,spl==FALSE)
# Full logistic regression on all predictors.
logit=glm(not.fully.paid~.,data=train ,family="binomial")
summary(logit)
test$predicted.risk = predict(logit, newdata = test, type= "response")
# Confusion matrix at a 0.5 threshold.
table(test$not.fully.paid,test$predicted.risk > 0.5)
library(ROCR)
ROCpred = prediction(test$predicted.risk, test$not.fully.paid)
ROCperf= performance(ROCpred,"tpr","fpr")
plot(ROCperf)
# Test-set AUC of the full model.
as.numeric(performance(ROCpred,"auc")@y.values)
##### int.rate
# Single-predictor (bivariate) model: interest rate only.
logit_bivar = glm(not.fully.paid ~ int.rate, data = train, family = "binomial")
test$pred.bivar = predict(logit_bivar, newdata= test, type="response")
max(test$pred.bivar)
ROCpred.bivar= prediction(test$pred.bivar,test$not.fully.paid)
as.numeric(performance(ROCpred.bivar,"auc")@y.values)
# High-interest loans (>= 15%): keep the 100 with the lowest predicted risk.
highint = subset(test, int.rate >= 0.15)
summary(highint)
table(highint$not.fully.paid)
cutoff=sort(highint$predicted.risk, decreasing=FALSE)[100]
selectedLoans=subset(highint,predicted.risk<=cutoff)
summary(selectedLoans)
nrow(selectedLoans)
table(selectedLoans$not.fully.paid)
ba90bcadb36a8e8f455931efe460bc564ce1d023 | b08057978c4330b4073816e6b1fd16629f9bf9b4 | /teste.R | 11c2b6dffb1e75af77d6acaabfffe113cdb7fbd3 | [
"MIT"
] | permissive | mnunes/IMDb2 | d66f5e4dffafffd56467c9ad4a80c49391936bc7 | 410934e552cda23741aaf090bde1446118d5b940 | refs/heads/master | 2021-08-28T14:00:26.867110 | 2017-12-12T11:42:08 | 2017-12-12T11:42:08 | 109,696,840 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,482 | r | teste.R | "V1","V2"
"tt0944947","Game of Thrones"
"tt0903747","Breaking Bad"
"tt1520211","The Walking Dead"
"tt1475582","Sherlock"
"tt0898266","Big Bang: A Teoria"
"tt0108778","Friends"
"tt0773262","Dexter"
"tt0460649","Como Eu Conheci Sua Mãe"
"tt0411008","Lost"
"tt4574334","Stranger Things"
"tt0455275","Prison Break"
"tt1856010","House of Cards"
"tt2356777","True Detective"
"tt2193021","Arqueiro"
"tt0412142","Dr. House"
"tt0460681","Sobrenatural"
"tt0096697","Os Simpsons"
"tt1632701","Suits"
"tt1442437","Família Moderna"
"tt3322312","Demolidor"
"tt0121955","South Park"
"tt2306299","Vikings"
"tt0182576","Uma Família da Pesada"
"tt1796960","Homeland"
"tt1405406","Diários de um Vampiro"
"tt4158110","Mr. Robot: Sociedade Hacker"
"tt0386676","Vida de Escritório"
"tt3107288","Flash"
"tt0475784","Westworld"
"tt0367279","Arrested Development"
"tt2372162","Orange Is the New Black"
"tt2802850","Fargo"
"tt2707408","Narcos"
"tt1844624","História de Horror Americana"
"tt0141842","Família Soprano"
"tt0306414","A Escuta"
"tt0369179","Dois Homens e Meio"
"tt0844441","True Blood"
"tt0813715","Heróis"
"tt0285403","Scrubs"
"tt0303461","Firefly"
"tt1124373","Filhos da Anarquia"
"tt3032476","Better Call Saul"
"tt0098904","Seinfeld"
"tt1119644","Fronteiras"
"tt1442449","Spartacus"
"tt1843230","Era Uma Vez"
"tt0413573","A Anatomia de Grey"
"tt2243973","Hannibal"
"tt2861424","Rick and Morty"
"tt0149460","Futurama"
"tt0106179","Arquivo X"
"tt3749900","Gotham"
"tt0436992","Doctor Who"
"tt2085059","Black Mirror"
"tt2364582","Agentes S.H.I.E.L.D. da Marvel"
"tt0804503","Mad Men: Inventando Verdades"
"tt1439629","Community"
"tt0417299","Avatar: A Lenda de Aang"
"tt0285331","24 Horas"
"tt0904208","Californication"
"tt1826940","New Girl"
"tt0472954","It's Always Sunny in Philadelphia"
"tt0979432","Boardwalk Empire: O Império do Contrabando"
"tt0877057","Death Note: Notas da Morte"
"tt1266020","Parks and Recreation"
"tt0387199","Entourage: Fama & Amizade"
"tt1196946","O Mentalista"
"tt1839578","Pessoa de Interesse"
"tt2661044","Os 100"
"tt2357547","Jessica Jones"
"tt1837492","13 Reasons Why"
"tt1586680","Shameless"
"tt0098936","Twin Peaks"
"tt2741602","Lista Negra"
"tt1578873","Maldosas"
"tt0407362","Battlestar Galactica"
"tt0165598","De Volta aos Anos 70"
"tt0397442","Gossip Girl: A Garota do Blog"
"tt0384766","Roma"
"tt1606375","Downton Abbey"
"tt1219024","Castle"
"tt0452046","Mentes Criminosas"
"tt0460627","Bones"
"tt1327801","Glee: Em Busca da Fama"
"tt0934814","Chuck"
"tt0118276","Buffy: A Caça-Vampiros"
"tt1486217","Archer"
"tt0193676","Freaks & Geeks"
"tt1567432","Lobo Adolescente"
"tt0487831","The IT Crowd"
"tt0212671","Malcolm"
"tt1837642","Revenge"
"tt0279600","Smallville: As Aventuras do Superboy"
"tt1358522","Crimes do Colarinho Branco"
"tt0248654","A Sete Palmos"
"tt2431438","Sense8"
"tt0410975","Desperate Housewives"
"tt2442560","Peaky Blinders"
"tt0098800","Um Maluco no Pedaço"
"tt0496424","Um Maluco na TV"
"tt1235099","Engana-me se Puder"
"tt0364845","NCIS: Investigações Criminais"
"tt0397306","American Dad!"
"tt1830617","Grimm: Contos de Terror"
"tt4052886","Lucifer"
"tt1628033","Top Gear"
"tt0439100","Weeds"
"tt1553656","O Domo"
"tt2628232","Penny Dreadful"
"tt2632424","Os Originais"
"tt0214341","Dragon Ball Z"
"tt1870479","The Newsroom"
"tt2467372","Brooklyn 9-9"
"tt2560140","Ataque dos Titãs"
"tt0159206","Sex and the City"
"tt0460091","Meu Nome é Earl"
"tt1474684","Luther"
"tt3743822","Fear the Walking Dead"
"tt2575988","Silicon Valley"
"tt2191671","Elementar"
"tt2234222","Orphan Black"
"tt0092400","Um Amor de Família"
"tt3205802","Lições de um Crime"
"tt1548850","Misfits"
"tt1845307","Duas Garotas em Apuros"
"tt1462059","Falling Skies"
"tt0238784","Gilmore Girls: Tal Mãe, Tal Filha"
"tt1637727","The Killing"
"tt2071645","The Following"
"tt3322314","Luke Cage"
"tt1831164","Leyla ile Mecnun"
"tt0290978","The Office"
"tt0264235","Segura a Onda"
"tt2070791","Revolução"
"tt1695360","Avatar: A Lenda de Korra"
"tt0092455","Jornada nas Estrelas: A Nova Geração"
"tt0096657","Mr. Bean"
"tt4016454","Supergirl"
"tt0118421","Oz"
"tt2188671","Motel Bates"
"tt0348914","Deadwood"
"tt1641349","Terra Nova"
"tt3647998","Taboo"
"tt0491738","Psych"
"tt2375692","Black Sails"
"tt3322310","Punho de Ferro"
"tt3006802","Outlander"
"tt0118480","Stargate SG-1"
"tt0362359","The O.C.: Um Estranho no Paraíso"
"tt1489428","Justified"
"tt0247082","CSI: Investigação Criminal"
"tt2017109","Banshee"
"tt0072500","Fawlty Towers"
"tt1355642","Fullmetal Alchemist: Brotherhood"
"tt2654620","The Strain"
"tt1492966","Louie"
"tt2094262","Da Vinci's Demons"
"tt0206512","Bob Esponja Calça Quadrada"
"tt0165581","O Rei do Queens"
"tt1220617","The Inbetweeners"
"tt0840196","Skins"
"tt0103359","Batman: A Série Animada"
"tt0213338","Kaubôi bibappu"
"tt0368530","Lances da Vida"
"tt0158552","Jovens Bruxas"
"tt4532368","Lendas do Amanhã"
"tt0810788","Burn Notice"
"tt0286486","The Shield: Acima da Lei"
"tt1199099","As Aventuras de Merlin"
"tt0106004","Frasier"
"tt5189670","Making a Murderer"
"tt0060028","Jornada nas Estrelas"
"tt0203259","Lei e Ordem: Unidade de Vítimas Especiais"
"tt3475734","Agente Carter da Marvel"
"tt2149175","The Americans"
"tt1837576","Escândalos: Os Bastidores do Poder"
"tt0412253","Veronica Mars: A Jovem Espiã"
"tt2249007","Ray Donovan"
"tt2249364","Broadchurch"
"tt1723816","Girls"
"tt0758790","The Tudors"
"tt1305826","Hora de Aventura"
"tt1442462","The Good Wife"
"tt1441135","Linha do Tempo"
"tt0162065","Angel: O Caça-Vampiros"
"tt2699128","The Leftovers"
"tt4508902","One Punch Man: Wanpanman"
"tt1600194","Hawaii Five-0"
"tt0988824","Naruto Shippuden"
"tt0312172","Monk: Um Detetive Diferente"
"tt3530232","Last Week Tonight with John Oliver"
"tt0115167","Raymond e Companhia"
"tt0851851","O Exterminador do Futuro: As Crônicas de Sarah Connor"
"tt2647544","Sleepy Hollow"
"tt0374455","Stargate: Atlantis"
"tt2294189","The Fall"
"tt0052520","Além da Imaginação"
"tt0063929","Monty Python's Flying Circus"
"tt1954347","Continuum"
"tt1740299","The Man in the High Castle"
"tt5114356","Legion"
"tt4474344","Ponto Cego"
"tt0200276","West Wing: Nos Bastidores do Poder"
"tt0805663","Jericho"
"tt2189461","Marco Polo"
"tt0121220","Dragon Ball Z"
"tt0361217","Estética"
"tt4635282","The OA"
"tt4422836","Limitless"
"tt2788432","American Crime Story"
"tt0925266","Pushing Daisies"
"tt1307824","V: Visitantes"
"tt0388629","One Piece: Wan pîsu"
"tt0409591","Naruto"
"tt5071412","Ozark"
"tt0383126","Os Caçadores de Mitos"
"tt1561755","Bob's Burgers"
"tt0758745","Friday Night Lights"
"tt0863046","Flight of the Conchords"
"tt0313043","CSI: Miami"
"tt5290382","Mindhunter"
"tt0421357","Fullmetal Alchemist"
"tt3487382","Forever: Uma Vida Eterna"
"tt2402207","The Last Ship"
"tt5016504","Preacher"
"tt4635276","Master of None"
"tt2618986","Wayward Pines"
"tt0262150","Black Books"
"tt0092359","Três É Demais"
"tt3501584","iZombie"
"tt3398228","BoJack Horseman"
"tt0866442","Eastbound & Down"
"tt0445114","Extras"
"tt0112178","Jornada nas Estrelas: Voyager"
"tt1592154","Nikita"
"tt3339966","Unbreakable Kimmy Schmidt"
"tt4786824","The Crown"
"tt4189022","Ash vs Evil Dead"
"tt2654580","Almost Human"
"tt5834204","O Conto da Aia"
"tt0187664","Spaced"
"tt3230854","The Expanse"
"tt4428122","Quantico"
"tt1051220","As Crônicas de Shannara"
"tt5555260","This Is Us"
"tt1344204","Blue Mountain State"
"tt0108757","Plantão Médico"
"tt0157246","Will & Grace"
"tt0068098","M*A*S*H"
|
8edffe09e7b427b1bc476779b90867ef63f366f7 | 2e731f06724220b65c2357d6ce825cf8648fdd30 | /dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612726706-test.R | b6cdcfe209bd1f61b8cc2281fcc62d6efbb597cb | [] | no_license | akhikolla/updatedatatype-list1 | 6bdca217d940327d3ad42144b964d0aa7b7f5d25 | 3c69a987b90f1adb52899c37b23e43ae82f9856a | refs/heads/master | 2023-03-19T11:41:13.361220 | 2021-03-20T15:40:18 | 2021-03-20T15:40:18 | 349,763,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 808 | r | 1612726706-test.R | testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = NA_integer_, item_score = c(60395L, -336909525L, 724256326L, 1179010630L, 1179010630L, 724249387L, 565774848L, 724249387L, 724249387L, 724249579L, 539365486L, NA, 285194751L, 565837823L, 220932907L, 167968767L, -13238017L, -268435658L, 16772896L, 352L, 0L, 1048832L, 65465L, -1179055616L, 16777216L, 0L, 0L, 16714240L, -1438733632L, 50270463L, -1179010631L, -62198L, 11152171L, 722076415L, 16042L, -1073725782L, -1073545472L, 28275L, -1073545456L, -4587743L, -1174405363L), person_id = c(-277151990L, 906034159L, 103492422L, 1179010630L, 1179010630L, 1177234219L, 724249387L, NA, 11156994L, -16776981L, 0L, 724249414L))
# Replay the captured fuzzer argument list against the target function and
# print the structure of the result (crash-regression check).
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
1e14aeaa175b9972e8f90af67abf0bbcafc62502 | 8f379b92570d473941e6048dea1bbb30f73455d0 | /logistic/dp_mix.R | b6399519d7e352d3fecd6067ec3940d877ca57dc | [] | no_license | stjordanis/bnp_hw | 2cbf2ab1069212e6b7510d4bf6339f4a94f254d3 | 8931f829d093c7a13d7860a23c85f400e331d77a | refs/heads/master | 2020-07-16T06:04:27.211310 | 2016-11-30T04:40:22 | 2016-11-30T04:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 322 | r | dp_mix.R | n <- 100
# Simulate a piecewise-linear dataset: four segments of n points each,
# where segment i is drawn uniformly on its own x-interval and has
# intercept beta[2*i - 1] and slope beta[2*i]; unit-variance Gaussian
# noise is added to the fitted means at the end. (n is defined above.)
beta <- c(0,1,6,-1,-12,2,20,-2)
x1 <- runif(n,0,3)
x2 <- runif(n,3,6)
x3 <- runif(n,6,8)
x4 <- runif(n,8,10)
x.list <- list(x1,x2,x3,x4)
x <- unlist(x.list)
y <- rep(0,n*4)
# Vectorised per-segment means (replaces the former scalar double loop;
# identical values, and no RNG is involved here so draws are unchanged).
for (i in 1:4) {
  y[(i - 1) * n + seq_len(n)] <- beta[2 * i - 1] + beta[2 * i] * x.list[[i]]
}
y <- rnorm(4*n,y,1)
plot(x,y)
|
d124453a14d2b1e80935d9114ddf0d1b6f361285 | daccbc095ccb9be61622399c2cfa3c3319aafbe0 | /R/pred.R | 569fed3d5f72bb97ac44e8bd8a62400d9225fb7f | [] | no_license | menghaomiao/aitr | c2199837ef5e125b73838233779fc997aa8e3cd3 | 6cfb60c0ae63ef7dd43b3f8c0f78293c1eeea5bb | refs/heads/master | 2022-11-05T21:13:50.866225 | 2020-06-18T23:14:03 | 2020-06-18T23:14:03 | 110,192,891 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 285 | r | pred.R | pred=function(inner) {
d=dim(inner)
res=matrix(0, d[1], d[2])
rule=sapply(1:d[1], function(i) which.max(inner[i, ]))
res[cbind(1:d[1], rule)]=1
colnames(res)=colnames(inner)
rownames(res)=rownames(inner)
class(res)=c('ITR', 'matrix')
attr(res, 'outcome.ratio')=1
return(res)
} |
511c6752736a4ea806c210f893408ec130447be5 | e2ff97cda448ae0534afdbd1cb614111ed9eddc6 | /toms_scripts/plotting_from_web.R | a0b4e7e974d3e05fa9c67cf8516cf65d95bc5562 | [] | no_license | AugustT/Freshwater_Hack | 08462c5d281455d6cbce3ec5c08ab2369b1e663e | 6ec80562cfd09d212d36386c1d26272b9618d9c9 | refs/heads/master | 2021-01-22T04:10:44.332134 | 2017-08-03T15:38:41 | 2017-08-03T15:38:41 | 92,438,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,548 | r | plotting_from_web.R | my.glmergplot <- function(
# version 0.43
# written by tiflo@csli.stanford.edu
# code contributions from Austin Frank, Ting Qian, and Harald Baayen
# remaining errors are mine (tiflo@csli.stanford.edu)
#
# last modified 12/15/10
#
# now also supports linear models
# backtransforms centering and standardization
#
# known bugs:
# too simple treatment of random effects
#
model,
name.predictor,
name.outcome= "outcome",
predictor= NULL,
# is the predictor centered IN THE MODEL?
# is the predictor transformed before
# (centered and) ENTERED INTO THE MODEL?
predictor.centered= if(!is.null(predictor)) { T } else { F },
predictor.standardized= F,
predictor.transform= NULL,
fun= NULL,
type= "hex",
main= NA,
xlab= NA,
ylab= NA,
xlim= NA,
ylim= NA,
legend.position="right",
fontsize=16,
col.line= "#333333",
col.ci= col.line,
lwd.line= 1.2,
lty.line= "solid",
alpha.ci= 3/10,
hex.mincnt= 1,
hex.maxcnt= nrow(model@frame) / 2,
hex.limits = c(round(hex.mincnt), round(hex.maxcnt)),
hex.limits2 = c(round(match.fun(hex.trans)(hex.mincnt)), round(match.fun(hex.trans)(hex.maxcnt))),
hex.midpoint = (max(hex.limits) - (min(hex.limits) - 1)) / 2,
hex.nbreaks = min(5, round(match.fun(hex.trans)(max(hex.limits)) - match.fun(hex.trans)(min(hex.limits))) + 1),
hex.breaks = round(seq(min(hex.limits), max(hex.limits), length.out=hex.nbreaks)),
hex.trans = "log10",
...
)
{
if (!is(model, "mer")) {
stop("argument should be a mer model object")
}
if ((length(grep("^glmer", as.character(model@call))) == 1) &
(length(grep("binomial", as.character(model@call))) == 1)) {
model.type = "binomial"
} else {
if (length(grep("^lmer", as.character(model@call))) == 1) {
model.type = "gaussian"
}
}
if (!(model.type %in% c("binomial","gaussian"))) {
stop("argument should be a glmer binomial or gaussian model object")
}
if (!is.na(name.outcome)) {
if (!is.character(name.outcome))
stop("name.outcome should be a string\n")
}
if (!is.na(xlab[1])) {
if (!is.character(xlab))
stop("xlab should be a string\n")
}
if (!is.na(ylab)) {
if (!is.character(ylab))
stop("ylab should be a string\n")
}
# load libaries
require(lme4)
require(Design)
require(ggplot2)
if (predictor.standardized) { predictor.centered = T }
if (is.null(fun)) {
if (is.na(ylab)) {
if (model.type == "binomial") { ylab= paste("Predicted log-odds of", name.outcome) }
if (model.type == "gaussian") { ylab= paste("Predicted ", name.outcome) }
}
fun= I
} else {
if (is.na(ylab)) {
if (model.type == "binomial") { ylab= paste("Predicted probability of", name.outcome) }
if (model.type == "gaussian") { ylab= paste("Predicted ", name.outcome) }
}
fun= match.fun(fun)
}
if (!is.null(predictor.transform)) {
predictor.transform= match.fun(predictor.transform)
} else { predictor.transform= I }
indexOfPredictor= which(names(model@fixef) == name.predictor)
# get predictor
if (is.null(predictor)) {
# simply use values from model matrix X
predictor= model@X[,indexOfPredictor]
# function for predictor transform
fun.predictor= I
if (is.na(xlab)) { xlab= name.predictor }
} else {
# make sure that only defined cases are included
predictor = predictor[-na.action(model@frame)]
# function for predictor transform
trans.pred = predictor.transform(predictor)
m= mean(trans.pred, na.rm=T)
rms = sqrt(var(trans.pred, na.rm=T) / (sum(ifelse(is.na(trans.pred),0,1)) - 1))
fun.predictor <- function(x) {
x= predictor.transform(x)
if (predictor.centered == T) { x= x - m }
if (predictor.standardized == T) { x= x / rms }
return(x)
}
if ((is.na(xlab)) & (label(predictor) != "")) {
xlab= label(predictor)
}
}
# get outcome for binomial or gaussian model
if (model.type == "binomial") {
outcome= fun(qlogis(fitted(model)))
} else {
outcome= fun(fitted(model))
}
## calculate grand average but exclude effect to be modeled
## (otherwise it will be added in twice!)
## random effects are all included, even those for predictor (if any).
## should random slope terms for the predictor be excluded?
## prediction from fixed effects
if (ncol(model@X) > 2) {
Xbeta.hat = model@X[, -indexOfPredictor] %*% model@fixef[-indexOfPredictor]
} else {
Xbeta.hat = model@X[, -indexOfPredictor] %*% t(model@fixef[-indexOfPredictor])
}
## adjustment from random effects
Zb = crossprod(model@Zt, model@ranef)@x
## predicted value using fixed and random effects
Y.hat = Xbeta.hat + Zb
## intercept is grand mean of predicted values
## (excluding fixed effect of predictor)
## (including random effects of predictor, if any)
int = mean(Y.hat)
# slope
slope <- fixef(model)[name.predictor]
## error and confidence intervals
stderr <- sqrt(diag(vcov(model)))
names(stderr) <- names(fixef(model))
slope.se <- stderr[name.predictor]
lower <- -1.96 * slope.se
upper <- 1.96 * slope.se
# setting graphical parameters
if (is.na(ylim)) { ylim= c(min(outcome) - 0.05 * (max(outcome) - min(outcome)), max(outcome) + 0.05 * (max(outcome) - min(outcome)) ) }
if (is.na(xlim)) { xlim= c(min(predictor) - 0.05 * (max(predictor) - min(predictor)), max(predictor) + - 0.05 * (max(predictor) - min(predictor))) }
print("Printing with ...")
print(paste(" int=", int))
print(paste(" slope=", slope))
print(paste(" centered=", predictor.centered))
print(" fun:")
print(fun.predictor)
pdata= data.frame( predictor=predictor, outcome=outcome )
x= seq(xlim[1], xlim[2], length=1000)
fit= int + slope * fun.predictor(x)
ldata= data.frame(
predictor= x,
outcome= fun(fit),
transformed.lower= fun(fit + lower),
transformed.upper= fun(fit + upper)
)
theme_set(theme_grey(base_size=fontsize))
theme_update(axis.title.y=theme_text(angle=90, face="bold", size=fontsize, hjust=.5, vjust=.5))
theme_update(axis.title.x=theme_text(angle=0, face="bold", size=fontsize, hjust=.5, vjust=.5))
p <- ggplot(data=pdata, aes(x=predictor, y=outcome)) +
xlab(xlab) +
ylab(ylab) +
xlim(xlim) +
ylim(ylim) +
opts(legend.position=legend.position, aspect.ratio=1)
# for degbugging:
# panel.lines(rep(mean(x),2), c(min(y),max(y)))
# panel.lines(c(min(x),max(x)), c(mean(y),mean(y)))
if (type == "points") {
p <- p + geom_point(alpha=3/10)
} else if (type == "hex") {
p <- p + geom_hex(bins = 30) +
scale_fill_gradient2(low= "lightyellow",
mid="orange",
high=muted("red"),
midpoint= hex.midpoint,
space="rgb",
name= "Count",
limits= hex.limits,
breaks= hex.breaks,
trans = hex.trans
)
}
p + geom_ribbon(data=ldata,
aes( x= predictor,
ymin=transformed.lower,
ymax=transformed.upper
),
fill= col.ci,
alpha= alpha.ci
) +
geom_line(data=ldata,
aes(x= predictor,
y=outcome
),
colour= col.line,
size= lwd.line,
linetype= lty.line,
alpha=1
)
} |
d083e145f2a6af32ca18a402cc57555f818a5b51 | 4625017d85fd07a2019f7905ab0c0d24003c8f42 | /total_disance/DART_transform.R | 980c1972c3ca02f90673ceb3f0bccfcfd85e3fbb | [] | no_license | PatrickKratsch/DART_analysR | 9606f9c59534095a2e020f2806128520436ab4b4 | 359b9265a763abcb21f5e525e4cdce240262d10d | refs/heads/master | 2021-06-16T20:51:43.084150 | 2020-05-11T20:43:36 | 2020-05-11T20:43:36 | 89,875,831 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,219 | r | DART_transform.R | DART_transform <- function(data){
  # data = input to this function, as called by DART_transform_batch -
  #        one .csv file at a time from a directory of output .csv
  #        files, as defined by dir_path in DART_transform_batch
  # Returns a data.table whose first column is the original time stamps
  # (minus the final row) and whose remaining columns hold the per-interval
  # Euclidean displacement for each tracked object.
  # Load relevant packages
  library("data.table")
  # Transform data into data.table (setDT converts by reference)
  setDT(data)
  # Generate an offset table to prepare displacement analysis:
  # shift all rows up by one, repeating the last row so the
  # table keeps its original length.
  data_offset <- rbind(data[2:data[, .N], ], data[data[, .N], ])
  # Per-interval deltas for every coordinate column.
  # Assumes column 1 is time and columns 2+ are x/y pairs -- TODO confirm.
  data_displacement <- cbind(data[, 1], data_offset[, 2:ncol(data_offset)] - data[, 2:ncol(data)])
  # Save time in vector to bin it back to data.table
  # once movement is generated
  time <- data_displacement[, 1]
  # Now, get displacement data.table without time,
  # and calculate movement - bind time back to movement.
  # Each consecutive (dx, dy) column pair collapses to sqrt(dx^2 + dy^2).
  data_displacement2 <- data_displacement[, 2:ncol(data_displacement)]
  movement <- data_displacement2[, lapply(1:(ncol(.SD)/2), function(x) sqrt((.SD[[2*x-1]])^2 + (.SD[[2*x]])^2))]
  movement <- cbind(time, movement)
  # Delete last row, because there is no movement for the last second
  # (its delta was computed against the duplicated final row, so it is zero)
  movement <- movement[-(movement[, .N]), ]
  movement
}
d8b06a1cdac0270f6cdd7b812d860aa682e18fb7 | e0f3cee21bd65684b1548e0ae82d4fc429ca5642 | /man/maize.G.Rd | a28e72885757e0f8f9354d36502f0e70690a9e28 | [] | no_license | tony2015116/lme4GS | d684d0c9d6ac81395899190a85e7043a16ee6648 | 5d6b79bacdce7a77840054e2497e92029aed6524 | refs/heads/master | 2020-05-16T18:59:27.150115 | 2019-03-28T22:18:35 | 2019-03-28T22:18:35 | 183,245,209 | 2 | 0 | null | 2019-04-24T14:24:36 | 2019-04-24T14:24:35 | null | UTF-8 | R | false | false | 430 | rd | maize.G.Rd | \name{maize.G}
\alias{maize.G}
\title{Genomic relationship matrix for maize lines}
\description{
  A matrix of relationships between individuals for
  parents of two heterotic groups. The matrix was computed
  from 511 SNPs using the function A.mat included in
  the rrBLUP package (Endelman, 2011). The row names and column
  names of this matrix correspond to the genotype
  identifiers for Parent 1 and Parent 2.
}
\keyword{datasets}
|
a156304629659cc8be82cfedb6264dda99fa8dfb | dbf0880167dedb547079f8b2fe595c78b5a92392 | /man/encounter.Rd | 225a204c7de88d91cc20c4cf0fba83e151413fa0 | [] | no_license | davan690/ctmm | 77b39af50815e83de8dac7ca0a27e9a53834d458 | e082e4aa7184ddc953b0d244d2a31c380a70930d | refs/heads/master | 2023-06-22T17:47:28.980631 | 2021-06-29T00:57:15 | 2021-06-29T00:57:15 | 266,095,610 | 0 | 0 | null | 2020-05-22T11:38:06 | 2020-05-22T11:38:05 | null | UTF-8 | R | false | false | 2,504 | rd | encounter.Rd | \name{encounter}
\alias{encounter}
\encoding{UTF-8}
%\alias{overlap.ctmm}
%\alias{overlap.telemetry}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Calculate the conditional location distribution of encounters}
\description{Functions to calculate the location distribution of where encounters take place, conditional on said encounters taking place, as described in Noonan et al (2021).}
\usage{ encounter(object,include=NULL,exclude=NULL,debias=FALSE,...) }
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{A \code{list} of aligned \code{UD} objects.}
\item{include}{A matrix of interactions to include in the calculation (see Details below).}
\item{exclude}{A matrix of interactions to exclude in the calculation (see Details below).}
\item{debias}{Approximate GRF bias correction (in development).}
\item{...}{Additional arguments for future use.}
}
\details{
The \code{include} argument is a matrix that indicates which interactions are considered in the calculation.
By default, \code{include = 1 - diag(length(object))}, which implies that all interactions are considered aside from self-interactions. Alternatively, \code{exclude = 1 - include} can be specified, and is by-default \code{exclude = diag(length(object))}, which implies that only self-encounters are excluded.
}
\value{A \code{UD} object.}
%% ~put references to the literature/web site here ~
\references{
M. J. Noonan, R. Martinez-Garcia, G. H. Davis, M. C. Crofoot, R. Kays, B. T. Hirsch, D. Caillaud, E. Payne, A. Sih, D. L. Sinn, O. Spiegel, W. F. Fagan, C. H. Fleming, J. M. Calabrese, ``Estimating encounter location distributions from animal tracking data'', Methods in Ecology and Evolution (2021) \doi{10.1111/2041-210X.13597}.
}
\author{C. H. Fleming}
%\note{}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{ \code{\link{akde}}, \code{\link{overlap}} }
\examples{\donttest{
# Load package and data
library(ctmm)
data(buffalo)
# fit models for first two buffalo
GUESS <- lapply(buffalo[1:2], function(b) ctmm.guess(b,interactive=FALSE) )
# in general, you should use ctmm.select here
FITS <- lapply(1:2, function(i) ctmm.fit(buffalo[[i]],GUESS[[i]]) )
names(FITS) <- names(buffalo[1:2])
# create aligned UDs
UDS <- akde(buffalo[1:2],FITS)
# calculate CDE
CDE <- encounter(UDS)
# plot data and encounter distribution
plot(buffalo[1:2],col=c('red','blue'),UD=CDE,col.DF='purple',col.level='purple',col.grid=NA)
}}
|
331653682bf4290a8fff2096c79cde1b9df839cb | b1cd4f5a053dbed4c5d0b5374c730aca5a63013b | /hitandrun/R/shakeandbake.R | 225a204c7de88d91cc20c4cf0fba83e151413fa0 | [] | no_license | gertvv/hitandrun | 98ce7f8c9829afb13d2c842dcd4f21728b794429 | e19dfd7cced45e6714123531ad3e02f25f98a2f3 | refs/heads/master | 2022-06-16T06:11:19.311279 | 2022-05-27T11:31:04 | 2022-05-27T11:31:04 | 1,940,777 | 12 | 6 | null | 2018-05-20T10:58:00 | 2011-06-23T10:02:49 | R | UTF-8 | R | false | false | 1,170 | r | shakeandbake.R | sab.init <- function(constr,
    thin.fn = function(n) { ceiling(log(n + 1)/4 * n^3) },
    thin = NULL,
    x0.randomize = FALSE, x0.method="slacklp",
    x0 = NULL,
    eliminate=TRUE) {
  # Initialize shake-and-bake sampler state: reuse the hit-and-run setup
  # (constraint preprocessing, starting point x0, thinning), then record
  # the starting face index the shake-and-bake chain needs.
  state <- har.init(constr, thin.fn, thin, x0.randomize, x0.method, x0, eliminate)
  state$i0 <- findFace(state$x0, state$constr) # find the closest face of the polytope
  state
}
sab.run <- function(state, n.samples) {
  # Draw n.samples shake-and-bake samples from the state produced by
  # sab.init(), advancing the chain so repeated calls continue it.
  result <- with(state, {
    n <- length(x0) - 1
    if (n == 0) {
      # Degenerate case: the (transformed) polytope is a single point, so
      # every sample is the translate of the basis; chain indices are trivial.
      list(samples = matrix(rep(basis$translate, each=n.samples), nrow=n.samples), xN = 1, iN = 1)
    } else {
      # Thinned run: generate n.samples * thin raw iterations, keep every
      # thin-th one.
      sab(x0, i0, constr, N=n.samples * thin, thin=thin, homogeneous=TRUE, transform=transform)
    }
  })
  # Persist the final chain position (point xN and face iN) back into the
  # state so a subsequent sab.run() resumes where this one stopped.
  state$x0 <- result$xN
  state$i0 <- result$iN
  list(state = state, samples = result$samples, faces = result$faces)
}
shakeandbake <- function(constr,
    n.samples=1E4,
    thin.fn = function(n) { ceiling(log(n + 1)/4 * n^3) },
    thin = NULL,
    x0.randomize = FALSE, x0.method="slacklp",
    x0 = NULL,
    eliminate=TRUE) {
  # Convenience one-shot interface: initialize the sampler state, run the
  # chain for n.samples draws, and return only the sample matrix.
  sampler_state <- sab.init(constr, thin.fn, thin, x0.randomize, x0.method, x0, eliminate)
  sab.run(sampler_state, n.samples)$samples
}
|
9e79d630f56f5d1985dbcb0d9d0c33ebeaf0dc38 | 68ba515003cc405aeeddfbd35bd6423b75edb422 | /rankall.R | 4f544d875ffecbf391ce647900416b8eea2dd0e1 | [] | no_license | wayneheller/ProgrammingAssignment3 | 00cb64f3d29183b11eb655948550854d5c68f600 | 77d18b389e9d1b61ecf1a7c1f413374ea278cf41 | refs/heads/master | 2021-01-12T04:55:37.641466 | 2017-01-02T23:15:55 | 2017-01-02T23:15:55 | 77,812,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,304 | r | rankall.R | ###############################################################################
## Coursera R Programming ##
## Programming Assignment #3 ##
## Wayne Heller ##
## 12/31/2016 ##
###############################################################################
## The rankall function takes 2 arguments:
# an outcome name, and the rank of the hospital within the state.
# The rank may be a number or 'best' or 'worst'
# The function reads the outcome-of-care-measures.csv and returns a
# character vector with the name of the hospital and the state abbreviation
# that has the specified ranking of 30-day mortality for the specified outcome
# in that state.
# The outcomes can be one of "heart attack", "heart failure", or "pneumonia".
# Hospitals that do not have data on a particular
# outcome are excluded from the set of hospitals when deciding the rankings.
# Handling ties: If there is a tie for the best hospital for a given outcome,
# then the hospital names are sorted in alphabetical order and the
# first hospital in that set is chosen.
# This function returns a data.frame with columns 'hospital' and 'state'
rankall <- function(outcome, num = 'best') {
    ## Read outcome data.
    ## stringsAsFactors = FALSE is necessary so hospital names stay character;
    ## "Not Available" entries in the outcome columns become NA.
    outcomeDf <- read.csv("outcome-of-care-measures.csv",
                          na.strings = "Not Available", stringsAsFactors = FALSE)

    ## Sorted vector of state abbreviations present in the file, so the
    ## output rows come back in alphabetical state order.
    ## (Bug fix: this previously referenced an undefined object `df`.)
    states <- sort(unique(outcomeDf$State))

    ## Validate outcome
    valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
    if (!(outcome %in% valid_outcomes)) {
        stop("invalid outcome")
    }

    ## Validate num (ranking): must be 'best', 'worst', or a number
    if (!(num %in% c('best', 'worst'))) {
        if (!(is.numeric(num))) {
            stop("invalid rank")
        }
    }

    ## Map each valid outcome to its column index in the data file
    valid_outcome_colidx <- c("heart attack" = 11, "heart failure" = 17,
                              "pneumonia" = 23)

    ## Keep only the columns needed for the analysis:
    ## hospital name (col 2), state (col 7), and the 30-day mortality column
    ## for the requested outcome.
    analysisDf <- outcomeDf[, c(2, 7, valid_outcome_colidx[outcome])]
    names(analysisDf) <- c("Hospital", "State", "Outcome")

    ## Hospitals without data for this outcome are excluded from the rankings
    analysisDf <- analysisDf[complete.cases(analysisDf), ]

    ## Preallocate the result vector: one hospital (or NA) per state
    hospitals <- character(length(states))

    for (k in seq_along(states)) {
        ## Rows for this state, ranked by outcome; ties broken alphabetically
        ## by hospital name so the first hospital in a tie is chosen.
        stateDf <- analysisDf[analysisDf$State == states[k], ]
        stateDf <- stateDf[order(stateDf$Outcome, stateDf$Hospital), ]

        ## Translate the requested rank into a row index
        if (num == 'best') {
            rowidx <- 1
        } else if (num == 'worst') {
            rowidx <- nrow(stateDf)
        } else {
            rowidx <- as.integer(num)
        }

        ## NA when the requested rank is outside the available hospitals
        ## (also guards the empty-state case, where nrow(stateDf) is 0).
        if (rowidx < 1 || rowidx > nrow(stateDf)) {
            hospitals[k] <- NA
        } else {
            hospitals[k] <- stateDf$Hospital[rowidx]
        }
    }

    ## One row per state: the matching hospital (or NA) and the state code
    data.frame(hospital = hospitals, state = states)
}
e10278846c483d14b5ca27783e443d7875697859 | c5c6c283f4bd0d4a57e1e2ddf9f1fb9d114c55ff | /at2/combine_datasets.R | 58bb6a0974091d04e9107982d80b2c7c2573c2c3 | [] | no_license | mutazag/DSI | ac26c8dd0735c08a45ad9bfa6dd13701b8f66ea9 | 831ed991af7101fcbf5bc44e47e816e3a03f998e | refs/heads/master | 2020-03-24T12:31:12.015443 | 2018-07-28T23:31:03 | 2018-07-28T23:31:03 | 142,716,678 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,637 | r | combine_datasets.R | #### combine data sets ####
#### header ####
# NOTE(review): hard-coded absolute path; this script only runs from this
# machine as written.
setwd("C:/mdsi/dsi/at2")
library(readr)
library(dplyr)
library(lubridate)

#### read data files ####
# Start with the normalised data files: 1 entry per user per day, and
# 1 entry per day for weather.
sleep <- read_csv("./sleep_adj.csv",locale=readr::locale(tz="Australia/Sydney"))
mood <- read_csv("./mood_adj.csv", locale=readr::locale(tz="Australia/Sydney"))
meetings <- read_csv("./meetings_summary.csv", locale=readr::locale(tz="Australia/Sydney"))
weather <- read_csv("./weather_summary_clean.csv",locale=readr::locale(tz="Australia/Sydney") )

#### normalise data sets ####
# Reduce data sets to the important columns only, and rename all date
# fields to `date` (lowercase) so the joins below share a common key.
sleep <- sleep %>%
  rename(date = day,
         weekday = WeekDay,
         slp.start = start,
         slp.end = end)

mood <- mood %>%
  select(-N, -mood.mean) %>%
  rename(mood.score = mood.rounded)

meetings <- meetings %>%
  mutate(date = as.Date(FirstStart)) %>%
  rename(number.meetings = N) %>%
  # Bug fix: the original `select(-WeekDay - Day)` subtracted the two
  # columns arithmetically instead of deselecting both of them.
  select(-WeekDay, -Day)

weather <- weather %>%
  select(date = Date,
         temp.high,
         temp.low,
         humidity.high,
         precip.sum,
         weather.event = events)

#### combine group data ####
# One row per user per day, with that day's weather attached.
group_df <- left_join(sleep, mood, c("userid", "date"))
group_df <- left_join(group_df, weather, c("date"))

#### combine personal data ####
# Personal view: user 6 only, with their daily meeting counts joined in.
personal_df <- group_df %>% filter(userid==6)
personal_df <- left_join(personal_df, meetings, "date")

#### save files to disk ####
write_csv(group_df, "./combined_group.csv")
write_csv(personal_df, "./combined_personal.csv")
e63a111dc626d0f375554a009b928a62c8ee5d4e | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/jennybc/r-graph-catalog/fig08-02_gold-price-data-high-low-chart.R | 627f33d4c658eba3c9d97f78ad6d1a4619800e6d | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,330 | r | fig08-02_gold-price-data-high-low-chart.R | library(ggplot2)
this_base <- "fig08-02_gold-price-data-high-low-chart"
my_data <- data.frame(
Year = 1979:1996,
Average = c(306, 615, 460, 376, 424, 361, 317, 368, 447, 437, 381,
383.51, 362.11, 343.82, 359.77, 384, 383.79, 387.81),
High = c(510, 850, 599, 475, 505, 402, 340, 440, 500, 480,
415, 425, 405, 360, 415, 400, 400, 420),
Low = c(215, 480, 390, 298, 380, 306, 280, 330, 390, 395, 360,
350, 345, 330, 326, 370, 370, 366))
p <- ggplot(my_data, aes(x = Year, y = Average)) +
geom_point(shape = 95, size = 4) +
geom_segment(aes(x = Year - 0.15, xend = Year - 0.15,
y = Low, yend = High), hjust = 4) +
scale_x_continuous(breaks = seq(1975, 1995, 5), limits = c(1975, 1997),
expand = c(0, 0)) +
scale_y_continuous(breaks = seq(100, 900, 100), limits = c(100, 950),
expand = c(0, 0),
labels = c(100, "", 300, "", 500, "", 700, "", 900)) +
labs(x = "Year", y = "Gold Price (dollars)") +
ggtitle("Fig 8.2 Gold Price Data: High-Low Chart") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = rel(1.2), face = "bold"))
p
ggsave(paste0(this_base, ".png"),
p, width = 7, height = 5)
|
b4237b693695f03a12db870c44869fea9b020bf9 | f327df8fff4ebce6f71dd49caeb8a7bef3e2112d | /smallGlobPrior.R | 9a800bb3389514db1a4543e6cc4b6c0d69ecb7bb | [] | no_license | XinyuTian/EB_pinc | fff0ad31c5ba671f1cc79f3664bb3de685018372 | d9a7b237d8ade418be1556e368d8092f61014986 | refs/heads/master | 2021-01-20T15:59:09.041742 | 2016-07-15T22:18:08 | 2016-07-15T22:18:08 | 60,817,803 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,385 | r | smallGlobPrior.R | ## GOES WITH CALC_LIK.R
load("/Users/Xinyu/Documents/Bayes/code/integrationExpr.Rdata")
load("/Users/Xinyu/Documents/Bayes/code/integrationMeth.Rdata")
load("/Users/Xinyu/Documents/Bayes/code/methylationID.Rdata") # gbIND prIND
load("/Users/Xinyu/Documents/Bayes/code/workingList.Rdata") # workingList, G1, G2, template_promoter_CpGs, template_body_CpGs, nruns
library(smoothie)
library(aws)
res_expr <- dim(integrationExpr)[3]
# Outer (tensor) product of two probability distributions, optionally 2-D
# kernel smoothed and row- or column-normalised.
#
# matrix1, matrix2 - either two matrices with one distribution per row
#                    (row-wise outer products are averaged) or two plain
#                    vectors (a single outer product is taken).
# smooth_h         - kernel bandwidth; 0 disables smoothing.
# normalize        - "row" (default), "column", or "no".
# kernel           - kernel name forwarded to smoothie::kernel2dsmooth().
tensor_product <- function(matrix1,matrix2,smooth_h=0,normalize=c("row","column","no"),kernel=c("gauss","cauchy","minvar")) {
  # Accumulator sized [ncol(matrix2), ncol(matrix1)] in the matrix case.
  # NOTE(review): in the vector branch the accumulator is created as a
  # length(matrix1) x length(matrix1) square but is later added to a
  # length(matrix1) x length(matrix2) matrix -- suspected bug whenever the
  # two vectors differ in length; confirm intended shapes.
  if (is.matrix(matrix1) && is.matrix(matrix2)) result <- matrix(ncol=ncol(matrix1),nrow=ncol(matrix2),data=rep(0,ncol(matrix1)*ncol(matrix2))) else result <- matrix(ncol=length(matrix1),nrow=length(matrix1),data=rep(0,length(matrix1)*length(matrix2)))
  # Sum the per-row outer products (matrix case) or take the single outer
  # product (vector case); expand.grid + prod builds the outer product.
  if (is.matrix(matrix1) && is.matrix(matrix2)) for (i in 1:nrow(matrix1)) {
    result <- result + matrix(ncol=ncol(matrix1),nrow=ncol(matrix2),byrow=TRUE,data=apply(expand.grid(matrix1[i,],matrix2[i,]), 1, prod))
  } else result <- result + matrix(nrow=length(matrix1),ncol=length(matrix2),byrow=TRUE,data=apply(expand.grid(matrix1,matrix2), 1, prod))
  # Average over rows so the matrix case yields a mean outer product.
  if (is.matrix(matrix1) && is.matrix(matrix2)) result <- result/nrow(matrix1)
  # Optional 2-D kernel smoothing.
  if (!is.null(kernel) && smooth_h > 0) result <- kernel2dsmooth(result,kernel.type = kernel[1],sigma=smooth_h,nx=ncol(matrix2),ny=ncol(matrix1))
  # NOTE(review): the column-normalisation branch iterates 1:nrow(result)
  # while indexing columns -- only correct for square matrices; confirm.
  if (normalize[1] == "row") for (i in 1:nrow(result)) result[i,] <- result[i,]/sum(result[i,]) else if (normalize[1] == "column") for (i in 1:nrow(result)) result[,i] <- result[,i]/sum(result[,i])
  return(result)
}
# Geometric mean of the entries whose logarithm is finite; zeros, negatives
# and NA values are silently dropped before averaging.
geo_mean <- function(data) {
  finite_logs <- Filter(is.finite, log(data))
  exp(mean(finite_logs))
}
# function to generate potential file
# Build the factor-potential text blocks for one sample group: per-CpG
# marginal potentials (promoter and gene body), the smoothed expression
# marginal, and the two expression-vs-methylation tensor potentials.
#
# prGeneCpG, gbGeneCpG - per-sample promoter / gene-body probability matrices
# exprGene             - per-sample expression probability matrix
# sm_param             - c(smooth_expr, smooth_geneBody, smooth_promoter)
#
# NOTE(review): relies on the globals res_expr, promoter_CpGs and
# geneBody_CpGs, and on tensor_product() / aws::kernsm() being loaded --
# confirm at the call sites.
pot <- function(prGeneCpG, gbGeneCpG, exprGene, sm_param) {
  smooth_e <- sm_param[1]
  smooth_gb <- sm_param[2]
  smooth_pr <- sm_param[3]
  # Marginal priors: column means across the samples of this group.
  prior_pr <- apply(prGeneCpG,2,mean)
  prior_gb <- apply(gbGeneCpG,2,mean)
  # Expression prior is 1-D kernel smoothed and renormalised to sum to 1.
  prior_expr <- kernsm(apply(exprGene,2,mean),h=smooth_e)
  prior_expr <- prior_expr@yhat/sum(prior_expr@yhat)
  # One potential block per promoter CpG, all sharing the same prior row.
  string <- paste(prior_pr,collapse=",")
  promoterPots <- paste("\nNAME:\t\tpot_",promoter_CpGs,"\nTYPE:\t\trowNorm\nPOT_MAT:\t[1,",res_expr,"]((",string,"))\nPC_MAT:\t\t[1,",res_expr,"]((",paste(rep(1,res_expr),collapse=","),"))\n",sep="",collapse="")
  # Same pattern for the gene-body CpGs.
  string <- paste(prior_gb,collapse=",")
  geneBodyPots <- paste("\nNAME:\t\tpot_",geneBody_CpGs,"\nTYPE:\t\trowNorm\nPOT_MAT:\t[1,",res_expr,"]((",string,"))\nPC_MAT:\t\t[1,",res_expr,"]((",paste(rep(1,res_expr),collapse=","),"))\n",sep="",collapse="")
  # Expression gets two blocks (likelihood and prior) from the same vector.
  string <- paste(prior_expr,collapse=",")
  expr.pots <- paste("\nNAME:\t\tpot_EXPR.likelihood\nTYPE:\t\trowNorm\nPOT_MAT:\t[1,",res_expr,"]((",string,"))\nPC_MAT:\t\t[1,",res_expr,"]((",paste(rep(1,res_expr),collapse=","),"))\n\nNAME:\t\tpot_EXPR.prior\nTYPE:\t\trowNorm\nPOT_MAT:\t[1,",res_expr,"]((",string,"))\nPC_MAT:\t\t[1,",res_expr,"]((",paste(rep(1,res_expr),collapse=","),"))\n\n",sep="",collapse="")
  # Gene-body x expression tensor potential (res_expr x res_expr).
  result <- tensor_product(gbGeneCpG,exprGene,smooth_h=smooth_gb)
  expr.m <- paste("NAME:\t\tpot_EXPR.M.GB\nTYPE:\t\trowNorm\nPOT_MAT:\t\t[",paste(c(res_expr,res_expr),collapse=","),"]((",paste(apply(result,1,paste,collapse=","),collapse="),\n\t\t\t("),"))\nPC_MAT:\t\t[",paste(c(res_expr,res_expr),collapse=","),"](())\n\n",sep="",collapse="")
  # Promoter x expression tensor potential, appended to the same vector.
  result <- tensor_product(prGeneCpG,exprGene,smooth_h=smooth_pr)
  expr.m <- c(expr.m,paste("NAME:\t\tpot_EXPR.M.P\nTYPE:\t\trowNorm\nPOT_MAT:\t\t[",paste(c(res_expr,res_expr),collapse=","),"]((",paste(apply(result,1,paste,collapse=","),collapse="),\n\t\t\t("),"))\nPC_MAT:\t\t[",paste(c(res_expr,res_expr),collapse=","),"](())\n\n",sep="",collapse=""))
  # Character vector of all potential text blocks, in file order.
  return(list = c(expr.m,expr.pots,promoterPots,geneBodyPots))
}
# function to generate facData file
# Build the factor-data character matrix for one sample group: one row per
# sample, one "[1,res_expr]((v1,...,vK))" formatted cell per variable
# (expression first, then the promoter CpGs, then the gene-body CpGs).
#
# exprGene        - samples x res_expr expression probability matrix
# integrationMeth - CpG x sample x res_expr methylation probability array
#
# NOTE(review): relies on the globals res_expr, IDs_promoter and IDs_body
# being set by the enclosing script -- confirm at the call sites.
fac <- function(exprGene, integrationMeth) {
  # Format one numeric vector as a single-row matrix literal string.
  pasteFac <- function(xx) paste('[1,',res_expr,']((',paste(xx,sep="",collapse=","),'))',sep="",collapse="")
  n_pr <- length(IDs_promoter)
  n_gb <- length(IDs_body)
  # One row per sample in exprGene.
  # (Bug fix: the row count was hard-coded to length(G1), which breaks when
  # this helper is called for a group of a different size, e.g. G2.)
  out <- matrix(ncol = 1 + n_pr + n_gb, nrow = nrow(exprGene))
  out[, 1] <- apply(exprGene, 1, pasteFac)
  out[, 2:(n_pr + 1)] <- t(apply(integrationMeth[IDs_promoter, , , drop=F], c(1,2), pasteFac))
  out[, (n_pr + 2):(1 + n_pr + n_gb)] <- t(apply(integrationMeth[IDs_body, , , drop=F], c(1,2), pasteFac))
  return(out)
}
for (i in length(workingList)) {
system(command=paste('mkdir',i,sep=" "))
system(command=paste('mkdir ./',i,'/G1_model',sep=""))
system(command=paste('mkdir ./',i,'/G1_model/all',sep=""))
system(command=paste('mkdir ./',i,'/G2_model',sep=""))
system(command=paste('mkdir ./',i,'/G2_model/all',sep=""))
system(command=paste('mkdir ./',i,'/full_model',sep=""))
system(command=paste('mkdir ./',i,'/null',sep=""))
system(command=paste('mkdir ./',i,'/null/G1_model',sep=""))
system(command=paste('mkdir ./',i,'/null/G2_model',sep=""))
gene <- workingList[i]
IDs_promoter <- eval(parse(text = paste0('prIND$SID$','"',gene,'"')))
IDs_body <- eval(parse(text = paste0('gbIND$SID$','"',gene,'"')))
#smooth parameters
smooth_e <- 1/(mean(c(length(G1),length(G2)))/res_expr) # rules of thumb
smooth_pr <- trunc(1/(mean(c(length(G1),length(G2)))*length(IDs_promoter)/(res_expr*res_expr))) # rules of thumb
smooth_gb <- trunc(1/(mean(c(length(G1),length(G2)))*length(IDs_body)/(res_expr*res_expr))) # rules of thumb
sm_param <- c(smooth_e, smooth_gb, smooth_pr)
#CpG names
promoter_CpGs <- paste0(template_promoter_CpGs[1:length(IDs_promoter)], '.likelihood')
geneBody_CpGs <- paste0(template_body_CpGs[1:length(IDs_body)], '.likelihood')
#start precomputing correct initialization of parameters
prGeneCpG <- t(apply(integrationMeth[IDs_promoter, , , drop=F],2, function(xx) {xx = apply(xx, 2, geo_mean); xx = xx / sum(xx)}))
gbGeneCpG <- t(apply(integrationMeth[IDs_body, , , drop=F],2, function(xx) {xx = apply(xx, 2, geo_mean); xx = xx / sum(xx)}))
exprGene <- integrationExpr[gene,,]
###########################################################################
############################ Tumor model ###############################
G1_prior = pot(prGeneCpG[G1,], gbGeneCpG[G1,], exprGene[G1,], sm_param)
potentials <- file(paste("./",i,"/G1_model/all/factorPotentials.txt",sep=""),"w")
cat(G1_prior,file=potentials)
close(potentials)
tempS_G1 <- fac(exprGene[G1, , drop=F], integrationMeth[c(IDs_promoter, IDs_body), G1, , drop=F])
rownames(tempS_G1) <- G1
colnames(tempS_G1) <- c("NAME:\tEXPR.likelihood",promoter_CpGs,geneBody_CpGs)
eval(parse(text = paste('write.table(', paste('tempS_G1,file ="./',i,'/G1_model/all/G1_FacData.tab",row.names=TRUE,col.names=TRUE,quote=FALSE,sep="\t",append=FALSE)', sep = ""))))
string<-system(intern=TRUE,command=paste('Rscript ~/Dropbox/My\\ ','R\\ ','Code/EB_pinc/calc_lik.R ./',i,'/G1_model/all/G1_FacData.tab ./',i,'/G1_model/all/factorPotentials.txt',sep=""))
G1_G1model_mlogliks <- as.numeric(string)
###########################################################################
########################### Normal model ###############################
G2_prior = pot(prGeneCpG[G2,], gbGeneCpG[G2,], exprGene[G2,], sm_param)
potentials <- file(paste("./",i,"/G2_model/all/factorPotentials.txt",sep=""),"w")
cat(G2_prior,file=potentials)
close(potentials)
tempS_G2 <- fac(exprGene[G2, , drop=F], integrationMeth[c(IDs_promoter, IDs_body), G2, , drop=F])
rownames(tempS_G2) <- G2
colnames(tempS_G2) <- c("NAME:\tEXPR.likelihood",promoter_CpGs,geneBody_CpGs)
eval(parse(text = paste('write.table(', paste('tempS_G2,file ="./',i,'/G2_model/all/G2_FacData.tab",row.names=TRUE,col.names=TRUE,quote=FALSE,sep="\t",append=FALSE)', sep = ""))))
string<-system(intern=TRUE,command=paste('Rscript ~/Dropbox/My\\ ','R\\ ','Code/EB_pinc/calc_lik.R ./',i,'/G2_model/all/G2_FacData.tab ./',i,'/G2_model/all/factorPotentials.txt',sep=""))
G2_G2model_mlogliks <- as.numeric(string)
###########################################################################
############################ Full model ###############################
Full_prior = pot(prGeneCpG[c(G1,G2),], gbGeneCpG[c(G1,G2),], exprGene[c(G1,G2),], sm_param)
potentials <- file(paste("./",i,"/full_model/factorPotentials.txt",sep=""),"w")
cat(Full_prior,file=potentials)
close(potentials)
tempFac <- rbind(tempS_G1,tempS_G2)
rownames(tempFac) <- c(G1,G2)
colnames(tempFac) <- c("NAME:\tEXPR.likelihood",promoter_CpGs,geneBody_CpGs)
eval(parse(text = paste('write.table(', paste('tempFac,file = "./',i,'/full_model/full_FacData.tab",row.names=TRUE,col.names=TRUE,quote=FALSE,sep="\t",append=FALSE)', sep = ""))))
# query the full model with T and AN samples
string<-system(intern=TRUE,command=paste('Rscript ~/Dropbox/My\\ ','R\\ ','Code/EB_pinc/calc_lik.R ./',i,'/full_model/full_FacData.tab ./',i,'/full_model/factorPotentials.txt',sep=""))
allData_jointModel_mlogliks <- as.numeric(string)
###########################################################################
######################## D calculation ###################################
D <- 2*(sum(allData_jointModel_mlogliks) - (sum(G1_G1model_mlogliks)+sum(G2_G2model_mlogliks)))
###########################################################################
################# P val calculation using null distr. #####################
###########################################################################
Ds <- vector(length=nruns,mode="numeric")
for (run in 1:nruns) {
cur <- sample(x=1:(length(G2)+length(G1)),size=length(G2),replace=FALSE)
# G1
null_G1_prior = pot(prGeneCpG[-cur,], gbGeneCpG[-cur,], exprGene[-cur,], sm_param)
potentials <- file(paste("./",i,"/null/G1_model/factorPotentials.txt",sep=""),"w")
cat(null_G1_prior,file=potentials)
close(potentials)
# query
string<-system(intern=TRUE,command=paste('Rscript ~/Dropbox/My\\ ','R\\ ','Code/EB_pinc/calc_lik.R ./',i,'/G1_model/all/G1_FacData.tab ./',i,'/null/G1_model/factorPotentials.txt',sep=""))
G1_G1model_mlogliks <- as.numeric(string)
# G2
null_G2_prior = pot(prGeneCpG[cur,], gbGeneCpG[cur,], exprGene[cur,], sm_param)
potentials <- file(paste("./",i,"/null/G2_model/factorPotentials.txt",sep=""),"w")
cat(null_G2_prior,file=potentials)
close(potentials)
# query
string<-system(intern=TRUE,command=paste('Rscript ~/Dropbox/My\\ ','R\\ ','Code/EB_pinc/calc_lik.R ./',i,'/G2_model/all/G2_FacData.tab ./',i,'/null/G2_model/factorPotentials.txt',sep=""))
G2_G2model_mlogliks <- as.numeric(string)
Ds[run] <- 2*(sum(allData_jointModel_mlogliks) - (sum(G1_G1model_mlogliks)+sum(G2_G2model_mlogliks)))
if(is.na(Ds[run])) break
}
#if (sd(Ds, na.rm = T) != 0 & D > 0.1) zscore <- (D - mean(Ds, na.rm = T)) / sd(Ds, na.rm = T) else zscore <- -6
if (sd(Ds) != 0 & D > 0.1) zscore <- (D - mean(Ds)) / sd(Ds) else zscore <- -6
pval_zscore <- pnorm(zscore,lower.tail=FALSE)
###########################################################################################
eval(parse(text=paste('write.table(x=t(c(pval_zscore,D,mean(Ds),sd(Ds),zscore)), col.names=FALSE, row.names=FALSE, append=TRUE, file="./',i,'.result")',sep="")))
system(intern=TRUE,command=paste('tar cf ',i,'.tar ',i,sep=""))
}
|
0a8e112398d0ad6ca12407f82610cab561a98b86 | 817dbeead89bb8982d24bf1174594da6a699df47 | /plot1.R | 4fc755341810d192c8c1e8b3e60e4bb6a6da7d3f | [] | no_license | VapoVu/ExData_Plotting1 | 56e57211882621f2bd65c39ed8d76d68b3e2b6db | 1b9bfaa5c3b613c28b99d2adc41355bb22a0a34a | refs/heads/master | 2021-01-15T21:07:50.491176 | 2015-03-07T04:37:25 | 2015-03-07T04:37:25 | 31,724,346 | 0 | 0 | null | 2015-03-05T16:44:36 | 2015-03-05T16:44:36 | null | UTF-8 | R | false | false | 655 | r | plot1.R | # Read input data table
# Read the full dataset; '?' marks missing values in this file.
# (Idiom fixes: <- for assignment, TRUE/FALSE spelled out.)
pvDat <- read.table('household_power_consumption.txt', sep = ';', header = TRUE,
                    na.strings = "?", stringsAsFactors = FALSE)
# Build the combined date-time string before Date is converted, then parse
# both columns.
pvDat$Time <- paste(pvDat$Date, pvDat$Time)
pvDat$Date <- as.Date(pvDat$Date, "%d/%m/%Y")
pvDat$Time <- strptime(pvDat$Time, format = "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
st <- as.Date('01/02/2007', "%d/%m/%Y")
ed <- as.Date('02/02/2007', "%d/%m/%Y")
pv <- pvDat[pvDat$Date >= st & pvDat$Date <= ed, ]
# Plot figure 1: histogram of global active power, written to a 480x480 PNG.
png(file = 'plot1.png', bg = 'white', width = 480, height = 480)
hist(pv$Global_active_power, col = 'red',
     xlab = 'Global Active Power (kilowatts)', main = 'Global Active Power')
dev.off()
|
5baff0e920b0515466923d2c6641682eb25a8fca | 216771b49b5d2cf7c6ca117663dadf146175756c | /Ch6/Exercise11.R | 49d22ead7298ec2228df8455493ece5dc164414e | [] | no_license | NestorasK/Statistical_Learning | 4a3ec7a128a6e1bc3506d45b28dd5f1132f5fc99 | 0151b4bb4b7b25bf82985e8ab2e8ffc43ab8b13f | refs/heads/master | 2020-04-26T14:31:46.143762 | 2020-02-13T22:21:21 | 2020-02-13T22:21:21 | 173,617,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,427 | r | Exercise11.R | # Exercise 11 #
rm(list = ls())
library(MASS)
data(Boston)
set.seed(123)
# (a)
# training set
train.index <- sample(x = 1:nrow(Boston), size = 0.7 * nrow(Boston))
trainSet <- Boston[train.index, ]
testSet <- Boston[-train.index, ]
# Best Subset Selection #
library(leaps)
# Predictions functions
# S3 predict method for `regsubsets` objects (leaps package): fitted values
# for the best model of size `id`.
#
# object  - a fitted `regsubsets` object
# newdata - data frame to predict on
# id      - model size (number of predictors) whose coefficients to use
# ...     - unused; kept for S3 predict() method compatibility
# (Idiom fix: <- replaces = for assignment throughout.)
predict.regsubsets <- function(object, newdata, id, ...) {
  # Recover the model formula from the original regsubsets() call.
  form <- as.formula(object$call[[2]])
  mat <- model.matrix(form, newdata)
  coefi <- coef(object, id = id)
  # Multiply only the columns belonging to the selected model.
  xvars <- names(coefi)
  mat[, xvars] %*% coefi
}
# Ten fold Cross Validation
nfolds <- 10
folds <- sample(x = c(1:nfolds), size = nrow(trainSet), replace = TRUE)
# Initialize table
CV_MSEs <- matrix(data = NA, nrow = nfolds, ncol = ncol(Boston) - 1)
for(i in 1:nfolds){
# Training Set
trainSet_i <- trainSet[folds != i, ]
# Test set
testSet_i <- trainSet[folds == i, ]
# Fitting model
fit.cv <- regsubsets(x = crim ~ ., data = trainSet_i, nvmax = ncol(trainSet_i))
for(j in 1:(ncol(trainSet_i)-1)){
# Predictions
pred_j <- predict.regsubsets(object = fit.cv, newdata = testSet_i, id = j)
# Calculate Mean Square Error per id
CV_MSEs[i, j] <- mean((pred_j - testSet_i$crim)^2)
}
}
# Mean MSE per model size
mean_mses <- apply(X = CV_MSEs, MARGIN = 2, FUN = mean)
# Plots
par(mfrow = c(1,2))
boxplot(CV_MSEs, xlab = "Model Size", ylab = "CV Mean Square Error", main = "Best Subset Selection")
plot(x = mean_mses, type = "b")
points(x = which.min(mean_mses), min(mean_mses), col = "red", pch = 19, xlab = "Model Size",
ylab = "Mean CV MSE", main = "Best Subset Selection")
# test MSE
# fit model in the full TRAINING dataset for the best number of features found from CV
fit.train <- regsubsets(x = crim ~ ., data = trainSet, nvmax = 2)
coef(object = fit.train, id = 2)
# Predictions on test set
pred.test <- predict.regsubsets(object = fit.train, newdata = testSet, id = 2)
# Final Mean Square Error
mse.test <- mean((pred.test - testSet$crim)^2)
# Ridge Regression #
library(glmnet)
# Generate my grid
grid <- 10^seq(10,-2,length=100)
# Fit ridge regression
# Ten fold cross validation is used to identify the best lambda
fit.ridge <- cv.glmnet(x = model.matrix(object = crim ~ ., data = trainSet)[, -1],
y = trainSet$crim, lambda = grid, alpha = 0)
# Plot the model
par(mfrow = c(1,2))
plot(fit.ridge)
plot(fit.ridge$glmnet.fit, xvar = "lambda")
# Evaluate test set
pred.ridge <- predict(object = fit.ridge, s = fit.ridge$lambda.min,
newx = model.matrix(object = crim ~ ., data = testSet)[, -1])
mse.ridge <- mean((pred.ridge - testSet$crim)^2)
# Lasso #
# Fit lasso
# Ten fold cross validation is used to identify the best lambda
fit.lasso <- cv.glmnet(x = model.matrix(object = crim ~ ., data = trainSet)[, -1],
y = trainSet$crim, lambda = grid, alpha = 1)
# Plot the model
par(mfrow = c(1,2))
plot(fit.lasso)
plot(fit.lasso$glmnet.fit, xvar = "lambda")
# Evaluate test set
pred.lasso <- predict(object = fit.lasso, s = fit.lasso$lambda.min,
newx = model.matrix(object = crim ~ ., data = testSet)[, -1])
mse.lasso <- mean((pred.lasso - testSet$crim)^2)
# PCR
library(pls)
# Principle Components Regression
# Fitting model on the training set using CV
fit.pcr <- pcr(crim ~ ., data = trainSet, scale = TRUE, validation = "CV")
summary(fit.pcr)
# Generating a validation plot
par(mfrow=c(1,1))
validationplot(object = fit.pcr, val.type = "MSEP", type = "b")
preds.pcr <- predict(object = fit.pcr, newdata = testSet, ncomp = 8)
# Notes:
# Selecting ncomp = 8 as the line is almost flattens out after that point.
# According to Chapter 10 you could select as few as three components as
# at that point you can see the formation of the "elbow" shape.
# Mean Square Error
mse.pcr <- mean((preds.pcr - testSet$crim)^2)
mse.pcr
# Compare methods using a barplot
barplot(height = c(mse.test, mse.ridge, mse.lasso, mse.pcr),
names.arg = c("BestSubsetSel", "RidgeRegression", "Lasso", "PCR"),
ylab = "Mean Square Error")
# From the barplot lasso in the best model
# Retrain lasso on the full dataset
fit.lasso.full <- glmnet(x = model.matrix(object = crim ~ ., data = Boston)[, -1],
y = Boston$crim, lambda = grid, alpha = 1)
coef.lasso <- predict(object = fit.lasso.full, type = "coef", s = fit.lasso$lambda.min)
plot(fit.lasso.full, xvar = "lambda")
|
b4d6276cb4397c97cbb2c6b33dc428404d52a205 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/equivalence/examples/pref.LAI.Rd.R | d32f38be88d92e0d4b332ee0b0f46f6d27e5c739 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 195 | r | pref.LAI.Rd.R | library(equivalence)
### Name: pref.LAI
### Title: Measured Leaf Area Index data from PREF, northern Idaho, USA
### Aliases: pref.LAI
### Keywords: datasets
### ** Examples
data(pref.LAI)
|
9becbf070598dd8e10e7734d5d6025dab4e49184 | f606e8da72c8f647f072c67baf699e754fc889c4 | /code_nomogram.R | d0454e91eb39aea23885ae9b42ec7397031d18ef | [
"Apache-2.0"
] | permissive | RainyRen/radiomics_ovary | a5ea46f1796e8a727e9652ed33dce9d8897ccd47 | 333a9f46efab85383033cfa6d6a3a9b8244fb468 | refs/heads/main | 2023-08-19T04:39:20.682018 | 2021-09-29T13:42:31 | 2021-09-29T13:42:31 | 411,511,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,908 | r | code_nomogram.R | library(tidyverse)
library(caret)
library(pROC)
library(glmnet)
library(DMwR)
library(rmda)
library(ggpubr)
library(ModelGood)
library(rms)
library(mRMRe)
library(DescTools)
library(Publish)
library(pheatmap)
# # ========== Environment settings ==========
# # 移除现有的环境变量
rm(list=ls())
setwd("C:/Workstation/RProject/duan_code")
getwd()
# # =========================================================
source('assist_file.R')
set.seed(1234)
clinics_column <- 16
fpath <- choose.files()
dt_all <- read.csv(fpath)
dt_all$Label <- factor(dt_all$Label, ordered = T)
dt <- dt_all[, -c(2:clinics_column)]
if(!is_empty(nearZeroVar(dt)))
{
dt <- dt[, -nearZeroVar(dt)]
}
# clinical data
dt_cli <- dt_all[, c(1:clinics_column)]
acc_val_train <- numeric(0)
acc_val_test <- numeric(0)
all_idx <- list()
train_list <- list()
test_list <- list()
cvfit_list <- list()
fit_list <- list()
out_list <- list()
for(numIt in c(1:10))
{
s_pre <- preProcess(dt, method = 'medianImpute')
dt <- predict(s_pre, dt)
idx_train <- createDataPartition(dt$Label, p = 0.7, list = F)
dt_train <- dt[idx_train, ]
dt_test <- dt[-idx_train, ]
# browser()
all_idx[[numIt]] <- idx_train
step_pre <- preProcess(dt_train, method = c('center', 'scale'))
dt_train_pre <- predict(step_pre, dt_train) %>% as.data.frame
dt_train_pre0 <- dt_train_pre
# dt_train_pre <- SMOTE(Label~., data = dt_train_pre)
dt_test_pre <- predict(step_pre, dt_test)
dt_mrmr <- mRMR.data(dt_train_pre)
f_sel <- mRMR.classic(data = dt_mrmr, target_indices = c(1), feature_count = 20)
# browser()
dt_train_pre <- select(dt_train_pre, c('Label', featureNames(f_sel)[unlist(solutions(f_sel))]))
dt_train_pre0 <- select(dt_train_pre0, c('Label', featureNames(f_sel)[unlist(solutions(f_sel))]))
dt_test_pre <- select(dt_test_pre, c('Label', featureNames(f_sel)[unlist(solutions(f_sel))]))
x <- as.matrix(dt_train_pre[, -1])
y <- dt_train_pre$Label
cv.fit <- cv.glmnet(x, y, family = 'binomial')
fit <- glmnet(x, y, family = 'binomial')
train_list[[numIt]] <- dt_train_pre0
test_list[[numIt]] <- dt_test_pre
cvfit_list[[numIt]] <- cv.fit
fit_list[[numIt]] <- fit
# browser()
pre_res_test <- as.vector(predict(fit, newx = as.matrix(dt_test_pre[, -1]), s = cv.fit$lambda.min))
roc_res_test <- pROC::roc(dt_test_pre$Label, pre_res_test)
pre_res_train <- as.vector(predict(fit, newx = x, s = cv.fit$lambda.min))
roc_res_train <- pROC::roc(dt_train_pre$Label, pre_res_train)
dir_sign_test <- roc_res_test$direction
dir_sign_train <- roc_res_train$direction
if(dir_sign_test == dir_sign_train)
{
acc_val_test <- c(acc_val_test, pROC::auc(roc_res_test))
acc_val_train <- c(acc_val_train, pROC::auc(roc_res_train))
}
else
{
acc_val_test <- c(acc_val_test, 0)
acc_val_train <- c(acc_val_train, 0)
}
}
idx_vec <- c(1:length(acc_val_test))
idx_vec <- idx_vec[acc_val_train > acc_val_test]
acc_val <- acc_val_test[acc_val_train > acc_val_test]
init_idx <- which.max(acc_val)
sel_idx <- idx_vec[init_idx]
idx_train <- all_idx[[sel_idx]]
grp_info <- tibble(Label = dt$Label, Group = 'Test')
grp_info$Radscore <- 0
grp_info$Group[idx_train] <- 'Training'
dt_train_final <- train_list[[sel_idx]]
dt_test_final <- test_list[[sel_idx]]
cvfit <- cvfit_list[[sel_idx]]
fit <- fit_list[[sel_idx]]
s = cvfit$lambda.min
pre_res_test <- as.vector(predict(fit, newx = as.matrix(dt_test_final[, -1]), s = s))
pre_res_test_prob <- as.vector(predict(fit, newx = as.matrix(dt_test_final[, -1]), s = s,
type = 'link'))
roc_res_test <- pROC::roc(dt_test_final$Label, pre_res_test, ci = T)
out_res_test <- ifelse(pre_res_test > coords(roc_res_test, x = 'best')[1], 1, 0)
conf_mat_test <- confusionMatrix(as.factor(out_res_test),as.factor(dt_test_final$Label))
rec_test <- c(conf_mat_test$overall[c(1, 3, 4)], conf_mat_test$byClass[c(1:4)])
pre_res_train <- as.vector(predict(fit, newx = as.matrix(dt_train_final[, -1]), s = s))
pre_res_train_prob <- as.vector(predict(fit, newx = as.matrix(dt_train_final[, -1]), s = s,
type = 'link'))
roc_res_train <- pROC::roc(dt_train_final$Label, pre_res_train, ci = T)
out_res_train <- ifelse(pre_res_train > coords(roc_res_train, x = 'best')[1], 1, 0)
conf_mat_train <- confusionMatrix(as.factor(out_res_train), as.factor(dt_train_final$Label))
rec_train <- c(conf_mat_train$overall[c(1, 3, 4)], conf_mat_train$byClass[c(1:4)])
rec_rad <- data.frame(rbind(rec_train, rec_test), row.names = c('Train', 'Test'))
write.csv(rec_rad, file = 'res_radiomics.csv')
grp_info$Radscore[idx_train] <- pre_res_train
grp_info$Radscore[-idx_train] <- pre_res_test
write_csv(grp_info, 'group_info.csv')
cutoff_radiomics <- coords(roc_res_train, x = 'best')
## rad score
dt_final_test <- tibble(Label = dt_test_final$Label, rad_score = pre_res_test)
dt_final_arr <- arrange(dt_final_test, rad_score)
dt_final_arr$x <- 1:nrow(dt_final_arr)
dt_final_train <- tibble(Label = dt_train_final$Label, rad_score = pre_res_train)
p_train <- ggboxplot(x = 'Label', y = 'rad_score', data = dt_final_train,
add = 'jitter', color = 'Label', palette = 'jco') +
ylim(-3, 3) +
stat_compare_means(method = 'wilcox.test') +
geom_hline(yintercept = coords(roc_res_train, x = 'best')[1]) + theme_bw()
p_test <- ggboxplot(x = 'Label', y = 'rad_score', data = dt_final_test,
add = 'jitter', color = 'Label', palette = 'jco') +
ylim(-3, 3) +
stat_compare_means(method = 'wilcox.test') +
geom_hline(yintercept = coords(roc_res_train, x = 'best')[1]) + theme_bw()
# p_rad <- ggplot(aes(x = x, y = rad_score), data = dt_final_arr)
# p_rad <- p_rad + geom_col(aes(fill = Label)) + labs(x = '', y = 'Rad Score') +
# theme_bw() + theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
coefs <- coefficients(fit, s = s)
useful_feature <- unlist(coefs@Dimnames)[coefs@i + 1]
useful_feature <- useful_feature[-1]
dt_coef <- data.frame(Feature = useful_feature, Coef = coefs@x[-1])
dt_coef <- arrange(dt_coef, desc(Coef))
dt_coef$Feature <- factor(dt_coef$Feature,
levels = as.character(dt_coef$Feature))
p_coef <- ggplot(aes(x = Feature, y = Coef), data = dt_coef)
p_coef <- p_coef + geom_col(fill = 'blue', width = 0.7) + coord_flip() +
theme_bw() + ylab('Coefficients')
final_data_test <- add_column(dt_test_final, radscore = pre_res_test)
final_data_test <- select(final_data_test, c('Label', useful_feature, 'radscore'))
write_csv(final_data_test, path = 'dataset_test.csv')
final_data_train <- add_column(dt_train_final, radscore = pre_res_train)
final_data_train <- select(final_data_train, c('Label', useful_feature, 'radscore'))
write_csv(final_data_train, path = 'dataset_train.csv')
fit_train <- glm(Label~rad_score, data = dt_final_train, family = 'binomial')
dt_dca_train <- dt_final_train
dt_dca_train$Label <- as.numeric(dt_dca_train$Label) - 1
dca_curve <- decision_curve(Label~rad_score, data = dt_dca_train)
radscore <- paste('Radscore = ', paste(round(coefs@x[-1], 3),
useful_feature, sep = '*', collapse = '+'), '+', round(coefs@x[1], 3))
print(radscore)
write_file(radscore, path = 'radscore.txt')
# figure1
oldpar <- par(mfrow = c(2, 1))
plot(cvfit)
# figure2
plot(fit, s = s, xvar = 'lambda')
abline(v = log(cvfit$lambda.min), lty = 2)
par(oldpar)
# figure3
oldpar <- par(mfrow = c(1, 2))
plot(roc_res_train, print.auc = T,
print.auc.pattern = 'AUC: %.2f(%.2f-%.2f)', legacy.axes = T)
# figure4
plot(roc_res_test, print.auc = T, legacy.axes = T,
print.auc.pattern = 'AUC: %.2f(%.2f-%.2f)')
par(oldpar)
# figure5
ggarrange(p_train, p_test, ncol = 2)
#figure 6
p_coef + theme(axis.text.y = element_text(size = 12))
# clinics analysis
idx_train <- all_idx[[sel_idx]]
dt_cli_train <- dt_cli[idx_train, ]
dt_cli_test <- dt_cli[-idx_train, ]
p_list <- lapply(dt_cli_train[, -1], inter_test, y = dt_cli_train$Label)
#p_list_test <- lapply(dt_cli_test[, -1], inter_test, y = dt_cli_test$Label)
sel_name <- c(names(which(p_list < 0.1)))
dt_cli_train_1 <- select(dt_cli_train, c('Label', sel_name))
res_ulogit_list <- lapply(colnames(dt_cli_train_1)[-1], ulogit_test, dt = dt_cli_train_1)
res_ulogit <- bind_rows(res_ulogit_list)
res_ulogit <- res_ulogit[-seq(from = 1, to = nrow(res_ulogit), by = 2), ]
res_ulogit$Var <- colnames(dt_cli_train_1)[-1]
res_ulogit_sel <- filter(res_ulogit, p_val < 0.1)
cli_name <- res_ulogit_sel$Var
dt_cli_train_2 <- select(dt_cli_train_1, c('Label', cli_name))
dt_cli_test_2 <- select(dt_cli_test, c('Label', cli_name))
write.csv(res_ulogit, file = 'ulogit_cli.csv')
log_fit <- glm(Label~., data = dt_cli_train_2, family = binomial)
vif_val <- vif(log_fit)
vif_max <- max(vif_val)
vif_max_idx <- which.max(vif_val)
while(vif_max > 10)
{
dt_cli_train_i <- select(dt_cli_train_2, -c(names(vif_max_idx)))
log_fit <- glm(Label~., data = dt_cli_train_i, family = binomial)
vif_val <- vif(log_fit)
vif_max <- max(vif_val)
vif_max_idx <- which.max(vif_val)
}
log_fit_final <- step(log_fit)
output_mlogit(log_fit_final, 'mlogit_cli.csv')
dt_cli_train_final <- model.frame(log_fit_final, data = dt_cli_train)
dt_cli_test_final <- model.frame(log_fit_final, data = dt_cli_test)
dt_combined_train <- dt_cli_train_final
dt_combined_train$rad_score <- pre_res_train_prob
dt_combined_test <- dt_cli_test_final
dt_combined_test$rad_score <- pre_res_test_prob
com_form <- paste('Label', paste(colnames(dt_combined_train)[-1], collapse = '+'), sep = '~') %>% as.formula
mod_com_final <- glm(com_form, data = dt_combined_train, family = 'binomial')
output_mlogit(mod_com_final, 'mlogit_com.csv')
dt_combined_train_final <- model.frame(mod_com_final, data = dt_combined_train)
dt_combined_test_final <- model.frame(mod_com_final, data = dt_combined_test)
com_form <- paste('Label', paste(colnames(dt_combined_train_final)[-1], collapse = '+'), sep = '~') %>% as.formula
dt_nom <- filter(dt_combined_train_final, rad_score > -5 & rad_score < 10)
ddist_train_com <- datadist(dt_nom)
options(datadist = 'ddist_train_com')
mod_train <- lrm(com_form,
data = dt_nom, x = TRUE, y = TRUE)
nom_com <- nomogram(mod_train, lp = F, fun = plogis, fun.at = c(0.1, 0.4, 0.9),
funlabel = 'Risk')
plot(nom_com)
mod_test <- lrm(com_form,
data = dt_combined_test_final, x = TRUE, y = TRUE)
oldpar <- par(mfrow = c(1, 2))
cal_train <- calPlot2(mod_train, data = dt_combined_train_final,
legend = F, col = '#FF6EB4', lty = 2)
cal_test <- calPlot2(mod_train, data = dt_combined_test_final,
legend = F, col = '#FF6EB4', lty = 2)
par(oldpar)
cli_form <- paste('Label', paste(colnames(dt_combined_train_final)[-c(1, ncol(dt_combined_train_final))], collapse = '+'),
sep = '~') %>% as.formula
HosmerLemeshowTest(cal_train$Frame$lrm, cal_train$Frame$jack)
HosmerLemeshowTest(cal_test$Frame$lrm, cal_test$Frame$jack)
res_train_final <- predict(mod_com_final, newdata = dt_combined_train)
res_roc_com_train <- pROC::roc(dt_combined_train$Label, res_train_final,
ci = T)
cutoff <- coords(res_roc_com_train, x = 'best')[1]
res_train_final_bin <- as.factor(ifelse(res_train_final > cutoff, 1, 0))
res_conf_com_train <- confusionMatrix(dt_combined_train$Label,
res_train_final_bin, positive = '1')
res_test_final <- predict(mod_train, newdata = dt_combined_test)
res_roc_com_test <- pROC::roc(dt_combined_test$Label, res_test_final,
ci = T)
cutoff <- coords(res_roc_com_test, x = 'best')[1]
res_test_final_bin <- as.factor(ifelse(res_test_final > cutoff, 1, 0))
res_conf_com_test <- confusionMatrix(dt_combined_test$Label,
res_test_final_bin, positive = '1')
rec_train_com <- c(res_conf_com_train$overall[c(1, 3, 4)], res_conf_com_train$byClass[c(1:4)])
rec_test_com <- c(res_conf_com_test$overall[c(1, 3, 4)], res_conf_com_test$byClass[c(1:4)])
rec_all <- data.frame(rbind(rec_train_com, rec_test_com), row.names = c('Train', 'Test'))
write.csv(rec_all, file = 'res_combined.csv')
dt_dca <- dt_combined_train_final
dt_dca$Label <- ifelse(dt_dca$Label == '0', 0, 1)
dca1 <- decision_curve(com_form,
data = dt_dca)
dca2 <- decision_curve(cli_form,
data = dt_dca)
plot_decision_curve(list(dca1, dca2), confidence.intervals = F,
col = c('red', 'green', 'blue', 'black'),
curve.names = c('With Radscore', 'Without Radscore'),
legend.position = 'topright', cost.benefits = FALSE)
cli_mod <- glm(cli_form, data = dt_combined_train_final, family = 'binomial')
res_cli_train <- predict(cli_mod, newdata = dt_combined_train_final, type = 'link')
roc_cli_train <- pROC::roc(dt_combined_train_final$Label, res_cli_train,ci=T)
cutoff_cli <- coords(roc_cli_train, x = 'best')[1]
res_cli_test <- predict(cli_mod, newdata = dt_combined_test_final, type = 'link')
roc_cli_test <- pROC::roc(dt_combined_test_final$Label, res_cli_test,ci=T)
res_cli_train_bin <- as.factor(ifelse(res_cli_train > cutoff_cli, 1, 0))
res_cli_test_bin <- as.factor(ifelse(res_cli_test > cutoff_cli, 1, 0))
conf_mat_cli_test <- confusionMatrix(dt_combined_test_final$Label,
res_cli_test_bin, positive = '1')
conf_mat_cli_train <- confusionMatrix(dt_combined_train_final$Label,
res_cli_train_bin, positive = '1')
rec_train_cli <- c(conf_mat_cli_train$overall[c(1, 3, 4)], conf_mat_cli_train$byClass[c(1:4)])
rec_test_cli <- c(conf_mat_cli_test$overall[c(1, 3, 4)], conf_mat_cli_test$byClass[c(1:4)])
rec_cli <- data.frame(rbind(rec_train_cli, rec_test_cli), row.names = c('Train', 'Test'))
write.csv(rec_cli, file = 'res_clinics.csv')
oldpar <- par(mfrow = c(1, 2))
plot(res_roc_com_train, print.auc = T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
legacy.axes = T, col = 'red')
plot(roc_res_train, print.auc = T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
add = T, col = 'blue', print.auc.y = 0.45)
plot(roc_cli_train, print.auc =T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
add = T, col = 'green', print.auc.y = 0.4)
legend(x = 0.3, y = 0.2, legend = c('Combined', 'Radiomics', 'Clinics'),
col = c('red', 'blue', 'green'), lty = 1)
plot(res_roc_com_test, print.auc = T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
legacy.axes = T, col = 'red', print.auc.y = 0.5)
plot(roc_res_test, print.auc = T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
add = T, col = 'blue', print.auc.y = 0.45)
plot(roc_cli_test, print.auc =T, print.auc.pattern = 'AUC: %.2f (%.2f - %.2f)',
add = T, col = 'green', print.auc.y = 0.4)
legend(x = 0.3, y = 0.2, legend = c('Combined', 'Radiomics', 'Clinics'),
col = c('red', 'blue', 'green'), lty = 1)
par(oldpar)
rec_final <- bind_rows(rec_cli, rec_rad, rec_all)
rec_final$Group <- rep(c('Trainig', 'Test'), times = 3)
rec_final$Model <- rep(c('Clinics', 'Radiomics', 'Nomogram'), times = c(2, 2, 2))
#roc.test(res_roc_com_train,res_roc_com_test)
#roc.test(roc_cli_train,roc_cli_test)
#roc.test(roc_res_train,roc_res_test)
rmarkdown::render('results_final.Rmd')
|
103d273f1c3d18b0b63558db09f8182a210890d4 | 22057bf4f2eb001f739761e53a5578de578e6920 | /sensitivity/boundary/2d.6year.temp.filtered.R | c3ce589daa759a21637de21b2de2c2c049dff2f0 | [] | no_license | mrubayet/archived_codes_for_sfa_modeling | 3e26d9732f75d9ea5e87d4d4a01974230e0d61da | f300fe8984d1f1366f32af865e7d8a5b62accb0d | refs/heads/master | 2020-07-01T08:16:17.425365 | 2019-08-02T21:47:18 | 2019-08-02T21:47:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,024 | r | 2d.6year.temp.filtered.R | rm(list=ls())
library(xts)
coord.data = read.table("data/model.coord.dat")
rownames(coord.data) = coord.data[,3]
start.time = as.POSIXct("2010-01-01 00:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
end.time = as.POSIXct("2015-12-31 23:00:00",tz="GMT",format="%Y-%m-%d %H:%M:%S")
time.index = seq(from=start.time,to=end.time,by="1 hour")
ntime = length(time.index)
simu.time = c(1:ntime-1)*3600
load("results/mass.data.r")
mass.trim = list()
for (islice in names(mass.data))
{
mass.trim[[islice]][["stage"]] = mass.data[[islice]][["stage"]][which(mass.data[[islice]][["date"]]>=start.time & mass.data[[islice]][["date"]]<=end.time)]+1.039
mass.trim[[islice]][["date"]] = mass.data[[islice]][["date"]][which(mass.data[[islice]][["date"]]>=start.time & mass.data[[islice]][["date"]]<=end.time)]
mass.trim[[islice]][["temperature"]] = mass.data[[islice]][["temperature"]][which(mass.data[[islice]][["date"]]>=start.time & mass.data[[islice]][["date"]]<=end.time)]
}
temp.mass = mass.trim[["321"]][["temperature"]]
#### ##sws-1
folders=list.files("data/Filtered_Hourly_Data/")
data=NA
for (ifolder in folders)
{
files = dir(paste("data/Filtered_Hourly_Data/",ifolder,sep=""))
files = files[grep("SWS-1",files)]
print(ifolder)
print(files)
if(length(files)>0)
{
temp = read.csv(paste("data/Filtered_Hourly_Data/",ifolder,"/",files,sep=""),header=FALSE,skip=1)
if (length(data)<10) {
data = as.matrix(temp)
}else{
data = rbind(data,as.matrix(temp))
}
}
}
temp.value = as.numeric(data[,2])
temp.time = data[,1]
temp.time = as.POSIXct(temp.time,format= "%Y-%m-%d %H:%M:%S",tz="GMT")
temp.value = temp.value[which(temp.time>=start.time & temp.time<=end.time)]
temp.time = temp.time[which(temp.time>=start.time & temp.time<=end.time)]
temp.xts = xts(temp.value,order.by = temp.time,unique=T,tz="GMT")
temp.xts = temp.xts[.indexmin(temp.xts) %in% c(56:59,0:5)]
temp.xts = temp.xts[c('2010','2011','2012','2013','2014','2015')]
index(temp.xts) = round(index(temp.xts),units="hours")
temp.xts = temp.xts[!duplicated(.index(temp.xts))]
temp.xts = merge(temp.xts,time.index)
temp.sws1 = temp.xts
temp.river = as.numeric(temp.sws1)
river.gap = which(is.na(temp.river))
temp.river[river.gap] = temp.mass[river.gap]
jpeg(paste("figures/2d.river.temp.gap.jpg",sep=''),width=16,height=5,units='in',res=200,quality=100)
plot(time.index,temp.river,pch=16,cex=0.2,col="blue",
xlab="Time (year)",
ylab="Temperature (DegC)"
)
points(time.index[river.gap],temp.river[river.gap],col="red",cex=0.2,pch=16)
legend("topright",c("SWS-1","MASS1"),col=c("blue","red"),pch=16)
dev.off()
## ##2-3
folders=list.files("data/Filtered_Hourly_Data/")
data=NA
for (ifolder in folders)
{
files = dir(paste("data/Filtered_Hourly_Data/",ifolder,sep=""))
files = files[grep("2-3_3var",files)]
print(ifolder)
print(files)
if(length(files)>0)
{
temp = read.csv(paste("data/Filtered_Hourly_Data/",ifolder,"/",files,sep=""),header=FALSE,skip=1)
if (length(data)<10) {
data = as.matrix(temp)
}else{
data = rbind(data,as.matrix(temp))
}
}
}
temp.value = as.numeric(data[,2])
temp.time = data[,1]
temp.time = as.POSIXct(temp.time,format= "%Y-%m-%d %H:%M:%S",tz="GMT")
temp.value = temp.value[which(temp.time>=start.time & temp.time<=end.time)]
temp.time = temp.time[which(temp.time>=start.time & temp.time<=end.time)]
temp.xts = xts(temp.value,order.by = temp.time,unique=T,tz="GMT")
temp.xts = temp.xts[.indexmin(temp.xts) %in% c(56:59,0:5)]
temp.xts = temp.xts[c('2010','2011','2012','2013','2014','2015')]
index(temp.xts) = round(index(temp.xts),units="hours")
temp.xts = temp.xts[!duplicated(.index(temp.xts))]
temp.xts = merge(temp.xts,time.index)
temp.2_3= temp.xts
temp.inland = as.numeric(temp.2_3)
inland.gap = which(is.na(temp.inland))
temp.inland = na.approx(temp.inland,simu.time,rule=2)
jpeg(paste("figures/2d.inland.temp.gap.jpg",sep=''),width=16,height=5,units='in',res=200,quality=100)
plot(time.index,temp.inland,pch=16,cex=0.2,col="black",
xlab="Time (year)",
ylab="Temperature (DegC)"
)
points(time.index[inland.gap],temp.inland[inland.gap],col="red",cex=0.2,pch=16)
legend("bottomright",c("SWS-1","Interpolation"),col=c("black","red"),pch=16)
dev.off()
jpeg(paste("figures/2d_temp.inland_river.jpg",sep=''),width=16,height=5,units='in',res=200,quality=100)
plot(time.index,temp.river,type="l",col="blue",
xlab="Time (year)",
ylab="Temperature (DegC)"
)
lines(time.index,temp.inland,col="black")
legend("topright",c("River","Inland"),col=c("blue","black"),lty=1)
dev.off()
temp.river = cbind(simu.time,temp.river)
temp.inland = cbind(simu.time,temp.inland)
write.table(temp.river,file=paste('Temp_River_2010_2015.txt',sep=""),col.names=FALSE,row.names=FALSE)
write.table(temp.inland,file=paste('Temp_Inland_2010_2015.txt',sep=""),col.names=FALSE,row.names=FALSE)
|
e8861d3372ce1868bc4503dbaa682a156602b5bf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/staTools/examples/getXmin.Rd.R | 6688bb3c14585f21413e13b30db94781e9a741e5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 243 | r | getXmin.Rd.R | library(staTools)
### Name: getXmin
### Title: Lower bound estimator for discrete powerlaw distributions
### Aliases: getXmin
### Keywords: bound discrete estimator lower powerlaw
### ** Examples
x = moby
o = displo(x)
est = getXmin(o)
|
8fda4fa59411ddb3d0ff5edfd0069f844e49a337 | a0a04321eeb18ae65b24b1bcc8065e6ea5db2542 | /code/skeleton.R | 580ec43bf14726f49248ccecfd27d851c294545f | [] | no_license | nstauffer/sample_design_practice | aa905491f58a89419fe6e2cb7c23862c1c4978b4 | d59181b6fd2686c5fa43dabed917f7243aff988c | refs/heads/master | 2020-04-10T00:54:19.847459 | 2018-12-06T17:00:30 | 2018-12-06T17:00:30 | 160,699,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,423 | r | skeleton.R | # Setup --------
# Install the sample design package if needed!
if (!("sample.design" %in% installed.packages())) {
devtools::install_github("nstauffer/sample.design")
}
# Set the data source paths
path_spatial <- ""
path_tabular <- ""
path_outputs <- ""
# Load in the data --------
# Spatial stuff
# Sample frame
frame_spdf <- rgdal::readOGR(dsn = path_spatial,
layer = "",
stringsAsFactors = FALSE)
# Strata
strata_spdf <- rgdal::readOGR(dsn = path_spatial,
layer = "",
stringsAsFactors = FALSE)
# The master sample points
# This takes a little while because this is a lot of points
load("C:/full/path/to/TerrestrialMasterSample2015.RData",
verbose = TRUE)
# Tabular stuff
# Strata lookup table
lut_path <- paste0(path_tabular, "/", "")
strata_lut <- read.csv(lut_path,
stringsAsFactors = FALSE)
# Point allocation table
allocation_path <- paste0(path_tabular, "/", "")
read.csv(allocation_path,
stringsAsFactors = FALSE)
# Prep data --------
# Restrict the master sample
# It's fastest to start doing this with filtering rather than spatially
points_population <- mastersample[mastersample$STATE %in% c("CO"), ]
# Then with that smaller subset do the spatial bit
points_population <- sample.design::attribute.shapefile(spdf1 = points_population,
spdf2 = strata_spdf,
attributefield = "BPS_GROUPS")
# Draw --------
design <- sample.design::draw(design_name = "Whatever",
strata_spdf = strata_spdf,
stratum_field = "STRATUM",
sampleframe_spdf = frame_spdf,
points.spdf = mastersample,
strata_lut = NULL,
strata_lut_field = NULL,
design_object = NULL,
panel_names = c("2019", "2020", "2021", "2022", "2023"),
panel_sample_size = 50,
points_min = 3,
oversample_proportion = 0.25,
oversample_min = 3,
seed_number = 112358)
# Wrangle outputs -------- |
ab451aa5df7c2bdd402ead7e487a08d7c64ebc40 | 01114541c33a31ff4b1134788ff0815fef397329 | /rda_cca_omics.r | 9eb5c9d3e880675046919e7c714eb44a0543d485 | [] | no_license | RJ333/R_scripts | 06b31ad1459bafc68e0c212aa55eb83e5f354be9 | a882732aeb86b10a44f5fedf86401bf20d4618f6 | refs/heads/master | 2021-04-26T22:55:19.096526 | 2019-07-22T08:30:33 | 2019-07-22T08:30:33 | 123,895,394 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,436 | r | rda_cca_omics.r | ####cca + rda omics
#counttable: otus in columns, samples in row.names -> e.g.
tomics_for_vegan2_nonzero_tpm
#metadata: metadata as dataframe in columns, samples in row.names, same order
#angabe der zu berücksichtigenden paramater mit ~...+...+...
#informationen in summary: x und y werte für samples und species, welche achse erklärt welchen faktor, wo liegen die schwerpunkte
head(tomics_for_vegan2_nonzero_tpm)
names(tomics_for_vegan2_nonzero_tpm)
#metafile erstellen
meta_omics_tpm<-read.csv(file.choose(),sep=";",row.names=1)
head(meta_omics_tpm)
str(meta_omics_tpm)
#meta_omics_tpm$day<-as.factor(meta_omics_tpm$day)
#cca
cca_omics_nonzero_tpm<-cca(tomics_for_vegan2_nonzero_tpm ~ duration,data=meta_omics_tpm)
plot(cca_omics_nonzero_tpm,type="t",display="sites",main="cca_omics_nonzero_tpm",xlim=c(-4,4))
plot(cca_omics_nonzero_tpm,type="p",display="species",main="cca_omics_nonzero_tpm",xlim=c(-4,4))
text(cca_omics_nonzero_tpm,labels=row.names(tomics_for_vegan2_nonzero_tpm),cex=0.8)
summary(cca_omics_nonzero_tpm)
#rda
rda_omics_nonzero_tpm<-rda(tomics_for_vegan2_nonzero_tpm ~ duration,data=meta_omics_tpm)
plot(rda_omics_nonzero_tpm,type="t",display="sites",main="rda_omics_nonzero_tpm",xlim=c(-4,4))
plot(rda_omics_nonzero_tpm,type="p",display="species",main="rda_omics_nonzero_tpm",xlim=c(-4,4))
text(rda_omics_nonzero_tpm,labels=row.names(tomics_for_vegan2_nonzero_tpm),cex=0.8)
summary(rda_omics_nonzero_tpm) |
1627d4ce1d907e1b2b9dc3972175192c9dfe3897 | f768ce5ff176370cf2a9346613252be1c9d3c0d3 | /code/server/summary_page_server.R | 82a355ecec53239d9e1acdda14215ee61efa7ec1 | [] | no_license | ShyGuyPy/DIME_CEDR_pull | 2a6dbcb239995ec262e42a214845128c61905c28 | dec6e5884be909ecb0246708efb7fc39f15a1d08 | refs/heads/master | 2021-07-07T13:55:06.521547 | 2020-12-09T20:06:07 | 2020-12-09T20:06:07 | 218,599,495 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,178 | r | summary_page_server.R | source("server.R", local = TRUE)
if(file.exists(paste0(active_path, "active_data.csv"))){#"data/ACTIVE/active_data.csv")){
#assign active_data
active_data.df <- data.table::fread(paste0( active_path, "/","active_data.csv"),
header = TRUE,
data.table = FALSE)
}
# #error report
# output$mean <- eventReactive(input$error_report, {
# #check_data()
#
# report_mean()
#
#
#
# })
#
# output$median <- eventReactive(input$error_report, {
# #check_data()
#
# report_median()
#
#
#
# })
#
# output$mode <- eventReactive(input$error_report, {
# #check_data()
#
# report_mode()
#
#
#
# })
# table for mean median mode - not currently in use
output$m3_table <- DT::renderDataTable({summary_m3_table()})
observeEvent(input$selected_tab, {#input$select_data, {
if(file.exists(paste0(active_path,"/", "active_data.csv"))){
active_data.df <- data.table::fread(paste0(active_path, "active_data.csv"),
header = TRUE,
data.table = FALSE)
}#end of if file exist
})
|
a822735ae04ad394eef817b29545f8c35dc9d6c1 | 360541dd5b7f0f403f603f11f8187755d638df3e | /renv/library/R-4.1/x86_64-w64-mingw32/sjlabelled/doc/quasiquotation.R | 6caa518cb166c110c2527c639b6aca6724b72f61 | [
"CC0-1.0"
] | permissive | AGSCL/COVID19 | f6b6580c71450b818a3ed5354a0af437fce7ee5a | c20cd5a0593871776b051c2a347bb6da12a434ce | refs/heads/main | 2023-08-20T07:17:47.954356 | 2021-10-11T23:38:58 | 2021-10-11T23:38:58 | 403,793,786 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,763 | r | quasiquotation.R | ## ----echo = FALSE-------------------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
if (!requireNamespace("sjmisc", quietly = TRUE) ||
!requireNamespace("rlang", quietly = TRUE)) {
knitr::opts_chunk$set(eval = FALSE)
}
## ----message=FALSE, warning=FALSE---------------------------------------------
library(sjlabelled)
library(sjmisc) # for frq()-function
library(rlang)
# unlabelled data
dummies <- data.frame(
dummy1 = sample(1:3, 40, replace = TRUE),
dummy2 = sample(1:3, 40, replace = TRUE),
dummy3 = sample(1:3, 40, replace = TRUE)
)
# set labels for all variables in the data frame
test <- set_labels(dummies, labels = c("low", "mid", "hi"))
attr(test$dummy1, "labels")
frq(test, dummy1)
# and set same value labels for two of three variables
test <- set_labels(
dummies, dummy1, dummy2,
labels = c("low", "mid", "hi")
)
frq(test)
## ----message=FALSE, warning=FALSE---------------------------------------------
test <- val_labels(dummies, dummy1 = c("low", "mid", "hi"))
attr(test$dummy1, "labels")
# remaining variables are not labelled
frq(test)
## ----message=FALSE, warning=FALSE---------------------------------------------
labels <- c("low_quote", "mid_quote", "hi_quote")
test <- val_labels(dummies, dummy1 = !! labels)
attr(test$dummy1, "labels")
## ----message=FALSE, warning=FALSE---------------------------------------------
variable <- "dummy2"
test <- val_labels(dummies, !! variable := c("lo_var", "mid_var", "high_var"))
# no value labels
attr(test$dummy1, "labels")
# value labels
attr(test$dummy2, "labels")
## ----message=FALSE, warning=FALSE---------------------------------------------
variable <- "dummy3"
labels <- c("low", "mid", "hi")
test <- val_labels(dummies, !! variable := !! labels)
attr(test$dummy3, "labels")
## ----message=FALSE, warning=FALSE---------------------------------------------
dummy <- data.frame(
a = sample(1:4, 10, replace = TRUE),
b = sample(1:4, 10, replace = TRUE),
c = sample(1:4, 10, replace = TRUE)
)
# simple usage
test <- var_labels(dummy, a = "first variable", c = "third variable")
attr(test$a, "label")
attr(test$b, "label")
attr(test$c, "label")
# quasiquotation for labels
v1 <- "First variable"
v2 <- "Second variable"
test <- var_labels(dummy, a = !! v1, b = !! v2)
attr(test$a, "label")
attr(test$b, "label")
attr(test$c, "label")
# quasiquotation for variable names
x1 <- "a"
x2 <- "c"
test <- var_labels(dummy, !! x1 := "First", !! x2 := "Second")
attr(test$a, "label")
attr(test$b, "label")
attr(test$c, "label")
# quasiquotation for both variable names and labels
test <- var_labels(dummy, !! x1 := !! v1, !! x2 := !! v2)
attr(test$a, "label")
attr(test$b, "label")
attr(test$c, "label")
|
ea410aac8b21461910486acd10968bf2fbe32074 | a2c19b12165936b6f38edcdc657cf64e985becf3 | /man/vetools-package.Rd | 1d3541678374ccbfca110d019c73bb9a3523efd0 | [] | no_license | cran/vetools | fcfb882851ac667562bcee832cf860619ac09b73 | 945e115cc46ee4ee07684d2535d4ea60d1dccb5e | refs/heads/master | 2021-03-12T21:55:17.754628 | 2013-08-01T00:00:00 | 2013-08-01T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,580 | rd | vetools-package.Rd | \name{vetools-package}
\alias{vetools-package}
\alias{vetools}
%%%
\docType{package}
\title{
Some tools for Venezuelan environmental data
}
\description{
This toolset provides a bundle of functions to handle and unify the diverse
data formats of distinct government agencies and military bodies:
Ministerio del Ambiente, Marina Venezolana, etc. It also provides all
necessary tools to load these data sets.
In order to standarize the structure of the data provided and/or
processed, a \pkg{\link[=CatalogConvention]{vetools}}
\link[=CatalogConvention]{Catalog Convention} is presented.}
\details{
\tabular{ll}{
Package: \tab vetools\cr
Type: \tab Package\cr
Version: \tab 1.x series\cr
Initial Release Date: \tab 2013-08-01\cr
License: GPL\cr
}
Input functions:\cr
\code{\link{read.HIDROX}}\cr
\code{\link{read.MINAMB}}\cr
\code{\link{read.MARN}}\cr
Preprocessing functions:\cr
\code{\link{disaggregate.ts}}\cr
\code{\link{disaggregate.MARN}}\cr
\code{\link{complete.series}}\cr
\code{\link{fill.small.missing}}
EST family functions:\cr
\code{\link{est.cut}}\cr
\code{\link{est.fill}}\cr
\code{\link{est.rm}}\cr
\code{\link{est.union}}\cr
\code{\link{est.sort}}
Descriptive functions:\cr
\code{\link{panorama}}\cr
\code{\link{panomapa}}\cr
\code{\link{summary.Catalog}}\cr
\code{\link[=summary.Catalog]{print.Catalog}}\cr
\code{\link[=summary.Catalog]{plot.Catalog}}
SHAPE family functions:\cr
\code{\link{get.shape.state}}\cr
\code{\link{get.shape.venezuela}}\cr
\code{\link{get.shape.range}}\cr
\code{\link{get.Grid.size}}
Class \code{Catalog}\cr
\code{\link{Catalog}}\cr
\code{\link{is.Catalog}}\cr
\code{\link{as.Catalog}}
For a complete list of functions, use \code{library(help = "vetools")}.
Convention Sheet:\cr
\link[=CatalogConvention]{Catalog Convention White Sheet}\cr
Datasets:\cr
\var{\link{CuencaCaroni}}\cr
\var{\link{Vargas}}\cr
\var{\link[=Vargas]{Vargas2}}\cr
}
\author{
A. M. Sajo-Castelli. Centro de Estadística y Matemática Aplicada (CEsMA).
Universidad Simón Bolívar. Venezuela.
Contributors: D. E. Villalta, L. Bravo. CEsMA, Universidad Simón Bolívar. Venezuela.
R. Ramírez. Parque Tecnológico Sartenejas, Universidad Simón Bolívar. Venezuela.
Maintainer: A. M. Sajo-Castelli <asajo@usb.ve>
}
\references{L. Bravo, S. Abad, I. Llatas, A. Salcedo, L. Delgado, S. Ramos, K. Cordova.
Hidrox: Repositorio de Datos Hidroclimáticos para la Gestión de Riesgos Epidemiológicos y Ambientales.
2012. ISBN:987-9-8012596-2-6.
}
\keyword{venezuela}
\keyword{environmental} |
0686b406011ef3d3f783305fbf799fbfb3c78f37 | 7a3426e7f4ed941710996b50d66ac7187499f6b9 | /man/csig.Rd | 710b9106f58516f84a35ecdd5521f33c509aed87 | [] | no_license | cran/circumplex | c6bcea4a9de90a160cfaec9a3900226e80ac3953 | 486bb5e35e50c9e4132157aeaf5d8705c77536f5 | refs/heads/master | 2023-08-30T23:42:21.682083 | 2023-08-22T07:20:05 | 2023-08-22T09:30:47 | 145,907,033 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 738 | rd | csig.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/instrument_data.R
\docType{data}
\name{csig}
\alias{csig}
\title{Circumplex Scales of Intergroup Goals}
\format{
An object of class \code{circumplex_instrument} of length 5.
}
\source{
Locke, K. D. (2014). Circumplex scales of intergroup goals: An
interpersonal circle model of goals for interactions between groups.
\emph{Personality and Social Psychology Bulletin, 40}(4), 433-449.
\url{https://www.webpages.uidaho.edu/klocke/csig.htm}
}
\usage{
csig
}
\description{
Information about the Circumplex Scales of Intergroup Goals (CSIG).
}
\examples{
instrument("csig")
summary(csig)
scales(csig, items = TRUE)
}
\keyword{internal}
|
9fa7013c96b243e21326bb3e48bd13b8aad9b8f7 | 2bfbb98dedfdc80cd34c56048ec8f2f1f1714f06 | /tests/testthat/test-fit_t_response.R | ef113f45591307f24a732bb20c3653b8eb7d908d | [
"MIT"
] | permissive | cran/photosynthesis | 04d817de1a0fd7a8792d2fbdf2b8260cbf17af03 | b00ebe43f876b00f0ddb1046a82b143d10540197 | refs/heads/master | 2023-08-21T14:55:58.071520 | 2023-08-15T07:20:05 | 2023-08-15T08:31:23 | 185,838,513 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 598 | r | test-fit_t_response.R | library(testthat)
library(photosynthesis)
context("Fitting temperature response curves")
df <- data.frame(
Par = c(38, 42, 55, 58, 63, 62, 83, 104, 116, 98),
Tleaf = c(17, 20, 22, 25, 27, 30, 32, 35, 37, 40)
)
df$T_leaf <- df$Tleaf + 273.15
model <- suppressWarnings(fit_t_response(df))
test_that("Outputs", {
expect_is(object = model, class = "list")
expect_is(object = model[1], class = "list")
expect_is(object = model[[2]][[2]], class = "data.frame")
expect_is(object = model[[3]][3], class = "list")
expect_length(object = model, 7)
expect_length(object = model[[4]], 3)
})
|
898f138dc9f7f395fa9989752b338bb2125da057 | 976b84b26daf6cfb9ee270d1e3b3c01de6222f2b | /examples/codes/exo_chap_4_5.R | 41190a5ec0536f731574b77424a1b1f54179b118 | [] | no_license | Sheepolatas/M1-Programming | c527bdaece5919e70cb7196690290ba30ec52dd6 | 233f8212baaf6d3a61f7817fcbc3ae397b8b06b7 | refs/heads/master | 2023-03-04T05:05:28.193658 | 2021-02-18T15:35:24 | 2021-02-18T15:35:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,882 | r | exo_chap_4_5.R | library(tidyverse)
library(tictoc)
# Import the two datasets
table_1 = read_csv(file = 'data/table_1.csv')
table_2 = read_csv(file = 'data/table_2.csv')
cities = read_csv(file = 'data/cities_loc.csv')
# Remove articles with no pmid and no DOI, and all articles before 1975
table_1 = table_1 %>%
filter(pmid != 'null' & doi != 'null',
!duplicated(pmid))
table_2 = table_2 %>%
filter(pmid != 'null' & year_pub > 1975,
!duplicated(pmid))
# Merge the two datasets, pmid is unique for each paper
final_table = left_join(table_1,table_2,by = 'pmid')
final_table = na.omit(final_table)
# Create a new variable with the number of authors for each paper
final_table = final_table %>% rowwise() %>% mutate(nb_aut = dim(str_match_all(authors,'<AUTHOR>')[[1]])[1])
# plot distribution for the log(number of authors +1)
plot(density(log(final_table$nb_aut+1)))
# How many papers contains 'deep learning' or 'machine learning' and 'neural network' (also with a 's' for neural networks) in their title ? Create a binary variable to save this information. What is the mean of authors for ML papers and non#ML papers ?
final_table = final_table %>%
mutate(title = tolower(title),
ML = ifelse(str_detect(title,'deep learning|machine learning|neural networks?'),1,0),
has_data = ifelse(has_data == 'Y',1,0),
oa = ifelse(oa == 'Y',1,0))
## NOTE(review): this section assumes `final_table` (built earlier in the file)
## and `cities` (loc_cities.csv) already exist, and that dplyr, tidyr, stringr,
## tictoc and plotly are available.
final_table = na.omit(final_table)
# Transform has_data and oa into binary variable also, what is the share of ML paper that are oa
# (share of open-access papers and mean citation count, split by the ML flag)
final_table %>% group_by(ML) %>%
  summarize(n = sum(oa)/n(),
            citation = mean(cited))
# Clean up pub_type, for simplicity just get the first type
# NOTE(review): without rowwise(), `str_match_all(...)[[1]][1]` is evaluated
# once for the whole column and takes the FIRST row's match, which is then
# recycled to every row -- presumably a per-row extraction was intended
# (compare the data.table version below, which runs the same expression
# by pmid). TODO confirm and fix, e.g. with str_match(pubtype, ...)[, 2].
final_table = final_table %>%
  mutate(pub_type = str_match_all(pubtype,'\\[\\"(.*?)"')[[1]][1],
         pub_type = str_replace_all(pub_type,'\\[|\\"',''))
# What is the pub type with the highest mean/sd of citation for each type of publication ? (use cited and the cleaned pub_type)
final_table %>% group_by(pub_type) %>%
  summarize(mean = mean(cited),
            sd = sd(cited)) %>% arrange(desc(mean))
# Which are the most representative country by year ? You may want to separate rows for each authors to get all countries involved in the paper, in an authors have multiple affiliations, take the first one
# Store it in an other tibble, keep only pmid and authors, get the country for each author from the loc_cities.csv.
countries = tolower(unique(cities$country))
# Split the packed author field: one row per author, keep only the first
# affiliation (the text after the first '<AFFILIATION>' marker).
countries_tibble = final_table %>%
  select(pmid,authors) %>%
  separate_rows(authors, sep = '<AUTHOR>') %>% rowwise() %>%
  filter(authors !="" & !str_detect(authors,'<AFFILIATION>None')) %>%
  mutate(authors = as.character(str_split(authors,'<AFFILIATION>')[[1]][2])) %>%
  filter(!is.na(authors))
# Drop missing / placeholder affiliations and duplicated authors within a paper.
countries_tibble = na.omit(countries_tibble) %>%
  filter(!is.na(authors) & authors != '<NA>') %>%
  group_by(pmid) %>%
  filter(!duplicated(authors))
tic()
# Normalise a few common country spellings before the lowercase match.
countries_tibble = countries_tibble %>%
  mutate(authors = str_replace_all(authors,' USA','United States'),
         authors = str_replace_all(authors,' UK','United Kingdom'),
         authors = str_replace_all(authors,' Korea','South Korea'),
         authors = tolower(authors))
# NOTE(review): the tibble is still grouped by pmid here, and str_detect() is
# given the whole multi-element `countries` vector as pattern -- with several
# authors per group the pattern is recycled element-wise, so author i is only
# tested against country i. This looks fragile/unintended; the per-row
# rowwise() version two lines below is the reliable form. TODO confirm.
countries_tibble = countries_tibble %>% filter(any(str_detect(authors,countries)))
countries_tibble = countries_tibble %>% rowwise() %>%
  mutate(country = countries[str_detect(authors,countries)][1]) %>% select(-authors)
toc()
#236878
countries_tibble = left_join(countries_tibble,final_table[,c('pmid','year_pub')],by = 'pmid')
countries_tibble = countries_tibble[!duplicated(countries_tibble),]
#101244
countries_pub = countries_tibble %>% group_by(country,year_pub) %>% summarize(n = length(unique(pmid))) %>% arrange(desc(n))
# Get the top 25 of countries involved in coronavirus research since 2001, plot the evolution on a bar chart with plot_ly
# NOTE(review): tidyr::spread() is superseded; pivot_wider() is the modern
# equivalent. Kept as-is here.
wide_countries = countries_pub %>% tidyr::spread(key = 'country',value = 'n',fill = 0) %>% filter(year_pub>2000)
countries_to_plot = names(sort(apply(wide_countries %>% select(-c('year_pub','<NA>')), FUN = sum, MARGIN = 2),decreasing = T)[1:25])
# Graph: one stacked-bar trace per top-25 country, log-scaled count axis.
library(plotly)
fig = plot_ly(wide_countries, type = 'bar')
for(i in countries_to_plot){
  fig <- fig %>% add_trace(x = ~year_pub,y = as.matrix(wide_countries[i]),name = as.character(i))}
fig <- fig %>% layout(yaxis = list(title = 'Count',type = "log"), barmode = 'stack')
########
## data.table re-implementation of the analysis above.
## NOTE(review): this section is not fully self-contained -- several lines
## below reuse objects from the tidyverse section (`countries`, `cities`,
## `countries_pub`); see the inline notes.
library(data.table)
library(stringr)
library(tictoc)
# Import the two datasets
setwd('C:/Users/Beta/Documents/GitHub/M1_TDP')
dt_table_1 = fread(file = 'data/table_1.csv')
dt_table_2 = fread(file = 'data/table_2.csv')
dt_cities = fread(file = 'data/cities_loc.csv')
setDT(dt_table_1)
setDT(dt_table_2)
# Remove articles with no pmid and no DOI, and all articles before 1975
# NOTE(review): `year_pub > 1975` also drops papers published IN 1975;
# if "before 1975" means keep 1975, this should be `>= 1975`. TODO confirm.
dt_table_1 = dt_table_1[pmid != 'null' & doi != 'null' & !duplicated(pmid)]
dt_table_2 = dt_table_2[pmid != 'null' & year_pub > 1975 & !duplicated(pmid)]
# Merge the two datasets, pmid is unique for each paper
dt_final_table = dt_table_1[dt_table_2, on = 'pmid']
setDT(dt_final_table)
# Create a new variable with the number of authors for each paper
# (counts '<AUTHOR>' markers; `by = 'pmid'` makes the [[1]] extraction per-row
# because pmid is unique)
dt_final_table[,nb_aut := dim(str_match_all(authors,'<AUTHOR>')[[1]])[1],by = 'pmid']
# plot distribution for the log(number of authors +1)
plot(density(log(dt_final_table$nb_aut+1)))
# How many papers contains 'deep learning' or 'machine learning' and 'neural network' (also with a 's' for neural networks) in their title ? Create a binary variable to save this information. What is the mean of authors for ML papers and non#ML papers ?
dt_final_table[,'title' := .(tolower(title)),by = 'pmid'
               ][,c('ML','has_data','oa') := .(ifelse(str_detect(title,'deep learning|machine learning|neural networks?'),1,0),
                                               ifelse(has_data == 'Y',1,0),
                                               ifelse(oa == 'Y',1,0)),by = 'pmid']
dt_final_table = na.omit(dt_final_table)
# Transform has_data and oa into binary variable also, what is the share of ML paper that are oa
dt_final_table[,.(sum(as.numeric(oa))/.N,mean(cited)), by = ML]
# Clean up pub_type, for simplicity just get the first type
dt_final_table[,pub_type := str_match_all(pubtype,'\\[""(.*?)""')[[1]][1],by = 'pmid']
dt_final_table[,pub_type := str_replace_all(pub_type,'\\[|""',''), by = 'pmid']
# What is the pub type with the highest mean/sd of citation for each type of publication ? (use cited and the cleaned pub_type)
# NOTE(review): `order(V1)` sorts the mean ASCENDING, while the question asks
# for the highest (the tidyverse twin uses arrange(desc(mean))); `order(-V1)`
# was probably intended. TODO confirm.
dt_final_table[, .(mean(cited),sd(cited)),by='pub_type'][order(V1)]
# Which are the most representative country by year ? You may want to separate rows for each authors to get all countries involved in the paper, in an authors have multiple affiliations, take the first one
# Store it in an other tibble, keep only pmid and authors
# NOTE(review): `dt_countries` is defined here but never used -- every match
# below uses `countries` (and `cities`) from the tidyverse section above, so
# this block does not run standalone. TODO rename or drop.
dt_countries = tolower(unique(cities$country))
dt_countries_tibble = dt_final_table[,.(authors_ = unlist(tstrsplit(authors, "<AUTHOR>"))),by = 'pmid'
                                     ][authors_ != ''& !str_detect(authors_,'<AFFILIATION>None')
                                       ][,.(authors = strsplit(authors_,'<AFFILIATION>')[[1]][2]), by = c('pmid','authors_')
                                         ][,!'authors_'][!is.na(authors)]
setkey(dt_countries_tibble,pmid,authors)
dt_countries_tibble = na.omit(dt_countries_tibble[!duplicated(dt_countries_tibble)][authors != '<NA>'])
# Normalise common country spellings, then lowercase (mirrors the dplyr version).
dt_countries_tibble = dt_countries_tibble[,.(authors=str_replace_all(authors,' USA','United States'),pmid=pmid)
                                          ][,.(authors=str_replace_all(authors,' UK','United Kingdom'),pmid=pmid)
                                            ][,.(authors=str_replace_all(authors,' Korea','South Korea'),pmid=pmid)
                                              ][,.(authors = tolower(authors),pmid=pmid)]
dt_countries_tibble[,is_matched := .(ifelse(any(str_detect(authors,countries)),1,0)),by=c('pmid','authors')]
setkey(dt_countries_tibble,pmid,authors)
dt_countries_tibble = dt_countries_tibble[is_matched == 1]
dt_countries_tibble[,country := .(countries[str_detect(authors,countries)][1]),by = c('pmid','authors')]
dt_countries_tibble = dt_countries_tibble[dt_final_table[,c('pmid','year_pub')],on = 'pmid'][!is.na(is_matched)][,c('pmid','country','year_pub')]
setkey(dt_countries_tibble,pmid,country,year_pub)
dt_countries_tibble = dt_countries_tibble[!duplicated(dt_countries_tibble)]
#101244
# Per (country, year) count of distinct papers; the unnamed aggregate is V1.
dt_countries_tibble = dt_countries_tibble[,.(length(unique(pmid))),by=c('country','year_pub')]
# Get the top 25 of countries involved in coronavirus research since 2001, plot the evolution on a bar chart with plot_ly
# NOTE(review): these two lines reuse `countries_pub` (and pipe through dplyr)
# from the tidyverse section instead of the data.table result built just
# above -- the dt aggregate is effectively discarded. TODO confirm intent.
wide_countries = countries_pub %>% tidyr::spread(key = 'country',value = 'n',fill = 0) %>% filter(year_pub>2000)
countries_to_plot = names(sort(apply(wide_countries %>% select(-c('year_pub','<NA>')), FUN = sum, MARGIN = 2),decreasing = T)[1:25])
|
4a40fa5f196ac7b6540b3a8a9cff736fe84bafac | 7d54e23523cbdf521efa448560474013a2981aa1 | /demo/MCMC.R | 58e6f9b39e84c310bbf9841188623c5accdb6fc3 | [] | no_license | rich-d-wilkinson/precision | 40d1c18571bca19231f8a0dc8607ee2b2a807903 | acd75ce83c2913424a776c9e98f11a1b2201886f | refs/heads/master | 2021-01-17T13:58:25.667829 | 2016-07-14T15:15:25 | 2016-07-14T15:15:25 | 39,572,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,286 | r | MCMC.R | #########################################################################
###
### MCMC.R
### Demonstration of how to run the MCMC calculations with the `precision`
### package: fits the binomial, multiplicative-binomial and double-binomial
### clutch models to a built-in secondary-sex-ratio dataset.
########################################################################
library(precision)
# Load some data (one of the four built-in secondary datasets; loading it
# also creates the `hyper` prior list used below)
data(GlegneriSecondary)
Meelis.test(GlegneriSecondary, TwoSided=TRUE)
###########################################################
#### Define hyperparameters/priors
#### - the four built in secondary datasets have priors for d and lambda built in, which load when you load the data
#### If you are using a different datasets you'll need to run the commands
# hyper<-list()
## prior for the mortality rate is Gamma(alpha, beta)
# hyper$a.m <- 6 # replace the value with something sensible for your data
# hyper$b.m <- 10
## prior for average clutch size, lambda, is Gamma(alpha, beta)
# hyper$alpha.lambda <- 4
# hyper$beta.lambda <- 1
## prior for p is beta(a, b)
## NOTE(review): the next lines modify `hyper`, which is assumed to exist
## already (loaded with the dataset); uncomment `hyper<-list()` above first
## if you are supplying your own data.
hyper$a.p <- 1 ## 20 ## Hyper parameters for p's prior distribution
hyper$b.p <- 1##100
# prior for psi - assumed to be Gaussian
hyper$mu.psi <- 0
hyper$sd.psi <- 1
plot.prior(hyper=hyper, show=TRUE, family="multbinom")
#plot.prior(hyper=hyper, show=FALSE, filename="Priors.pdf", family="multbinom") # save plot rather than printing it to screen
##########################################
### MCMC parameters
### (with burnin = 0 and thinby = 1 the thinning below is effectively a no-op)
nbatch <- 10^3 ## number of MCMC samples to generate - usually we'll require at least 10^5 iterations
thin = FALSE
burnin <- 0
thinby <- 1
#######################################################################################################
# BINOMIAL MODEL
#######################################################################################################
## Define a start point for the MCMC chain - important to name the parameter and the column of the NM matrix
b.theta0 <-c(10, 0.1, 0.5)
names(b.theta0) <-c("lambda", "p", "mort")
b.mcmc.out <- MCMCWithinGibbs( theta0=b.theta0, data=GlegneriSecondary, hyper=hyper, nbatch=nbatch, family="binomial")
## Thin the chain or not
if(thin){
  b.mcmc.out.t <- ThinChain(b.mcmc.out, thinby=thinby, burnin=burnin)
}
if(!thin){
  b.mcmc.out.t <- b.mcmc.out
}
rm(b.mcmc.out) ## delete as not needed and takes a lot of memory
plot.posterior(chain=b.mcmc.out.t$chain, hyper=hyper, show=TRUE, family="binomial", theta.true=NULL)
plot.trace(chain=b.mcmc.out.t$chain, show=TRUE, family="binomial")
#######################################################################################################
# MULTIPLICATIVE BINOMIAL MODEL
#######################################################################################################
## Define the Metropolis-Hastings random walk step size
## (tune these if the acceptance rate looks poor)
m.step.size<-c(0.3,0.2) ## 0.3 and 0.2 aree
names(m.step.size) <- c("p.logit", "psi")
m.theta0 <-c(10, 0.1, 0, 0.1)
names(m.theta0) <-c("lambda", "p", "psi", "mort")
m.mcmc.full <- MCMCWithinGibbs( theta0=m.theta0, data=GlegneriSecondary, hyper=hyper, nbatch=nbatch, family="multbinom", step.size=m.step.size)
if(thin){
  m.mcmc.full.t <- ThinChain(m.mcmc.full, thinby=thinby, burnin=burnin)
}
if(!thin){
  m.mcmc.full.t <- m.mcmc.full
}
rm(m.mcmc.full) ## to save space
plot.posterior(chain=m.mcmc.full.t$chain, hyper=hyper, show=TRUE, family="multbinom", theta.true=NULL)
plot.trace(chain=m.mcmc.full.t$chain, show=TRUE, family="multbinom")
#######################################################################################################
# DOUBLE BINOMIAL MODEL
#######################################################################################################
d.step.size<-c(0.3,0.2) ## 0.3 and 0.2 aree
names(d.step.size) <- c("p.logit", "psi")
d.theta0 <-c(10, 0.1, 0, 0.1)
names(d.theta0) <-c("lambda", "p", "psi", "mort")
d.mcmc.full <- MCMCWithinGibbs( theta0=d.theta0, data=GlegneriSecondary, hyper=hyper, nbatch=nbatch, family="doublebinom", step.size=d.step.size)
if(thin){
  d.mcmc.full.t <- ThinChain(d.mcmc.full, thinby=thinby, burnin=burnin)
}
if(!thin){
  d.mcmc.full.t <- d.mcmc.full
}
rm(d.mcmc.full) ## to save space
plot.posterior(chain=d.mcmc.full.t$chain, hyper=hyper, show=TRUE, family="doublebinom", theta.true=NULL)
plot.trace(chain=d.mcmc.full.t$chain, show=TRUE, family="doublebinom")
|
05ef26af2399a761d65b6404023f1da9deecd2e5 | ceeab39e925764962faf99ff34bde6b67fe7f0e9 | /trainers_folder_code/day9/code/Proportional_Hazards.R | 70f6a23b9b9aa49faa89464923871210f1a13966 | [] | no_license | Keniajin/PwaniR_Training_2015 | d0ede8b79ca2e8f19d09e7fc9c755ebe91cce679 | 94a7d3e6dc31370c35b4572b8760b813a3de3eae | refs/heads/master | 2021-01-22T09:47:12.676486 | 2021-01-12T08:26:09 | 2021-01-12T08:26:09 | 27,430,653 | 0 | 18 | null | null | null | null | UTF-8 | R | false | false | 2,019 | r | Proportional_Hazards.R | # CHECK OF PROPORTIONAL HAZARDS
# ----------------------------
# A simple method is to fit a stratified Cox model, where we stratify according
# to the levels of a categorical covariate, and plot the stratum-specific
# cumulative baseline hazard estimates on a log-scale.
# If we have proportional hazards, the curves should be fairly parallel
# (i.e. have a constant vertical distance).
# We first check for sex:
# NOTE(review): this first model uses `days/365`, `ulc` and `thick`, while the
# models below use `lifetime`, `ulcer` and `log2thick` -- confirm which column
# names your version of the melanoma data actually carries.
fit.stslogtu <- coxph(Surv(days/365, status == 1) ~ strata(sex) +
                        factor(ulc) + log(thick), data = melanoma)
summary(fit.stslogtu)
# FIX: the graphical arguments (fun, lty, log, mark.time, xlim) belong to
# plot.survfit(), not to survfit(); in the original they were inside the
# survfit() call and silently swallowed by its `...`, so a plain survival
# curve was drawn instead of the log cumulative hazard.
plot(survfit(fit.stslogtu),
     fun = "cumhaz", lty = 1:2, log = TRUE, mark.time = FALSE, xlim = c(0, 10))
# Proportionality seems ok.
# We then check for ulceration:
fit.slogtstu <- coxph(Surv(lifetime, status == 1) ~ factor(sex) + strata(ulcer) + log2thick, data = melanoma)
plot(survfit(fit.slogtstu, newdata = data.frame(sex = 1, log2thick = 0)), fun = "cumhaz", lty = 1:2, log = TRUE, mark.time = FALSE, xlim = c(0, 10))
# The curves are fairly parallel, except for the first two years
# Finally we check for thickness:
fit.ssttstu <- coxph(Surv(lifetime, status == 1) ~ factor(sex) + factor(ulcer) + strata(thick.group), data = melanoma)
plot(survfit(fit.ssttstu, newdata = data.frame(sex = 1, ulcer = 1)), fun = "cumhaz", lty = 1:4, log = TRUE, mark.time = FALSE, xlim = c(0, 10))
# There is a tendency that the curves are closer together for large values
# of t than for smaller ones, indicating a non-proportional effect of thickness
# We then make a formal test for proportionality of the covariates.
# This is done by, for each covariate x, adding a time-dependent covariate
# x*log(t), and testing whether the time-dependent covariates are significant
# using a score test:
# FIX: `fit.slogtu` was referenced below but never defined in this file.
# Following the naming scheme (s = sex, logt = log-thickness, u = ulcer) it is
# the full, UNstratified model with all three covariates, which also matches
# the three diagnostic panels set up by par(mfrow = c(1, 3)) below.
fit.slogtu <- coxph(Surv(lifetime, status == 1) ~ factor(sex) + factor(ulcer) + log2thick, data = melanoma)
cox.zph(fit.slogtu, transform = 'log')
# The test indicates that the effect of tumor thickness is not proportional.
# The estimate we get for log-thickness is then a weighted average of the
# time-varying effect.
# We also make plots that give nonparametric estimates of the (possible)
# time-dependent effect of the covariates (one panel per covariate):
par(mfrow = c(1, 3))
plot(cox.zph(fit.slogtu))
|
c35e26c1f460d622f25386fcf8fc33dab1268768 | e0bae273f83296aefc64ddc6b88a35a8d4aa6d84 | /man/locational.Gini.curve.Rd | 18904ae05a40ef77ae6a1035b500c25806454859 | [] | no_license | PABalland/EconGeo | 9958dcc5a7855b1c00be04215f0d4853e61fc799 | 420c3c5d04a20e9b27f6e03a464ca1043242cfa9 | refs/heads/master | 2023-08-08T04:22:25.972490 | 2022-12-20T11:03:49 | 2022-12-20T11:03:49 | 63,011,730 | 37 | 11 | null | 2018-09-18T15:22:21 | 2016-07-10T18:00:42 | R | UTF-8 | R | false | true | 1,614 | rd | locational.Gini.curve.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locational.Gini.curve.r
\name{locational.Gini.curve}
\alias{locational.Gini.curve}
\title{Plot a locational Gini curve from regions - industries matrices}
\usage{
locational.Gini.curve(mat, pdf = FALSE)
}
\arguments{
\item{mat}{An incidence matrix with regions in rows and industries in columns. The input can also be a vector of industrial regional count (a matrix with n regions in rows and a single column).}
\item{pdf}{Logical; shall a pdf be saved to your current working directory? Defaults to FALSE. If set to TRUE, a pdf with all locational Gini curves will be compiled and saved to your current working directory.}
% NOTE(review): `pop` is documented here but does not appear in the
% \usage signature above; regenerate this file from the roxygen source
% after fixing the @param list.
\item{pop}{A vector of population regional count}
}
\description{
This function plots a locational Gini curve following Krugman from regions - industries matrices.
}
\examples{
## generate a region - industry matrix
mat = matrix (
c (100, 0, 0, 0, 0,
0, 15, 5, 70, 10,
0, 20, 10, 20, 50,
0, 25, 30, 5, 40,
0, 40, 55, 5, 0), ncol = 5, byrow = T)
rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
colnames(mat) <- c ("I1", "I2", "I3", "I4", "I5")
## run the function (shows industry #5)
locational.Gini.curve (mat)
locational.Gini.curve (mat, pdf = TRUE)
}
\author{
Pierre-Alexandre Balland \email{p.balland@uu.nl}
}
\references{
Krugman P. (1991) \emph{Geography and Trade}, MIT Press, Cambridge (chapter 2 - p.56)
}
\seealso{
\code{\link{Hoover.Gini}}, \code{\link{locational.Gini}}, \code{\link{Hoover.curve}}, \code{\link{Lorenz.curve}}, \code{\link{Gini}}
}
\keyword{concentration}
\keyword{inequality}
|
2dcf46178e531d9fb8659cb7481725ba3f987046 | d9af06cdeb065ab315ca6d568786c729b50d5c4c | /cacheSolve.R | 6a6bdc8df99c5adf3406f5e8f4b8aafd94641d54 | [] | no_license | suniljohn/ProgrammingAssignment2 | 3693b575031aed57f4d58552b281fcdd8ea7280d | a47909da783b6e75620c8ddae2dc421e15e7bbfa | refs/heads/master | 2021-01-18T06:00:58.025122 | 2015-12-28T00:10:05 | 2015-12-28T00:10:05 | 48,650,011 | 0 | 0 | null | 2015-12-27T15:31:30 | 2015-12-27T15:31:29 | null | UTF-8 | R | false | false | 479 | r | cacheSolve.R | # This function computes the inverse of the special "matrix" created by makeCacheMatrix above. If the inverse
# has already been calculated (and the matrix has not changed), then it should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" object `x` (as built by
  ## makeCacheMatrix). A previously computed inverse is served from the
  ## cache; otherwise the inverse is computed with solve(), stored via
  ## x$setInverse(), and returned. Extra arguments are forwarded to solve().
  cached <- x$getInverse()
  if (is.null(cached)) {
    ## Cache miss: compute, memoise, and return the fresh inverse.
    fresh <- solve(x$get(), ...)
    x$setInverse(fresh)
    fresh
  } else {
    ## Cache hit: announce it and return the stored inverse unchanged.
    message("getting the cached data")
    cached
  }
}
dd63b0d620d8b100ed0878c07219d1a934999b80 | 32308c4bd3ca65d739af17c40473e59c6a0261ba | /RedwoodDataLoader.R | c54e2317d28e02f44d39543a027a78116b899a5e | [] | no_license | Leeps-Lab/RedwoodOutputDataParser | 8aedba27a87a87aa82609b541408335831604bec | 3c0ec3b9d2433a75ca4fe133631e4acc06c8897e | refs/heads/master | 2021-05-04T10:10:08.052739 | 2017-07-12T19:42:16 | 2017-07-12T19:42:16 | 53,453,613 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,443 | r | RedwoodDataLoader.R | require(jsonlite)
require(dplyr)
require(tidyr)
source("r/helperfunctions.R")
# debugging --------------------------------------------------------------------
# keys = c("state", "actions", "targets")
# data=BJP.testData <- read.csv("examples/data/BJPricingExperimentData.csv")
# subdata=data[[1]]
# keys = c("trade", "cancel","offer_text")
# data = CMtestData
# subdata=data[[1]]
# keys = c("LOG_CONFIG","state", "actions", "targets")
# data = Bub.testData
# keys = c("state", "updateAction", "endofsubperiod")
# subdata=data[[1]]
# Debugging CUNY Matching =======
# data = matching.testData <- read.csv("examples/data/Matching-Steve Active Dev -strategy method.csv")
# keys = c("strategy","action","action_round", "action_owntype",
# "action_partnertype","action_partnerid",
# "exit", "repeataction_owntype",
# "repeataction_partnertype","repeataction_partnerid")
# Debugging TTM =========
# ttm_data = read.csv("examples/example_ttm/TTM-Baruch-2016-04-15 13_54_53.101035 (1).csv")
# ttm_data_round <- ttm_data %>% dplyr::filter(Period == 56)
# data = ttm_data_round
# keys = "rp.round_started"
# Functions ------------------
#' Parse raw Redwood experiment output into one tidy data frame.
#'
#' @param data raw Redwood export with (at least) the columns Period, Sender,
#'   Group, Time, ClientTime, Key and a JSON-encoded Value column.
#' @param keys character vector of Key names to keep; each key's JSON payload
#'   is parsed and spread into new columns named "<key>" or "<key>.<field>".
#' @return the filtered rows, one column set per requested key, sorted by
#'   Time, with a human-readable `datetime` column added.
#' NOTE(review): relies on jsonlite::fromJSON, dplyr/tidyr verbs, and
#' rwp_myformat.POSIXct() sourced from r/helperfunctions.R.
redwoodParser <- function(data, keys){
  # keep only desired keys; Time is truncated from its raw resolution with
  # integer division by 1e8 (tenths of a second -- see datetime step below)
  data <- data %>%
    mutate(Key = as.character(Key)) %>%
    mutate(Time = Time %/% 100000000) %>%
    select(-ClientTime) %>%
    dplyr::filter(Key %in% keys)
  # -----------------------------
  # Key-Level Parser WorkHorse
  # Given a subset of data for just one Key, apply this.
  # the complicated and tricky part is dealing with each type of "Value" redwood produces
  # these may be single values, or large data.frames, or lists, or somethese else.
  rw_parsr <- function(subdata){
    # Concatenate every row's JSON value into one JSON array and parse it in
    # a single fromJSON() call.
    # NOTE(review): tbl_df() is deprecated in newer dplyr (use as_tibble()).
    KeyData <- subdata %>% select(Value) %>% mutate(Value = as.character(Value)) %>% tbl_df
    KeyData <- paste(unlist(KeyData), sep = "", collapse=",")
    KeyData <- paste(c("[", KeyData, "]"), collapse="")
    KeyData <- fromJSON(KeyData)
    # "fromJSON" converts the json objects in each row of the "Value" col
    # into R objects. these may be lists, vectors, data.frames, NULL, single values, etc.
    # the follow if-else statements hand these different types
    # if you get an error, it may be that there does not yet exist a case that handles
    # that particular data structure.
    if (length(KeyData) == 0){
      # if obj is empty (oft used to indicate the event indicated by "key")
      # e.g. "subperiod_end", just needs time and Key, not Value contents.
      # The event is recorded as a TRUE column named after the key.
      subdata <- subdata %>%
        mutate(NewCol = TRUE)
      names(subdata)[length(names(subdata))] = unique(subdata$Key)
      subdata <- subdata %>% select(-Value)
    } else if (typeof(KeyData[[1]][[1]]) == "list"){
      # if the value is a complex json object
      #used for multiline entries
      # Each original row expands into several output rows; count how many.
      if (is.data.frame(KeyData[[1]][[1]])){
        data_repeatTimes <- sapply(KeyData, function(x) {sapply(x, nrow)})
      } else {
        data_repeatTimes <- sapply(KeyData, function(x) {sapply(x, length)})
      }
      # KeyData <- lapply(KeyData, function(x){
      #   lapply(x[[1]], function(y){
      #     bind_rows(data.frame(as.list(y)))
      #   })
      # })
      KeyData <- lapply(KeyData, bind_rows)
      KeyData <- bind_rows(KeyData)
      subdata <- subdata %>% select(-Value)
      # Repeat each metadata row once per expanded payload row, then attach.
      subdata <- subdata[rep(seq_len(nrow(subdata)), times = data_repeatTimes),]
      subdata <- bind_cols(subdata, KeyData)
      #rename vars names, add Key-name to values
      # NOTE(review): the hard-coded `7` assumes exactly six metadata columns
      # precede the payload columns -- TODO confirm this holds for all inputs.
      nameMessage <- unique(subdata$Key)
      names(subdata)[7:ncol(subdata)] = paste(nameMessage, names(subdata)[7:ncol(subdata)], sep = ".")
    } else if (is.vector(KeyData)){
      #in the simple case, if value is just a single value (of any type)
      # the Value column is simply renamed to the key itself
      nameMessage <- unique(subdata$Key)
      names(subdata)[which(names(subdata) == "Value")] = nameMessage
    } else if (class(KeyData) == "data.frame" & is.vector(KeyData[1,])){
      # when the value is a data.frame
      # NOTE(review): `class(x) == "data.frame"` / `== "matrix"` comparisons
      # would be safer as inherits(); kept as-is (matrix objects gained a
      # second class in R >= 4.0, which changes the branch taken).
      if (is.data.frame(KeyData)
          & ncol(KeyData) == 1
          & length(KeyData[[1]][[1]]) != 1){
        # one list-column holding fixed-length vectors: transpose into
        # numbered columns "<key>.1", "<key>.2", ...
        nameMessage <- unique(subdata$Key) #setup name for new column
        KeyData = as.data.frame(t(as.data.frame(KeyData[[1]])))
        rownames(KeyData) = NULL
        names(KeyData) <- paste(nameMessage,(1:ncol(KeyData)), sep=".")
        subdata <- bind_cols(subdata,KeyData) %>%
          select(-Value)
      } else if (is.data.frame(KeyData) & ncol(KeyData) == 1){
        # when the value is a ONE-COLUMN dataframe
        nameMessage <- unique(subdata$Key) #setup name for new column
        subdata$Value = unlist(KeyData[1]) # put in parsed-json-value
        names(subdata)[which(names(subdata) == "Value")] = paste(nameMessage,names(KeyData), sep = ".") #rename
      } else {
        #in the case that the value can be exprssed as a muli-row dataframe
        nameMessage <- unique(subdata$Key)
        # KeyData <- lapply(KeyData, function(x){
        #   (unlist(x))
        # })
        KeyData <- lapply(KeyData, function(x){
          as.data.frame(t(data.frame(x)),
                        stringsAsFactors = F)
        })
        KeyData <- bind_rows(KeyData)
        # NOTE(review): substr(names, 2, 2) keeps only one character of the
        # transposed names (e.g. "V1" -> "1"); breaks past 9 columns -- TODO
        # confirm payloads never exceed 9 fields here.
        names(KeyData) <- paste(nameMessage,substr(names(KeyData),2,2), sep=".")
        subdata <- subdata %>% select(-Value)
        subdata <- bind_cols(subdata, KeyData)
      }
    } else if (class(KeyData) == "matrix"){
      # fixed-length numeric arrays parse to a matrix: one column per element
      nameMessage <- unique(subdata$Key)
      KeyData <- as.data.frame(KeyData)
      names(KeyData) <- paste(nameMessage,(1:ncol(KeyData)), sep=".")
      subdata <- subdata %>% select(-Value)
      subdata <- bind_cols(subdata, KeyData)
    } else {
      # fallback: coerce whatever structure remains column-wise and attach it
      KeyData <- as.data.frame(lapply(KeyData, cbind))
      KeyData <- bind_rows(KeyData)
      #rename col names, avoid mixmatch of identically named vars
      nameMessage <- unique(subdata$Key)
      names(KeyData) <- paste(nameMessage,(names(KeyData)), sep=".")
      subdata <- subdata %>% select(-Value)
      subdata <- bind_cols(subdata, KeyData)
    }
    return(subdata)
  }
  # -----------------------------
  #apply rw parswer workhorse: single key directly, otherwise parse each key
  #separately and full-join the pieces back together on the metadata columns
  if (length(keys) == 1){ # simple case; only want one key
    output <- rw_parsr(data)
  } else { # dealing with multiple keys
    output <- data[0,] %>% select(Period, Sender, Group, Time, Key)
    data <- split(data, data$Key)
    for (subdata in data){
      subdata <- rw_parsr(subdata)
      #recombine
      output <- full_join(output, subdata,
                          by = c("Period","Group","Sender","Time"))
      # merge keys into one column (full_join leaves Key.x/Key.y behind)
      if ("Key.x" %in% names(output)){
        output <- output%>%
          mutate(Key.x = as.character(ifelse(is.na(Key.x), "", Key.x)),
                 Key.y = as.character(ifelse(is.na(Key.y), "", Key.y))
          ) %>%
          unite(Key, starts_with("Key"), sep = " ")
      }
    }
  }
  # sort by time and add a readable timestamp (Time is in tenths of seconds,
  # hence the /10 before formatting)
  output <- output %>%
    mutate(datetime = (rwp_myformat.POSIXct(Time/10, digits = 3))) %>% #see helper functions
    select(Period, Group, Sender, datetime, everything()) %>%
    arrange(Time)
}
# Short alias so interactive sessions can call rwp() instead of redwoodParser().
rwp <- redwoodParser
# New Parser -------------------------------------------------------------------
# NOTE(review): rwp2 is an unfinished work-in-progress rewrite of
# redwoodParser and is NOT runnable as written:
#   * as.Node() / ToDataFrameTable come from the data.tree package, which is
#     never loaded in this file;
#   * ListObNameVal() below is a stub whose condition and body are literally
#     `...`;
#   * the do.call("rbind", ...) pipeline dangles -- `t() %>%` pipes straight
#     into the following `nameMessage <- ...` assignment;
#   * several bare expressions (names(KeyData), length(KeyData), ...) compute
#     values that are immediately discarded -- leftover debugging probes.
# Prefer redwoodParser()/rwp() above until this is finished.
rwp2 <- function(data, keys){
  # keep only desired keys; unlike redwoodParser, column types are coerced
  # up front and Time ends up in seconds with one decimal (%/% 1e8 / 10)
  data <- data %>%
    mutate(
      Period = as.integer(Period),
      Group = as.integer(Group),
      Sender = as.character(Sender),
      Key = as.character(Key),
      Value = as.character(Value)) %>%
    mutate(Time = Time %/% 100000000 / 10) %>%
    select(-ClientTime) %>%
    dplyr::filter(Key %in% keys)
  # -----------------------------
  # Key-Level Parser WorkHorse
  # Given a subset of data for just one Key, apply this.
  # the complicated and tricky part is dealing with each type of "Value" redwood produces
  # these may be single values, or large data.frames, or lists, or somethese else.
  rw_parsr <- function(subdata){
    KeyData <- subdata %>% select(Value) %>% mutate(Value = as.character(Value)) %>% tbl_df
    KeyData <- paste(unlist(KeyData), sep = "", collapse=",")
    KeyData <- paste(c("[", KeyData, "]"), collapse="")
    KeyData <- fromJSON(KeyData)
    # NOTE(review): discarded result -- debugging probe.
    names(KeyData)
    # NOTE(review): as.Node()/ToDataFrameTable need library(data.tree),
    # which is not loaded anywhere in this file.
    KeyData = as.Node(KeyData)
    KeyData %>% ToDataFrameTable
    # New JSON handling Section
    # NOTE(review): placeholder stub -- `...` is not valid logic here; the
    # function is never called but will error if it ever is.
    ListObNameVal <- function(Val,Nam){
      if (is.list(...)){
        ...
      } else {
      }
    }
    # NOTE(review): the next five expressions are computed and thrown away.
    length(KeyData)
    length(KeyData[1])
    length(KeyData[[1]][[1]])
    class(length(KeyData[[1]][[1]]))
    typeof(length(KeyData[[1]]))
    # Replace JSON nulls with NA, then flatten each record to a named vector.
    KeyData <- lapply(KeyData, function(x) {
      x[sapply(x, is.null)] <- NA
      unlist(x)
    })
    # NOTE(review): broken pipeline -- the trailing `%>%` after t() pipes the
    # transposed matrix into the assignment expression on the next line, so
    # neither KeyData nor nameMessage ends up holding what was intended.
    KeyData <- do.call("rbind", KeyData) %>%
      t() %>%
    nameMessage <- unique(subdata$Key)
    names(KeyData) = paste(nameMessage,names(KeyData), sep=".")
    subdata <- bind_cols(
      (subdata %>%
         select(-Value)),
      KeyData
    )
    # "fromJSON" converts the json objects in each row of the "Value" col
    # into R objects. these may be lists, vectors, data.frames, NULL, single values, etc.
    # the follow if-else statements hand these different types
    # if you get an error, it may be that there does not yet exist a case that handles
    # that particular data structure.
    # NOTE(review): from here down the code duplicates redwoodParser's shape
    # dispatch, but Value has already been dropped above, so the select(-Value)
    # calls below would fail -- further evidence this branch was never wired up.
    if (length(KeyData) == 0){
      subdata <- subdata %>%
        mutate(NewCol = TRUE)
      names(subdata)[length(names(subdata))] = unique(subdata$Key)
      subdata <- subdata %>% select(-Value)
    } else if (typeof(KeyData[[1]][[1]]) == "list"){
      # if the value is a complex json object
      #used for multiline entries
      if (is.data.frame(KeyData[[1]][[1]])){
        data_repeatTimes <- sapply(KeyData, function(x) {sapply(x, nrow)})
      } else {
        data_repeatTimes <- sapply(KeyData, function(x) {sapply(x, length)})
      }
      # KeyData <- lapply(KeyData, function(x){
      #   lapply(x[[1]], function(y){
      #     bind_rows(data.frame(as.list(y)))
      #   })
      # })
      KeyData <- lapply(KeyData, bind_rows)
      KeyData <- bind_rows(KeyData)
      subdata <- subdata %>% select(-Value)
      subdata <- subdata[rep(seq_len(nrow(subdata)), times = data_repeatTimes),]
      subdata <- bind_cols(subdata, KeyData)
      #rename vars names, add Key-name to values
      nameMessage <- unique(subdata$Key)
      names(subdata)[7:ncol(subdata)] = paste(nameMessage, names(subdata)[7:ncol(subdata)], sep = ".")
    } else if (is.vector(KeyData)){
      #in the simple case, if value is just a single value (of any type)
      nameMessage <- unique(subdata$Key)
      names(subdata)[which(names(subdata) == "Value")] = nameMessage
    } else if (class(KeyData) == "data.frame" & is.vector(KeyData[1,])){
      # when the value is a data.frame
      if (is.data.frame(KeyData) & ncol(KeyData) == 1){
        # when the value is a ONE-COLUMN dataframe
        nameMessage <- unique(subdata$Key) #setup name for new column
        subdata$Value = unlist(KeyData[1]) # put in parsed-json-value
        names(subdata)[which(names(subdata) == "Value")] = paste(nameMessage,names(KeyData), sep = ".") #rename
      } else {
        #in the case that the value can be exprssed as a muli-row dataframe
        nameMessage <- unique(subdata$Key)
        # KeyData <- lapply(KeyData, function(x){
        #   (unlist(x))
        # })
        KeyData <- lapply(KeyData, function(x){
          as.data.frame(t(data.frame(x)),
                        stringsAsFactors = F)
        })
        KeyData <- bind_rows(KeyData)
        names(KeyData) <- paste(nameMessage,substr(names(KeyData),2,2), sep=".")
        subdata <- subdata %>% select(-Value)
        subdata <- bind_cols(subdata, KeyData)
      }
    } else if (class(KeyData) == "matrix"){
      nameMessage <- unique(subdata$Key)
      KeyData <- as.data.frame(KeyData)
      names(KeyData) <- paste(nameMessage,(1:ncol(KeyData)), sep=".")
      subdata <- subdata %>% select(-Value)
      subdata <- bind_cols(subdata, KeyData)
    } else {
      # in the case....
      KeyData <- as.data.frame(lapply(KeyData, cbind))
      KeyData <- bind_rows(KeyData)
      #rename col names, avoid mixmatch of identically named vars
      nameMessage <- unique(subdata$Key)
      names(KeyData) <- paste(nameMessage,(names(KeyData)), sep=".")
      subdata <- subdata %>% select(-Value)
      subdata <- bind_cols(subdata, KeyData)
    }
    return(subdata)
  }
  # -----------------------------
  #apply rw parswer workhorse
  if (length(keys) == 1){ # simple case; only want one key
    output <- rw_parsr(data)
  } else { # dealing with multiple keys
    output <- data[0,] %>% select(Period, Sender, Group, Time, Key)
    data <- split(data, data$Key)
    for (subdata in data){
      subdata <- rw_parsr(subdata)
      #recombine
      output <- full_join(output, subdata,
                          by = c("Period","Group","Sender","Time"))
      # merge keys into one column
      if ("Key.x" %in% names(output)){
        output <- output%>%
          mutate(Key.x = as.character(ifelse(is.na(Key.x), "", Key.x)),
                 Key.y = as.character(ifelse(is.na(Key.y), "", Key.y))
          ) %>%
          unite(Key, starts_with("Key"), sep = " ")
      }
    }
  }
  # sort by time
  # NOTE(review): Time was already divided by 10 at the top of rwp2, so the
  # extra /10 here double-scales the timestamp -- TODO confirm against the
  # redwoodParser version, which only divides once.
  output <- output %>%
    mutate(datetime = (rwp_myformat.POSIXct(Time/10, digits = 3))) %>% #see helper functions
    select(Period, Group, Sender, datetime, everything()) %>%
    arrange(Time)
}
|
6161fe82289da66d614cef1304f48c1be915af5f | 8f86416cc6a9f8a54e81bdae2a4fc56b06fe7538 | /man/dt_quantilebins_weighted.Rd | 425e76ec1fed3b9d91464c4ee3a06c8d5363016c | [
"Unlicense"
] | permissive | BoostOlive/Rtoolkit | e1c596d99f1b131a7ebc4608eca908beda0365d6 | 6d53a94a94bb4d01a34edf906c1e5d8ee0b2c6aa | refs/heads/master | 2022-12-02T17:55:50.877027 | 2020-06-26T14:23:37 | 2020-06-26T14:23:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,349 | rd | dt_quantilebins_weighted.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datatable.r
\name{dt_quantilebins_weighted}
\alias{dt_quantilebins_weighted}
\title{Weighted quantile bins from data.table data}
\usage{
dt_quantilebins_weighted(
DT,
numerical.var,
wt.var,
by.vars = NULL,
ntile = 5,
outvarnames = c("bin", "bin_n"),
bounds = c(-Inf, Inf),
...
)
}
\arguments{
\item{DT}{\link{data.table} data}
\item{numerical.var}{\link{str(1)} variable to be quantilized}
\item{wt.var}{\link{str(1)} variable to be used as weight}
\item{by.vars}{\link{str(n)} variable to subset by when computing quantiles}
\item{ntile}{\link{int} number of quantiles}
\item{outvarnames}{to be implemented}
\item{bounds}{\link{num(2)} natural bounds for \code{numerical.var} (for pretty printing)}
\item{...}{extra arguments to the \code{cut} function}
}
\value{
}
\description{
Compute weighted quantiles of a data.table \code{DT}
}
\details{
Apply \code{dt_quantilebins_generic} with Hmisc's \code{wtd.quantile} to find
weighted quantiles. The number of percentiles is governed by \code{ntile}: with
\code{ntile=5}, the function computes quintiles, with \code{ntile=4} quartiles, and so
on.
The \code{...} are passed to the \code{cut} function. Note that \code{ordered_result=TRUE}
necessarily (i.e., it's not an option).
}
\author{
Gustavo
}
|
47418220c8f36ee4f9405dcf67e67289c4e3d492 | 5cfcd5e4dc4068737571a376cd8ee414810f7289 | /D_count_matrix_expl_dat_analysis/4_PRINC_COMP_ANALYSIS.R | 3e9d13a038f9aeca3cf3214ca325a9201941d4a7 | [] | no_license | RJEGR/Small-RNASeq-data-analysis | e42926cf3e6d28c117cb5bcd4a41d371bdbd528b | 1b0ea5a3f302f942e881c624497b2ed09da0978c | refs/heads/master | 2023-09-04T12:34:50.444113 | 2023-08-25T00:05:32 | 2023-08-25T00:05:32 | 77,959,541 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,365 | r | 4_PRINC_COMP_ANALYSIS.R |
# Purpose: Principal component analysis (PCA) of a small-RNA count matrix,
# hierarchical clustering of the first two PCs, and a PCA scatter plot.
# Input : Counts_and_mtd.rds under `path` (list: [[1]] count matrix,
#         [[2]] sample metadata with LIBRARY_ID, pH, hpf columns).
# Output: PCA.png written to `path`.

# Dependencies: the original script used read_rds/%>%/ggplot without loading
# any package, which fails in a fresh session. tidyverse supplies readr,
# dplyr, tibble and ggplot2; ggforce is called via :: below.
library(tidyverse)

# Session reset kept from the original script. NOTE(review): rm(list = ls())
# inside a script is generally discouraged — consider running in a clean
# session instead.
rm(list = ls())
if (!is.null(dev.list())) dev.off()

options(stringsAsFactors = FALSE, readr.show_col_types = FALSE)

path <- "~/Documents/MIRNA_HALIOTIS/SHORTSTACKS/ShortStack_20230315_out/"

# Color palette keyed by pH treatment group
pHpalette <- c(`Experimental` = "#ad1f1f", `Control` = "#4575b4")

# Load counts ([[1]]) and sample metadata ([[2]])
out <- read_rds(paste0(path, "Counts_and_mtd.rds"))
COUNTS <- out[[1]]
MTD <- out[[2]]
dim(COUNTS)
dim(MTD)

# PCA =====
# log2-transform the counts; the pseudo-count of 1 avoids log2(0)
data <- log2(COUNTS + 1)
ncol(data)

# Samples as rows: transpose before prcomp. Values are centered, not scaled.
PCA <- prcomp(t(data), center = TRUE, scale. = FALSE)

# Percent of total variance explained by each principal component
percentVar <- round(100 * PCA$sdev^2 / sum(PCA$sdev^2), 1)
sd_ratio <- sqrt(percentVar[2] / percentVar[1])

PCAdf <- data.frame(PC1 = PCA$x[, 1], PC2 = PCA$x[, 2])

# Scree plot: standard deviation per component
barplot(PCA$sdev)

# Number of clusters; originally chosen with mclust model-based clustering
# (that exploration was commented out in the source).
k <- 4

# Hierarchical clustering of samples in PC1/PC2 space.
# NOTE(review): hclust_res is computed but never joined into the plot data
# below — the left_join(hclust_res) call was commented out in the original.
hclust_res <- PCAdf %>%
  dist(method = "euclidean") %>%
  hclust() %>%
  cutree(., k) %>%
  as_tibble(rownames = "LIBRARY_ID") %>%
  mutate(cluster = paste0("C", value)) %>%
  dplyr::select(-value)

# PCA scatter plot: points colored by pH, labelled with hours-post-
# fertilization, one ellipse per sample group (library id minus the
# trailing replicate character).
pcaplot <- PCAdf %>%
  mutate(LIBRARY_ID = rownames(.)) %>%
  left_join(MTD) %>%
  mutate(sample_group = substr(LIBRARY_ID, 1, nchar(LIBRARY_ID) - 1)) %>%
  ggplot(., aes(PC1, PC2)) +
  geom_abline(slope = 0, intercept = 0, linetype = "dashed", alpha = 0.5) +
  geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5) +
  ggforce::geom_mark_ellipse(aes(group = as.factor(sample_group)),
                             fill = "grey89", color = NA) +
  geom_point(size = 7, alpha = 0.7, aes(color = pH)) +
  geom_text(family = "GillSans",
            mapping = aes(label = paste0(hpf, " hpf")), size = 2.5) +
  labs(caption = "") +
  ylim(-250, 250) +
  xlab(paste0("PC1, VarExp: ", percentVar[1], "%")) +
  ylab(paste0("PC2, VarExp: ", percentVar[2], "%")) +
  scale_color_manual("", values = rev(pHpalette)) +
  scale_fill_manual("", values = rev(pHpalette)) +
  theme_classic(base_family = "GillSans", base_size = 14) +
  theme(plot.title = element_text(hjust = 0.5), legend.position = "top")

# Write the figure to disk
ggsave(pcaplot,
       filename = "PCA.png", path = path,
       width = 5, height = 5, device = png, dpi = 300)
#
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.