blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf94f1eb2223aef209c9480d9703027236e29ceb | b0ac70b2e2e91b62503cc3ae93ca5eedfaa53a2b | /BIP.R | 8117b50242bf88103d6aa1de89cc9170f035b0c8 | [] | no_license | matteopagliari/BIP_project | 07b5dca008fd56a52cd481d2d2c4dc1ef8fad712 | 7459fdb02ce2d9727565f71a6ba6457504843f53 | refs/heads/master | 2020-04-09T17:54:03.539851 | 2018-03-07T13:25:09 | 2018-03-07T13:25:09 | 124,237,794 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,538 | r | BIP.R | library(dplyr)
library(plotly)
library(ineq) #compute gini index
library(base)
library(stringr)
library(ggthemes) # visualization
library(lubridate)
library(forecast)
data=read.csv("./Desktop/Data and description for the course project/dataset_polimi.csv")
fdata <- data
fdata$Weekday <- wday(fdata$Data) #1=sunday
fdata$Month <- month(fdata$Data)
fdata$Year <- year(fdata$Data)
#------------DATA EXPLORATION---------------
#ogni zona,area,sottoarea ha lo stesso numero di entry
gerarchia <- fdata[1:250560, ] %>%
group_by(Zona, Area, Sottoarea ) %>%
summarise(num = n())
#prodotti presenti in modo uguale
fdata %>% count(Categoria_prodotto)-> products
fdata %>% count(Vendite)-> vendite
fdata %>% count(Data)-> days
fdata %>% count(Area)-> areas
fdata %>% count(Sottoarea)-> subareas
sot5 <- fdata[1:250560, ] %>%
filter(Sottoarea=="Sottoarea_5")
p1 <- sot5[1:1740, ] %>%
filter(Year==2015) %>%
filter(Categoria_prodotto=="Prodotto_1")
plot(Data, Vendite, type="l", main="Scatterplot Vendite",xlab="Data", ylab="Vendite",xlim=c(366,730))
lines(Data, Vendite, type="l", main="Scatterplot Vendite",xlab="Data", ylab="Vendite")
venditearea <- fdata[1:250560, ] %>%
group_by(Area, Prodotto) %>%
summarise(num = n())
gruppo <-data.frame(Area=integer(148), Prodotto=integer(148), Anno1=integer(148), Anno2=integer(148), Anno3=integer(148))
fdata$Z <- gsub("Zona_", "", fdata$Zona)
fdata$A <- gsub("Area_", "", fdata$Area)
fdata$S <- gsub("Sottoarea_", "", fdata$Sottoarea)
fdata$P <- gsub("Prodotto_", "", fdata$Categoria_prodotto)
fdata$V <- fdata$Vendite
#New dataset well defined
sdata <- subset(fdata, select=c("Z", "A", "S", "Weekday", "Month", "Year","Data", "P", "V"))
#Prodotti venduti per ogni area per ogni anno
for(i in 1:74){
gruppo$Area[i] <- i
gruppo$Prodotto[i] <- 1
gruppo$Anno1[i] <- sum(subset(sdata, A==i & Year=="2014" & P==1)$V)
gruppo$Anno2[i] <- sum(subset(sdata, A==i & Year=="2015" & P==1)$V)
gruppo$Anno3[i] <- sum(subset(sdata, A==i & Year=="2016" & P==1)$V)
}
for(i in 1:74){
gruppo$Area[i+74] <- i
gruppo$Prodotto[i+74] <- 2
gruppo$Anno1[i+74] <- sum(subset(sdata, A==i & Year=="2014" & P==2)$V)
gruppo$Anno2[i+74] <- sum(subset(sdata, A==i & Year=="2015" & P==2)$V)
gruppo$Anno3[i+74] <- sum(subset(sdata, A==i & Year=="2016" & P==2)$V)
}
#Somma prodotti 1 e 2 venduti per ogni anno per area
totgruppo <-data.frame(Area=integer(74), Anno1=integer(74), Anno2=integer(74), Anno3=integer(74))
for(i in 1:74){
totgruppo$Area[i] <- i
totgruppo$Anno1[i] <- sum(subset(sdata, A==i & Year=="2014")$V)
totgruppo$Anno2[i] <- sum(subset(sdata, A==i & Year=="2015")$V)
totgruppo$Anno3[i] <- sum(subset(sdata, A==i & Year=="2016")$V)
}
tri<-data.frame(Area=integer(148), Prodotto=integer(148), Primo2014=integer(148), Secondo2014=integer(148), Terzo2014=integer(148), Quarto2014=integer(148), Primo2015=integer(148), Secondo2015=integer(148), Terzo2015=integer(148), Quarto2015=integer(148), Primo2016=integer(148), Secondo2016=integer(148))
#Trimestri
for(i in 1:74){
tri$Area[i] <- i
tri$Prodotto <- 1
tri$Primo2014[i] <- sum(subset(sdata, A==i & Year=="2014" & Month<=3 & P==1)$V)
tri$Secondo2014[i] <- sum(subset(sdata, A==i & Year=="2014" & Month>3 & Month<=6 & P==1)$V)
tri$Terzo2014[i] <- sum(subset(sdata, A==i & Year=="2014" & Month>6 & Month<=9 & P==1)$V)
tri$Quarto2014[i] <- sum(subset(sdata, A==i & Year=="2014" & Month>9& Month<=12 & P==1)$V)
tri$Primo2015[i] <- sum(subset(sdata, A==i & Year=="2015" & Month<=3 & P==1)$V)
tri$Secondo2015[i] <- sum(subset(sdata, A==i & Year=="2015" & Month>3 & Month<=6 & P==1)$V)
tri$Terzo2015[i] <- sum(subset(sdata, A==i & Year=="2015" & Month>6 & Month<=9 & P==1)$V)
tri$Quarto2015[i] <- sum(subset(sdata, A==i & Year=="2015" & Month>9& Month<=12 & P==1)$V)
tri$Primo2016[i] <- sum(subset(sdata, A==i & Year=="2016" & Month<=3 & P==1)$V)
tri$Secondo2016[i] <- sum(subset(sdata, A==i & Year=="2016" & Month>3 & Month<=6 & P==1)$V)
}
for(i in 1:74){
tri$Area[i+74] <- i
tri$Prodotto[i+74] <- 2
tri$Primo2014[i+74] <- sum(subset(sdata, A==i & Year=="2014" & Month<=3 & P==2)$V)
tri$Secondo2014[i+74] <- sum(subset(sdata, A==i & Year=="2014" & Month>3 & Month<=6 & P==2)$V)
tri$Terzo2014[i+74] <- sum(subset(sdata, A==i & Year=="2014" & Month>6 & Month<=9 & P==2)$V)
tri$Quarto2014[i+74] <- sum(subset(sdata, A==i & Year=="2014" & Month>9& Month<=12 & P==2)$V)
tri$Primo2015[i+74] <- sum(subset(sdata, A==i & Year=="2015" & Month<=3 & P==2)$V)
tri$Secondo2015[i+74] <- sum(subset(sdata, A==i & Year=="2015" & Month>3 & Month<=6 & P==2)$V)
tri$Terzo2015[i+74] <- sum(subset(sdata, A==i & Year=="2015" & Month>6 & Month<=9 & P==2)$V)
tri$Quarto2015[i+74] <- sum(subset(sdata, A==i & Year=="2015" & Month>9& Month<=12 & P==2)$V)
tri$Primo2016[i+74] <- sum(subset(sdata, A==i & Year=="2016" & Month<=3 & P==2)$V)
tri$Secondo2016[i+74] <- sum(subset(sdata, A==i & Year=="2016" & Month>3 & Month<=6 & P==2)$V)
}
ddata<- data
ddata["Numday"] <-NA
ddata$Numday <- yday(ddata$Data)
sdata["Numday"] <- 0
sdata$Numday <- yday(sdata$Data)
for(i in 1:250560){
if(sdata$Year[i]=="2015"){
sdata$Numday[i] <- sdata$Numday[i] + 365
}
if(sdata$Year=="2016"){
sdata$Numday[i] <- sdata$Numday[i] + 730
}
}
kdata <- sdata
kdata$V <- kdata$V/100
write.csv(ndata, file = "./Desktop/Data and description for the course project/ndata.csv")
zeroven<- sdata[1:250560, ] %>%
group_by(V, Weekday) %>%
summarise(num_animals = n())
ndata <- data.frame(Area=integer(148), Prodotto=integer(148), Anno=integer(148), Trimestre=integer(148), Vendite=integer(148))
for(i in 1:1){
for(j in 9:10){
if(tri$Area[i]==1 & tri$Prodotto[i]==1){
ndata$Area[j]<-1
ndata$Prodotto[j]<-1
ndata$Anno[j] <- 2016
ndata$Trimestre[j] <- j-8
}
}
}
ndata$Vendite[3] <- 625
ndata$Vendite[4] <- 770
ndata$Vendite[5] <- 669
ndata$Vendite[6] <- 626
ndata$Vendite[7] <- 678
ndata$Vendite[8] <- 710
ndata$Vendite[9] <- 753
ndata$Vendite[10] <- 378
for(i in 1:10){
ndata$Trimestre[i] <- i
}
sot1 <- sdata[1:250560, ] %>%
filter(sdata$S==1) %>%
filter(sdata$Y==2014) %>%
filter(sdata$Month==5)
sot1 <- sot1[1:1740, ] %>%
filter(sot1$Month==5)
fit <- lm(V ~ Data, data=sot1)
summary(fit) # show results
plot(fit)
sequence <- subset(sot1, select=c("Data", "V"))
myts <- ts(sequence, start=c(2016, 1), end=c(2016, 140), frequency=365)
plot(myts)
fit <-HoltWinters(sequence, gamma=FALSE)
plot(fit)
forecast(fit, 3)
plot(forecast(fit, 3)) |
838c8f25d235a4779cd020d96122be8046bc5cee | 0aa63f99a9ebe79e55cc09338d5bb4ce2377fd83 | /tests/testthat/test-253-guessDate.R | 076c2d8f3f9e95029038dddae4b94a9eab70a5b3 | [] | no_license | nutterb/redcapAPI | c13b890b5d33b40c134833155861ee42d44b06c7 | 9b7287106198581c352fc91492d83fc7806d2bd7 | refs/heads/main | 2023-09-01T07:41:41.326514 | 2023-08-28T14:02:23 | 2023-08-28T14:02:23 | 11,691,011 | 47 | 31 | null | 2022-11-03T22:49:05 | 2013-07-26T17:31:51 | R | UTF-8 | R | false | false | 1,326 | r | test-253-guessDate.R | context("guessCast.R")
x <- data.frame(
x=c("xyz", "2023-01-01", "", "2003-12-12", "2003-12-12", "2012-10-10")
)
test_that(
"no guess cast below threshold",
{
y <- guessDate(x[1:4,,drop=FALSE], rcon)
expect_class(y$x, "character")
}
)
test_that(
"guess cast above threshold",
{
y <- guessDate(x, rcon)
expect_class(y$x, "POSIXct")
}
)
test_that(
"guess cast gives message when triggered",
{
expect_message(guessDate(x, rcon), "guessCast")
}
)
test_that(
"guess cast respects_quiet",
{
expect_message(guessDate(x, rcon, quiet=TRUE), NA)
}
)
test_that(
"guess cast reports invalid",
{
y <- guessDate(x, rcon)
expect_class(attr(y, "invalid"), "invalid")
}
)
test_that(
"guess cast validates arguments",
{
expect_error(guessDate(1:3, rcon, quiet=TRUE), "Variable 'data'")
expect_error(guessDate(x, 1:3, quiet=TRUE), "Variable 'rcon'")
expect_error(guessDate(x, rcon, quiet=1.3), "Variable 'quiet'")
expect_error(guessDate(x, rcon, quiet=TRUE,na=TRUE), "Variable 'na'")
expect_error(guessDate(x, rcon, quiet=TRUE,validation=TRUE), "Variable 'validation'")
expect_error(guessDate(x, rcon, quiet=TRUE,cast=TRUE), "Variable 'cast'")
expect_error(guessDate(x, rcon, quiet=TRUE,threshold=TRUE), "Variable 'threshold'")
}
)
|
9669f7c9b75178709537c91a73b84e30aeca7229 | abb57b664ab66644b0067c2e3f793412ebecf549 | /APLM hw03/APLM hw03.R | 2be76327625881a132d1f3fc50cb7a7cc2d6f845 | [] | no_license | oicjacky/Applied-Statistical-Linear-Model | c06662947287f9f9edcee1a22f1564488dc124ad | 244f8c4fc2f6bfc5b58b31b452cf9eb4f8ddc351 | refs/heads/master | 2021-01-02T14:56:29.292443 | 2020-02-13T06:50:52 | 2020-02-13T06:50:52 | 239,670,484 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,939 | r | APLM hw03.R | library(stringr)
setwd("D:/github_oicjacky/Applied Statistical Linear Model/APLM hw03")
data <- read.table("CH09TA01.txt")
colnames(data) <- c("X1","X2","X3","X4","X5","Y","lnY")
n <- dim(data)[1]
P <- 5 # 5 predict variable
#
R.square <- function(data , mod){
SSTO <- sum( ( data$Y - mean(data$Y) )^2 )
SSE <- sum(mod$residuals^2)
return( 1 - (SSE / SSTO) )
}
# R.adjust
R.adjust <- function(data , mod){
n <- dim(data)[1]
p <- mod$rank
SSTO <- sum( ( data$Y - mean(data$Y) )^2 )
SSE <- sum(mod$residuals^2)
return( 1 - ( (n-1) / (n-p) ) * (SSE / SSTO) )
}
# power set
powerset = function(s){
len = length(s)
l = vector(mode="list",length=2^len) ; l[[1]]=numeric()
counter = 1
for(x in 1:length(s)){
for(subset in 1:counter){
counter=counter+1
l[[counter]] = c(l[[subset]],s[x])
}
}
return(l)
}
powerset(1:P)
#
length(powerset(1:P))
all_possible <- powerset(1:P)
R_adj.square <- R_square <- c()
variable <- c()
for(i in 2: length(all_possible) ) {
if( length(all_possible[[i]]) == 1 ){
A <- data.frame( data[, all_possible[[i]] ] ,
Y = data$Y )
colnames(A)[-2] <- paste0("X",all_possible[[i]])
a <- R.adjust(A , lm( Y ~ A[,1] ,A) )
b <- R.square(A , lm( Y ~ A[,1] ,A) )
print(colnames(A))
}else if( length(all_possible[[i]]) == 2 ){
A <- data.frame( data[, all_possible[[i]] ] [ ,1] ,
data[, all_possible[[i]] ] [ ,2] ,
Y = data$Y )
colnames(A)[-3] <- colnames(data[, all_possible[[i]] ])
a <- R.adjust(A , lm( Y ~ A[,1] + A[,2] ,A) )
b <- R.square(A , lm( Y ~ A[,1] + A[,2] ,A) )
print(colnames(A))
}else if( length(all_possible[[i]]) == 3 ){
A <- data.frame( data[, all_possible[[i]] ] [ ,1] ,
data[, all_possible[[i]] ] [ ,2] ,
data[, all_possible[[i]] ] [ ,3] ,
Y = data$Y )
colnames(A)[-4] <- colnames(data[, all_possible[[i]] ])
a <- R.adjust(A , lm( Y ~ A[,1] + A[,2] + A[,3] ,A) )
b <- R.square(A , lm( Y ~ A[,1] + A[,2] + A[,3] ,A) )
print(colnames(A))
}else if( length(all_possible[[i]]) == 4 ){
A <- data.frame( data[, all_possible[[i]] ] [ ,1] ,
data[, all_possible[[i]] ] [ ,2] ,
data[, all_possible[[i]] ] [ ,3] ,
data[, all_possible[[i]] ] [ ,4] ,
Y = data$Y )
colnames(A)[-5] <- colnames(data[, all_possible[[i]] ])
a <- R.adjust(A , lm( Y ~ A[,1] + A[,2] + A[,3] + A[,4] ,A) )
b <- R.square(A , lm( Y ~ A[,1] + A[,2] + A[,3] + A[,4] ,A) )
print(colnames(A))
}else if( length(all_possible[[i]]) == 5 ){
A <- data.frame( data[, all_possible[[i]] ] [ ,1] ,
data[, all_possible[[i]] ] [ ,2] ,
data[, all_possible[[i]] ] [ ,3] ,
data[, all_possible[[i]] ] [ ,4] ,
data[, all_possible[[i]] ] [ ,5] ,
Y = data$Y )
colnames(A)[-6] <- colnames(data[, all_possible[[i]] ])
a <- R.adjust(A , lm( Y ~ A[,1] + A[,2] + A[,3] + A[,4] + A[,5] ,A) )
b <- R.square(A , lm( Y ~ A[,1] + A[,2] + A[,3] + A[,4] + A[,5] ,A) )
print(colnames(A))
}
R_adj.square <- rbind(R_adj.square , a )
R_square <- rbind(R_square , b )
variable <- c(variable ,str_c(colnames(A) ,collapse = ","))
}
A <- data.frame(R_square = R_square ,
R_adj.square = R_adj.square ,
variable = variable )
#
str_c(colnames(A) ,collapse = ",")
mod1 <- lm( Y ~ X1 ,data )
summary(mod1)
mod2 <- lm( Y ~ X1 + X2 ,data )
summary( lm( Y ~ X1 + X2 + X3 + X5 ,data ) )
summary( lm( Y ~ X1 + X2 + X3 ,data ) )
#
A[A$R_adj.square == min(A$R_adj.square), ]
A[A$R_adj.square == max(A$R_adj.square), ]
|
b38cfc47d9c3d94ca51bf83e5e3c32b63931f719 | 5f5eb8c35b60d449eaab8f021d403574903f37a0 | /Prácticas-20210209/laborforce.R | 42c36012a723359b2d59af098bcae695cb6bdeff | [] | no_license | Rodrigo-ui-svg/econometria | 20a0c26b92edd6b79128202d30a5e80b82e643b6 | a7c96a1293d5e0f4fae1f665edbf23ef7d62541f | refs/heads/main | 2023-03-03T22:25:07.355841 | 2021-02-09T17:55:35 | 2021-02-09T17:55:35 | 337,488,186 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,566 | r | laborforce.R | setwd("D:/ECONOMETRIA CLASE/Logistico")
# install.packages("pROC")
#cargamos paquetes
library(tidyverse)
library(readstata13)
library(lmtest)
library(pROC)
#cargamos base de datos
labor <- read.dta13("laborforce.dta")
head(labor)
unique(labor$lfp)
unique(labor$agecat)
unique(labor$wc)
unique(labor$hc)
#Hacemos cambios a la base de datos
labor <- labor %>%
mutate(lfp = case_when(lfp == "NotInLF" ~ 0,
lfp == "inLF" ~ 1),
agecat = case_when(agecat == "30-39" ~ 1,
agecat == "40-49" ~ 2,
agecat == "50-60" ~ 3),
wc = case_when(wc == "NoCol" ~ 0,
wc == "College" ~ 1),
hc = case_when(hc == "NoCol" ~ 0,
hc == "College" ~ 1))
# Modelo logit
Mlogit <- glm(lfp ~ k5+k618+age+wc+hc+lwg+inc, data = labor,
family = binomial(link = "logit"))
summary(Mlogit)
# Modelo probit
Mprobit <- glm(lfp ~ k5+k618+age+wc+hc+lwg+inc, data = labor,
family = binomial(link = "probit"))
summary(Mprobit)
#Modelando variables dicotomicas
labor$k5_1 <- 0
labor$k5_1 <- replace(labor$k5_1, labor$k5==1, 1)
labor$k5_2 <- 0
labor$k5_2 <- replace(labor$k5_2, labor$k5==2, 1)
labor$k5_3 <- 0
labor$k5_3 <- replace(labor$k5_3, labor$k5==3, 1)
logit2 <- glm(lfp ~ k5_1 + k5_2 + k5_3, data = labor,
family = binomial(link = "logit"))
summary(logit2)
#En la tercer categoria de k5 hay 3 observaciones solamente
table(labor$lfp, labor$k5)
# Tambien se puede realizar de la siguiente forma
class(labor$k5)
labor$k5 <- as.factor(labor$k5)
logit2_ <- glm(lfp ~ k5, data = labor, family = binomial(link = "logit"))
summary(logit2_)
#Variables independientes categoricas
labor$k5 <- as.numeric(labor$k5)
labor$agecat <- as.factor(labor$agecat)
labor$wc <- as.factor(labor$wc)
labor$hc <- as.factor(labor$hc)
logit3 <- glm(lfp ~ k5+ k618+agecat+wc+hc+lwg+inc,
data = labor, family = binomial(link = "logit"))
summary(logit3)
# odds ratio
exp(logit3$coefficients)
#probando diferentes modelos
M1 <- glm(lfp ~ k5+ k618+agecat+wc+hc+lwg+inc,
data = labor, family = binomial(link = "logit"))
M2 <- glm(lfp ~ k618+agecat+wc+hc+lwg+inc,
data = labor, family = binomial(link = "logit"))
M3 <- glm(lfp ~ k5+agecat+wc+hc+lwg+inc,
data = labor, family = binomial(link = "logit"))
# El efecto de eliminar la variable k5 es significativa al 5%, y eliminar
# la variable k618 no lo es
lrtest(M1, M2)
lrtest(M1, M3)
## Matriz de confusion
## en stata, despues de correr la regresion
## estat class
predicted <-predict(M3, labor, type = "response")
range(predicted)
table(predicted>0.5, labor$lfp)
cat("Correctamente clasificados=",(182+333)/753)
cat("Sensibilidad Pr(+|inLF) = ", 333/(333+95))
cat("Especificidad Pr(-|NotInLF) = ", 182/(182+143))
# ROC (Receiver Operator Characteristic)
#En stata lroc
roc <- roc(labor$lfp, predicted)
plot(roc)
auc(roc) #Area bajo la curva
#Gráfica
roc <- plot.roc(labor$lfp, predicted,
main="Curva ROC",
percent=TRUE,
print.auc=TRUE)
plot(ci(roc, of="thresholds", thresholds="best"))
roc<-plot.roc(labor$lfp, predicted,
main="Curva ROC",
percent=TRUE,
of="thresholds",
thresholds="best",
print.thres="best")
|
a912074cda499e8ff7149bfc5a3b2d05ca98a430 | 29585dff702209dd446c0ab52ceea046c58e384e | /lshorth/R/plot.lshorth.R | 8a2c7e639b2abcf54a05bb11c2436c11940aa57d | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,930 | r | plot.lshorth.R | #$Id: plot.lshorth.R 92 2012-03-23 23:47:12Z gsawitzki $
plot.lshorth <- function(x, y, xlim = NULL, ylim = NULL,
probs=NULL,
main="Shorth",
xlab=NULL,
ylab=NULL,
frame.plot=TRUE,
legendpos="topright",
rug=TRUE,
rescale="neg", ...)
{
stopifnot(inherits(x,"lshorth"))
if (missing(ylim)) ylim <-NULL
lshorthx <- x #fix me
probs <- lshorthx$probs
shorthm <- t(lshorthx$lshorth)
if (is.null(xlab)) {
if (!is.null(x$xname)){
xlab <- paste(x$xname,", n=",length(lshorthx$x), sep="")
} else{xlab <- paste(deparse(substitute(x), 50),
", n=",length(lshorthx$x), collapse = "\n", sep="")}
}
if (is.null(rescale)) rsc <- "none" else{
rsc<-match.arg(tolower(rescale),c("none","std","inv","neg"))
}
if (rsc=="std"){
shorthl <-min (lshorth(x=lshorthx$x,0.5,plot=FALSE)$lshorth)
# the length of the shorth
shorthmy <- shorthm/shorthl
if (is.null(ylab)) {
ylab <- "std lshorth"
if(is.null(ylim)){
ylim <- c(max(shorthmy)*1.1,0)
if (is.na(ylim[2])) {
ylim[2]<-0
}}
}
}
if (rsc=="inv") {
shorthmy<- 1/shorthm
if (is.null(ylab)) {
ylab <- "1/lshorth"
}
if (is.null(ylim)) {ylim<-1.1*range(shorthmy,finite=TRUE);ylim[1]<-0}
}
if (rsc=="neg") {
shorthmy<- shorthm
if (is.null(ylab)) {
ylab <- "lshorth"
}
if (is.null(ylim)){ylim<-c(1.1*range(shorthmy,finite=TRUE)[2],0)}
}
if (rsc=="none") {
shorthmy<- shorthm
if (is.null(ylab)) {
ylab <- "lshorth"
}
if (is.null(ylim)) {ylim<-range(shorthmy,finite=TRUE);ylim[1]<-0}
}
if (is.null(ylab)) {
ylab <- "lshorth"
}
if (is.null(ylim)) ylim<-range(shorthmy,finite=TRUE)
if (is.null(xlim)) xlim<-range(lshorthx$x[is.finite(lshorthx$x)])
plot.new()
plot.window(xlim=xlim,ylim=ylim, ...)
axis(1)
axis(2)
title(main=main, xlab=xlab, ylab=ylab)
if (frame.plot)box(...)
if (rug) rug(lshorthx$x)
lwd <- ceiling(6*(0.5-abs(probs-0.5)))
for (px in 1:length(probs)){
# lwd <- ceiling(6*(0.5-abs(probs[px]-0.5)))
lines(lshorthx$x,shorthmy[px,],lwd=lwd[px],...)
}
if (!is.null(legendpos)){
temp <- legend(legendpos, legend = rep(" ",length(probs)),
text.width = strwidth("0.0000"),
lwd = lwd, xjust = 1, yjust = 1,
title = expression(Coverage *" "* alpha),
inset=0.05)
text(temp$rect$left + temp$rect$w, temp$text$y,
format(probs,digits=3), pos=2)
}
invisible(shorthm)
}
legend.lshorth <- function(legendpos,probs, ...){
lwd <- ceiling(6*(0.5-abs(probs-0.5)))
temp <- legend(legendpos, legend = rep(" ",length(probs)),
text.width = strwidth("0.0000"),
lwd = lwd, xjust = 1, yjust = 1,
title = expression(Coverage *" "* alpha),
inset=0.05, ...)
text(temp$rect$left + temp$rect$w, temp$text$y,
format(probs,digits=3), pos=2)
} |
4bdbcad475cc59f23327d4a840770339e1be535b | e9b3763e49902026caf817951aee9c20e52a1e93 | /man/fitted.blm.Rd | a2f941dea66edb1cc0c4eac8e674d94efb8b51cf | [] | no_license | MarniTausen/blm | 448d9dadaf9f66ac55c67007aaf26c5a914d3c4f | d17d46ae241177d04ed5fba422e0f85243cc3f5d | refs/heads/master | 2020-06-11T02:28:52.679437 | 2017-01-15T19:46:06 | 2017-01-15T19:46:06 | 76,023,351 | 1 | 0 | null | 2016-12-09T09:54:29 | 2016-12-09T09:54:29 | null | UTF-8 | R | false | true | 484 | rd | fitted.blm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitted.R
\name{fitted.blm}
\alias{fitted.blm}
\title{Extracted fitted response values}
\usage{
\method{fitted}{blm}(object, ...)
}
\arguments{
\item{object}{blm model object}
\item{...}{Additional pararmeters, forwarded to predict.blm()}
}
\value{
vector of the fitted response variables
}
\description{
Returns the fitted response values from the model, given the mean values they would correspond to
}
|
73a03d09af173207873d70a850495d02ffc264d6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/HiClimR/examples/HiClimR.Rd.R | 5ce1b5c8618b095adb4cc6f341efa0a96c64eaa4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,551 | r | HiClimR.Rd.R | library(HiClimR)
### Name: HiClimR
### Title: Hierarchical Climate Regionalization
### Aliases: HiClimR
### Keywords: HiClimR
### ** Examples
require(HiClimR)
#----------------------------------------------------------------------------------#
# Typical use of HiClimR for single-variate clustering: #
#----------------------------------------------------------------------------------#
## Load the test data included/loaded in the package (1 degree resolution)
x <- TestCase$x
lon <- TestCase$lon
lat <- TestCase$lat
## Generate/check longitude and latitude mesh vectors for gridded data
xGrid <- grid2D(lon = unique(TestCase$lon), lat = unique(TestCase$lat))
lon <- c(xGrid$lon)
lat <- c(xGrid$lat)
## Single-Variate Hierarchical Climate Regionalization
y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = FALSE,
continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
standardize = TRUE, nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
## For more examples: https://github.com/hsbadr/HiClimR#examples
## Not run:
##D
##D #----------------------------------------------------------------------------------#
##D # Additional Examples: #
##D #----------------------------------------------------------------------------------#
##D
##D ## Use Ward's method
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = FALSE,
##D continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## Use data splitting for big data
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = FALSE,
##D continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = TRUE, kH = NULL,
##D members = NULL, nSplit = 10, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## Use hybrid Ward-Regional method
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = FALSE,
##D continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = TRUE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D ## Check senitivity to kH for the hybrid method above
##D
##D
##D #----------------------------------------------------------------------------------#
##D # Typical use of HiClimR for multivariate clustering: #
##D #----------------------------------------------------------------------------------#
##D
##D ## Load the test data included/loaded in the package (1 degree resolution)
##D x1 <- TestCase$x
##D lon <- TestCase$lon
##D lat <- TestCase$lat
##D
##D ## Generate/check longitude and latitude mesh vectors for gridded data
##D xGrid <- grid2D(lon = unique(TestCase$lon), lat = unique(TestCase$lat))
##D lon <- c(xGrid$lon)
##D lat <- c(xGrid$lat)
##D
##D ## Test if we can replicate single-variate region map with repeated variable
##D y <- HiClimR(x=list(x1, x1), lon = lon, lat = lat, lonStep = 1, latStep = 1,
##D geogMask = FALSE, continent = "Africa", meanThresh = list(10, 10),
##D varThresh = list(0, 0), detrend = list(TRUE, TRUE), standardize = list(TRUE, TRUE),
##D nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## Generate a random matrix with the same number of rows
##D x2 <- matrix(rnorm(nrow(x1) * 100, mean=0, sd=1), nrow(x1), 100)
##D
##D ## Multivariate Hierarchical Climate Regionalization
##D y <- HiClimR(x=list(x1, x2), lon = lon, lat = lat, lonStep = 1, latStep = 1,
##D geogMask = FALSE, continent = "Africa", meanThresh = list(10, NULL),
##D varThresh = list(0, 0), detrend = list(TRUE, FALSE), standardize = list(TRUE, TRUE),
##D weightMVC = list(1, 1), nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D ## You can apply all clustering methods and options
##D
##D #----------------------------------------------------------------------------------#
##D # Miscellaneous examples to provide more information about functionality and usage #
##D # of the helper functions that can be used separately or for other applications. #
##D #----------------------------------------------------------------------------------#
##D
##D ## Load test case data
##D x <- TestCase$x
##D
##D ## Generate longitude and latitude mesh vectors
##D xGrid <- grid2D(lon = unique(TestCase$lon), lat = unique(TestCase$lat))
##D lon <- c(xGrid$lon)
##D lat <- c(xGrid$lat)
##D
##D ## Coarsening spatial resolution
##D xc <- coarseR(x = x, lon = lon, lat = lat, lonStep = 2, latStep = 2)
##D lon <- xc$lon
##D lat <- xc$lat
##D x <- xc$x
##D
##D ## Use fastCor function to compute the correlation matrix
##D t0 <- proc.time(); xcor <- fastCor(t(x)); proc.time() - t0
##D ## compare with cor function
##D t0 <- proc.time(); xcor0 <- cor(t(x)); proc.time() - t0
##D
##D ## Check the valid options for geographic masking
##D geogMask()
##D
##D ## geographic mask for Africa
##D gMask <- geogMask(continent = "Africa", lon = lon, lat = lat, plot = TRUE,
##D colPalette = NULL)
##D
##D ## Hierarchical Climate Regionalization Without geographic masking
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = FALSE,
##D continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## With geographic masking (you may specify the mask produced above to save time)
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = TRUE,
##D continent = "Africa", meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## With geographic masking and contiguity contraint
##D ## Change contigConst as appropriate
##D y <- HiClimR(x, lon = lon, lat = lat, lonStep = 1, latStep = 1, geogMask = TRUE,
##D continent = "Africa", contigConst = 1, meanThresh = 10, varThresh = 0, detrend = TRUE,
##D standardize = TRUE, nPC = NULL, method = "ward", hybrid = FALSE, kH = NULL,
##D members = NULL, nSplit = 1, upperTri = TRUE, verbose = TRUE,
##D validClimR = TRUE, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL, hang = -1, labels = FALSE)
##D
##D ## Find minimum significant correlation at 95##D
##D rMin <- minSigCor(n = nrow(x), alpha = 0.05, r = seq(0, 1, by = 1e-06))
##D
##D ## Validtion of Hierarchical Climate Regionalization
##D z <- validClimR(y, k = 12, minSize = 1, alpha = 0.01,
##D plot = TRUE, colPalette = NULL)
##D
##D ## Apply minimum cluster size (minSize = 25)
##D z <- validClimR(y, k = 12, minSize = 25, alpha = 0.01,
##D plot = TRUE, colPalette = NULL)
##D
##D ## The optimal number of clusters, including small clusters
##D k <- length(z$clustFlag)
##D
##D ## The selected number of clusters, after excluding small clusters (if minSize > 1)
##D ks <- sum(z$clustFlag)
##D
##D ## Dendrogram plot
##D plot(y, hang = -1, labels = FALSE)
##D
##D ## Tree cut
##D cutTree <- cutree(y, k = k)
##D table(cutTree)
##D
##D ## Visualization for gridded data
##D RegionsMap <- matrix(y$region, nrow = length(unique(y$coords[, 1])), byrow = TRUE)
##D colPalette <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
##D "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
##D image(unique(y$coords[, 1]), unique(y$coords[, 2]), RegionsMap, col = colPalette(ks))
##D
##D ## Visualization for gridded or ungridded data
##D plot(y$coords[, 1], y$coords[, 2], col = colPalette(max(Regions, na.rm = TRUE))[y$region],
##D pch = 15, cex = 1)
##D
##D ## Export region map and mean timeseries into NetCDF-4 file
##D y.nc <- HiClimR2nc(y=y, ncfile="HiClimR.nc", timeunit="years", dataunit="mm")
##D ## The NetCDF-4 file is still open to add other variables or close it
##D nc_close(y.nc)
##D
## End(Not run)
|
3382ded47c649bf58ca25f78511e58cd724fdc0c | c6eca3d4330fa1560ada90c5dbb83264e7f04595 | /man/get_pos_based_seq_weights.Rd | 73a35e76ea12fac2394f47f54aeb9223021ff271 | [] | no_license | richelbilderbeek/BALCONY | 7248a94ddfd7588a2822f33613f070efdc1a3667 | 7d26159aa93d1f56427b88e26fb5448b82246ce3 | refs/heads/master | 2022-12-31T18:55:02.309956 | 2020-10-26T09:57:29 | 2020-10-26T09:57:29 | 307,326,223 | 0 | 1 | null | 2020-10-26T09:48:50 | 2020-10-26T09:48:50 | null | UTF-8 | R | false | true | 1,529 | rd | get_pos_based_seq_weights.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conservationFuncs.R
\name{get_pos_based_seq_weights}
\alias{get_pos_based_seq_weights}
\title{Get position based weights of sequences in alignment}
\usage{
get_pos_based_seq_weights(alignment, gap = TRUE, normalized = TRUE)
}
\arguments{
\item{alignment}{alignment loaded with \code{\link[seqinr]{read.alignment}}}
\item{gap}{(optional) a logical parameter, if TRUE(default) the gaps in MSA are included}
\item{normalized}{optional) logical parameter, if TRUE (default) weights for all sequences are divided by number of columns in alignment (when gap = TRUE weights sum up to 1)}
}
\value{
a vector of position based weights for each sequence in given alignment
}
\description{
This function calculates position based weights of sequences based on Heinkoff & Heinkoff (1994) for given MSA. The score is calculated as sum of scores for each sequence position c. Score for position c is equal 1/r if there is r different residues at column c in MSA but 1/rs if r symbol is repeated in s sequences.
}
\details{
The weights might be calculated only for amino acids symbols or for all symbols (including gaps). Also weights can be normalized by number of columns in MSA, then the sum of weights for all sequences is 1.
}
\examples{
data("small_alignment")
pos_based_weights <- get_pos_based_seq_weights(small_alignment)
}
\references{
Henikoff, S. & Henikoff, J. G. Position-based sequence weights. Journal of Molecular Biology 243, 574–578 (1994).
}
|
34292e49779923c41f00fb73dd763f14f8e105a8 | 1ce37a2fabf5ffd64830f97c123621a5aadc30ce | /xgboost/prueba.R | b5957b845c32be25e270f159361f7e2a1b85b349 | [] | no_license | raulcarlomagno/dmuba-dm-economiafinanzas | b0f36a82d257cfdcacc291ed0035865faa7f0abe | 185f6f318f3c2e371f7f3df48e855d78a56af8ef | refs/heads/master | 2020-03-27T22:34:18.072842 | 2018-12-06T03:43:35 | 2018-12-06T03:43:35 | 147,242,696 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 2,341 | r | prueba.R | #require(xgboost)
#
#
#data(agaricus.train, package='xgboost')
#data(agaricus.test, package='xgboost')
#train <- agaricus.train
#test <- agaricus.test
#
#
#bstSparse <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
#
#bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nthread = 2, nrounds = 2, objective = "binary:logistic", verbose = 1)
#
#
##Random Forest™ - 1000 trees
#bst <- xgboost(data = train$data, label = train$label, max.depth = 4, num_parallel_tree = 1000, subsample = 0.5, colsample_bytree =0.5, nrounds = 1, objective = "binary:logistic", verbose = 1)
#
#
#bst <- xgboost(data = train$data, label = train$label, max.depth = 4, nrounds = 3, objective = "binary:logistic")
# (The commented-out block above is the standard CPU agaricus walkthrough;
# only the GPU example below is active.)
# An example of using GPU-accelerated tree building algorithms
#
# NOTE: it can only run if you have a CUDA-enable GPU and the package was
# specially compiled with GPU support.
#
# For the current functionality, see
# https://xgboost.readthedocs.io/en/latest/gpu/index.html
#
library('xgboost')
# Simulate N x p random matrix with some binomial response dependent on pp columns
set.seed(111)
# NOTE: X is 1e6 x 500 doubles, roughly 3.7 GiB of RAM; shrink N/p on
# small machines.
N <- 1000000
p <- 500
pp <- 250
X <- matrix(runif(N * p), ncol = p)
betas <- 2 * runif(pp) - 1
sel <- sort(sample(p, pp))
m <- X[, sel] %*% betas - 1 + rnorm(N)
y <- rbinom(N, 1, plogis(m))
# 75/25 train/test split by row index.
tr <- sample.int(N, N * 0.75)
dtrain <- xgb.DMatrix(X[tr,], label = y[tr])
dtest <- xgb.DMatrix(X[-tr,], label = y[-tr])
wl <- list(train = dtrain, test = dtest)
# An example of running 'gpu_hist' algorithm
# which is
# - similar to the 'hist'
# - the fastest option for moderately large datasets
# - current limitations: max_depth < 16, does not implement guided loss
# You can use tree_method = 'gpu_exact' for another GPU accelerated algorithm,
# which is slower, more memory-hungry, but does not use binning.
param <- list(objective = 'reg:logistic', eval_metric = 'auc', subsample = 0.5, nthread = 4,
              max_bin = 64, tree_method = 'gpu_hist')
pt <- proc.time()
bst_gpu <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
proc.time() - pt
# Compare to the 'hist' algorithm:
param$tree_method <- 'hist'
pt <- proc.time()
bst_hist <- xgb.train(param, dtrain, watchlist = wl, nrounds = 50)
proc.time() - pt
4aad6dfd1645f40f2432b349968020f191d3a982 | 7ca7853ce06b347cc8618986ad9b38ac61969426 | /cachematrix.R | dbf045e402e30d5911feef48b63048365cb65c02 | [] | no_license | RichAlej/ProgrammingAssignment2 | e6f48108bc9bed2c77295db7a80dc8a5174ab293 | b6fff6a4bb3309b3dcacbeb987b10f5ac4809449 | refs/heads/master | 2020-12-27T10:49:58.257972 | 2015-10-23T22:40:09 | 2015-10-23T22:40:09 | 44,771,539 | 0 | 0 | null | 2015-10-22T20:31:14 | 2015-10-22T20:31:13 | null | UTF-8 | R | false | false | 1,790 | r | cachematrix.R | ## These two functions take a matrix and associate with functions that allow
##you to calculate and store the value of the matrix's inverse; this allows
##you to save computation time for this complex task in the case that you need
##to repeatedly refer to the Inverse of a matrix
## makeCacheMatrix() takes a matrix as its argument and associates it with
##a list of functions that can be used to store and recall the value of the matrix
##and the value of its inverse -- a "cached matrix"
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of four closures sharing this function's environment:
  #   set/get store and fetch the matrix,
  #   setInverse/getInverse store and fetch the cached inverse.
  inv <- NULL  # cached inverse; NULL until a caller stores one
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(invrs) {
    inv <<- invrs
  }
  getInverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve() takes a "cached matrix" (matrix called through makeCacheMatrix)
##as its argument and returns the inverse;
##it calculates the inverse if it has not been calculated;
##if the inverse has already been calculated by cacheSolve(), then it returns the value
##that has been set (or cached) in the matrix
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by makeCacheMatrix().
  # The inverse is computed with solve() only on the first call; later
  # calls return the value cached inside `x` (and announce the cache hit
  # with a message). Extra arguments in `...` are forwarded to solve().
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and store it for future calls.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
e9b008ae476c8a971f7f080998f4bb38190a7b4c | 13b21646937de87022c30c8512c0151d5bb3f03f | /openintro/man/immigration.Rd | 2306c0a5b975fe347f52ab989e1cd6002de92b0d | [] | no_license | mine-cetinkaya-rundel/openintro-r-package | 374a104b4241eec05fb4688a5231ba8979e59ad0 | 099da5b776784462f4ff384434cd7a517dfe37d6 | refs/heads/master | 2020-03-28T09:40:40.626621 | 2018-09-01T03:20:39 | 2018-09-01T03:20:39 | 148,051,227 | 2 | 0 | null | 2018-09-09T17:54:34 | 2018-09-09T17:54:34 | null | UTF-8 | R | false | false | 1,031 | rd | immigration.Rd | \name{immigration}
\alias{immigration}
\docType{data}
\title{Poll on illegal workers in the US}
\description{910 randomly sampled registered voters in Tampa, FL were asked if they thought workers who have illegally entered the US should be (i) allowed to keep their jobs and apply for US citizenship, (ii) allowed to keep their jobs as temporary guest workers but not allowed to apply for US citizenship, or (iii) lose their jobs and have to leave the country. Respondents were also asked their political ideology.}
\usage{data("immigration")}
\format{
A data frame with 910 observations on the following 2 variables.
\describe{
\item{\code{response}}{a factor with levels \code{Apply for citizenship} \code{Guest worker} \code{Leave the country} \code{Not sure}}
\item{\code{political}}{a factor with levels \code{conservative} \code{liberal} \code{moderate}}
}
}
\source{SurveyUSA, News Poll #18927, data collected Jan 27-29, 2012.}
\examples{
data(immigration)
## maybe str(immigration) ; plot(immigration) ...
}
\keyword{datasets}
|
58d0f710ab1f7c9d17fa79b3653a81123375c607 | b58787ca244bec6e3788d2c875958b04a27fcd22 | /CSE845_Jan19_2012_PlayWithR/Jan19th_R_exercise.R | af735a91f8a935a9efd5a0deaeb91fb092d98b50 | [] | no_license | anhnguyendepocen/CSE845_R_tutorials | 917d4a479ea3d29afae7507c17710fdd2b0359a4 | 1c5a872de767f23a1ce9f0ff7802f522df98e1b0 | refs/heads/master | 2020-03-14T21:21:37.232080 | 2013-05-27T01:14:01 | 2013-05-27T01:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,513 | r | Jan19th_R_exercise.R | # A little script to show some of the useful arguments for read.table()
# I will explain this more later, but this will read in a dataset (from the course website in this case)
# NOTE(review): setwd() hard-codes the author's local path; the script is
# only runnable after editing this line (or running from that directory).
setwd('/Users/ian/BEACON_COURSE_2011/BEACON R tutorial/')
# Don't worry about the details for the moment
avida_data <- read.table('LZ_avida_average.dat',
    header = F, # Is there a header (column names); F is shorthand for FALSE (TRUE/FALSE is safer since T/F can be reassigned)
    na.strings = "NA",
    sep = "", # field separator ("" means any whitespace)
    skip = 19, # skip the first 19 lines
    nrows= 1020, # Max number of rows.
    col.names=c("update","merit","gestationT","fitness","RepRate", "size","CopiedSize",
    "ExecutedSize","abundance","PropBirth","PropBreed","GenotypeDepth", "generation","neutral",
    "lineage","TrueRepRate"),# names for each column (16 names, matching colClasses below)
    colClasses=c(rep("numeric",16))
    )
# What is the class of the new object?
# How many rows? columns?
# what are the data types within each column?
# Let's say we are interested in only looking at subsets of the data for generations above 50,
# how would you make a new data set to do so?
# How about if we only wanted the 1st, 3rd and 5th column of the data?
bb260eae8b8246966234ee0f1703abba811a7f78 | 1d34d47cbbeae6fa3d35e2081ec264ab11e72a4c | /endogeno.r | aa8cd337c75ba7fd043e28a3c4af09abe37a6c35 | [] | no_license | GustavoPMSilva/AD-UFRJ-PLE | 0cef0e56dc17f1eaf5b40fd7eab6d08bfd042b1b | bad626fbe7e66a2afffcb71dc2e8e48ceec20cff | refs/heads/master | 2023-01-12T23:15:38.660412 | 2020-11-16T13:03:38 | 2020-11-16T13:03:38 | 313,303,634 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,076 | r | endogeno.r | K <- 2
N <- 5        # number of users in the network (matches the 5-element Usuarios vector below; N itself is not referenced elsewhere in this script)
L0 <- 0       # not used in the visible code
L1 <- 0       # not used in the visible code
F01mi0 <- 1   # exponential rate of GOOD posts for users in states 1 and 2 (see gerarEventosEndogenos)
F02mi0 <- 2   # exponential rate of GOOD posts for users in state 0
F11mi1 <- 1   # exponential rate of FAKE posts for users in states 1 and 2
F12mi1 <- 2   # exponential rate of FAKE posts for users in state 3
GOOD <- 0     # event-quality tag: legitimate post
FAKE <- 1     # event-quality tag: fake post
gerarEndogenosIniciais <- function(){
  # Seed the simulation: generate the initial endogenous posting events
  # for every user in the global `Usuarios` vector.
  # seq_along() replaces 1:length(Usuarios), which would incorrectly
  # iterate over c(1, 0) if Usuarios were ever empty.
  for (i in seq_along(Usuarios)) {
    gerarEventosEndogenos(i)
  }
}
atualizarTempos <- function(tempo){
  # Add `tempo` to the accumulated sojourn time of the network's current
  # state. The state key concatenates the number of users in each of the
  # four individual states 0..3 (e.g. "5000" = all five users in state 0).
  # A vectorized count replaces the original scalar for/if ladder.
  estado <- vapply(0:3, function(s) sum(Usuarios == s), integer(1))
  estadoStr <- paste(estado, collapse = '')
  # Accumulate into the global Tempos vector (named by state key).
  Tempos[[estadoStr]] <<- Tempos[[estadoStr]] + tempo
}
modificarTimelinesFIFO <- function(e){
  # Deterministic (FIFO) timeline update for the event's recipient e[4].
  # Event layout: c(sender, GOOD/FAKE, time, recipient).
  # The recipient's state (0..3) moves along a fixed transition table:
  #   GOOD post: 0->0, 1->2, 2->0, 3->2
  #   FAKE post: 0->1, 1->3, 2->1, 3->3
  estado_atual <- Usuarios[e[4]]
  if (e[2] == GOOD) {
    proximo <- c(0, 2, 0, 2)
  } else {
    proximo <- c(1, 3, 1, 3)
  }
  # Lookup table is indexed by state + 1; any state outside 0..3 is left
  # untouched, exactly as the original if/else ladder did.
  if (estado_atual %in% 0:3) {
    Usuarios[e[4]] <<- proximo[estado_atual + 1]
  }
}
# Randomized timeline update: when a post of quality e[2] arrives at user
# e[4], the recipient's state (0..3) moves to a successor chosen uniformly
# at random among the allowed next states (contrast with
# modificarTimelinesFIFO, which always picks the same successor).
# GOOD posts pull states toward 0; FAKE posts push them toward 3, and the
# extreme states (0 for GOOD, 3 for FAKE) are absorbing for that quality.
# NOTE: sample(c(a,b), 1, rep=FALSE) is safe here because every choice
# vector has length 2 (sample(x, 1) with a single-number x would instead
# sample from 1:x).
modificarTimelinesRND <- function(e){
  if(e[2] == GOOD){
    if(Usuarios[e[4]]==0){Usuarios[e[4]] <<- 0}
    else if(Usuarios[e[4]]==1){Usuarios[e[4]] <<- sample(c(0,1),1,rep=FALSE)}
    else if(Usuarios[e[4]]==2){Usuarios[e[4]] <<- sample(c(0,2),1,rep=FALSE)}
    else if(Usuarios[e[4]]==3){Usuarios[e[4]] <<- sample(c(1,2),1,rep=FALSE)}
  }
  else {
    if(Usuarios[e[4]]==0){Usuarios[e[4]] <<- sample(c(1,2),1,rep=FALSE)}
    else if(Usuarios[e[4]]==1){Usuarios[e[4]] <<- sample(c(1,3),1,rep=FALSE)}
    else if(Usuarios[e[4]]==2){Usuarios[e[4]] <<- sample(c(2,3),1,rep=FALSE)}
    else if(Usuarios[e[4]]==3){Usuarios[e[4]] <<- 3}
  }
}
arrumarFilasEventos <- function(){
  # Sort the global event queue chronologically; element 3 of each event
  # vector is its occurrence time.
  # vapply replaces sapply: it guarantees a numeric vector (sapply's
  # return type silently changes for empty or irregular queues).
  FilaDeEventos <<- FilaDeEventos[order(vapply(FilaDeEventos, function(x) x[[3]], numeric(1)))]
}
# Schedule the endogenous (self-generated) posting events of user `quem`,
# appending them to the global FilaDeEventos queue.
# Event layout: c(sender, GOOD/FAKE, exponential waiting time, recipient).
# Posting rates depend on the sender's current state in Usuarios:
#   state 0 posts only GOOD (rate F02mi0);
#   states 1 and 2 post both GOOD (rate F01mi0) and FAKE (rate F11mi1);
#   state 3 posts only FAKE (rate F12mi1).
# Recipients are drawn by gerarSample(), which never returns `quem` itself.
gerarEventosEndogenos <- function(quem){
  ###criarEventosNovos
  if(Usuarios[quem]==0){
    evento <- c(quem, GOOD, rexp(1,F02mi0), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
  }
  if(Usuarios[quem]==1){
    evento <- c(quem, GOOD, rexp(1,F01mi0), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
    evento <- c(quem, FAKE, rexp(1,F11mi1), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
  }
  if(Usuarios[quem]==2){
    evento <- c(quem, GOOD, rexp(1,F01mi0), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
    evento <- c(quem, FAKE, rexp(1,F11mi1), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
  }
  if(Usuarios[quem]==3){
    evento <- c(quem, FAKE, rexp(1,F12mi1), gerarSample(quem))
    FilaDeEventos[[length(FilaDeEventos)+1]] <<- evento
  }
}
gerarSample <- function(nquero, n = 5) {
  # Draw a uniformly random user id in 1..n that differs from `nquero`
  # (an event is never delivered back to its sender).
  # `n` generalizes the previously hard-coded population size of 5; the
  # default keeps every existing call unchanged.
  # Rejection sampling: redraw until the excluded id is avoided.
  # NOTE: requires n >= 2, otherwise no valid id exists and this loops.
  repeat {
    num <- sample(1:n, 1, rep = FALSE)
    if (num != nquero) {
      break
    }
  }
  num
}
# ---- Simulation driver -------------------------------------------------
# Run FIM independent replications of the 5-user network and record, per
# replication, which absorbing state is reached first:
#   "5000" = all users in state 0   vs   "0005" = all users in state 3.
FIM <- 1000
FIFO <- FALSE
contadorEstadoInicial <- numeric(FIM)
contadorEstadoFinal <- numeric(FIM)
# Row 1: absorption time of replication i; row 2: outcome label (1 or 2).
guardador <- matrix(0L, ncol = FIM, nrow = 2)
for(i in 1:FIM){
  #evento = (de quem veio, good/fake, tempo, pra onde vai)
  # (event = (sender, good/fake, time, recipient))
  # NOTE(review): T is also R's shorthand for TRUE; using it as a variable
  # masks that shorthand for the rest of the session.
  T <- 0
  Usuarios <- c(1,1,1,1,1)
  # One sojourn-time accumulator per reachable population state (counts of
  # users in individual states 0..3, concatenated into a 4-character key).
  Tempos <- numeric(56)
  names(Tempos) <- c('5000','4100','4010','4001','1400','0410','0401',
                     '1040','0140','0041','1004','0104','0014','3200',
                     '3020','3002','2300','0320','0302','2030','0230',
                     '0032','2003','0203','0023','3110','3101','3011',
                     '1310','1301','0311','1130','1031','0131','1103',
                     '1013','0113','2210','2201','2120','2021','2102',
                     '2012','1220','0221','1202','0212','1022','0122',
                     '2111','1211','1121','1112','0500','0050','0005')
  while(Tempos[['5000']] == 0 & Tempos[['0005']] == 0){
    FilaDeEventos <- list()
    gerarEndogenosIniciais()
    arrumarFilasEventos()
    # NOTE(review): `evento` at this point is still the event of the
    # PREVIOUS loop pass (it is only reassigned on the next line), and it
    # does not exist at all on the very first pass of the first
    # replication, which would error here — confirm the intended order of
    # these two statements.
    atualizarTempos(evento[3])
    evento <- FilaDeEventos[[1]]
    T <- T + evento[3]
    if(FIFO){modificarTimelinesFIFO(evento)}else{modificarTimelinesRND(evento)}
  }
  # Record which absorbing state ended this replication.
  if(Tempos[['5000']] > Tempos[['0005']]){
    contadorEstadoInicial[i] <- 1
    guardador[1,i] <- T
    guardador[2,i] <- 1
  }
  else if(Tempos[['0005']] > Tempos[['5000']]){
    contadorEstadoFinal[i] <- 1
    guardador[1,i] <- T
    guardador[2,i] <- 2
  }
}
# 95% normal-approximation confidence intervals for the absorption
# probabilities (lower bound, point estimate, upper bound).
print(mean(contadorEstadoInicial) - (1.96 * sd(contadorEstadoInicial) / sqrt(FIM)))
print(mean(contadorEstadoInicial))
print(mean(contadorEstadoInicial) + (1.96 * sd(contadorEstadoInicial) / sqrt(FIM)))
print(mean(contadorEstadoFinal) - (1.96 * sd(contadorEstadoFinal) / sqrt(FIM)))
print(mean(contadorEstadoFinal))
print(mean(contadorEstadoFinal) + (1.96 * sd(contadorEstadoFinal) / sqrt(FIM)))
# NOTE(review): sorting each row independently breaks the pairing between
# a replication's time (row 1) and its outcome label (row 2); sorting the
# columns by row 1 instead, e.g. guardador[, order(guardador[1, ])], would
# keep the pairs together — confirm whether the decoupling is intentional.
guardador <- t(apply(guardador,1,sort))
somaInicial <- numeric(FIM)
somaFinal <- numeric(FIM)
# Cumulative outcome counts up to each sorted replication. This is
# O(FIM^2); cumsum() over the sorted labels would do it in O(FIM).
for(i in 1:FIM){
  for(j in 1:i){
    if(guardador[2,j]==1){somaInicial[i] <- somaInicial[i] + 1}
    else {somaFinal[i] <- somaFinal[i] + 1}
  }
}
# Empirical CDF-style curves: probability of having been absorbed in each
# state as a function of time.
plot(guardador[1,], somaFinal/FIM, type="l", xlim=c(1,30), ylim=c(0,1), main="Estado Inicial = 0500", xlab="Tempo", ylab="Probabilidade", col="blue")
lines(guardador[1,],somaInicial/FIM, col="red")
abline(h=seq(0,1,by=0.1), lwd=1, lty="dotted", col="lightgray")
abline(v=c(1:30), lwd=1, lty="dotted", col="lightgray")
legend("topleft",
       c("Estado 0005 (ALL FAKE)","Estado 5000 (ALL NON FAKE)"),
       fill=c("blue","red")
)
print('fim')
cdc5c10699b13df897626815e93c38d4261a5e40 | 2e731f06724220b65c2357d6ce825cf8648fdd30 | /NetMix/inst/testfiles/getZ/AFL_getZ/getZ_valgrind_files/1616007464-test.R | f04faa331faae59b19740ffea5f659ad73c9767d | [] | no_license | akhikolla/updatedatatype-list1 | 6bdca217d940327d3ad42144b964d0aa7b7f5d25 | 3c69a987b90f1adb52899c37b23e43ae82f9856a | refs/heads/master | 2023-03-19T11:41:13.361220 | 2021-03-20T15:40:18 | 2021-03-20T15:40:18 | 349,763,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | 1616007464-test.R | testlist <- list(mat = NULL, pi_mat = structure(c(7.87481755977877e-311, 1.38700635605729e-284, 2.84878851639172e-306, 1.29799017759031e-308, 1.0614123518369e-313, 5.77957377291124e-310, 1.06141235406019e-313, 5.77426876891344e-310, 2.12318152942584e-193, 7.2911220195564e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(3L, 7L)))
# Auto-generated AFL/valgrind regression case: replay the recorded
# arguments (testlist, built above) against the internal NetMix:::getZ
# and print the structure of whatever it returns.
result <- do.call(NetMix:::getZ,testlist)
str(result)
9f1165d487a1bf6c62b02277ef4e91ce72925744 | 56390f2a77468b2e662f38a726bda8ea7cda0cf9 | /R/functions.R | 12c222f0d05e3a2314f23880ac4a7e94e0685046 | [] | no_license | bsekiewicz/vizhackathon | 4aaed7ae6dad559a8ce2f52ae6053f4ce515c8ce | 390d2e84e498e4270b5d62679112d661ce5bd20d | refs/heads/master | 2020-07-31T23:32:20.579999 | 2019-10-07T20:37:05 | 2019-10-07T20:37:05 | 210,787,204 | 1 | 0 | null | 2019-09-27T16:49:59 | 2019-09-25T07:52:40 | HTML | UTF-8 | R | false | false | 1,769 | r | functions.R | # score ver 3.0
# Compute a 0-10 attractiveness score for each 500m WGS84 grid cell from
# Google Places data.
#   data:          places table; needs place_id, rating, user_ratings_total,
#                  lat, lng, type and wgs84_500_id columns.
#   wgs:           grid-cell table with id, lat, lng (and district_id).
#   params:        list with character vectors 'types_in' (place types
#                  whose per-cell mean rating is added to the score) and
#                  'types_out' (types that subtract a flat 1).
#   districts:     optional vector of district_id values to keep.
#   popular_times: per-place occupancy samples (place_id, occupancy_index).
# Requires dplyr and magrittr (%<>%) to be attached by the caller.
# NOTE(review): the default `popular_times = popular_times` is
# self-referential and errors when the argument is omitted, so it is
# effectively mandatory — confirm the intended default.
score_3 <- function(data, wgs, params=list(), districts=NULL, popular_times = popular_times) {
  # Mean occupancy per place; places never observed occupied are dropped.
  popular_times %<>% group_by(place_id) %>% summarise(oi = mean(occupancy_index))
  data %<>% select(place_id, rating, user_ratings_total, lat, lng, type, wgs=wgs84_500_id)
  # pop times
  data %<>% left_join(popular_times)
  data %<>% filter(!is.na(oi)) %>% filter(oi > 0)
  # districts
  if (!is.null(districts)) {
    wgs %<>% filter(district_id %in% districts)
  }
  types_in <- params[['types_in']]
  types_out <- params[['types_out']]
  # score
  # Every cell containing any qualifying place starts from a base of 5.
  output <- data %>%
    group_by(wgs) %>%
    summarise(score = 5)
  # Add the per-cell mean rating of each "in" type.
  for (t in types_in) {
    tmp <- data %>% filter(type==t) %>%
      group_by(wgs) %>%
      summarise(mean_rating = mean(rating, na.rm =TRUE)) %>%
      arrange(desc(mean_rating))
    # Cells whose places of this type all lack ratings get the across-cell
    # average rating...
    tmp$mean_rating[is.na(tmp$mean_rating)] <- mean(tmp$mean_rating, na.rm=TRUE)
    tmp <- left_join(output, tmp)
    # ...while cells with no place of this type contribute nothing.
    tmp$mean_rating[is.na(tmp$mean_rating)] <- 0
    tmp$score <- tmp$score + tmp$mean_rating
    output <- tmp %>% select(wgs, score)
  }
  # Subtract a flat 1 for each "out" type present in a cell.
  for (t in types_out) {
    tmp <- data %>% filter(type==t) %>%
      group_by(wgs)
    if (nrow(tmp) > 0){
      tmp %<>% summarise(mean_rating = -1) %>%
        arrange(desc(mean_rating))
      tmp <- left_join(output, tmp)
      tmp$mean_rating[is.na(tmp$mean_rating)] <- 0
      tmp$score <- tmp$score + tmp$mean_rating
      output <- tmp %>% select(wgs, score)
    }
  }
  # Clamp at zero, then rescale so the best cell scores exactly 10.
  # NOTE(review): if every score is 0 this divides by zero (NaN scores).
  output$score[output$score<0] <- 0
  output$score <- output$score/max(output$score)*10
  output %<>% arrange(desc(score))
  # hist(output$score)
  # Attach the grid-cell coordinates and return lat/lng/score rows.
  output %<>%
    select(id=wgs, score) %>%
    left_join(wgs) %>%
    select(lat, lng, score) %>%
    mutate(in_out = TRUE)
  return(output)
}
|
d1bb60df3b9021c69ed82cef7c4d2b6396c2a546 | 72b117982117ae8df5f769c5871bff70da0cfbab | /02 cleaning/ingredient_extraction.R | d11fdcc5907f8ab8e4508b8b1a0374bd0c9d6257 | [] | no_license | palautatan/perfectBananaBread | 5519ae7e70f9750198c3a968ed69792f1d7154f5 | 4c66a9e602a7af0f2ac257a5d6c4ea5f6f433c85 | refs/heads/master | 2021-01-20T23:11:42.580892 | 2017-12-05T19:13:20 | 2017-12-05T19:13:20 | 101,843,735 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,172 | r | ingredient_extraction.R | bbdata = read.csv("/Users/shermanpeng/Documents/R/banana_bread/all_data.csv", as.is=TRUE)
# attach() puts bbdata's columns on the search path so they can be used by
# bare name below (presumably including the longRating column used later —
# confirm). attach() is widely discouraged because it can silently mask
# other objects; with(bbdata, ...) or bbdata$col is safer.
attach(bbdata)
head(bbdata, 2)
# SPLIT INGREDIENT LIST INTO SINGLE INGREDIENTS
split_into_ingredients <- function(py_string) {
  # Recipes are stored as a single string with "$" between ingredients;
  # return the individual ingredient strings as a character vector.
  # fixed = TRUE splits on the literal dollar sign (equivalent to the
  # escaped regex "\\$").
  strsplit(py_string, "$", fixed = TRUE)[[1]]
}
library(stringr)
# USE REGEX TO GET THE NUMBER
# Extract the leading quantity from an ingredient string with
# stringr::str_extract: a digit, optionally "/digit" (fraction), optionally
# followed by " digit/digit" (mixed number) — e.g. "1", "1/2", "1 1/2".
# str_extract is vectorized, so a whole recipe's ingredient vector can be
# passed at once; entries with no quantity yield NA.
# NOTE(review): each [0-9] matches a single digit, so multi-digit
# quantities are truncated ("12 cups" extracts "1"); the trailing "?+"
# appears to be an ICU possessive quantifier — confirm both are intended.
check_recipe_number = function(listed_ingredient) {
  number = str_extract(listed_ingredient, "[0-9](/[0-9]?)?+( [0-9]/[0-9])?")
  return(number)
}
# CHANGE NUMBER TO DECIMAL
num_to_dec <- function(x) {
  # Convert a quantity string produced by check_recipe_number() into a
  # decimal number: "3" -> 3, "1/2" -> 0.5, "1 1/2" -> 1.5.
  # Malformed fractions (e.g. "1/") yield NA with a coercion warning,
  # matching the old behavior.
  if (is.na(x)) {
    # No quantity was found by the regex; propagate NA instead of
    # crashing inside grepl() as the original code did.
    return(NA_integer_)
  }
  if (grepl(" ", x, fixed = TRUE)) {
    # Mixed number "W N/D": whole part plus fractional part.
    # (The old code guessed "mixed" from nchar(x) < 5, which misclassifies
    # multi-character fractions such as "11/16"; testing for the space
    # separator is robust.)
    parts <- strsplit(x, " ", fixed = TRUE)[[1]]
    num_denom <- strsplit(parts[2], "/", fixed = TRUE)[[1]]
    as.integer(parts[1]) + as.integer(num_denom[1]) / as.integer(num_denom[2])
  } else if (grepl("/", x, fixed = TRUE)) {
    # Simple fraction "N/D".
    num_denom <- strsplit(x, "/", fixed = TRUE)[[1]]
    as.integer(num_denom[1]) / as.integer(num_denom[2])
  } else {
    # Plain integer.
    as.integer(x)
  }
}
check_measurement <- function(single_entry) {
  # Return the unit word of an ingredient entry such as "2 cups flour"
  # ("cups"), or 0 when the entry carries no recognized unit.
  # Recognized units: cup(s), *spoon(s) (tea-/tablespoons), ounce(s).
  # NOTE: the return type is character for a unit but numeric 0 otherwise;
  # downstream code pastes the result into a gsub pattern, so this mixed
  # type is preserved.
  # Fixes: the original character class [A-z] also matches the ASCII
  # punctuation between "Z" and "a" ([ \ ] ^ _ `); [A-Za-z] matches
  # letters only. regmatches()/regexpr() replaces stringr::str_extract(),
  # removing this helper's stringr dependency.
  if (grepl("[0-9]+ ([A-Za-z])+", single_entry)) {
    if (grepl("cup(s)?|[A-Za-z]+spoon(s)?|ounce(s)?", single_entry)) {
      # The first run of letters (right after the quantity) is the unit;
      # the outer grepl() guarantees such a run exists.
      return(regmatches(single_entry, regexpr("[A-Za-z]+", single_entry)))
    } else {
      return(0)
    }
  }
  return(0)
}
# ---- Parse every recipe ------------------------------------------------
# Split each raw recipe string into its ingredient lines.
all_recipes = lapply(bbdata$recipe, split_into_ingredients)
# Unit word (or 0) for every ingredient of every recipe.
# NOTE(review): 1:length(x) misbehaves for empty x; seq_along(x) is the
# idiomatic form (applies to the other loops below as well).
the_measurements = lapply(1:length(all_recipes), function(x) unlist(lapply(all_recipes[x][[1]],check_measurement)))
# for (i in 1:length(all_recipes)) {
#   cat(unlist(lapply(all_recipes[i][[1]], check_measurement)))
# }
# FOR ONE
# unlist(lapply(all_recipes[1][[1]], check_measurement))
# Quantity strings and their decimal values, recipe by recipe.
numbers = lapply(all_recipes, check_recipe_number)
converts = lapply(numbers, function(x) unlist(lapply(x, num_to_dec)))
# the_measurements = lapply(all_recipes, function(x) unlist(lapply(x,check_measurement)))
# Strip the quantity and the unit word from each ingredient line, leaving
# just the ingredient name.
ingredients_all = list()
for (i in 1:length(all_recipes)) {
  # NOTE(review): growing a vector with c() inside a loop is O(n^2);
  # preallocating or lapply/vapply would be faster.
  ingredients_per_recipe = c()
  for (j in 1:length(all_recipes[[i]])) {
    step1 = gsub(paste0(numbers[[i]][j]," "), "", all_recipes[[i]][j])
    step2 = gsub(paste0(the_measurements[[i]][j]," "), "", step1)
    # cat(paste0(step2,"\n"))
    ingredients_per_recipe = c(ingredients_per_recipe, step2)
  }
  ingredients_all[[i]] = ingredients_per_recipe
}
# Assemble one long-format row per ingredient: study number, amount, unit,
# ingredient name and the recipe's rating.
# NOTE(review): longRating is not defined in this file; presumably it is a
# column of bbdata exposed by attach() at the top — confirm.
all_ingr = c()
for (i in 1:length(longRating)) {
  a = converts[[i]]
  b = the_measurements[[i]]
  # NOTE(review): this binding shadows base::c for data lookups (function
  # calls c(...) still resolve to the base function).
  c = ingredients_all[[i]]
  if (length(a)==length(b) & length(a) == length(c)) {
    all_ingr = rbind(all_ingr, (cbind(study_no = i,amount = a, unit = b, ingredient = c, rating = longRating[i])))
  } else {
    # Bail out entirely on the first recipe with inconsistent lengths.
    break
  }
}
# NOTE(review): leftover debugging line; always prints 1.
length(converts[3])
parsed_recipes = data.frame(all_ingr)
write.csv(parsed_recipes ,file = "/Users/shermanpeng/Documents/R/banana_bread/parsed_ingredients.csv")
head(parsed_recipes, 20)
# Remove "... ounce(s) " prefixes from a copy of the ingredient lists.
no_units_ingr = ingredients_all
# NOTE(review): "indicies" is a typo for "indices"; grep() over a list
# coerces each element with as.character().
indicies = unlist(grep(".*ounce.*", no_units_ingr))
for(i in 1:length(indicies))
{
  reg = ".*ounce(s)?\\W "
  position = grep(reg, no_units_ingr[[indicies[i]]])
  no_units_ingr[[indicies[i]]][position] = gsub(reg, "", no_units_ingr[[indicies[i]]][position])
}
|
f7a2b3e1008641c7c4c65f6715afbc4e6919f04a | 192fd3dbc491d3c36bd9351f02cf9b5957ea56d1 | /R Packages/comorbidities-icd10/man/cmrbdt.finder.numeric.Rd | c37acfbf2713b6cff6ab70cfd16a818d18ba7190 | [] | no_license | ryerex/Research_and_Methods | d4d211defdbee83e47ecc72c59944c3f60a3bcca | 4010b75a5521c2c18ee624d48257ee99b29a7777 | refs/heads/master | 2023-05-26T01:54:17.048907 | 2020-08-05T16:14:29 | 2020-08-05T16:14:29 | 91,369,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,596 | rd | cmrbdt.finder.numeric.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{cmrbdt.finder.numeric.ahrq}
\alias{cmrbdt.finder.numeric.ahrq}
\alias{cmrbdt.finder.numeric.charlson_Deyo1992}
\alias{cmrbdt.finder.numeric.elixhauser_Elixhauser1998}
\title{Finds the matching codes for the different comorbidity groups}
\usage{
cmrbdt.finder.numeric.ahrq(icd_codes, out, country_code,
include_acute = rep(TRUE, length(icd_codes)), icd_ver = 9, ver = "3.7")
cmrbdt.finder.numeric.charlson_Deyo1992(icd_codes, out, country_code,
include_acute = rep(TRUE, length(icd_codes)), icd_ver = 9)
cmrbdt.finder.numeric.elixhauser_Elixhauser1998(icd_codes, out, country_code,
include_acute = rep(TRUE, length(icd_codes)), icd_ver = 9)
}
\arguments{
\item{icd_codes}{The icd code of interest, either a
number or a string}
\item{out}{If the function has been run previously there
may already be matches for a particular group, if the out
parameter is supplied with a vector equal to the number
of Elixhauser comorbidities with their corresponding
names only new findings will be appended.}
\item{country_code}{The two-letter \code{ISO 3166-1
alpha-2} code indicating the country of interest (the
same as the top-level internet domain name of that
country). As certain countries have adapted
country-specific ICD-coding there may be minor
differences between countries. Currently only US codes
are implemented for the numeric algorithms.}
\item{include_acute}{Certain codes may indicate a
non-chronic disease such as heart infarction, stroke or
similar. Under some circumstances these should be
ignored, e.g. when studying predictors for hip
arthroplasty re-operations codes during the admission for
the surgery should not include myocardial infarction as
this is most likely a postoperative condition not
available for scoring prior to the surgery.}
\item{icd_ver}{The icd version of interest. Currently
only ICD-9 is supported by the numeric matching
algorithms. Leave empty.}
\item{ver}{The version number for the
\code{cmrbdt.finder.numeric.ahrq} function, see
\code{\link{pr.ahrq.list}}, the default is the 3.7
version}
}
\value{
\code{vector} Returns a logical vector named after the
comorbidity groups. An entry of FALSE means that no code
matched that comorbidity group; otherwise the entry is
TRUE.
}
\description{
The functions loop through all the comorbidity groups in
search for a matching group to the provided
\code{icd_codes}.
}
\details{
The \code{_regex} indicates that these functions use
regular expressions for identification of codes of
interest. The match is case-insensitive. The function can
also identify acute conditions and ignore those if
specified.
}
\examples{
cmrbdt.finder.numeric.ahrq(9320)
cmrbdt.finder.numeric.charlson_Deyo1992(c(9320, 41000))
cmrbdt.finder.numeric.elixhauser_Elixhauser1998(34190)
}
\references{
\url{http://www.hcup-us.ahrq.gov/toolssoftware/comorbidity/comorbidity.jsp}
R. A. Deyo, D. C. Cherkin, and M. A. Ciol, "Adapting a
clinical comorbidity index for use with ICD-9-CM
administrative databases" Journal of Clinical Epidemiology,
vol. 45, no. 6, pp. 613-619, Jun. 1992.
Elixhauser A, Steiner C, Harris DR, Coffey RM. (1998)
Comorbidity measures for use with administrative data. Med
Care. 36:8-27.
}
\seealso{
\code{\link{cmrbdt.calc}}
Other cmrbdtf.finder functions:
\code{\link{cmrbdt.finder.regex.charlson_Armitage2010}},
\code{\link{cmrbdt.finder.regex.charlson_Quan2005}},
\code{\link{cmrbdt.finder.regex.charlson_Sundarajan2004}},
\code{\link{cmrbdt.finder.regex.elixhauser_Quan2005}}
}
|
1872669d1ffc3d7e0d9ce9cb72c00664ec6aebcc | 188608d77584dc0ca6ae35c4b62cd34b8d8ce7fe | /code/ggplot.R | 0ce8fe7ba6216514d258f59c6e608487ad23f3d7 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | carpentries-uconn/2020-05-01-UConn-online | 1135ab915c9472b449b9aa87cbb0af2875ebf606 | aba45f79c4974e5368d460d99d2c75c287bf9125 | refs/heads/gh-pages | 2022-08-27T17:17:07.732581 | 2020-05-26T16:39:26 | 2020-05-26T16:39:26 | 255,981,989 | 0 | 1 | NOASSERTION | 2020-05-08T18:52:04 | 2020-04-15T16:56:13 | Python | UTF-8 | R | false | false | 5,931 | r | ggplot.R | # Software Carpentry ggplot2 workshop
# Install 3rd party packages we'll be using
install.packages(c("ggplot2", "cowplot", "gapminder",
                   "plotly"))
# Read in data
# (Run from the workshop project root, so that data/ and figures/ exist.)
gap <- read.csv("data/gapminder_data.csv")
# Getting help
?read.csv
# Take a quick look at our data
head(gap)
str(gap)
##### Intro to ggplot
# Load the ggplot library
library(ggplot2)
# first ggplot
ggplot(data = gap, mapping = aes(x = gdpPercap,
                                 y = lifeExp)) +
  geom_point()
# ggplot with year vs life expectancy
# Not the most informative graph
# The aes() function lets us set an x and y variable
# The geom_point() function tells the graph to add a scatterplot
ggplot(data = gap, mapping = aes(x = year,
                                 y = lifeExp, color = continent)) +
  geom_point()
# This graph might be better with changes over time
# So we use geom_line() to make a line graph instead
# Adding a by argument to aes() lets us have a line for each country
# Adding a color argument lets us color by group (continent)
ggplot(data = gap, mapping = aes(x = year,
                                 y = lifeExp, color = continent,
                                 group = country)) +
  geom_line()
# We could color the points and lines differently by adding
# aes() to just the geom_line() layer
ggplot(data = gap, mapping = aes(x = year,
                                 y = lifeExp, group = country)) +
  geom_line(aes(color = continent)) +
  geom_point()
# Color by group for lines, specific color for points (outside aes())
ggplot(data = gap, mapping = aes(x = year,
                                 y = lifeExp, group = country)) +
  geom_line(mapping = aes(color = continent)) +
  geom_point(color = "blue")
# Make the points semi-transparent to see density
# Log-transform the x-axis
# Add a trendline (linear regression/model) using geom_smooth()
ggplot(data = gap, mapping = aes(x = gdpPercap,
                                 y = lifeExp)) +
  geom_point(alpha = 0.5) +
  scale_x_log10() +
  geom_smooth(method = "lm", size = 3)
# Make the points bigger, and give them a different shape for each continent
# Good for colorblind accessibility
ggplot(data = gap, mapping = aes(x = gdpPercap,
                                 y = lifeExp)) +
  geom_point(aes(color = continent, shape = continent),
             size = 2, alpha = 0.5) +
  scale_x_log10() +
  geom_smooth(aes(group = continent), method = "lm")
##### Getting ready for publication
# Modify the graph to make it look good for publication
# Also, save it to a variable
lifeExp_plot <- ggplot(data = gap, mapping = aes(x = gdpPercap,
                                                 y = lifeExp, color = continent)) +
  geom_point(mapping = aes(shape = continent),
             size = 2) +
  scale_x_log10() +
  geom_smooth(method = "lm") +
  # Change the y-axis limits and tickmark positions
  scale_y_continuous(limits = c(0, 100),
                     breaks = seq(0, 100, by = 10)) +
  # Change the theme to something more simple
  theme_minimal() +
  # Add axis and legend labels
  labs(title = "Effects of per-capita GDP",
       x = "GDP per Capita ($)",
       y = "Life Expectancy (yrs)",
       color = "Continents",
       shape = "Continents")
# Since we saved the plot to a variable,
# we have to run the variable to show the plot now
lifeExp_plot
# Save as png
ggsave(file = "figures/life_expectancy.png", plot = lifeExp_plot)
# Save as pdf
ggsave(file = "figures/life_expectancy.pdf", plot = lifeExp_plot)
# Save as png, high resolution
ggsave(file = "figures/life_expectancy.png", plot = lifeExp_plot,
       width = 8, height = 6, units = "in", dpi = 300)
##### Facets
# Facets for 3-dimensional data
# This splits the graph into multiple graphs in a panel
# Here, we show a different graph of gdpPercap vs lifeExp for each
# continent
ggplot(data = gap, mapping = aes(x = gdpPercap,
                                 y = lifeExp)) +
  facet_wrap(~ continent, ncol = 2, scales = "free") +
  geom_point(alpha = 0.5) +
  scale_x_log10() +
  geom_smooth(method = "lm")
# Another example, coloring by continent, and faceting by year
ggplot(data = gap, mapping = aes(x = gdpPercap,
                                 y = lifeExp, color = continent)) +
  facet_wrap(~year) +
  geom_point(alpha = 0.5) +
  scale_x_log10() +
  geom_smooth(method = "lm")
##### Combining graphs
# Load the cowplot library
library(cowplot)
# Make our first plot and save it
plotA <- ggplot(data = gap, mapping = aes(x = gdpPercap,
                                          y = lifeExp)) +
  geom_point() +
  scale_x_log10() +
  # Cowplot also has a nice minimalist theme
  theme_cowplot()
# Show first plot
plotA
# Make our second plot and save it
plotB <- ggplot(data = gap, mapping = aes(x = continent,
                                          y = lifeExp)) +
  geom_boxplot() +
  # Cowplot also has a nice minimalist theme
  theme_cowplot()
# Show second plot
plotB
# Combine the two plots into a figure with A and B graphs
plot_grid(plotA, plotB, labels = c("A", "B"))
# Save the combined plot, with a custom size
# NOTE(review): no plot= argument, so this relies on ggsave() defaulting
# to the most recently displayed plot — confirm it captures the
# plot_grid() figure.
ggsave(file = "figures/combined_plot.pdf", width = 10,
       height = 4, units = "in")
# Another (more powerful) way to combine plots
# This way allows you to control relative size of the graphs
# ggdraw() works with a canvas that goes from 0 to 1 in the x and y axis
# You position and size graphs in that 0 to 1 space.
ggdraw() +
  draw_plot(plotA, x = 0, y = 0, width = 0.3, height = 1) +
  draw_plot(plotB, x = 0.3, y = 0, width = 0.7, height = 1)
# Show detailed tutorial for cowplot (also works for other packages)
browseVignettes("cowplot")
##### Interactive graphs
# Load the plotly library, which includes ggplotly()
library(plotly)
# Make a graph and save it to a variable
# NOTE(review): `continent = continent` inside aes() is not a standard
# aesthetic; it appears to be carried along so ggplotly() can show the
# continent in hover tooltips — confirm.
# NOTE(review): scale_x_log10() on a calendar-year axis is unusual —
# confirm a log scale is intended here.
yearLifeExp <- ggplot(data = gap, mapping = aes(x = year,
                                                y = lifeExp,
                                                continent = continent)) +
  facet_wrap(~ continent) +
  geom_line(aes(group = country)) +
  scale_x_log10()
# Show the ggplot graph, not interactive yet
yearLifeExp
# Make the graph interactive with ggplotly.
# Now, you can explore the data
ggplotly(yearLifeExp)
|
8354b204e98d21800ef0fa04f28aa4bc8bf39698 | c31db2890a672e455e29daeab067be3908207bf6 | /hw2/pollutantmean.R | a12cad92d6ca06a8ca74807d151b012e9f94841c | [] | no_license | xizhonghua/rprog-016 | d9432df091f1ef5e89e7c6f5284339db12c8cf3a | 04c2725333a7ec30406aeb32a24ec2498d3b39d6 | refs/heads/master | 2021-01-23T04:08:32.503566 | 2014-12-06T02:46:06 | 2014-12-06T02:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,700 | r | pollutantmean.R | ## Write a function named 'pollutantmean' that calculates the
## mean of a pollutant (sulfate or nitrate) across a specified
## list of monitors. The function 'pollutantmean' takes three
## arguments: 'directory', 'pollutant', and 'id'. Given a
## vector monitor ID numbers, 'pollutantmean' reads that
## monitors' particulate matter data from the directory
## specified in the 'directory' argument and returns the mean
## of the pollutant across all of the monitors, ignoring any
## missing values coded as NA. A prototype of the function is
## as follows
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'pollutant' is a character vector of length 1 naming the pollutant
  ## column to average; the assignment uses "sulfate" or "nitrate", but
  ## any column present in the files works
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)

  ## Read the requested column from every monitor file. Selecting the
  ## column by name (instead of the hard-coded positions 2/3) is robust
  ## to column reordering and generalizes to any pollutant column.
  values <- lapply(id, function(monitor) {
    filename <- sprintf("%s/%03d.csv", directory, monitor)
    read.csv(filename)[[pollutant]]
  })
  ## Collecting the pieces in a list and flattening once avoids the
  ## quadratic cost of growing a vector with c() inside a loop.
  ## as.numeric() turns the NULL from an empty 'id' into numeric(0),
  ## matching the original NaN result in that edge case.
  mean(as.numeric(unlist(values, use.names = FALSE)), na.rm = TRUE)
}
b1c12c35a9439e90d4ca5d2de296159e7414c0a1 | a008684e8b0610b04ec5a089d6f4f5bc2d4b8b90 | /inst/tmp.R | a1b948861c60f357117854b9b7426a730c53c3ef | [
"MIT"
] | permissive | jhaineymilevis/dmaps | 9156a7ff88a79fdcd2dc8feff2c896b2426151b2 | fbdf8f6cb072cf0fc3dfae83e823a0c41d554bab | refs/heads/master | 2020-03-26T09:19:22.662950 | 2017-09-14T19:34:57 | 2017-09-14T19:34:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 582 | r | tmp.R | library(devtools)
# Load the dmaps package under development so its functions (e.g.
# dictionaryMatch, presumably defined in this package — confirm) are in
# scope.
load_all()
###
# NOTE(review): this first read is immediately overwritten by the next
# line; it looks like dead code left over from interactive work.
d <- read.csv("inst/data/co/departamento-municipio.csv")
d <- read.csv("inst/data/co/ciudades-colombia-poblacion.csv")
# Build the "city - department" display name used for dictionary matching.
d$name <- paste(d$ciudad,d$departamento,sep = " - ")
# DANE municipality codes; all columns read as character (colClasses).
codes <- read.csv("inst/dmaps/co/dane-codes-municipio.csv", colClasses = "character")
# Match each data row's name against the codes table and attach the
# canonical name and coordinates of the matched municipality.
matchCode <- function(codes,data){
  dict <- codes[c("name","alternativeNames")]
  x <- dictionaryMatch(data$name,dict)
  # Unmatched rows yield NA from match(), producing NA coordinates.
  idx <- match(x,codes$name)
  cbind(data[1:2],codes[idx,c("name","latitud","longitud")])
}
m <- matchCode(codes,d)
write.csv(m,"~/Desktop/dep-mun.csv",row.names = FALSE)
|
05e5ad5ce444b0075990866fdd181e677412d133 | 0bc75447cc6e4c96dd1f8f015ed1aa17c6a0e218 | /replArchive/appendix/figureA1.R | 64fdd75b96e12f3d8e0d28aae5c78ef1db4cfcfe | [] | no_license | s7minhas/conflictEvolution | 3d19f3d7ad237358234595f9bc692840056d0198 | c53b399ca4c717cc2346cfc3b0fe71e73f56c4eb | refs/heads/master | 2021-03-22T01:22:43.035833 | 2019-05-25T03:20:25 | 2019-05-25T03:20:25 | 50,407,689 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | figureA1.R | ################
# workspace
source('../main/setup.R')
source('../main/paramPlot2.R')
################
################
# load data
load('../main/ameResults.rda') # load AME mod results
# quick trace plot
# Posterior draws of the regression coefficients from the base AME model.
mcmcData = ameFits$base$BETA
# Map raw coefficient names ("dirty") to publication labels ("clean").
varKey = data.frame(dirty=colnames(mcmcData),stringsAsFactors=FALSE)
varKey$clean = c(
	'Intercept',
	'Riots/Protests Against (Sender)', 'Civilian Attacks (Sender)', 'Geographic Spread (Sender)',
	'Riots/Protests Against (Receiver)', 'Civilian Attacks (Receiver)', 'Geographic Spread (Receiver)',
	'Gov-Gov Actors','Post-Boko Haram', 'Election Year', 'Neighborhood Conflict')
# Reorder rows into the display order used in the appendix figure.
varKey = varKey[c(9,2,5,3,6,4,7,10,11,8,1),]
ggsave(
	paramPlot2(mcmcData, varKey),
	file='floats/figureA1.pdf', width=8,height=9)
################ |
b58216c1f0035f7b4152c36cf22103986ef31105 | f18550811c31c759c2cc7404656b923a924f7a99 | /R/readPindel.R | 4f55ce4b2c5e0b3ddbb7153db3c0757f0b76a599 | [
"MIT"
] | permissive | YaoLab-Bioinfo/intansv | 8ce8e19745fbce0c03d67ad792ca57f131e058a6 | 12905289e0b10f3d8872730f182e253741324ba2 | refs/heads/master | 2022-08-30T23:32:29.165613 | 2020-05-21T02:18:26 | 2020-05-21T02:18:26 | 39,183,991 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,734 | r | readPindel.R | ## Merging overlapped SVs predicted by Pindel
PindelCluster <- function(df) {
    ## Collapse a set of overlapping Pindel SV calls into representative
    ## calls. Calls supported by fewer than half the reads of the
    ## best-supported call are dropped; the survivors are grouped by
    ## genomic overlap and each group is resolved to at most one call.
    top_support <- max(df$ReadPairSupport)
    kept <- df[df$ReadPairSupport >= (top_support / 2), ]

    ## Label each surviving call with the reduced (non-overlapping)
    ## cluster it falls into.
    kept_ranges <- IRanges(start = kept$BP_left, end = kept$BP_right)
    hits <- findOverlaps(kept_ranges, reduce(kept_ranges))
    kept$clupindel <- subjectHits(hits)

    ## Per cluster: keep a lone call as-is; otherwise keep the
    ## best-supported call only when all calls cover at least 80% of the
    ## cluster span, else discard the whole cluster.
    invisible(ddply(kept, ("clupindel"), function(grp) {
        if (nrow(grp) == 1) {
            return(grp)
        }
        span_start <- min(grp$BP_left)
        span_end <- max(grp$BP_right)
        span_len <- span_end - span_start
        ## Fraction of the cluster span covered by each individual call.
        grp$op <- (grp$BP_right - grp$BP_left) / span_len
        if (any(grp$op < 0.8)) {
            NULL
        } else {
            grp[which.max(grp$ReadPairSupport), ]
        }
    }))
}
## Reading in the predicted SVs given by Pindel
readPindel <- function(dataDir=".", regSizeLowerCutoff=100,
                       regSizeUpperCutoff=1000000, readsSupport=3,
                       method="Pindel")
{
    ## Read Pindel output files from `dataDir` and return the deletions
    ## (*_D), inversions (*_INV) and tandem duplications (*_TD) as a
    ## list of data frames of merged, filtered SV calls.
    ##
    ## Args:
    ##   dataDir:            directory containing the Pindel output files.
    ##   regSizeLowerCutoff: minimum SV length to keep.
    ##   regSizeUpperCutoff: maximum SV length to keep.
    ##   readsSupport:       minimum read-pair support to keep.
    ##   method:             label stored as the "method" attribute.
    ##
    ## Returns: list(del=..., inv=..., dup=...), each element a data
    ## frame (chromosome, pos1, pos2, size, info) or NULL when nothing
    ## survives reading/filtering.

    ## Read one Pindel file: keep only SV records (lines starting with a
    ## digit) and the columns of interest. Returns NULL on read failure.
    readPindelFile <- function(path, quote = "\"'") {
        svDf <- try(read.table(path, fill = TRUE, quote = quote, as.is = TRUE),
                    silent = TRUE)
        if (!is.data.frame(svDf)) {
            return(NULL)
        }
        svDf <- svDf[grepl("^\\d", svDf$V1), ]
        svDf <- svDf[, c(2, 3, 8, 10, 11, 13, 14, 16, 25)]
        names(svDf) <- c("SV_type", "SV_len", "chromosome", "BP_left",
                         "BP_right", "BP_range_left", "BP_range_right",
                         "ReadPairSupport", "score")
        svDf
    }

    ## Combine per-file tables and filter on size and read support.
    ## SV_len is coerced to numeric for every SV type; the original code
    ## did this only for deletions, so inversions and duplications could
    ## be size-filtered as character strings.
    filterCalls <- function(svList) {
        svDf <- do.call(rbind, svList)
        if (!is.data.frame(svDf) || nrow(svDf) == 0) {
            return(NULL)
        }
        svDf$SV_len <- as.numeric(svDf$SV_len)
        svDf[svDf$SV_len >= regSizeLowerCutoff &
             svDf$SV_len <= regSizeUpperCutoff &
             svDf$ReadPairSupport >= readsSupport, ]
    }

    ## Cluster overlapping calls via PindelCluster and reshape to the
    ## chromosome/pos1/pos2/size/info output format.
    mergeCalls <- function(svDf) {
        if (!is.data.frame(svDf) || nrow(svDf) == 0) {
            return(NULL)
        }
        svRange <- GRanges(seqnames=svDf$chromosome,
                           ranges=IRanges(start=svDf$BP_left,
                                          end=svDf$BP_right))
        ## Assign each call to a reduced (non-overlapping) cluster.
        svDf$clu <- subjectHits(findOverlaps(svRange, reduce(svRange)))
        merged <- ddply(svDf, ("clu"), PindelCluster)
        merged <- merged[, c(3:5, 2, 8:9)]
        names(merged)[2:6] <- c("pos1", "pos2", "size",
                                "readsSupport", "score")
        merged$info <- paste0("SR=", merged$readsSupport,
                              ";score=", merged$score)
        merged$readsSupport <- NULL
        merged$score <- NULL
        merged$size <- as.numeric(merged$size)
        merged$pos1 <- as.numeric(merged$pos1)
        merged$pos2 <- as.numeric(merged$pos2)
        merged
    }

    ## Locate the per-SV-type output files.
    delFiles <- list.files(dataDir, full.names=TRUE, pattern=".+_D$")
    invFiles <- list.files(dataDir, full.names=TRUE, pattern=".+_INV$")
    dupFiles <- list.files(dataDir, full.names=TRUE, pattern=".+_TD$")

    ## Inversion files are read with quote="" (kept from the original,
    ## which disabled quoting only for the *_INV files).
    del <- mergeCalls(filterCalls(lapply(delFiles, readPindelFile)))
    inv <- mergeCalls(filterCalls(lapply(invFiles, readPindelFile,
                                         quote = "")))
    dup <- mergeCalls(filterCalls(lapply(dupFiles, readPindelFile)))

    retuRes <- list(del=del, inv=inv, dup=dup)
    attributes(retuRes) <- c(attributes(retuRes), list(method=method))
    return(retuRes)
}
|
bc071202bb4a66db8d88a72e617566505f2cd0d8 | 4ee7a1db4b20d30fdcc0bba1e10b3ce325f6bdc2 | /R shinymodules/cmip5ScatterPlot_mod.R | e3f3ca490791fd2b3c6e6febaf7b0f2cde3d6478 | [] | no_license | tanerumit/my-repo | a40f71acb246f6c107afb0181d26014fd4f378e3 | 4c9eb48424d67dee9ac2fd17364be262e1b9dda4 | refs/heads/master | 2021-07-19T05:21:22.228151 | 2021-01-30T15:52:36 | 2021-01-30T15:52:36 | 97,176,651 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,307 | r | cmip5ScatterPlot_mod.R |
cmip5ScatterPlot_mod_UI <- function(id) {
  # Shiny module UI: a namespaced highcharter output container that
  # hosts the GCM scatter plot, 500px tall.
  namespaced <- NS(id)
  highchartOutput(namespaced("GCMplot"), height = "500px")
}
cmip5ScatterPlot_mod <- function(input, output, session, data, hist.period,
  proj.period)
{
  # Shiny module server: renders a highcharter scatter plot of projected
  # temperature change (x, degC) vs precipitation change (y, %) per GCM,
  # colored by RCP scenario.
  #
  # Args:
  #   input/output/session: standard Shiny module arguments.
  #   data:        reactive returning a named list of per-scenario lists
  #                of model tables (names "historical", "rcp26", ...;
  #                tables carry year/prcp/tavg columns -- inferred from
  #                the usage below, confirm against callers).
  #   hist.period: reactive; years defining the historical baseline.
  #   proj.period: reactive; years defining the projection window.
  #hist.period <- reactive({input$hist_period})
  #proj.period <- 2066:2095
  tavg.breaks <- seq(0,8,1)
  prcp.breaks <- seq(-40,40,10)
  # Axis limits
  # NOTE(review): tavg_lim and prcp_lim are computed but never used; the
  # plot below derives its axis range from min/max of the breaks instead.
  tavg_step <- (tavg.breaks[2] - tavg.breaks[1])/2
  tavg_lim <- range(tavg.breaks) + c(- tavg_step, tavg_step)
  prcp_step <- (prcp.breaks[2] - prcp.breaks[1])/2
  prcp_lim <- range(prcp.breaks) + c(- prcp_step, prcp_step)
  rcp_col <- c("#08519c", "#abd9e9", "#fe9929", "#f03b20")
  # Reactive: per-scenario, per-model changes relative to the historical
  # baseline (tavg in degC, prcp in percent).
  delta_clim <- reactive({
    dataR <- data()
    hist.periodR <- hist.period()
    proj.periodR <- proj.period()
    # Summarize data for each projection
    df <- lapply(names(dataR),
      function(x) bind_rows(dataR[[x]], .id = "model")) %>%
      setNames(names(dataR)) %>%
      bind_rows(.id = "scenario") %>% ungroup() %>%
      mutate(scenario = factor(scenario,
        levels = c("historical", "rcp26", "rcp45","rcp60","rcp85"),
        labels = c("Historical", "RCP2.6", "RCP4.5", "RCP6.0", "RCP8.5")))
    # Baseline per model over the historical window; prcp * 12 converts
    # the monthly mean to an annual total.
    data_hist <- df %>% filter(scenario == "Historical") %>%
      filter(year %in% hist.periodR) %>%
      group_by(model) %>%
      summarize_at(vars(prcp, tavg), mean) %>%
      mutate(prcp = prcp * 12)
    data_proj <- df %>% filter(scenario != "Historical") %>%
      filter(year %in% proj.periodR) %>%
      group_by(scenario, model) %>%
      summarize_at(vars(prcp, tavg), mean) %>%
      mutate(prcp = prcp * 12)
    # mean precip and temp changes
    delta_prcp <- data_proj %>%
      select(scenario, model, prcp) %>%
      left_join(select(data_hist, model, hist_prcp = prcp), by = "model") %>%
      mutate(prcp = round((prcp - hist_prcp) / hist_prcp * 100,2))
    delta_tavg <- data_proj %>%
      select(scenario, model, tavg) %>%
      left_join(select(data_hist, model, hist_tavg = tavg), by = "model") %>%
      mutate(tavg = round(tavg - hist_tavg,2))
    # Reactive value: one row per (scenario, model) with both deltas.
    delta_prcp %>%
      left_join(delta_tavg, by = c("scenario", "model")) %>%
      na.omit() %>% select(scenario, model, prcp, tavg)
    # # Axis breaks (if not provided)
    # if(is.null(tavg.breaks)) {
    #   tavg.breaks <- seq(0, round(max(delta_tavg$tavg, na.rm = T),0) + 2, 1)
    # }
    # if(is.null(prcp.breaks)) {
    #   prcp.breaks <- seq(
    #     round(min(delta_prcp$prcp, na.rm = TRUE),-1) -20,
    #     round(max(delta_prcp$prcp, na.rm = TRUE),-1) +20,
    #     10)
    # }
  })
  # Render the scatter plot from the reactive deltas.
  output$GCMplot <- renderHighchart({
    df2 <- delta_clim()
    highchart() %>%
      hc_add_series_df(df2,
        type="scatter", radius=7,
        x=tavg, y=prcp, group=scenario, allowPointSelect = T,
        borderwidth = 2) %>%
      #Axis settings
      hc_xAxis(min = min(tavg.breaks), max = max(tavg.breaks),
        tickInterval= tavg.breaks[2] - tavg.breaks[1],
        gridLineWidth=2, crosshair=T,
        title = list(text = "Temperature change (°C)")) %>%
      hc_yAxis(min = min(prcp.breaks), max = max(prcp.breaks),
        tickInterval= prcp.breaks[2] - prcp.breaks[1],
        gridLineWidth=2, crosshair=T,
        title = list(text="Precipitation change (%)")) %>%
      #Plot appearance
      hc_add_theme(hc_theme_smpl()) %>%
      hc_plotOptions(
        series = list(marker = list(symbol = "circle"))) %>%
      hc_legend(
        title = list(text="Scenarios", fontSize = "20px"),
        align = "right", verticalAlign = "top",
        layout = "vertical", x = 0, y = 50) %>%
      hc_colors(rcp_col) %>%
      #Additional information & settings
      hc_tooltip(formatter = JS("function(){
        return (' Scenario: ' + this.point.scenario +
        ' <br> Model :' + this.point.model +
        ' <br> Delta Temp (DegC) : '+ this.x +
        ' <br> Delta Precip (%) : '+ this.y)}"),
        borderWidth = 2) %>%
      hc_exporting(enabled = T) %>%
      hc_size(800, 600)
  })
}
|
72fbd4975b2efe60504571e00b8d10444c5dda41 | e04c0d423fde5be2567111b6983cc91e63c93232 | /R/libraries_uninstall.R | 4594fe6f9c3519c3a93c4f883bf4dbd2e66771ef | [] | no_license | RafiKurlansik/bricksteR | b42b3b3556ef3394b7e7801568a8e228083ad336 | 9199ab34dda462601186c25cf8655483f0bbe408 | refs/heads/master | 2022-10-28T14:35:21.875280 | 2022-10-06T15:36:30 | 2022-10-06T15:36:30 | 227,508,502 | 25 | 6 | null | 2021-07-15T11:59:22 | 2019-12-12T03:04:36 | R | UTF-8 | R | false | false | 3,482 | r | libraries_uninstall.R | #'
#' Uninstall Packages (libraries) on a Databricks Cluster
#'
#' Set libraries to be uninstalled on a cluster. The libraries aren’t
#' uninstalled until the cluster is restarted. Uninstalling libraries
#' that are not installed on the cluster has no impact but is not an error.
#' You can locate the cluster ID in the URL of the cluster configuration page.
#' For example:
#'
#' https://mycompany.cloud.databricks.com/#/setting/clusters/xxxx-xxxxx-xxxxxx/
#'
#' Where xxxx-xxxxx-xxxxxx is the cluster ID.
#'
#' The API endpoint for uninstalling libraries is
#' '2.0/libraries/uninstall'.
#' For all details on API calls please see the official documentation at
#' \url{https://docs.databricks.com/dev-tools/api/latest/}.
#'
#' @param cluster_id A string containing the unique id for an online
#'   Databricks cluster.
#' @param package A string with the name of the package to uninstall.
#' @param workspace A string representing the web workspace of your Databricks
#'   instance. E.g., "https://eastus2.azuredatabricks.net" or
#'   "https://demo.cloud.databricks.com".
#' @param token A valid authentication token generated via User Settings in
#'   Databricks or via the Databricks REST API 2.0. If none is provided,
#'   netrc will be used.
#' @param verbose If TRUE, will print the API response to the console. Defaults to
#'   TRUE.
#' @return The API response.
#'
#' @examples
#' # Cluster to install on
#' workspace <- "https://mydb.cloud.databricks.com"
#' cluster_id <- "0818-155203-cheese22"
#'
#' # Uninstall package
#' libraries_uninstall(package = "broom", cluster_id, workspace)
#'
#' # Check installation status
#' get_library_statuses(cluster_id, workspace)
#' @export
libraries_uninstall <- function(cluster_id,
                                package,
                                workspace,
                                token = NULL,
                                verbose = TRUE,
                                ...) {

  # Build the JSON request body with jsonlite so special characters in
  # `cluster_id` or `package` are escaped correctly (the previous
  # string-concatenated JSON broke for values containing quotes).
  payload <- as.character(jsonlite::toJSON(
    list(
      cluster_id = cluster_id,
      libraries = list(list(cran = list(package = package)))
    ),
    auto_unbox = TRUE
  ))

  url <- paste0(workspace, "/api/2.0/libraries/uninstall")

  if (is.null(token)) {
    # No token supplied: fall back to the user's netrc credentials.
    res <- httr::with_config(httr::config(netrc = 1), {
      httr::POST(url = url,
                 httr::content_type_json(),
                 body = payload)
    })
  } else {
    # Authenticate with the supplied bearer token.
    res <- httr::POST(url = url,
                      httr::add_headers(
                        Authorization = paste("Bearer", token)),
                      httr::content_type_json(),
                      body = payload)
  }

  # Handling successful API request
  if (res$status_code[1] == 200) {
    if (verbose) {
      message(paste0(
        "Status: ", res$status_code[1],
        "\nRequest successful. Packages will be uninstalled upon cluster restart. \n
        Use restart_cluster() to finish uninstalling."
      ))
    }
  }
  # Handling unsuccessful request
  else {
    # NOTE(review): jsonlite::prettify() expects JSON text; passing the
    # httr response object directly looks suspect -- consider
    # prettify(httr::content(res, "text")). Kept to preserve behavior.
    if (verbose) {
      message(paste0(
        "Status: ", res$status_code[1],
        "\nThe request was not successful:\n\n", suppressMessages(jsonlite::prettify(res))
      ))
    }
  }
  # Return response
  res
}
|
9a252b4771275ea2bdb34ee686ad6a04617d20a4 | 6813d26adb6b1a6e1295eedcd6a92ec1ffef8569 | /R-functions/funAdjustPop.R | 955e371e61b4395f946c5f4935fa0ebeeffad92b | [] | no_license | jreades/popchange | 6ece3d3f974ae1458d77c605a16946034351aebc | 5dbca9d41841b55d31730a22f5662e8496525c48 | refs/heads/master | 2021-01-20T08:19:10.464494 | 2017-07-17T15:32:23 | 2017-07-17T15:32:23 | 90,128,114 | 1 | 0 | null | 2017-05-03T08:49:05 | 2017-05-03T08:49:05 | null | UTF-8 | R | false | false | 1,081 | r | funAdjustPop.R | #Function to link OA spatial data to population attribute data
#Updated to allow multiple columns within data
AdjustPop <- function(oa2011_grid, data, longLog = FALSE, numberAttributeColumns){
  # Join attribute data onto the grid's OA records (OA codes may repeat
  # in the grid) and append one population-weighted column per attribute.
  #
  # Args:
  #   oa2011_grid: spatial object whose @data slot holds OA11CD and popProp.
  #   data:        data frame; column 1 is the OA code, remaining columns
  #                are attributes to weight by popProp.
  #   longLog, numberAttributeColumns: unused; kept so existing callers
  #                that pass them keep working.
  #
  # Returns: oa2011_grid with an "<attr>WeightedPop" column added to
  # @data for every attribute column of `data`.
  matched_rows <- data[match(oa2011_grid@data[, "OA11CD"], data[, 1]), ]
  oa2011_grid@data <- data.frame(oa2011_grid@data, matched_rows)
  attr_names <- colnames(data)[2:length(data)]
  n_before <- length(oa2011_grid@data)
  for (k in seq_along(attr_names)) {
    attr_name <- attr_names[k]
    new_col <- n_before + k
    # Create the output column, name it, then fill it with the
    # population-weighted attribute values.
    oa2011_grid@data[new_col] <- NA
    colnames(oa2011_grid@data)[new_col] <- paste0(attr_name, "WeightedPop")
    oa2011_grid@data[new_col] <-
      oa2011_grid@data[which(colnames(oa2011_grid@data) == attr_name)] *
      oa2011_grid@data$popProp
  }
  return(oa2011_grid)
}
700ae0f84c747078aeabc39ad7bd5e93c8135acd | 40af93322af32d5e64191c4635e0d66c8004997e | /MachineLearing_with_R_language/CarPricePrediction/main.r | eeb41f2db195f9d91f87c26731aaaf5568a6cffb | [] | no_license | akshaysalvi948/MachineLearning | 001a147da0af281353360717788a98d79ff9a272 | 5995349741ff2461c1a79be582b12aabe6299a92 | refs/heads/master | 2021-03-23T03:55:33.646697 | 2020-03-16T14:21:55 | 2020-03-16T14:21:55 | 247,420,582 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 516 | r | main.r |
# Fit a linear model of used-car price and predict prices for new cars.
carInfile <- read.csv("Carsdata.csv")
str(carInfile)
summary(carInfile)
# Average sale price. (Bug fix: the column is `Price`, not `Prices` --
# the lm() formula below uses `Price`; the old `$Prices` selected
# nothing and mean() returned NA with a warning.)
mean(carInfile$Price)
# Price as a function of mileage, make, body type, cylinders and engine size.
lm1 <- lm(Price ~ Mileage + Make + Type + Cylinder + Liter, data = carInfile)
summary(lm1)
table(carInfile$Make)
table(carInfile$Doors, carInfile$Type)
# Residuals should look patternless if the linear fit is adequate.
plot(lm1$residuals)
# Two hypothetical cars to price.
test <- data.frame(Mileage = c(38500, 100000),
                   Type = c('Sedan', 'Hatchback'),
                   Make = c('Pontiac', 'SAAB'),
                   Cylinder = c(6, 8),
                   Liter = c(4, 6))
test
predict(lm1, newdata = test)
105df657e7fed2ee36535ac2194cc29bfc62ea46 | 517f4375c836881498e8f08c0ffdfa9a2ba151d8 | /funciones/boton_descarga.R | 030caa8e1ef633fc9072d858e36bbe516a0f54b3 | [] | no_license | Aggarch/MQF-UAH | 877975cf6fe50e995d4ee5a36c1e9ef3a292ff03 | 11f846b686d618a9a00b4d745919d710398162e9 | refs/heads/master | 2022-02-18T10:01:36.885756 | 2022-02-09T02:33:23 | 2022-02-09T02:33:23 | 248,128,384 | 2 | 2 | null | 2020-06-27T21:33:50 | 2020-03-18T03:20:21 | R | UTF-8 | R | false | false | 253 | r | boton_descarga.R |
# 4 downloading time series forecast
boton_descarga <- function(outputId, label = "Download"){
  # Download link styled as a green Bootstrap button with an Excel icon.
  # The "shiny-download-link" class matches the markup that
  # shiny::downloadButton() produces, so it works with downloadHandler.
  tags$a(
    id = outputId,
    class = "btn btn-success shiny-download-link",
    href = "",
    target = "_blank",
    download = NA,
    icon("file-excel"),
    label
  )
}
0abbcda2b3ecf34f0beb58277667606843eaf4bb | 97818d99fd3a17ec1d485fe2dd21fa27f8a57ed1 | /assignment5.R | 8782e100ed24c2af4b2e0ace2caae9ec17ce6eb3 | [] | no_license | samuelstriz/Assignment-05 | b2c426ddba7b91e1e76856b9a2e4037169f413b6 | dd93e5b37bd2bf31fa1c534c7e0aadd7ef7a6813 | refs/heads/master | 2020-09-10T19:33:18.164083 | 2019-11-15T02:34:36 | 2019-11-15T02:34:36 | 221,815,613 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | assignment5.R | if (!require("data.table")) install.packages("data.table")
library("data.table")
# Start a timer for the read below.
# NOTE(review): `ptm` is never used afterwards -- add
# `proc.time() - ptm` after the read to actually report elapsed time.
ptm <- proc.time()
# Read gdp.csv; data.table=FALSE makes fread return a plain data.frame.
DF <- fread("gdp.csv", header="auto",
            data.table=FALSE)
# NOTE(review): this result is discarded, and DF is already a
# data.frame because of data.table=FALSE above.
as.data.frame(DF)
|
6414e7a97b4cb69f5faf317b2d49f8dee6fbb817 | 8a2ef8564dac1551f9a389c3cc04837453c933d6 | /3_ScriptValidacionClusteres/K-MEANS/Validacion_Fallida/ValidacionClusteres_Kmeans_Luad_1vuelta.R | 844a5b06c75daa87c5f67fac5a7a6f7d3b04d628 | [] | no_license | anemartinezlarrinaga2898/TFM_ANE_MARTINEZ | 0a311e96ccad34f331d798b1f3c459539614e1c7 | 8e41157613fff0cfa4f5eb27e6984d02d000b9ef | refs/heads/main | 2023-07-27T21:41:44.218050 | 2021-08-27T10:15:35 | 2021-08-27T10:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,231 | r | ValidacionClusteres_Kmeans_Luad_1vuelta.R | muestras <- readRDS('MuestrasLuadTraspuesta.rds')
library(NbClust)
# COMPUTE A POOL OF CLUSTER-VALIDITY INDICES:
# One NbClust run per index (k-means, k = 2..10, Euclidean distance) on
# the LUAD sample matrix loaded into `muestras` above, then each result
# is saved to its own .rds file.
# NOTE(review): the system.time() values are discarded, so run times are
# never recorded; assign them if timing should be logged.
system.time(res_nb_silhouette <- NbClust(muestras, distance = 'euclidean', method = 'kmeans', min.nc=2, max.nc=10, index='silhouette'))
print("Calculado Silhouette_luad")
system.time(res_nb_dindex <- NbClust(muestras, distance = 'euclidean', method = 'kmeans', min.nc=2, max.nc=10, index='dindex'))
print("Calculado Dindex_luad")
system.time(res_nb_dunn <- NbClust(muestras, distance = 'euclidean', method = 'kmeans', min.nc=2, max.nc=10, index='dunn'))
print("Calculado Dunn_luad")
system.time(res_nb_gap <- NbClust(muestras, distance = 'euclidean', method = 'kmeans', min.nc=2, max.nc=10, index='gap'))
print("Calculado gap_luad")
system.time(res_nb_tau <- NbClust(muestras, distance = 'euclidean', method = 'kmeans', min.nc=2, max.nc=10, index='tau'))
print("Calculado tau_luad")
# Persist each NbClust result for later inspection.
saveRDS(res_nb_silhouette, 'res_nb_silhouette_luad.rds')
saveRDS(res_nb_dindex, 'res_nb_dindex_luad.rds')
saveRDS(res_nb_dunn, 'res_nb_dunn_luad.rds')
saveRDS(res_nb_gap, 'res_nb_gap_luad.rds')
saveRDS(res_nb_tau, 'res_nb_tau_luad.rds')
print("Calculos terminados luad")
831ec5996b458deb7e117a60c1871de711859cbb | 0755012385996546b7f81caaa66d44ba066d5362 | /R/pkRamp.r | 5f9711391ef8dba02f9b778903834bde8c3f229b | [] | no_license | CristianPachacama/pkrf | 0e95b68b30e638317959f574d8685f149214d0e3 | 1ce1990c266645ca232dec6886adad33fecc6db6 | refs/heads/master | 2020-06-14T18:34:53.730912 | 2019-07-03T08:43:52 | 2019-07-03T08:43:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,835 | r | pkRamp.r | # custom color ramp generator
# philip kraaijenbrink
#' Load preset color ramps
#'
#' Load color ramps from preset
#' @param name Name of the color ramp (string).
#' @param number Number of output colors desired (integer).
#' @param reversed Should the ramp be reversed (logical).
#' @param show Show a plot of all available color ramps (logical).
#'
#' @return Vector of hex color strings.
#' @export
pkRamp <- function(name = 'parula', number = 100, reversed = FALSE, show = FALSE){
  # Available preset ramps; matching against `name` is case-insensitive.
  namelist <- c('Jet','Parula','Viridis','Inferno','Magma','Plasma','Cividis','ElevAG','WtSpec','RwB','PwG','OwP','Spectral',
                'ElevNat1','ElevNat2','RwBsoft','RwBpale','Taupe', 'Blues','Greens','BW','MHblues','MHramp')
  # Fail early on an unknown ramp name. (The original assigned the result
  # of stop() to a variable called `message`, shadowing base::message;
  # that assignment was dead code since stop() never returns.)
  if (!tolower(name) %in% tolower(namelist)){
    stop(paste0('Color ramp not available. Choose any of:\n', paste(sort(namelist), collapse='\n')))
  }
  # Hex color stops (without '#') for a preset, looked up by position
  # in `namelist`.
  getPal <- function(name){
    switch(match(tolower(name), tolower(namelist)),
           c("0000FF","0080FF","00FFFF","80FF80","FFFF00","FF8000","FF0000","800000"),
           c("352A87","0567DF","108ED0","1BAEB4","6BBC85","C4BB5E","F7CB33","F9FB0E"),
           c("440154","482878","3E4A89","31688E","26828E","1F9E89","35B779","6DCD59","B4DE2C","FDE725"),
           c("000004","1B0C42","4B0C6B","781C6D","A52C60","CF4446","ED6925","FB9A06","F7D03C","FCFFA4"),
           c("000004","180F3E","451077","721F81","9F2F7F","CD4071","F1605D","FD9567","FEC98D","FCFDBF"),
           c("0D0887","47039F","7301A8","9C179E","BD3786","D8576B","ED7953","FA9E3B","FDC926","F0F921"),
           c("00204D","00336F","39486B","575C6D","707173","8A8779","A69D75","C4B56C","E4CF5B","FFEA46"),
           c("AFF0E9","FFFFB3","008040","FCBA03","800000","69300D","ABABAB","FFFCFF"),
           c("FFFFFF","B7E8FF","ACD88C","DDE241","E83535","380000"),
           c("67001F","B2182B","D6604D","F4A582","FDDBC7","F7F7F7","D1E5F0","92C5DE","4393C3","2166AC","053061"),
           c("40004B","762A83","9970AB","C2A5CF","E7D4E8","F7F7F7","D9F0D3","A6DBA0","5AAE61","1B7837","00441B"),
           c("7F3B08","B35806","E08214","FDB863","FEE0B6","F7F7F7","D8DAEB","B2ABD2","8073AC","542788","2D004B"),
           c("9E0142","D53E4F","F46D43","FDAE61","FEE08B","FFFFBF","E6F598","ABDDA4","66C2A5","3288BD","5E4FA2"),
           c('566314','9ba864','c5cc90','a59a7f','5b4242'),
           c('75a05b','e5d9a7','fcc575','baa395','e2e2e2'),
           c('93021b','e28f76','F4F4F4','8c95ad','2b2d42'),
           c("934855","e2b7aa","F4F4F4","9ca0ad","353642"),
           c('22223b','4a4e69','9a8c98','c9ada7','f2e9e4'),
           c('f7fbff','deebf7','c6dbef','9ecae1','6baed6','4292c6','2171b5','08519c','08306b'),
           c('f7fcf5','e5f5e0','c7e9c0','a1d99b','74c476','41ab5d','238b45','006d2c','00441b'),
           c('000000','ffffff'),
           c('ffffff','232365'),
           c('FFCD00','c1ae5e','6b6b8c','232365')
    )
  }
  if (show){ # make plot of available ramps
    x11(bg='#2d2d2d')
    op <- par(mar=c(0.5,6,0.5,1))
    on.exit(par(op), add = TRUE)  # restore graphics state even on error
    all <- do.call(rbind, lapply(namelist, function(x) colorRampPalette(paste0('#',getPal(x)))(number)))
    plot(NA, axes=FALSE, xlab='', ylab='', ylim=c(0,length(namelist)+1), xlim=c(1,number), xaxs='i', yaxs='i')
    for (i in seq_along(namelist)){
      irev <- (length(namelist):1)[i]  # draw the first ramp at the top
      for (j in seq_len(number)){
        polygon(x=c(j-1,j,j,j-1), y=c(irev-0.4,irev-0.4,irev+0.4,irev+0.4), border=NA, col=all[i,j])
      }
    }
    mtext(namelist, side=2, line=0.5, at=length(namelist):1, padj=0.5, adj=1, las=2, cex=1.1, col='white')
    warning('No color ramp output when "show=TRUE", just a plot with available color ramps')
  } else { # generate output
    colpal <- getPal(name)
    if (reversed){
      colpal <- rev(colpal)
    }
    colorRampPalette(paste0('#',colpal))(number)
  }
}
|
095c993a8cbac49fea63fdf07582bc1990ebd0b3 | 45ab1e397b5fc69ba84c8f5dfb66c09b79bca4c6 | /Course_II/R/pract/pract6/task4.r | 02de7ddea125f15b97e774eabcd9305eea3b43f0 | [
"WTFPL"
] | permissive | GeorgiyDemo/FA | 926016727afa1ce0ee49e6ca9c9a3c60c755b35f | 9575c43fa01c261ea1ed573df9b5686b5a6f4211 | refs/heads/master | 2023-06-28T00:35:43.166167 | 2023-06-16T14:45:00 | 2023-06-16T14:45:00 | 203,040,913 | 46 | 65 | WTFPL | 2022-04-09T21:16:39 | 2019-08-18T18:19:32 | Jupyter Notebook | UTF-8 | R | false | false | 1,859 | r | task4.r | #Функция
calc <- function(number,
                 parameter = "rus",
                 short = FALSE) {
  # Return the weekday name for a 1-based day number (1 = Monday,
  # 7 = Sunday). Numbers above 7 wrap around the week; numbers below 1
  # (after rounding) yield a single space " ".
  #
  # Args:
  #   number:    numeric day index; rounded to the nearest integer first.
  #   parameter: output language; any recognized spelling of "English"
  #              selects English, anything else selects Russian.
  #   short:     TRUE for the abbreviated day name.
  number <- round(number, 0)
  # Wrap into 1..7. (Bug fix: the original used `number %% 7`, which
  # mapped exact multiples of 7 -- e.g. 14, 21 -- to 0 and therefore to
  # the " " fallback, while 7 itself correctly gave Sunday.)
  if (number > 7) {
    number <- ((number - 1) %% 7) + 1
  }
  # Below the valid range: return a blank placeholder.
  if (number < 1) {
    return(" ")
  }
  # English output if requested in any recognized spelling.
  if (is.element(parameter,
                 c("Eng", "eng", "English", "english", "англ", "Англ", "анг"))) {
    if (short) {
      day <- switch(number,
                    "Mon.",
                    "Tue.",
                    "Wed.",
                    "Thu.",
                    "Fri.",
                    "Sat.",
                    "Sun.")
    } else {
      day <- switch(number,
                    "Monday",
                    "Tuesday",
                    "Wednesday",
                    "Thursday",
                    "Friday",
                    "Saturday",
                    "Sunday")
    }
  } else {
    # Russian output (default). ("Пон.." double-dot typo fixed.)
    if (short) {
      day <- switch(number,
                    "Пон.",
                    "Втр.",
                    "Сред.",
                    "Чет.",
                    "Пят.",
                    "Суб.",
                    "Вос.")
    } else {
      day <- switch(number,
                    "Понедельник",
                    "Вторник",
                    "Среда",
                    "Четверг",
                    "Пятница",
                    "Суббота",
                    "Воскресенье")
    }
  }
  return(day)
}
# Main program: demonstrate calc() with default (Russian) and English output
{
  # Run the function normally (Russian long name by default)
  result <- calc(20)
  print(result)
  # English, abbreviated
  result <- calc(20, "Eng", TRUE)
  print(result)
}
c0c2b7172f22e0ac132232bfc0a83c1bc06209c5 | 9caa55c11b8da87b4a8881df0b9ec6ab092f4648 | /ClusterFire.R | c664a78bdb95e8f51720a19fcb9f02885101a841 | [] | no_license | zl22good/Forest-Fire-Cluster | b3f810c4604a90c7befa6a02546b81f8ec6c6ad7 | 2fdc48eb0ed1bb1b36d7e03986fc85e34fa66c1e | refs/heads/master | 2020-05-18T04:41:52.206353 | 2019-04-30T03:25:50 | 2019-04-30T03:25:50 | 184,181,773 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 661 | r | ClusterFire.R | #Clustering
#K-means clustering of the forest-fire weather attributes (columns 3:8).
ab = read.csv("forestfires.csv")
a = ab[,c(3:8)]
str(a)
rownames(a)
complete.cases(a)
all(complete.cases(a))
# 2 standardize your data
Xs = scale(a)
Xs
# 3 Compute the "distance" between observation
dd = dist(Xs)
dd
# 4 K-means pick k
# NOTE(review): kmeans() is given the dist object `dd` rather than the
# standardized data `Xs`; kmeans expects a data matrix, so this clusters
# the flattened pairwise distances. Presumably kmeans(Xs, ...) was
# intended -- confirm before reusing these results.
km=kmeans(dd,3) # 3 clusters (just for example)
km
km$cluster
#How many clusters should we use?
#Use the following algorithm (elbow plot of within-group SS against k)
ws = 0
for (i in 1:15) ws[i]=kmeans(dd,i,nstart = 50)$tot.withinss
plot(1:15,ws,type="b",xlab="Number of clusters",ylab="within groups variable")
#I see an elbow at 4!
km=kmeans(dd,4,nstart = 50)
library(cluster)
clusplot(Xs,km$cluster,color=T,shade=T,groups=ab$month)
|
b1707a5ddf3a95517d10f0012f4a41eb4f74f2a7 | d7ff71e8ffb07419aad458fb2114a752c5bf562c | /man/style_roxygen_example_snippet.Rd | e1ed4d4fd3efe13a1949e4c99ce716b6959606e1 | [
"MIT"
] | permissive | r-lib/styler | 50dcfe2a0039bae686518959d14fa2d8a3c2a50b | ca400ad869c6bc69aacb2f18ec0ffae8a195f811 | refs/heads/main | 2023-08-24T20:27:37.511727 | 2023-08-22T13:27:51 | 2023-08-22T13:27:51 | 81,366,413 | 634 | 79 | NOASSERTION | 2023-09-11T08:24:43 | 2017-02-08T19:16:37 | R | UTF-8 | R | false | true | 2,143 | rd | style_roxygen_example_snippet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roxygen-examples.R
\name{style_roxygen_example_snippet}
\alias{style_roxygen_example_snippet}
\title{Given a code snippet is dont* or run, style it}
\usage{
style_roxygen_example_snippet(
code_snippet,
transformers,
is_dont,
base_indention
)
}
\arguments{
\item{code_snippet}{A character vector with code to style.}
\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.}
\item{is_dont}{Whether the snippet to process is a dontrun, dontshow,
donttest segment or not.}
\item{base_indention}{Integer scalar indicating by how many spaces the whole
output text should be indented. Note that this is not the same as splitting
by line and add a \code{base_indention} spaces before the code in the case
multi-line strings are present. See 'Examples'.}
}
\description{
Given a code snippet is dont* or run, style it
}
\section{Hierarchy}{
Styling involves splitting roxygen example code into segments, and segments
into snippets. This describes the process for input of
\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}:
\itemize{
\item Splitting code into roxygen example code and other code. Downstream,
we are only concerned about roxygen code. See
\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}.
\item Every roxygen example code can have zero or more
dontrun / dontshow / donttest sequences. We next create segments of roxygen
code examples that contain at most one of these. See
\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}.
\item We further split the segment that contains at most one dont* sequence into
snippets that are either don* or not. See
\code{\link[=style_roxygen_code_example_segment]{style_roxygen_code_example_segment()}}.
}
Finally, that we have roxygen code snippets that are either dont* or not,
we style them in \code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}} using
\code{\link[=parse_transform_serialize_r]{parse_transform_serialize_r()}}.
}
\keyword{internal}
|
9484d64a90c82d3e128481e16383a814825ecb67 | c1e7158f1947a3135a033487482877b77b439072 | /plot OSM extracted centroids - kml file.R | a4cfdbc54843483ed317833418bf55c19de5e2de | [] | no_license | amandairish-zz/Roof-type-project | b9bad302d5d806d8865d34e790c73aa290e80e1b | d21e65fc9ac331226a9676442199e0f31f3383df | refs/heads/master | 2022-02-21T23:58:56.026627 | 2019-09-16T20:32:17 | 2019-09-16T20:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,137 | r | plot OSM extracted centroids - kml file.R | # plot osm points on google earth
# since osm BWA validation points are not
# working well with training data
# 5/5/19
# Export OSM-extracted building centroids (metal/tile/thatch roofs) to a
# KML file for inspection in Google Earth.
library(ggmap)
library(maptools)
library(rgdal)
library(tidyverse)
setwd("/Users/amandairish/Desktop/Malaria project/Botswana")
bw.ext <- read.csv("/Users/amandairish/Desktop/Malaria project/Botswana/botswana buildings/BW_Sentinel_032519_new.csv")
# get rid of observations that aren't metal/tile/thatch
bw.ext <- bw.ext %>%
  filter(Type == "metal" | Type == "thatch" | Type == "tile") %>%
  rename(LULC = Type)
bw.ext$LULC <- factor(bw.ext$LULC) # get rid of unused factor levels
levels(bw.ext$LULC)
# rename levels to match SL predictions (capitalize each roof type)
levels(bw.ext$LULC)[levels(bw.ext$LULC)=="metal"] <- "Metal"
levels(bw.ext$LULC)[levels(bw.ext$LULC)=="tile"] <- "Tile"
levels(bw.ext$LULC)[levels(bw.ext$LULC)=="thatch"] <- "Thatch"
coordinates(bw.ext)<-c("Longitude", "Latitude") #Build a SpatialPointsData Frame
proj4string(bw.ext)<-CRS("+proj=longlat +datum=WGS84")
##The one line of code below writes our SpatialPointsDataFrame to a KML File
writeOGR(bw.ext, dsn="bwa.kml", layer= "LULC", driver="KML")
|
a71d9519961bdf280dce32f45452d82a4def1df4 | 9191950f0e79d501b10201b82e06e8b665194ce9 | /src/central-tendency/mean/Event Study Plot JPE.R | 89ed2148eb908b9290db8a59a076c8edf87b62cc | [
"MIT"
] | permissive | tanxpyox/professional-visualisation | b012380a03897afc7849551ae089345e5ab5e93e | aa7fad0c58a0b2191c98c88f2ae0b89b9fdac42d | refs/heads/master | 2023-04-27T15:53:20.499445 | 2023-04-16T19:07:14 | 2023-04-16T19:07:14 | 271,278,934 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,647 | r | Event Study Plot JPE.R | # Event Study Plot w/ CI
# Proto, Rustichini, and Sofianos, "Intelligence, Personality,
# and Gains from Cooperation in Repeated Interactions",
# Journal of Political Economy, 1363.
# Data from Quek (2017) "Rationalist Experiments on War" (Figure 6)
library(haven)
master <- read_dta("data/quek-rw-2017.dta")
output_dir="output"
library(dplyr)
library(ggplot2)
library(magrittr)
library(ggpubr)
library(ggsignif)
# Mean incidence of war and its standard error, by enforcement condition
# and round; keep rounds 11-15 only.
df <- master %>%
  group_by(enforce11,period) %>%
  summarise(
    mean = mean(war),
    se = sd(war)/sqrt(n())
  ) %>%
  filter(period<16 & period > 10)
df$label <- ifelse(df$enforce11==0, "No Enforcement", "Enforcement")
# Point-and-line plot with +/- 1 SE error bars, one series per condition.
p <- ggplot(df,aes(x=period, y=mean, group=label, color=label, shape=label)) +
  geom_point(size=3) +
  geom_line(size=1) +
  geom_errorbar(aes(ymax=mean+se,ymin=mean-se),width=0.1) +
  theme_bw() +
  scale_y_continuous(labels = scales::percent, limits=c(0,1)) +
  scale_colour_manual(values=c('darkgrey','black')) +
  scale_shape_manual(values=c(1,19)) +
  xlab("Round") + ylab("Incidence of War") +
  theme(
    legend.title = element_blank(),
    legend.position = "bottom",
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    panel.grid.minor.y = element_blank(),
    axis.line = element_line(size=0.5),
    panel.border=element_blank(),
  )
# NOTE(review): `p` is assigned but never printed, and ggsave() is called
# without `plot =`, so it saves the last *displayed* plot. Consider
# ggsave(..., plot = p) to be explicit.
ggsave(path=output_dir, filename="Bar Plot.png", width=9, height=6)
# Save example to src folder
# ggsave(path='src/central-tendency/mean', filename='Event Study Plot JPE-example.png', width=9, height = 6)
|
b5d9f341c58fa1c84bc679d3b362b13e2555d722 | 33ced6d677e62ca13ea8e32a14f7bba6b57d19c4 | /Analytical Modeling/exponential smoothing.R | fb235627099cb80184d676ed346753e02b219601 | [] | no_license | shanwenqian23/Samples | 52a37442397b687b10046ea1d1acb811ba6015f3 | 6f5221b3af5f7342c293e22697da4ec9af9932c5 | refs/heads/main | 2023-04-27T05:59:38.466667 | 2021-05-20T12:40:48 | 2021-05-20T12:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,445 | r | exponential smoothing.R | # Homework 4 ISYE 6501
# 7.2 Build an exponential smoothing model to help make a judgement
# of whether the unofficial end of summer has gotten later over the last 20 years
library(cusum)
library(qcc)
# NOTE(review): both cusum and qcc are attached and each can provide a
# cusum() function; whichever is attached last masks the other. Consider
# an explicit namespace (pkg::cusum) for the calls below.
library(dplyr)
# Read in the data: 123 daily rows x 20 yearly temperature columns
temps = as.data.frame(read.table("temps.txt",header=TRUE))
# NOTE(review): r_names is assigned but never used below
r_names = as.data.frame(temps[1:123,1])
row.names(temps) <- temps[,1]
#convert all data to one dimensional wrap-around frame
# (column-major order: all 123 days of 1996, then 1997, ...)
temps_vect <- as.vector(unlist(temps[,2:21]))
# convert vector to a time series object (understands time units)
# frequency is agnostic to actual physical time (full year is not a cycle)
temps_ts <- ts(temps_vect,start=1996,frequency=123)
plot(temps_ts,main="Time Series",ylab="Temperature (F)")
# perform exponential smoothing.
# Additive has lower sse, meaning - as temps get warmer, the temp swings don't necessarily get more extreme
# Note: passing both choices to `seasonal` relies on match.arg picking
# the FIRST entry, so this fits the additive model.
temps_HW <- HoltWinters(temps_ts,alpha=NULL,beta=NULL,gamma=NULL,seasonal=c("additive","multiplicative"))
temps_HW$alpha
temps_HW$beta
temps_HW$gamma
plot(temps_HW,ylab="Observed/Fitted Temperature")
summary(temps_HW)
# Analyze the Holt Winters Model
fitted_output <- temps_HW$fitted # "x-hat = Smoothed model"
head(temps_HW$fitted)
tail(temps_HW$fitted)
# Column 4 of the fitted matrix is the seasonal component, reshaped into
# a 123-day by 19-year matrix.
temps_HW_sf <- matrix(temps_HW$fitted[,4],nrow=123)
# Column 1 (xhat) for the fitted years: 2337 = 19 * 123 values in time
# order (1997 day 1..123, then 1998, ...).
smoothed <- matrix(fitted_output[1:2337,1])
# index the smoothed DF to separate the data back into years
# (e.g. ts_object[1:123])
# use the cbind function to combine the vectors into a data frame
# NOTE(review): because `smoothed` is in time order, smoothed[1:19,] is
# the first 19 DAYS of 1997, not day 1 across the 19 years -- verify the
# reshape below builds the intended day-by-year table.
smoothed_day <- as.data.frame(t(smoothed[1:19,]))
x1=20
for(day in 2:123){
x2=day*19
row_yr = t(smoothed[x1:x2,])
# NOTE(review): merge() on all shared columns does not guarantee row
# order; a rbind-style stack may be what was intended here.
smoothed_day <- merge(smoothed_day,row_yr,all.x=TRUE,all.y = TRUE)
x1=x2+1
}
smoothed_day
row.names(smoothed_day) <- temps[,1]
head(smoothed_day)
smoothed_ts <- ts(smoothed_day,start=1997,end=2015,frequency = 123)
plot(as.vector(unlist(smoothed_ts[1:19,])),main="Smoothed Data",ylab="Temperature")
# use the smoothed model with cusum to detect change
# (rows 1:34 = calibration period, rows 35:123 = monitored period)
q2 <- cusum(smoothed_day[1:34,1:19],failure_probability = 0.05, newdata = smoothed_day[35:123,1:19],limit=2.96,main="Cusum Chart Using July as Calibration Data")
summary(q2)
# -----------------------------------------------------------------
# year to year: transpose so rows are years, calibrate on 1997-2006,
# monitor 2007-2015
smoothed_yr <- t(smoothed_day)
q3 <- cusum(smoothed_yr[1:10,],failure_probability = 0.05, newdata = smoothed_yr[11:19,],limit=2.96,main="Cusum Chart Using July as Calibration Data")
summary(q3)
|
29e7c59465826bdfe45644c906a7f094ffc0b4e3 | 278057fd9b095a2805639d1c5f2cb7afdd33a44f | /man/avsapply.Rd | cad711d955849731fbcbead8fb3215f5f0d3e281 | [] | no_license | tlcaputi/tlcPack | 6301c14540d02c35f05d163f6600d96b2c4fac67 | d8d42ce3e3a807ec0e12bd20549fd1e3ac1861d4 | refs/heads/master | 2020-03-24T11:23:48.352052 | 2020-03-23T22:47:13 | 2020-03-23T22:47:13 | 142,684,188 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 235 | rd | avsapply.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackProjectFunctions.r
\name{avsapply}
\alias{avsapply}
\title{Make sapply into a vector}
\usage{
avsapply(...)
}
\description{
Make sapply into a vector
}
|
122028c1457205b2f30c6d5ed0acf11d82428f21 | bfcb2d5bed84e7bb28e371d24a18535d95308bd5 | /test.R | 9767e4aa8f5be86141ac31148a54760e165565e5 | [] | no_license | sika0115/R_study | 025f9294b1afc5a2b3c7cdb4472584472252fdbf | 45cd9c067a18be33ad570b94b17ae8299bd68d44 | refs/heads/master | 2020-04-20T05:57:42.040503 | 2019-02-05T01:23:22 | 2019-02-05T01:23:22 | 168,670,348 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 127 | r | test.R | #ベクトルの作成
# Create a numeric vector by enumerating its elements (auto-printed below)
x <- c(1,2,3,4,5)
x
# Concatenate two integer sequences: 1..5 followed by 3 down to 1
y <- c(1:5,3:1)
y
# rep(): repeat 3 four times, then repeat 1, 5 and 10 two, three and
# four times respectively
z <- c(rep(3,4),rep(c(1,5,10),c(2,3,4)))
z
# Character vector
a <- c("A","B","C")
a
|
f5eba4dca52e77e535c787fb83e9bf1348058eb4 | 6e910000ab0b80f097ba60724834c2ca46830e9f | /supervised /classification_code/bikeshare/true_ensemble.R | 3b6602b2d910a9175080b300b732738b78de7168 | [
"BSD-3-Clause"
] | permissive | taposh/mlearning | 4d65f10ceb57ac13ba45bb357dfa4d0401190020 | 1f33c4223c0a33589530a16bac32a1016231737b | refs/heads/master | 2022-11-01T16:47:42.781101 | 2022-10-18T20:12:52 | 2022-10-18T20:12:52 | 28,143,470 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 37,862 | r | true_ensemble.R | #Default Directory of Data
setwd("/Users/taposh/workspace/kaggle/bikeshare/")
#Loading Required Libraries
suppressPackageStartupMessages({
library(RWeka)#for PrincipalComponentAnalysis filter
library(wavelets)#for discret wavelet transforms
library(nnet)#training simple layer neural network
library(matrixStats)#calculate std of columns
library(pls)#multivariate regression
library(monmlp)#training multilayer perceptron
library(e1071)#training SVM
library(prospectr)#preprocessing
library(kernlab)# training gaussian process
})
#Row-wise Haar discrete wavelet smoothing: each pass replaces every row
#with the level-1 scaling (approximation) coefficients of its Haar DWT,
#roughly halving the number of columns.
#DF1: numeric matrix/data frame, one series per row
#nTimes: number of smoothing passes (0 = return the input unchanged)
#Returns a data frame of the compressed rows.
#Fix: the original `for (i in 1:nTimes)` ran TWICE when nTimes = 0
#(because 1:0 is c(1, 0)); seq_len(nTimes) iterates exactly nTimes times.
HaarTransform=function(DF1,nTimes=1)
{
  keepApprox=function(k)
  {
    s1=dwt(k, filter="haar")
    s1@V[[1]]  # level-1 scaling coefficients of the transform
  }
  Smt=DF1
  for (i in seq_len(nTimes))
  {
    Smt=t(apply(Smt,1,keepApprox))
  }
  return (data.frame(Smt))
}
#Row-wise finite differences of order D: each row of DF1 is treated as a
#series and differenced D times, so the result has ncol(DF1) - D columns.
#DF1: input matrix or data frame
#D: difference order (passed to diff() as `differences`)
Derivative=function(DF1,D=1)
{
  byColumn=t(DF1)
  differenced=diff(byColumn, differences = D)
  t(differenced)
}
#Fit an ensemble of multilayer perceptrons (monmlp) on `train`/`Labels`
#and return the ensemble-averaged predictions for `test` as a data frame.
#Iters: max optimizer iterations; Hidden1/Hidden2: hidden-layer sizes;
#IWeights: half-width of the uniform initial-weight range;
#N.ensemble: number of networks averaged in the ensemble;
#LTh/LTo (+ .prime): hidden/output transfer functions and derivatives;
#Seed: RNG seed so the fit is reproducible.
GetMLPPreds=function(train,test,Labels,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5,N.ensemble=10,LTh = tansig, LTo = linear,LTh.prime = tansig.prime, LTo.prime = linear.prime,Seed=1)
{
  gc()
  set.seed(Seed)
  fit=monmlp.fit(as.matrix(train), as.matrix(Labels),
                 scale.y=TRUE, n.trials=1,
                 hidden1=Hidden1, hidden2=Hidden2,
                 n.ensemble=N.ensemble,
                 Th=LTh, To=LTo,
                 Th.prime=LTh.prime, To.prime=LTo.prime,
                 iter.max=Iters, monotone=NULL, bag=FALSE,
                 init.weights=c(-(IWeights),IWeights),
                 max.exceptions=10, silent=TRUE)
  pr1=monmlp.predict(as.matrix(test), weights=fit)
  rm(fit)
  gc()
  data.frame(pr1)
}
#Fit a single-hidden-layer network (nnet, linear output with skip-layer
#connections) on `train`/`Labels` and predict `test`.
#Size/Rang/Decay/Iters/MaxWts map onto nnet's size/rang/decay/maxit/MaxNWts.
#Returns the predictions as a one-column data frame.
GetNNETPreds=function(train,test,Labels,Size=10,Rang=0.5,Decay=0.1,Iters=100,MaxWts=1500)
{
  set.seed(1)  # fixed seed: reproducible random initial weights
  fit=nnet((Labels)~., data=train,
           size=Size, linout=TRUE, skip=TRUE,
           rang=Rang, decay=Decay,
           MaxNWts=MaxWts, maxit=Iters, trace=FALSE)
  pr1=predict(fit, test)
  rm(fit)
  gc()
  data.frame(pr1)
}
#Fit a support vector regression (e1071::svm, default RBF kernel) on the
#unscaled features in `train` with targets `Labels`, then predict `test`.
#Cost: constraint-violation cost C passed to svm().
#Returns the predictions as a one-column data frame.
GetSVMPreds=function(train,test,Labels,Cost=10000)
{
  set.seed(1)  # reproducible fit
  model=svm(data.frame(train), Labels, scale=FALSE, cost=Cost)
  pr1=predict(model, data.frame(test))
  data.frame(pr1)
}
#Fit a Gaussian process (kernlab::gausspr) on `train`/`Labels` and
#return predictions for `test` as a data frame.
#Kernel/Kpar: kernel name and its parameters;
#Tol: optimizer tolerance; Var: initial noise variance.
GetGaussPreds=function(train,test,Labels,Kernel='rbfdot',Kpar='automatic',Tol=0.05,Var=0.01)
{
  set.seed(1)  # reproducible fit
  gp=gausspr(data.frame(train), (Labels), type=NULL,
             kernel=Kernel, kpar=Kpar, var=Var,
             variance.model=TRUE, tol=Tol, cross=0, fit=FALSE)
  pr1=predict(gp, data.frame(test))
  rm(gp)
  gc()
  data.frame(pr1)
}
#Fit a principal-component regression (pls::mvr with the PCR algorithm)
#on `train`/`Labels` and return predictions for `test`, averaged over
#all component counts 1..Ncomp, as a one-column data frame.
#Ncomp: number of components to fit.
#Scale: accepted for interface compatibility but NOT forwarded to mvr()
#  (scaling is always on, as in the original).
#Fix: the old default was `Scale=True`, which references the undefined
#symbol `True` and only survived because the argument is never evaluated
#(lazy evaluation). The default is now TRUE. The argument is still not
#forwarded because existing call sites also pass the undefined `True`,
#which would error the moment the promise is forced.
GetMVRPreds=function(train,test,Labels,Ncomp=120,Scale=TRUE)
{
  set.seed(1)
  model=mvr(Labels~., data=data.frame(train), ncomp=Ncomp,
            method=pls.options()$pcralg, scale=TRUE)
  #predict.mvr returns one prediction per component count; average them
  pr1=data.frame(predict(model,data.frame(test)))
  pr1=data.frame(rowMeans(pr1))
  rm(model)
  gc()
  return (data.frame(pr1))
}
#Depth-stratified MLP: train one model per soil layer ('Depth' == 1 and
#'Depth' == 2) and stitch the per-layer predictions back together in the
#original test row order.
#train/test: feature frames that both contain a 'Depth' column (kept as
#  a feature here, unlike the other *DepthPreds helpers, which drop it).
#Labels: target values aligned with the rows of `train`.
#Remaining arguments are forwarded to GetMLPPreds.
#Fix: subsets the `Labels` argument instead of the global `MyTarget`,
#removing a hidden dependency on the calling script's environment
#(callers already pass MyTarget as Labels, so behavior is unchanged).
GetMLPDepthPreds=function(train,test,Labels,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5,N.ensemble=10,LTh = tansig, LTo = linear,LTh.prime = tansig.prime, LTo.prime = linear.prime,Seed=1)
{
  inD1=train[,'Depth']==1
  inD2=train[,'Depth']==2
  outD1=test[,'Depth']==1
  outD2=test[,'Depth']==2
  prD1=GetMLPPreds(train[inD1,],test[outD1,],Labels[inD1],Iters=Iters,Hidden1=Hidden1,Hidden2=Hidden2,IWeights=IWeights,N.ensemble=N.ensemble,LTh = LTh, LTo = LTo,LTh.prime = LTh.prime, LTo.prime = LTo.prime,Seed=Seed)
  colnames(prD1)[1]='pr'
  prD2=GetMLPPreds(train[inD2,],test[outD2,],Labels[inD2],Iters=Iters,Hidden1=Hidden1,Hidden2=Hidden2,IWeights=IWeights,N.ensemble=N.ensemble,LTh = LTh, LTo = LTo,LTh.prime = LTh.prime, LTo.prime = LTo.prime,Seed=Seed)
  colnames(prD2)[1]='pr'
  #rbind yields a frame of the right size; then overwrite by position so
  #each test row receives the prediction from its own layer's model.
  prD=rbind(prD1,prD2)
  prD[outD1,1]=prD1
  prD[outD2,1]=prD2
  return (data.frame(prD))
}
#Depth-stratified variant of GetSVMPreds: drop the trailing 'Depth'
#column, train one SVM per layer ('Depth' == 1 / == 2), and recombine
#the predictions in the original test row order.
#Assumes 'Depth' is the LAST column of both train and test.
#Cost is forwarded to GetSVMPreds.
#Fixes: uses the `Labels` argument instead of the global `MyTarget`
#(callers pass MyTarget as Labels, so behavior is unchanged) and removes
#the no-op self-assignments of train/test.
GetSVMDepthPreds=function(train,test,Labels,Cost=10000)
{
  depthCol=ncol(train)  # 'Depth' is assumed to be the last column
  inD1=train[,'Depth']==1
  inD2=train[,'Depth']==2
  outD1=test[,'Depth']==1
  outD2=test[,'Depth']==2
  prD1=GetSVMPreds(train[inD1,-depthCol],test[outD1,-depthCol],Labels[inD1],Cost=Cost)
  colnames(prD1)[1]='pr'
  prD2=GetSVMPreds(train[inD2,-depthCol],test[outD2,-depthCol],Labels[inD2],Cost=Cost)
  colnames(prD2)[1]='pr'
  #rbind yields a frame of the right size; then overwrite by position so
  #each test row receives the prediction from its own layer's model.
  prD=rbind(prD1,prD2)
  prD[outD1,1]=prD1
  prD[outD2,1]=prD2
  return (data.frame(prD))
}
#Depth-stratified variant of GetGaussPreds: drop the trailing 'Depth'
#column, train one Gaussian process per layer ('Depth' == 1 / == 2), and
#recombine the predictions in the original test row order.
#Assumes 'Depth' is the LAST column of both train and test.
#Kernel/Kpar/Tol/Var are forwarded to GetGaussPreds.
#Fixes: uses the `Labels` argument instead of the global `MyTarget`
#(callers pass MyTarget as Labels, so behavior is unchanged) and removes
#the no-op self-assignments of train/test.
GetGaussDepthPreds=function(train,test,Labels,Kernel='rbfdot',Kpar='automatic',Tol=0.05,Var=0.01)
{
  depthCol=ncol(train)  # 'Depth' is assumed to be the last column
  inD1=train[,'Depth']==1
  inD2=train[,'Depth']==2
  outD1=test[,'Depth']==1
  outD2=test[,'Depth']==2
  prD1=GetGaussPreds(train[inD1,-depthCol],test[outD1,-depthCol],Labels[inD1],Kernel=Kernel,Kpar=Kpar,Tol=Tol,Var=Var)
  colnames(prD1)[1]='pr'
  prD2=GetGaussPreds(train[inD2,-depthCol],test[outD2,-depthCol],Labels[inD2],Kernel=Kernel,Kpar=Kpar,Tol=Tol,Var=Var)
  colnames(prD2)[1]='pr'
  #rbind yields a frame of the right size; then overwrite by position so
  #each test row receives the prediction from its own layer's model.
  prD=rbind(prD1,prD2)
  prD[outD1,1]=prD1
  prD[outD2,1]=prD2
  return (data.frame(prD))
}
#Depth-stratified variant of GetMVRPreds: drop the trailing 'Depth'
#column, train one PCR model per layer ('Depth' == 1 / == 2), and
#recombine the predictions in the original test row order.
#Assumes 'Depth' is the LAST column of both train and test.
#Ncomp/Scale are forwarded to GetMVRPreds (which currently ignores Scale).
#Fixes: uses the `Labels` argument instead of the global `MyTarget`
#(callers pass MyTarget as Labels, so behavior is unchanged), removes the
#no-op self-assignments of train/test, and repairs the default
#`Scale=True` (an undefined symbol) to `Scale=TRUE`.
GetMVRDepthPreds=function(train,test,Labels,Ncomp=120,Scale=TRUE)
{
  depthCol=ncol(train)  # 'Depth' is assumed to be the last column
  inD1=train[,'Depth']==1
  inD2=train[,'Depth']==2
  outD1=test[,'Depth']==1
  outD2=test[,'Depth']==2
  prD1=GetMVRPreds(train[inD1,-depthCol],test[outD1,-depthCol],Labels[inD1],Ncomp=Ncomp,Scale=Scale)
  colnames(prD1)[1]='pr'
  prD2=GetMVRPreds(train[inD2,-depthCol],test[outD2,-depthCol],Labels[inD2],Ncomp=Ncomp,Scale=Scale)
  colnames(prD2)[1]='pr'
  #rbind yields a frame of the right size; then overwrite by position so
  #each test row receives the prediction from its own layer's model.
  prD=rbind(prD1,prD2)
  prD[outD1,1]=prD1
  prD[outD2,1]=prD2
  return (data.frame(prD))
}
#Run Weka's PrincipalComponents filter on `df`: the first column is used
#as the formula response, the remaining columns as inputs.
#var: variance-covered threshold forwarded as Weka's -R option.
#Returns the component columns; the last output column (the response
#attribute Weka appends -- presumably; verify against the Weka docs) is
#dropped.
WekPCA=function(df,var)
{
  PrincipalComponents=make_Weka_filter('weka/filters/unsupervised/attribute/PrincipalComponents')
  filtered=PrincipalComponents(df[,1]~., data=df[,-1], control=c('-R',var))
  filtered[,-ncol(filtered)]
}
#Stack the train and test frames and rank their columns by standard
#deviation (rank 1 = smallest spread). Used for SD-based feature
#selection on the combined data.
#trainDF/testDF: frames with identical columns.
#Returns a numeric vector of ranks, one per column.
GetRanksBySD=function(trainDF,testDF)
{
  combined=rbind(trainDF,testDF)
  columnSds=colSds(as.matrix(combined))
  rank(columnSds)
}
############################
#Reading datasets (spectral data: column 1 is the PIDN id, columns
#2:3579 are spectra, then spatial covariates, then the five targets)
###########################
TrainDataSet=read.csv('training.csv')
TestDataSet=read.csv('sorted_test.csv')
#Append placeholder target columns to the test set so train and test
#have identical shapes and can be row-bound (the values are dummies).
TestDataSet=cbind(TestDataSet,Ca=1:nrow(TestDataSet),P=1:nrow(TestDataSet),pH=1:nrow(TestDataSet),SOC=1:nrow(TestDataSet),Sand=1:nrow(TestDataSet))
#Combine the data sets so the preprocessing below sees train+test together
TrainAndTest=rbind(TrainDataSet,TestDataSet)
#Coerce column 3595 to numeric (it arrives as a factor after rbind).
#NOTE(review): presumably the 'Depth' covariate -- confirm the index.
TrainAndTest[,3595]=as.numeric(TrainAndTest[,3595])
#Partial PCA of the combined data set: divide the 3578 spectral columns
#into 30 consecutive sub-frames of 119 wavelengths each (the 30th takes
#the remainder up to column 3579), run Weka PCA on each sub-frame, and
#then column-bind the resulting components.
PC=list()
for (i in 1:30)
{
j1=(i-1)*119+2
j2=(i)*119+1
if (i==30)
{
j2=3579
}
temp1=TrainAndTest[,j1:j2]
flush.console()
#WekPCA uses its first column ('Ca' here) as the formula response
PC[[i]]=WekPCA(cbind(TrainAndTest['Ca'],temp1),0.999)
}
PComponents=PC[[1]]
for (i in 2:30)
{
PComponents=cbind(PComponents,PC[[i]])
}
#Multiplicative scatter correction on the spectral features (two passes)
TrainAndTestReduced=msc(as.matrix(TrainAndTest[,2:3579]))
TrainAndTestReduced=msc(as.matrix(TrainAndTestReduced))
#First derivatives of the corrected spectra
TrainAndTestReduced=Derivative(TrainAndTestReduced,1)
#Original data set (without transformation; PIDN column dropped)
TrainAndTestOriginal=TrainAndTest[2:3600]
#Reduced data set: partial PCs + 9-level Haar compression of the
#derivative spectra + the non-spectral columns (covariates and targets)
TrainAndTestReduced=cbind(PComponents,data.frame(HaarTransform(TrainAndTestReduced,9)),TrainAndTest[,3580:3600])
#Create the data frame for the submission file (dummy target values)
submission=data.frame(PIDN=TestDataSet['PIDN'],Ca=1:nrow(TestDataSet),P=1:nrow(TestDataSet),pH=1:nrow(TestDataSet),SOC=1:nrow(TestDataSet),Sand=1:nrow(TestDataSet))
k=2#counter of submission columns (column 1 is PIDN)
for (TheTarget in c('Ca','P','pH','SOC','Sand'))
{
cat (TheTarget,'.....................................................................\n')
#Retriving train and test data frames from original data set
trainOriginal=TrainAndTestOriginal[(1:nrow(TrainDataSet)),]
testOriginal=TrainAndTestOriginal[-(1:nrow(TrainDataSet)),]
#Retriving train and test data frames from reduced data set
trainReduced=TrainAndTestReduced[(1:nrow(TrainDataSet)),]
testReduced=TrainAndTestReduced[-(1:nrow(TrainDataSet)),]
ThisTarget=trainOriginal[,TheTarget]
MyTarget=trainOriginal[,TheTarget]
#Saturation and log transform for "P" target
if (TheTarget=='P')
{
MyTarget=ifelse(MyTarget>6,6,MyTarget)
MyTarget=log(1+MyTarget)
}
trainReduced=trainReduced[,!colnames(trainReduced)%in% c('Ca','P','pH','SOC','Sand')]
testReduced=testReduced[,!colnames(testReduced)%in% c('Ca','P','pH','SOC','Sand')]
trainOriginal=trainOriginal[,!colnames(trainOriginal)%in% c('Ca','P','pH','SOC','Sand')]
testOriginal=testOriginal[,!colnames(testOriginal)%in% c('Ca','P','pH','SOC','Sand')]
#Training and Prediction phase for "Ca" target
if (TheTarget=='Ca')
{
cat('Ca_SVM_Preds1... ')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 21, m = 0)#savitzkyGolay filter from prospectr library
MyTest=savitzkyGolay(MyTest, p = 3, w = 21, m = 0)
MyTrain=cbind(MyTrain,trainOriginal[c(3583,3586,3587,3588,3589,3591,3594)])
MyTest=cbind(MyTest,testOriginal[c(3583,3586,3587,3588,3589,3591,3594)])
Ca_SVM_Preds1=GetSVMPreds(MyTrain,MyTest,MyTarget,1000)
flush.console();gc();cat('\n');
cat('Ca_SVM_Preds2...')
flush.console()
Ca_SVM_Preds2=GetSVMPreds(trainOriginal,testOriginal,MyTarget,Cost=10000)
flush.console();gc();cat('\n');
cat('Ca_SVM_Preds3...')
flush.console()
Ca_SVM_Preds3=GetSVMPreds(trainOriginal,testOriginal,MyTarget,5000)
flush.console();gc();cat('\n');
cat('Ca_SVM_Preds4...')
flush.console()
MyTrain=trainOriginal
MyTest=testOriginal
SSS=(rbind(MyTrain,MyTest))
#get first 2000 features order by their standard deviation
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:2000]]
MyTest=MyTest[,ordr[1:2000]]
MyTrain=HaarTransform(Derivative(MyTrain),3)
MyTest=HaarTransform(Derivative(MyTest),3)
Ca_SVM_Preds4=GetSVMPreds(MyTrain,MyTest,MyTarget,10000)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds1... ')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
#Get 2500 features with highest standard deviation
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581)])
MyTest=cbind(HaarTransform(MyTest,4),testOriginal[c(3579:3581)])
#Get average of 10 different mlp model with different seed numbers
Ca_MLP_Preds1=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.5,Seed=1,N.ensemble=2)
CNT=10
for (sd in 2:CNT)
{
Ca_MLP_Preds1=Ca_MLP_Preds1+GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.5,Seed=sd,N.ensemble=2)
flush.console()
}
Ca_MLP_Preds1=Ca_MLP_Preds1/CNT
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds2... ')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 11, m = 0)
MyTest=savitzkyGolay(MyTest, p = 3, w = 11, m = 0)
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581)])
MyTest=cbind(HaarTransform((MyTest),4),testOriginal[c(3579:3581)])
Ca_MLP_Preds2=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.5,Seed=1,N.ensemble=10)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds3...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],5),trainOriginal[,c(3579,3581)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],5),testOriginal[,c(3579,3581)])
Ca_MLP_Preds3=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds4...')
flush.console()
MyTrain=cbind(HaarTransform(Derivative(trainOriginal[,1:3578]),7),trainOriginal[,c(3579:3581)])
MyTest=cbind(HaarTransform(Derivative(testOriginal[,1:3578]),7),testOriginal[,c(3579:3581)])
Ca_MLP_Preds4=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=3,Hidden2=20,IWeights=0.6)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds5...')
flush.console()
MyTrain=cbind(Derivative(HaarTransform(trainOriginal,4)),trainOriginal[,c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(testOriginal,4)),testOriginal[,c(3579:3581,3594)])
Ca_MLP_Preds5=GetMLPDepthPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds6...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal,5),trainOriginal[,c(3579:3581,3594)])
MyTest=cbind(HaarTransform(testOriginal,5),testOriginal[,c(3579:3581,3594)])
Ca_MLP_Preds6=GetMLPDepthPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds7...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],4),trainOriginal[,c(3579:3582)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],4),testOriginal[,c(3579:3582)])
Ca_MLP_Preds7=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Ca_MLP_Preds8...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],3),trainOriginal[,c(3579:3582)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],3),testOriginal[,c(3579:3582)])
Ca_MLP_Preds8=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Ca_Gauss_Preds1...')
flush.console()
Ca_Gauss_Preds1=GetGaussPreds(trainOriginal,testOriginal,MyTarget,Kernel='rbfdot',Tol=0.05,Var=0.01)
flush.console();gc();
cat('Ca_Gauss_Preds2...')
flush.console()
Ca_Gauss_Preds2=GetGaussPreds(trainReduced,testReduced,MyTarget,Kernel='rbfdot',Tol=0.05,Var=0.01)
flush.console();gc();
cat('Ca_Gauss_Preds3...')
flush.console()
Ca_Gauss_Preds3=GetGaussPreds(trainOriginal,testOriginal,MyTarget,Kernel='polydot',Tol=0.001,Var=0.1)
flush.console();gc();
cat('Ca_MVR_Preds1...')
flush.console()
MyTrain=trainOriginal
MyTest=testOriginal
SSS=(rbind(MyTrain,MyTest))
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:2000]]
MyTest=MyTest[,ordr[1:2000]]
Ca_MVR_Preds1=GetMVRPreds(MyTrain,MyTest,MyTarget,120,True)
flush.console();gc();cat('\n');
cat('Ca_MVR_Preds2...')
flush.console()
Ca_MVR_Preds2=GetMVRPreds(trainOriginal,testOriginal,MyTarget,100,True)
flush.console();gc();cat('\n');
cat('Ca_NNET_Preds1...')
flush.console()
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
Ca_NNET_Preds1=GetNNETPreds(MyTrain,MyTest,MyTarget,Size=10,Rang=0.5,Decay=0.1,Iters=100)
flush.console();gc();cat('\n');
#Combining predictions
ThisPred=(100*Ca_SVM_Preds1+100*Ca_MLP_Preds1+100*Ca_MLP_Preds2+15*Ca_Gauss_Preds1+30*Ca_SVM_Preds2+100*Ca_SVM_Preds3+45*Ca_Gauss_Preds2+10*Ca_Gauss_Preds3+15*Ca_MVR_Preds1+10*Ca_SVM_Preds4+150*Ca_MLP_Preds3+50*Ca_MLP_Preds4+10*Ca_NNET_Preds1+5*Ca_MVR_Preds2+30*Ca_MLP_Preds5+30*Ca_MLP_Preds6+150*Ca_MLP_Preds7+50*Ca_MLP_Preds8)/1000
}
#Training and Prediction phase for "P" target
if (TheTarget=='P')
{
cat('P_SVM_Preds1...')
flush.console()
MyTrain=((trainOriginal[,1:3578]))
MyTest=((testOriginal[,1:3578]))
MyTrain=continuumRemoval(MyTrain, type='R',method='substraction')#continuumRemoval from prospectr library
MyTest=continuumRemoval(MyTest, type='R',method='substraction')
MyTrain=ifelse(is.na(MyTrain),1,MyTrain)
MyTest=ifelse(is.na(MyTest),1,MyTest)
MyTrain=cbind(MyTrain,trainOriginal[c(3579,3582)])
MyTest=cbind(MyTest,testOriginal[c(3579,3582)])
P_SVM_Preds1=GetSVMPreds(MyTrain,MyTest,MyTarget,5000)
P_SVM_Preds1[,1]=exp(P_SVM_Preds1[,1])-1
flush.console();gc();cat('\n');
cat('P_SVM_Preds2...')
flush.console()
MyTrain=cbind(trainOriginal[,1:3578],trainOriginal[,3579:3594])
MyTest=cbind(testOriginal[,1:3578],testOriginal[,3579:3594])
P_SVM_Preds2=GetSVMPreds(MyTrain,MyTest,MyTarget,5000)
P_SVM_Preds2[,1]=exp(P_SVM_Preds2[,1])-1
flush.console();gc();cat('\n');
cat('P_SVM_Preds3,',' ')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],1),trainOriginal[c(3583,3594)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],1),testOriginal[c(3583,3594)])
P_SVM_Preds3=GetSVMDepthPreds(MyTrain,MyTest,MyTarget,Cost=1000)
P_SVM_Preds3[,1]=exp(P_SVM_Preds3[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds1... ')
flush.console()
MyTrain=((trainOriginal[,1:3578]))
MyTest=((testOriginal[,1:3578]))
MyTrain=savitzkyGolay(MyTrain, p = 4, w = 11, m = 1)
MyTest=savitzkyGolay(MyTest, p = 4, w = 11, m = 1)
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<3000]
MyTest=MyTest[,rnk<3000]
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581,3594)])
MyTest=cbind(HaarTransform(MyTest,4),testOriginal[c(3579:3581,3594)])
P_MLP_Preds1=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=5,Hidden2=0,IWeights=0.5)
P_MLP_Preds1[,1]=exp(P_MLP_Preds1[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds2... ')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 4, w = 11, m = 1)
MyTest=savitzkyGolay(MyTest, p = 4, w = 11, m = 1)
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581,3594)])
MyTest=cbind(HaarTransform(MyTest,4),testOriginal[c(3579:3581,3594)])
P_MLP_Preds2=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.6,Seed=1,N.ensemble=5)
P_MLP_Preds2[,1]=exp(P_MLP_Preds2[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds3...')
flush.console()
MyTrain=cbind(Derivative(HaarTransform(trainOriginal,4)),trainOriginal[,c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(testOriginal,4)),testOriginal[,c(3579:3581,3594)])
P_MLP_Preds3=GetMLPDepthPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
P_MLP_Preds3[,1]=exp(P_MLP_Preds3[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds4...')
flush.console()
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
P_MLP_Preds4=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=5,Hidden2=5,IWeights=0.6)
P_MLP_Preds4[,1]=exp(P_MLP_Preds4[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds5...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=cbind(HaarTransform(Derivative(MyTrain),2),trainOriginal[,c(3579,3593)])
MyTest=cbind(HaarTransform(Derivative(MyTest),2),testOriginal[,c(3579,3593)])
SSS=(rbind(MyTrain,MyTest))
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:450]]
MyTest=MyTest[,ordr[1:450]]
P_MLP_Preds5=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=5,Hidden2=5,IWeights=0.6)
P_MLP_Preds5[,1]=exp(P_MLP_Preds5[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds6...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(Derivative(HaarTransform(MyTrain,4)),trainOriginal[c(3579:3581)])
MyTest=cbind(Derivative(HaarTransform(MyTest,4)),testOriginal[c(3579:3581)])
P_MLP_Preds6=GetMLPPreds(MyTrain[],MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
P_MLP_Preds6[,1]=exp(P_MLP_Preds6[,1])-1
flush.console();gc();cat('\n');
cat('P_MLP_Preds7...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(Derivative(HaarTransform(MyTrain,4)),trainOriginal[c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(MyTest,4)),testOriginal[c(3579:3581,3594)])
P_MLP_Preds7=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
P_MLP_Preds7[,1]=exp(P_MLP_Preds7[,1])-1
flush.console();gc();cat('\n');
cat('P_MVR_Preds1... ')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581)])
MyTest=cbind(HaarTransform((MyTest),4),testOriginal[c(3579:3581)])
P_MVR_Preds1=GetMVRPreds(MyTrain,MyTest,MyTarget,Ncomp=200,Scale=True)
P_MVR_Preds1[,1]=exp(P_MVR_Preds1[,1])-1
flush.console();gc();cat('\n');
ThisPred=(70*P_SVM_Preds1+70*P_MVR_Preds1+70*P_MLP_Preds1+100*P_MLP_Preds3+70*P_SVM_Preds2+70*P_SVM_Preds3+50*P_MLP_Preds4+50*P_MLP_Preds5+50*P_MLP_Preds6+200*P_MLP_Preds7)/800
}
#Training and Prediction phase for "pH" target
if(TheTarget=='pH')
{
cat('pH_SVM_Preds1,',' ')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],1),trainOriginal[c(3583,3594)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],1),testOriginal[c(3583,3594)])
pH_SVM_Preds1=GetSVMDepthPreds(MyTrain,MyTest,MyTarget,Cost=1000)
flush.console();gc();cat('\n');
cat('pH_SVM_Preds2...')
flush.console()
MyTrain=cbind(trainOriginal[,1:3578],trainOriginal[,3579:3594])
MyTest=cbind(testOriginal[,1:3578],testOriginal[,3579:3594])
pH_SVM_Preds2=GetSVMPreds(MyTrain,MyTest,MyTarget,5000)
flush.console();gc();cat('\n');
cat('pH_MLP_Preds1...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],5),trainOriginal[,c(3579,3581)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],5),testOriginal[,c(3579,3581)])
pH_MLP_Preds1=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('pH_MLP_Preds2...')
flush.console()
MyTrain=cbind(Derivative(HaarTransform(trainOriginal,4)),trainOriginal[,c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(testOriginal,4)),testOriginal[,c(3579:3581,3594)])
pH_MLP_Preds2=GetMLPDepthPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('pH_MLP_Preds3...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],4),trainOriginal[,c(3579:3582)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],4),testOriginal[,c(3579:3582)])
pH_MLP_Preds3=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('pH_MLP_Preds4...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(Derivative(HaarTransform(MyTrain,4)),trainOriginal[c(3579:3581)])
MyTest=cbind(Derivative(HaarTransform(MyTest,4)),testOriginal[c(3579:3581)])
pH_MLP_Preds4=GetMLPPreds(MyTrain[],MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('pH_MLP_Preds5...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(Derivative(HaarTransform(MyTrain,4)),trainOriginal[c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(MyTest,4)),testOriginal[c(3579:3581,3594)])
pH_MLP_Preds5=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
ThisPred=(70*pH_MLP_Preds1+70*pH_MLP_Preds2+50*pH_SVM_Preds1+50*pH_MLP_Preds3+50*pH_SVM_Preds2+70*pH_MLP_Preds4+70*pH_MLP_Preds5)/430
}
#Training and Prediction phase for "SOC" target
if (TheTarget=='SOC')
{
cat('SOC_SVM_Preds1...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=cbind(MyTrain,trainOriginal[c(3581,3590,3591)])
MyTest=cbind(MyTest,testOriginal[c(3581,3590,3591)])
SOC_SVM_Preds1=GetSVMPreds(MyTrain,MyTest,MyTarget,10000)
flush.console();gc();cat('\n');
cat('SOC_SVM_Preds2...')
flush.console()
MyTrain=cbind(trainOriginal[,1:3578],trainOriginal[,3579:3594])
MyTest=cbind(testOriginal[,1:3578],testOriginal[,3579:3594])
SOC_SVM_Preds2=GetSVMPreds(MyTrain,MyTest,MyTarget,5000)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds1...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 11, m = 0)
MyTest=savitzkyGolay(MyTest, p = 3, w = 11, m = 0)
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(HaarTransform(MyTrain,3),trainOriginal[c(3579:3581,3594)])
MyTest=cbind(HaarTransform(MyTest,3),testOriginal[c(3579:3581,3594)])
SOC_MLP_Preds1=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=3,Hidden2=3,IWeights=0.5,Seed=1,N.ensemble=20)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds2...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 11, m = 0)
MyTest=savitzkyGolay(MyTest, p = 3, w = 11, m = 0)
MyTrain=cbind(HaarTransform(MyTrain,3),trainOriginal[c(3579:3581,3594)])
## ---- Continuation of the 'SOC' target block (its opening lines are outside
## ---- this excerpt). Each *_Preds* section below builds a transformed feature
## ---- set from the first 3578 columns (signal features -- TODO confirm) plus
## ---- selected metadata columns (3579+), then fits one learner via project
## ---- helpers (GetMLPPreds/GetSVMPreds/etc., defined earlier in the file).
MyTest=cbind(HaarTransform(MyTest,3),testOriginal[c(3579:3581,3594)])
SOC_MLP_Preds2=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=3,Hidden2=3,IWeights=0.5,Seed=1,N.ensemble=10)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds3...')
flush.console();
## Preds3: Savitzky-Golay smoothing, keep columns ranked < 2500 by SD, then
## Haar transform (level 4) plus metadata columns 3579:3581.
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 11, m = 0)
MyTest=savitzkyGolay(MyTest, p = 3, w = 11, m = 0)
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(HaarTransform(MyTrain,4),trainOriginal[c(3579:3581)])
MyTest=cbind(HaarTransform(MyTest,4),testOriginal[c(3579:3581)])
SOC_MLP_Preds3=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.5,Seed=1,N.ensemble=10)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds4...')
flush.console()
## Preds4: Haar (level 6) of the first derivative of the signal columns.
MyTrain=cbind(HaarTransform(Derivative(trainOriginal[,1:3578]),6),trainOriginal[,c(3579:3581)])
MyTest=cbind(HaarTransform(Derivative(testOriginal[,1:3578]),6),testOriginal[,c(3579:3581)])
SOC_MLP_Preds4=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=4,Hidden2=0,IWeights=0.5)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds5...')
flush.console()
## Preds5: Haar (level 5) of the full table, metadata columns included.
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
SOC_MLP_Preds5=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=5,Hidden2=5,IWeights=0.6)
flush.console();gc();cat('\n');
cat('SOC_MLP_Preds6...')
flush.console()
## Preds6: SD screen first, then derivative of the Haar coefficients.
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
rnk=GetRanksBySD(MyTrain,MyTest)
MyTrain=MyTrain[,rnk<2500]
MyTest=MyTest[,rnk<2500]
MyTrain=cbind(Derivative(HaarTransform(MyTrain,4)),trainOriginal[c(3579:3581)])
MyTest=cbind(Derivative(HaarTransform(MyTest,4)),testOriginal[c(3579:3581)])
## NOTE(review): `MyTrain[]` -- the empty subset is a no-op; likely a leftover.
SOC_MLP_Preds6=GetMLPPreds(MyTrain[],MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
## Weighted average of the eight SOC models; the weights sum to 500.
ThisPred=(100*SOC_SVM_Preds1+50*SOC_MLP_Preds3+50*SOC_MLP_Preds2+50*SOC_MLP_Preds1+70*SOC_SVM_Preds2+100*SOC_MLP_Preds4+60*SOC_MLP_Preds5+20*SOC_MLP_Preds6)/500
}
#Training and Prediction phase for "Sand" target
## Same pattern as the SOC block: 16 feature-set/learner combinations,
## combined at the end by a fixed weighted average.
if (TheTarget=='Sand')
{
cat('Sand_SVM_Preds1...')
flush.console()
## SVM1: smoothed signal plus a hand-picked set of metadata columns.
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=savitzkyGolay(MyTrain, p = 3, w = 11, m = 0)
MyTest=savitzkyGolay(MyTest, p = 3, w = 11, m = 0)
MyTrain=cbind(MyTrain,trainOriginal[c(3581,3583,3585,3586,3588,3590,3591:3592,3594)])
MyTest=cbind(MyTest,testOriginal[c(3581,3583,3585,3586,3588,3590,3591:3592,3594)])
Sand_SVM_Preds1=GetSVMPreds(MyTrain,MyTest,MyTarget,5000)
flush.console();gc();cat('\n');
cat('Sand_SVM_Preds2...')
flush.console()
## SVM2: untransformed data, higher cost.
Sand_SVM_Preds2=GetSVMPreds(trainOriginal,testOriginal,MyTarget,Cost=10000)#OK
flush.console();gc();cat('\n');
cat('Sand_SVM_Preds3...')
flush.console()
## SVM3: top-2000 columns by SD (computed on stacked train+test), then
## Haar(level 3) of the derivative.
MyTrain=trainOriginal
MyTest=testOriginal
SSS=(rbind(MyTrain,MyTest))
MyTrain=SSS[(1:nrow(trainOriginal)),]
MyTest=SSS[-(1:nrow(trainOriginal)),]
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:2000]]
MyTest=MyTest[,ordr[1:2000]]
MyTrain=HaarTransform(Derivative(MyTrain),3)
MyTest=HaarTransform(Derivative(MyTest),3)
Sand_SVM_Preds3=GetSVMPreds(MyTrain,MyTest,MyTarget,10000)
flush.console();gc();cat('\n');
cat('Sand_SVM_Preds4...')
flush.console()
## SVM4: as SVM3 but signal columns only and top-1500 by SD.
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
SSS=(rbind(MyTrain,MyTest))
MyTrain=SSS[(1:nrow(trainOriginal)),]
MyTest=SSS[-(1:nrow(trainOriginal)),]
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:1500]]
MyTest=MyTest[,ordr[1:1500]]
MyTrain=HaarTransform(Derivative(MyTrain),3)
MyTest=HaarTransform(Derivative(MyTest),3)
Sand_SVM_Preds4=GetSVMPreds(MyTrain,MyTest,MyTarget,10000)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds1...')
flush.console()
MyTrain=HaarTransform(trainOriginal[,1:3578],4)
MyTest=HaarTransform(testOriginal[,1:3578],4)
MyTrain=savitzkyGolay(MyTrain, p = 2, w = 3, m = 1)
MyTest=savitzkyGolay(MyTest, p = 2, w = 3, m = 1)
MyTrain=cbind(MyTrain,trainOriginal[c(3579,3593)])
MyTest=cbind(MyTest,testOriginal[c(3579,3593)])
Sand_MLP_Preds1=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=4,Hidden2=4,IWeights=0.6,Seed=1,N.ensemble=10)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds2...')
flush.console()
## MLP2: PCA (WekPCA, 0.9995 variance retained) on Haar coefficients.
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=HaarTransform(MyTrain,4)
MyTest=HaarTransform(MyTest,4)
MTT=rbind(MyTrain,MyTest)
MTT=WekPCA(MTT,0.9995)
MyTrain=MTT[1:nrow(MyTrain),]
MyTest=MTT[-(1:nrow(MyTrain)),]
MyTrain=cbind(MyTrain,trainOriginal[c(3579:3581,3594)])
MyTest=cbind(MyTest,testOriginal[c(3579:3581,3594)])
Sand_MLP_Preds2=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=200,Hidden1=5,Hidden2=5,IWeights=0.5,Seed=1,N.ensemble=30)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds3...')
flush.console()
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
Sand_MLP_Preds3=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=4,Hidden2=0,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds4...')
flush.console()
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
Sand_MLP_Preds4=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=5,Hidden2=5,IWeights=0.6)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds5...')
flush.console()
MyTrain=trainOriginal[,1:3578]
MyTest=testOriginal[,1:3578]
MyTrain=cbind(HaarTransform(Derivative(MyTrain),2),trainOriginal[,c(3579,3593)])
MyTest=cbind(HaarTransform(Derivative(MyTest),2),testOriginal[,c(3579,3593)])
SSS=(rbind(MyTrain,MyTest))
MyTrain=SSS[(1:nrow(trainOriginal)),]
MyTest=SSS[-(1:nrow(trainOriginal)),]
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:450]]
MyTest=MyTest[,ordr[1:450]]
Sand_MLP_Preds5=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=50,Hidden1=5,Hidden2=5,IWeights=0.6)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds6...')
flush.console()
## Note: this one uses GetMLPDepthPreds (different helper from the others).
MyTrain=cbind(Derivative(HaarTransform(trainOriginal,4)),trainOriginal[,c(3579:3581,3594)])
MyTest=cbind(Derivative(HaarTransform(testOriginal,4)),testOriginal[,c(3579:3581,3594)])
Sand_MLP_Preds6=GetMLPDepthPreds(MyTrain,MyTest,MyTarget,Iters=100,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Sand_MLP_Preds7...')
flush.console()
MyTrain=cbind(HaarTransform(trainOriginal[,1:3578],4),trainOriginal[,c(3579:3582)])
MyTest=cbind(HaarTransform(testOriginal[,1:3578],4),testOriginal[,c(3579:3582)])
Sand_MLP_Preds7=GetMLPPreds(MyTrain,MyTest,MyTarget,Iters=150,Hidden1=5,Hidden2=5,IWeights=0.5)
flush.console();gc();cat('\n');
cat('Sand_Gauss_Preds1...')
flush.console()
Sand_Gauss_Preds1=GetGaussPreds(trainOriginal,testOriginal,MyTarget,Kernel='rbfdot',Tol=0.05,Var=0.01)
flush.console();gc();
cat('Sand_Gauss_Preds2...')
flush.console()
## Gauss2 uses trainReduced/testReduced, defined outside this excerpt.
Sand_Gauss_Preds2=GetGaussPreds(trainReduced,testReduced,MyTarget,Kernel='rbfdot',Tol=0.05,Var=0.01)
flush.console();gc();
cat('Sand_Gauss_Preds3...')
flush.console()
Sand_Gauss_Preds3=GetGaussPreds(trainOriginal,testOriginal,MyTarget,Kernel='polydot',Tol=0.001,Var=0.1)
flush.console();gc();
cat('Sand_MVR_Preds1...')
flush.console()
MyTrain=trainOriginal
MyTest=testOriginal
SSS=(rbind(MyTrain,MyTest))
ordr=colnames(SSS)[order(colSds(as.matrix(SSS)),decreasing=T)]
MyTrain=MyTrain[,ordr[1:2000]]
MyTest=MyTest[,ordr[1:2000]]
## NOTE(review): `True` is not an R literal (TRUE/T are); this call errors
## unless an object named True exists in the workspace -- verify.
Sand_MVR_Preds1=GetMVRPreds(MyTrain,MyTest,MyTarget,120,True)
flush.console();gc();cat('\n');
cat('Sand_NNET_Preds1...')
flush.console()
MyTrain=HaarTransform(trainOriginal,5)
MyTest=HaarTransform(testOriginal,5)
Sand_NNET_Preds1=GetNNETPreds(MyTrain,MyTest,MyTarget,Size=10,Rang=0.5,Decay=0.1,Iters=100)
flush.console();gc();cat('\n');
## Weighted average of all sixteen Sand models; the weights sum to 790.
ThisPred=(15*Sand_Gauss_Preds1+30*Sand_SVM_Preds2+10*Sand_Gauss_Preds2+20*Sand_Gauss_Preds3+15*Sand_MVR_Preds1+50*Sand_SVM_Preds3+50*Sand_SVM_Preds4+10*Sand_MLP_Preds3+10*Sand_NNET_Preds1+70*Sand_MLP_Preds4+120*Sand_MLP_Preds5+30*Sand_MLP_Preds6+60*Sand_MLP_Preds7+100*Sand_SVM_Preds1+100*Sand_MLP_Preds2+100*Sand_MLP_Preds1)/790
}
## Clamp predictions to the observed range of the training target, then store
## them in submission column k (ThisTarget/submission/k are defined outside
## this excerpt; the closing brace ends the per-target loop).
ThisPred[,1]=ifelse(ThisPred[,1]<min(ThisTarget),min(ThisTarget),ThisPred[,1])
ThisPred[,1]=ifelse(ThisPred[,1]>max(ThisTarget),max(ThisTarget),ThisPred[,1])
flush.console()
submission[,k]=ThisPred[,1]
k=k+1
}
write.csv(submission,'FinalSubmission.csv',row.names=F,quote=F,na='') |
879f138e7a2d099e9cd5f066cb992d26634fd652 | 27674239c0da0b7afc6ad9dc2622e084c3f5c004 | /inst/9_17_worker.R | 5dce11ef2de3d89cbbadaa993b8ce393570caf19 | [] | no_license | RobinHankin/knotR | 112248605c8a89a21641be35f2363c19db1c3783 | 0a5a6015a51340faa1ee43066d76be8f39adb499 | refs/heads/master | 2023-05-15T03:19:57.311824 | 2023-05-14T09:03:35 | 2023-05-14T09:03:35 | 99,854,849 | 5 | 0 | null | 2017-10-15T04:48:05 | 2017-08-09T21:37:28 | R | UTF-8 | R | false | false | 926 | r | 9_17_worker.R | library(knotR)
## Worker script for knot 9_17: read the SVG control points, impose the knot's
## mirror symmetry, then run the optimizer and write the result back out.
filename <- "9_17.svg"
knot <- reader(filename)

## Mver: pairs of point indices exchanged by the vertical symmetry (one pair
## per row); xver: the two points listed separately for that symmetry (see
## ?symmetry_object in knotR for the exact convention).
vertical_pairs <- rbind(
  c(25,  1), c( 8, 18), c( 7, 19), c( 9, 17),
  c(10, 16), c(24,  2), c( 5, 21), c(23,  3),
  c(22,  4), c(11, 15), c( 6, 20), c(12, 14)
)
sym917 <- symmetry_object(
  x = knot,
  Mver = vertical_pairs,
  xver = c(13, 26),
  mcdonalds = FALSE
)
knot <- symmetrize(knot, sym917)

## Over/under assignment at each crossing (orientation convention per
## knotR::knotoptim -- TODO confirm which column is the over-strand).
ou917 <- rbind(
  c( 2, 18), c(16,  3), c( 4, 21), c(22,  5), c( 6, 23),
  c(12,  7), c( 9, 25), c(24, 11), c(20, 15)
)

optimized <- knotoptim(
  filename,
  symobj = sym917,
  ou = ou917,
  prob = 0,
  weights = c(1, 5, 1, 1, 1, 1, 1, 1, 1),
  # iterlim = 10000, print.level = 2
  control = list(trace = 100, maxit = 10000), useNLM = FALSE
)
## Persist both the rendered SVG and an R-parsable dump of the solution.
write_svg(optimized, filename, safe = FALSE)
dput(optimized, file = sub('.svg', '.S', filename))
|
f0bea070ff36cbb688e83ac73f1a6bfd3d14e861 | ba6a84161acd6a7f8d7a8c3ba10b90a16fcfb2e8 | /Rcode_SVM_salaryData.R | 20a51e13aa8d776b106c2fe616eb0637e22c607d | [] | no_license | DarshanaBhamare/Assignments | 0c06e5559a3374d29945ee6590761ee6116c752f | fb628948f500dcf844ac15384cc0d9e00b070f74 | refs/heads/master | 2021-02-12T16:32:54.295133 | 2020-03-03T10:51:22 | 2020-03-03T10:51:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 719 | r | Rcode_SVM_salaryData.R | salaryData_train<-read.csv(file.choose())
library(kernlab)

## Inspect the training data and make sure 'educationno' is treated as a
## categorical predictor rather than a number (same for the test set below).
str(salaryData_train)
salaryData_train$educationno <- as.factor(salaryData_train$educationno)
class(salaryData_train)

## Load and prepare the test set the same way (interactive file picker).
salaryData_test <- read.csv(file.choose())
str(salaryData_test)
salaryData_test$educationno <- as.factor(salaryData_test$educationno)
class(salaryData_test)

## Build a linear-kernel SVM classifier for Salary.
## (The previous attach(salaryData_train) was removed: the `data` argument
## already supplies the variables, so attach() only polluted the search path.)
salary_model <- ksvm(Salary ~ ., data = salaryData_train, kernel = "vanilladot")
salary_model

## Predict on the held-out test data and summarize accuracy.
y_pred <- predict(salary_model, salaryData_test)
table(y_pred, salaryData_test$Salary)
mean(y_pred == salaryData_test$Salary)# 0.8464143
final <- y_pred == salaryData_test$Salary
table(final)
#FALSE  TRUE
#2313 12747
740585acb28b6840f7f4ac0198294e06e25c20ad | 536b625de4dd42b21949f2cd9ba4a675bbb13fae | /R/predict.grpreg.R | 890a23fe763695a91840de5682c7e9959d4b84f0 | [] | no_license | debinqiu/grpss | 2e844735f22cd4d2dc36d76c3f55c9998f45fdeb | c1af576e411258fb0d97b694330e814aab0692d2 | refs/heads/master | 2016-08-11T12:31:35.284762 | 2016-01-30T17:41:27 | 2016-01-30T17:41:27 | 50,691,531 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,556 | r | predict.grpreg.R | #' Model predictions for a fitted \code{grpreg} object
#' @description Similar to the usual predict methods and to \code{predict.grpreg} in the \code{grpreg} package.
#' @param object A fitted "\code{grpreg}" object from \code{\link{grpss}},
#' or \code{\link[grpreg]{grpreg}} function.
#' @param newdata Optionally, a matrix or data frame at which to predict. If omitted, the fitted
#' predictors are used.
#' @param lambda Value of the regularization parameter \code{lambda} at which predictions are
#' requested. See details for the default.
#' @param type The type of prediction: "\code{response}" gives the fitted values; \code{"class"}
#' returns the predicted class for the binomial outcome; "\code{probability}" returns the
#' predicted probabilities for the logistic regression.
#' @param ... Not used.
#' @details This function gives the predictions at \code{newdata} or all predictors if the
#' argument \code{newdata} is not supplied. The default \code{lambda} for "\code{grpreg}"
#' object is the one at which we obtain the minimum loss value, i.e., negative log-likelihood
#' value. Typically, \code{type = "response"} is
#' used for linear or poisson regression, and \code{type = "class"} or
#' \code{type = "probability"} is used for logistic regression.
#' @return The predicted values depending on the type.
#' @author Debin Qiu, Jeongyoun Ahn
#' @seealso \code{\link{grpss}}
#' @examples
#' library(MASS)
#' set.seed(23)
#' n <- 30 # sample size
#' p <- 3 # number of predictors in each group
#' J <- 50 # group size
#' group <- rep(1:J,each = 3) # group indices
#' X <- mvrnorm(n,seq(0,5,length.out = p*J),diag(p*J))
#' beta <- runif(12,-2,5)
#' mu <- X%*%matrix(c(beta,rep(0,p*J-12)),ncol = 1)
#'
#' # linear regression with family = "gaussian"
#' y <- mu + rnorm(n)
#'
#' ## without cross-validation
#' gss12 <- grpss(X,y,ncut = 10,group,select = TRUE)
#' predict(gss12) # fitted values
#' predict(gss12,lambda = 0.2) # fitted values at lambda = 0.2
#'
#' # logistic regression with family = "binomial"
#' set.seed(23)
#' y1 <- rbinom(n,1,1/(1 + exp(-mu)))
#' gss21 <- grpss(X,y1,group, criterion = "gDC",select = TRUE,
#' family = "binomial")
#' predict(gss21)
#'
#' @export
predict.grpreg <- function(object, newdata, lambda = NULL,
                           type = c("response","class","probability"),...) {
  ## Predict responses/classes/probabilities from a fitted "grpreg" object
  ## (or a cross-validated fit, handled in the else-branches below).
  type <- match.arg(type)
  ## inherits() replaces class(object) == "...": robust when the class
  ## attribute is a vector (length > 1 conditions error in modern R).
  if (inherits(object, "grpss"))
    stop("No prediction method available for class 'grpss'
         without doing grouped variable selection")
  ## Recover the original response and design matrix from the fitting call.
  callArg <- object$call
  if (is.null(callArg$formula)) {
    y <- eval(callArg$y)
    X <- eval(callArg$X)
  }
  else {
    data <- eval(callArg$data)
    yresp <- as.character(callArg$formula)[2]
    y <- data[,yresp]
    data[,yresp] <- NULL
    X <- data
  }
  group <- eval(object$call$group)
  grp.scr <- object$group.screen
  ## Design matrix restricted to the screened groups, with intercept column.
  XX <- as.matrix(cbind(rep(1,length(y)), X[,group %in% grp.scr]))
  if (inherits(object, "grpreg")) {
    ## Default lambda: the path value minimizing the loss; otherwise snap the
    ## requested lambda to the nearest value on the fitted path.
    ## (if/else replaces scalar ifelse(), which is slower and obscures flow.)
    if (is.null(lambda)) {
      index <- which.min(object$loss)
      lambda <- object$lambda[index]
    } else {
      index <- which.min(abs(object$lambda - lambda))
    }
    if (lambda > max(object$lambda) || lambda < min(object$lambda))
      stop(paste("please specify 'lambda' between",min(object$lambda),
                 "and",max(object$lambda)))
    beta <- object$beta[,index]
  }
  else {
    ## Cross-validated fit: use the lambda minimizing the CV criterion.
    index <- which.min(abs(object$lambda - object$lambda.min))
    beta <- object$fit$beta[,index]
  }
  if (missing(newdata))
    yhat <- XX %*% matrix(beta, ncol = 1)
  else {
    ## BUG FIX: class(newdata) != "matrix" fails on R >= 4.0, where matrices
    ## have class c("matrix", "array"); is.matrix() is the correct test.
    if (!is.matrix(newdata)) {
      temp <- try(newdata <- as.matrix(newdata), silent = TRUE)
      if (inherits(temp, "try-error"))
        stop("'newdata' must be a matrix or can be coerced to a matrix")
    }
    ## A single observation supplied as a vector: prepend the intercept term.
    if (any(c(NCOL(newdata), NROW(newdata)) == 1))
      newdata <- matrix(c(1, newdata), nrow = 1)
    else
      newdata <- cbind(rep(1, nrow(newdata)), newdata)
    ## Expand the screened-group coefficients back to the full predictor set,
    ## filling the unscreened groups with zeros.
    beta.all <- numeric(ncol(X))
    beta.all[group %in% grp.scr] <- beta[-1]
    beta.all <- c(beta[1], beta.all)
    yhat <- newdata %*% matrix(beta.all, ncol = 1)
  }
  colnames(yhat) <- "response"
  family <- if (inherits(object, "grpreg")) object$family else object$fit$family
  if (family == "binomial") {
    prob <- exp(yhat)/(1 + exp(yhat))
    ## BUG FIX: classify on the predicted probability, not on the linear
    ## predictor (prob > 0.5 is equivalent to yhat > 0, not yhat > 0.5).
    yhat <- matrix(as.numeric(prob > 0.5), ncol = 1, dimnames = list(NULL,"class"))
    if (type == "probability")
      yhat <- matrix(prob, ncol = 1, dimnames = list(NULL,"probability"))
  }
  if (family == "poisson")
    yhat <- matrix(exp(yhat), ncol = 1, dimnames = list(NULL,"mean"))
  return(yhat)
}
|
f634f1d7410a60f01ae5e9734bbc2188111a5a35 | 15aaef23c9e9ecc481c89ca5fad56e98e62e2abf | /stage2_model.R | e7330d3aa3296f0361d97b91265548a937a63208 | [] | no_license | ngraetz/cfr | 950e9635836a79dd9858200ee603893f14681bfd | 42fca80332044724fdd5959fb908c564227a2a08 | refs/heads/master | 2020-03-26T14:30:22.669905 | 2019-08-09T01:48:28 | 2019-08-09T01:48:28 | 144,991,160 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 81,728 | r | stage2_model.R | library(rsq)
library(relaimpo)
library(plyr)
library(data.table)
library(ggplot2)
library(olsrr)
library(interplot)
## Load the GAUL-to-location-id lookup and harmonize its country names to the
## UN-style names used in the model dataset.
locs <- fread('C:/Users/ngraetz/Desktop/gaul_to_loc_id.csv')
setnames(locs, 'loc_name', 'name')
## Recode table: common/short names -> the names used elsewhere in this file.
name_fixes <- c('Tanzania'                         = 'United Republic of Tanzania',
                'Democratic Republic of the Congo' = 'Democratic Republic of Congo',
                'Ivory Coast'                      = "Cote d'Ivoire",
                'Iran'                             = 'Iran (Islamic Republic of)',
                'Vietnam'                          = 'Viet Nam',
                'Syria'                            = 'Syrian Arab Republic',
                'Czech Republic'                   = 'Czechia',
                'Russia'                           = 'Russian Federation',
                'Bolivia'                          = 'Bolivia (Plurinational State of)',
                'Venezuela'                        = 'Venezuela (Bolivarian Republic of)',
                'United States'                    = 'United States of America',
                'The Gambia'                       = 'Gambia',
                'Laos'                             = "Lao People's Democratic Republic",
                'Cape Verde'                       = 'Cabo Verde',
                'Palestine'                        = 'State of Palestine')
for (old_name in names(name_fixes)) {
  locs[name == old_name, name := name_fixes[[old_name]]]
}
setnames(locs, 'ihme_lc_id', 'ihme_loc_id')
## STAGE 2 TABLES
## Load the prepared country-year panel used by the stage-2 regressions.
d <- readRDS('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/model_data_2018-08-16.rds')
## Fix DRC
## Replace DRC's out-migration series with a corrected one, re-logging with a
## 1e-6 offset; the temporary merge column is dropped afterwards.
drc <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/drc_migration.csv')
setnames(drc,'out_rate','new_out_rate')
d <- merge(d, drc, by=c('name','year'), all.x=TRUE)
d[name=='Democratic Republic of the Congo', out_rate := new_out_rate]
d[name=='Democratic Republic of the Congo', log_out_rate := log(out_rate + 0.000001)]
d[, new_out_rate := NULL]
## Keep the four five-year analysis periods.
d <- d[year %in% c(1990,1995,2000,2005), ]
## Collapse/rename super-regions, then drop Oceania entirely.
d[gbd_region=='Oceania', gbd_super_region := 'Oceania']
d[gbd_super_region == 'Southeast Asia, East Asia, and Oceania', gbd_super_region := 'East Asia']
d[gbd_super_region %in% c('East Asia','South Asia'), gbd_super_region := 'Asia']
d <- d[gbd_super_region!='Oceania',]
#d <- d[name %in% model_countries[, name], ]
#d <- d[log_out_rate <= -6, log_out_rate := log(0.01)] ## Smaller offset - now only half my smallest observation.
#d <- d[year >= 1990, ]
## Sign flip so positive values mean net OUT-migration.
d[, net_out_migration := net_migration * -1]
## Drop rows missing the key cohort-size covariates or the region label.
d <- d[!is.na(lag5_r_size_15_19)]
d <- d[!is.na(r_size_15_19)]
d <- d[!is.na(prop_15_29)]
d <- d[!is.na(ratio_15_19_20_24)]
d <- d[!is.na(gbd_super_region)]
## Rescale the 15-19/20-24 ratio (x10) and build a country-year identifier.
d[, ratio_15_19_20_24 := ratio_15_19_20_24 * 10]
d[, country_year := paste0(name,'_',year)]
#d[, urbanicity := urbanicity / 100]
#d[, epr := epr / 100]
## Try a couple new variables for absorptive capacity.
## Merge EPR
## ILO employment-to-population ratio, both sexes, ages 15-24. The series
## starts in 1991, so 1991 values are duplicated as 1990 rows before merging.
ilo_epr <- fread('C:/Users/ngraetz/Downloads/ILOSTAT_epr.csv')
ilo_epr <- ilo_epr[sex=='SEX_T' & classif1.label=='Age: 15-24', ]
setnames(ilo_epr, 'ref_area', 'ihme_loc_id')
ilo_epr <- merge(ilo_epr, locs, by='ihme_loc_id')
setnames(ilo_epr, 'obs_value', 'epr')
setnames(ilo_epr, 'time', 'year')
ilo_epr <- ilo_epr[, c('name','year','epr')]
ilo_epr_1990 <- ilo_epr[year==1991, ]
ilo_epr_1990[, year := 1990]
ilo_epr <- rbind(ilo_epr, ilo_epr_1990)
setnames(ilo_epr, 'epr', 'epr_15_24')
d <- merge(d, ilo_epr, by=c('name','year'), all.x=TRUE)
## Unemployment, youth total (% of total labor force ages 15-24) (modeled ILO estimate)
## World Bank wide-format file: melt years 1990-2010 to long, then copy 1991
## back to 1990 (any existing 1990 rows are dropped first) and fix WB names.
ilo_unemp <- fread("C:/Users/ngraetz/Downloads/API_SL.UEM.1524.ZS_DS2_en_csv_v2_10034482/API_SL.UEM.1524.ZS_DS2_en_csv_v2_10034482.csv")
ilo_unemp <- melt(ilo_unemp, id.vars = 'Country Name', measure.vars = as.character(1990:2010), variable.name = 'year', value.name = 'unemp_15_24')
setnames(ilo_unemp, 'Country Name', 'name')
ilo_unemp[, year := as.numeric(as.character(year))]
ilo_unemp_1990 <- ilo_unemp[year==1991, ]
ilo_unemp_1990[, year := 1990]
ilo_unemp <- rbind(ilo_unemp[year!=1990, ], ilo_unemp_1990)
ilo_unemp[name=='Egypt, Arab Rep.', name := 'Egypt']
ilo_unemp[name=='Congo, Rep.', name := 'Congo']
ilo_unemp[name=='Gambia, The', name := 'Gambia']
ilo_unemp[name=='Kyrgyz Republic', name := 'Kyrgyzstan']
ilo_unemp[name=='Slovak Republic', name := 'Slovakia']
ilo_unemp[name=='Yemen, Rep.', name := 'Yemen']
d <- merge(d, ilo_unemp, by=c('name','year'), all.x=TRUE)
d[, unemp_15_24 := as.numeric(unemp_15_24)]
## Arable land
## FAOSTAT arable land; the unique() calls below are interactive checks of
## which countries fail to match before/after the Sudan fix.
land <- fread("C:/Users/ngraetz/Downloads/FAOSTAT_data_arable_land.csv")
land <- land[, c('Year','Area','Value')]
setnames(land, c('Year','Area','Value'), c('year','name','arable_pc'))
unique(d[!(name %in% land[, name]), name])
## Copy land values for Sudan to both Sudan and South Sudan.
south_sudan <- land[name=='Sudan (former)', ]
south_sudan[, name := 'South Sudan']
land[name=='Sudan (former)', name := 'Sudan']
land <- rbind(land, south_sudan)
unique(d[!(name %in% land[, name]), name])
d <- merge(d, land, by=c('name','year'), all.x=TRUE)
## Convert to arable land per capita (source values in 1000s -- TODO confirm
## FAOSTAT units), then log.
d[, arable_pc := (arable_pc * 1000) / total_pop]
d[, log_arable_pc := log(arable_pc)]
## Demean everything: for each covariate add the country mean (cmean_*) and
## the within-country deviation (dmean_*), used by the fixed-effects models
## and correlation matrices below. The grouped data.table ops replace the
## original per-country subset loop (same values, one pass per variable, and
## no shadowing of base::c by a loop index).
demean_vars <- c('epr','r_ldi_pc','ldi_pc_gap','gbd_mx_shocks','edu','urbanicity', 'polity2','epr_15_24','unemp_15_24','log_ldi_pc','log_arable_pc',
                 c(paste0('lag0_', c('r_size_20_24','r_size_25_29','r_size_10_19','r_size_15_24','r_size_15_19','r_size_15_29')),
                   paste0('lag5_', c('r_size_20_24','r_size_25_29','r_size_10_19','r_size_15_24','r_size_15_19','r_size_15_29'))),
                 "lag0_r_gbd_size_15_19","lag5_r_gbd_size_15_19",
                 'log_out_rate','out_rate','net_out_migration','r_out_rate')
for(v in demean_vars) {
  # mean() without na.rm matches the original: any NA makes the whole
  # country's mean NA, which then propagates into dmean_*.
  d[, (paste0('cmean_',v)) := mean(get(v)), by = name]
  d[, (paste0('dmean_',v)) := get(v) - get(paste0('cmean_',v))]
}
## Make EPR quantiles
# rev(seq(.1,1,.9/3))
## Assign each observation a quartile label for several absorptive-capacity
## measures. Iterating q from 1 down to 0.25 means smaller cutoffs overwrite
## larger ones, so the final label is the lowest cutoff the value falls under.
## (quantile's `p=` partially matches the `probs` argument.)
for(q in c(1,.75,.5,.25)) {
  epr_q <- quantile(d[, epr], p=q)
  epr_15_24_q <- quantile(d[, epr_15_24], p=q)
  unemp_15_24_q <- quantile(d[, unemp_15_24], p=q)
  log_ldi_pc_q <- quantile(d[, log_ldi_pc], p=q)
  message(round(log_ldi_pc_q, 3))
  d[epr <= epr_q, epr_group := as.character(round(epr_q,0))]
  d[epr_15_24 <= epr_15_24_q, epr_15_24_group := as.character(round(epr_15_24_q))]
  d[unemp_15_24 <= unemp_15_24_q, unemp_15_24_group := as.character(round(unemp_15_24_q))]
  d[log_ldi_pc <= log_ldi_pc_q, log_ldi_pc_group := as.character(round(log_ldi_pc_q))]
}
## Read Stage-1 country-years
## Export the modeled country-years, then flag countries listed in the two
## Stage-1 transition classifications (stage1 and the stricter 4/4 variant).
write.csv(data.table(country_year=unique(d[, country_year])), 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/stage2_countries.csv', row.names=FALSE)
stage1 <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/stage1_transition_countries.csv')
#d[country_year %in% stage1[, name], stage1 := 1]
d[name %in% stage1[, name], stage1 := 1]
stage1_4 <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/stage1_transition_countries_4.csv')
d[name %in% stage1_4[, name], stage1_4 := 1]
## Most positive correlations:
## log_out_rate:lag5_r_size_20_24
## log_out_rate:lag5_r_size_10_19
## net_out_migration:lag5_r_size_10_19
## log_out_rate:lag5_r_size_15_24
## net_out_migration:lag5_r_size_15_24
## log_out_rate:lag5_r_size_15_29
## Model configuration: outcome, cohort-growth exposure, controls, outliers.
all_regions <- c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')
outliers_cy <- c('Yemen_1990','Guinea_1990')
outliers <- c('Burundi','Afghanistan')
#for(file in c('noedu','edu')) {
file <- 'noedu'
dv <- 'log_out_rate'
iv <- 'lag5_r_size_15_24'
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + urbanicity + gbd_mx_shocks + epr')
#other_fes <- paste0(' + ', iv, '*as.factor(epr_group)')
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity + epr')
## NOTE(review): other_fes is assigned twice in a row -- only the second
## (shorter) control set is actually used in the formula below.
other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + epr_15_24 + urbanicity + polity2')
other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks')
if(file=='edu') other_fes <- paste0(other_fes, ' + edu')
## Country fixed effects enter via as.factor(name).
f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + as.factor(name)'))
## Duplicate definitions (same values as above).
outliers_cy <- c('Yemen_1990','Guinea_1990')
outliers <- c('Burundi','Afghanistan')
## Run all models
## Run global model and Stage1 model.
## Global model: the four developing-region super-regions, minus outliers.
#model_data_global <- d[gbd_super_region=='Sub-Saharan Africa' & !(name %in% outliers) & !(country_year %in% outliers_cy),]
model_data_global <- d[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
mod_global <- lm(formula = f, data = model_data_global)
global_coefs <- data.table(model='All countries',
                           name=names(mod_global$coefficients),
                           coef=mod_global$coefficients,
                           se=coef(summary(mod_global))[,2],
                           p=coef(summary(mod_global))[,4],
                           r=summary(mod_global)$r.squared,
                           r_adj=summary(mod_global)$adj.r.squared)
## Interactive peek at the non-fixed-effect rows (data.table's `!i`
## anti-subset drops the as.factor(name) coefficient rows).
global_coefs[!(grep('as.factor', name)), ]
global_coefs <- rbind(global_coefs, data.table(model='All countries', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_global)$r.squared, summary(mod_global)$adj.r.squared, dim(model_data_global)[1])), fill=TRUE)
## Run global model and Stage1 model.
## Stage-1 model: countries flagged in stage1_transition_countries.csv.
model_data_stage1 <- d[stage1==1 & !(name %in% outliers) & !(country_year %in% outliers_cy),]
mod_stage1 <- lm(formula = f, data = model_data_stage1)
stage1_coefs <- data.table(model='Stage1',
                           name=names(mod_stage1$coefficients),
                           coef=mod_stage1$coefficients,
                           se=coef(summary(mod_stage1))[,2],
                           p=coef(summary(mod_stage1))[,4],
                           r=summary(mod_stage1)$r.squared,
                           r_adj=summary(mod_stage1)$adj.r.squared)
stage1_coefs <- rbind(stage1_coefs, data.table(model='Stage1', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_stage1)$r.squared, summary(mod_stage1)$adj.r.squared, dim(model_data_stage1)[1])), fill=TRUE)
## Run global model and Stage1 model. (4/4)
## Stage-1 (4/4 criteria) variant; note mod_stage1 is reused/overwritten here.
model_data_stage1_v2 <- d[stage1_4==1 & !(name %in% outliers) & !(country_year %in% outliers_cy),]
mod_stage1 <- lm(formula = f, data = model_data_stage1_v2)
stage1_v2_coefs <- data.table(model='Stage1_v2',
                           name=names(mod_stage1$coefficients),
                           coef=mod_stage1$coefficients,
                           se=coef(summary(mod_stage1))[,2],
                           p=coef(summary(mod_stage1))[,4],
                           r=summary(mod_stage1)$r.squared,
                           r_adj=summary(mod_stage1)$adj.r.squared)
## BUG FIX: the N row previously reported dim(model_data_stage1)[1] (the
## Stage1 sample size) for the Stage1_v2 model; use the v2 sample instead.
stage1_v2_coefs <- rbind(stage1_v2_coefs, data.table(model='Stage1_v2', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_stage1)$r.squared, summary(mod_stage1)$adj.r.squared, dim(model_data_stage1_v2)[1])), fill=TRUE)
## Run region models.
run_region_lm <- function(n) {
  ## Fit the stage-2 fixed-effects model within one GBD super-region and
  ## return a long coefficient table (coef/se/p) plus R^2/Adj. R^2/N rows.
  ## Relies on globals: d, f, outliers, outliers_cy.
  reg_rows <- d[gbd_super_region == n & !(name %in% outliers) & !(country_year %in% outliers_cy), ]
  fit <- lm(formula = f, data = reg_rows)
  fit_summary <- summary(fit)
  coef_mat <- coef(fit_summary)
  out <- data.table(model = n,
                    name  = names(fit$coefficients),
                    coef  = fit$coefficients,
                    se    = coef_mat[, 2],
                    p     = coef_mat[, 4])
  summary_rows <- data.table(model = n,
                             name  = c('R^2', 'Adj. R^2', 'N'),
                             coef  = c(fit_summary$r.squared, fit_summary$adj.r.squared, dim(reg_rows)[1]))
  rbind(out, summary_rows, fill = TRUE)
}
message('Fitting region LMs...')
## Fit the four region-specific models and stack them with the global and
## Stage-1 coefficient tables (fill=TRUE pads the missing r/r_adj columns).
reg_models <- rbindlist(lapply(c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'),
                               run_region_lm))
reg_models <- rbind(reg_models, global_coefs, stage1_coefs, stage1_v2_coefs, fill=TRUE)
## Make correlation matrices.
clean_names <- function(x, matrix) {
  ## Strip every occurrence of the pattern `x` (e.g. 'dmean_') from both the
  ## row names and the column names of `matrix`; values are untouched.
  out <- matrix
  dimnames(out) <- list(gsub(x, '', rownames(out)),
                        gsub(x, '', colnames(out)))
  out
}
## Pairwise correlations of the within-country demeaned covariates for the
## global, Stage-1, and SSA samples; clean_names() strips the dmean_/lag5_
## prefixes for presentation. Results and models are saved for the paper.
global_cor_matrix <- round(cor(model_data_global[, c('dmean_log_out_rate','dmean_lag5_r_size_15_24','dmean_r_ldi_pc','dmean_ldi_pc_gap','dmean_gbd_mx_shocks','dmean_polity2','dmean_urbanicity','dmean_epr','dmean_edu')], use='complete.obs'),2)
global_cor_matrix <- clean_names(x='dmean_', global_cor_matrix)
global_cor_matrix <- clean_names(x='lag5_', global_cor_matrix)
stage1_cor_matrix <- round(cor(model_data_stage1[, c('dmean_log_out_rate','dmean_lag5_r_size_15_24','dmean_r_ldi_pc','dmean_ldi_pc_gap','dmean_gbd_mx_shocks','dmean_polity2','dmean_urbanicity','dmean_epr','dmean_edu')], use='complete.obs'),2)
stage1_cor_matrix <- clean_names(x='dmean_', stage1_cor_matrix)
stage1_cor_matrix <- clean_names(x='lag5_', stage1_cor_matrix)
africa_cor_matrix <- round(cor(d[gbd_super_region == 'Sub-Saharan Africa' & !(name %in% outliers) & !(country_year %in% outliers_cy),][, c('dmean_log_out_rate','dmean_lag5_r_size_15_24','dmean_r_ldi_pc','dmean_ldi_pc_gap','dmean_gbd_mx_shocks','dmean_polity2','dmean_urbanicity','dmean_epr','dmean_edu')], use='complete.obs'),2)
africa_cor_matrix <- clean_names(x='dmean_', africa_cor_matrix)
africa_cor_matrix <- clean_names(x='lag5_', africa_cor_matrix)
saveRDS(list(global_cor_matrix,stage1_cor_matrix,africa_cor_matrix), 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/cor_matrices.RDS')
saveRDS(reg_models, paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/', file, '_stage2_models_', iv, '_', Sys.Date(), '.RDS'))
#}
## Check for outliers.
## DFBETAS diagnostics: for each super-region, refit the interaction model on
## the pooled sample and plot observations whose influence on any coefficient
## exceeds the conventional +/- 2/sqrt(n) threshold.
## NOTE(review): `int_iv` (used in the formula below) is only defined in the
## Stage-3 section further down -- this section assumes it already exists in
## the workspace, i.e. the script is run interactively/out of order.
library(influence.ME)
library(lme4)
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/outliers_dfbetas.pdf'), width = 8, height = 6)
for(n in c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')) {
  message(paste0('Outlier plots: ', n))
  #model_data <- d[gbd_super_region==n & !(name %in% outliers) & !(country_year %in% outliers_cy),]
  # model_data <- d[stage1==1 & !(name %in% outliers) & !(country_year %in% outliers_cy),]
  # f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + (1|name)'))
  # mod <- lmer(formula = f, data = model_data)
  message(int_iv)
  #d[, abs_cap := as.numeric(get(int_iv))]
  #file <- c('abs_cap_int')
  all_regions <- c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')
  #if(file=='abs_cap_int') epr_name <- ':abs_cap'
  #if(file=='abs_cap') epr_name <- 'abs_cap'
  use_edu <- 'noedu'
  dv <- 'log_out_rate'
  other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity')
  if(use_edu=='edu') other_fes <- paste0(other_fes, ' + edu')
  other_fes <- paste0(other_fes, ' + ', int_iv, '*', iv)
  f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + as.factor(name)'))
  outliers_cy <- c('Yemen_1990','Guinea_1990')
  outliers <- c('Burundi','Afghanistan')
  ## Calculate leverage
  ## Note: dt does NOT vary with the loop's region n -- each iteration refits
  ## the same pooled model and only the plot title changes.
  dt <- d[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
  mod <- lm(formula = f, data = dt)
  inf <- influence.ME::influence(mod, obs=TRUE)
  dt[, cooks := cooks.distance(inf)]
  dfbetas <- data.table(dfbetas(inf))
  dfbetas[, country_year := dt[, country_year]]
  dfbetas <- melt(dfbetas, id.vars = 'country_year', measure.vars = names(dfbetas)[names(dfbetas)!='country_year'])
  ## Keep only observations past the +/- 2/sqrt(n) influence threshold.
  dfbetas_outliers <- dfbetas[value <= -(2/sqrt(dim(dfbetas)[1])) | value >= 2/sqrt(dim(dfbetas)[1]), ]
  gg <- ggplot(data=dfbetas_outliers) +
    geom_point(aes(x=country_year,
                   y=value)) +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 60, hjust = 1)) +
    geom_hline(yintercept = c(-(2/sqrt(dim(dfbetas)[1])),2/sqrt(dim(dfbetas)[1])), color='red') +
    ggtitle(n) +
    facet_wrap(~variable)
  print(gg)
}
dev.off()
## Correlation matrices
## Bivariate scatterplots (demeaned covariate vs. demeaned outcome), faceted
## by region/Global/Stage1, each annotated with the bivariate FE coefficients.
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/correlations_covs.pdf'), width = 12, height = 8)
## Make full dataset to facet by Global, Stage1, Regions
## NOTE(review): these assignments overwrite gbd_super_region in the model
## datasets created above -- anything rerun after this sees the new labels.
model_data_stage1[, gbd_super_region := 'Stage1']
model_data_global[, gbd_super_region := 'Global']
full_data <- rbind(d[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),], model_data_stage1, model_data_global)
all_vars <- c('lag5_r_size_15_24','r_ldi_pc','ldi_pc_gap','gbd_mx_shocks','polity2','urbanicity','epr','edu')
## One page per covariate (the loop reuses/overwrites the global `iv` and `f`;
## iv is re-set at the start of the Stage-3 section below).
for(iv in all_vars) {
  message(iv)
  other_fes <- ''
  f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + as.factor(name)'))
  mod_ssa <- lm(formula = f, data = model_data_stage1)
  mod_global <- lm(formula = f, data = model_data_global)
  gg <- ggplot() +
    geom_point(data=full_data,
               aes(x=get(paste0('dmean_',iv)),y=get(paste0('dmean_',dv)))) +
    # geom_text(data=full_data,
    #           aes(x=get(paste0('dmean_',iv)),y=get(paste0('dmean_',dv)),label=country_year)) +
    labs(x=iv,y=dv,title=paste0(iv,'\nGlobal coefficient: ', round(summary(mod_global)$coefficients[2,1], 2),
                                ' (p=',round(summary(mod_global)$coefficients[2,4],2), ')\nStage1 coefficient: ',
                                round(summary(mod_ssa)$coefficients[2,1],2), ' (p=',
                                round(summary(mod_ssa)$coefficients[2,4],2), ')')) +
    facet_wrap(~gbd_super_region) +
    theme_minimal()
  print(gg)
}
dev.off()
## Stage 3: try EPR quantiles vs. out-rate, and also EPR*growth vs. out-rate.
## Interaction model: cohort growth x absorptive capacity (here log LDI/pc).
## Both interacted terms are mean-centered below so the main effects are
## interpretable at the sample mean.
library(interplot)
iv <- 'lag5_r_size_15_24'
#pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/interaction_plots_', iv, '.pdf'), width = 12, height = 8)
#for(int_iv in c('epr','epr_15_24','unemp_15_24','log_ldi_pc')) {
int_iv <- 'log_ldi_pc'
message(int_iv)
d[, abs_cap := as.numeric(get(int_iv))]
#file <- c('abs_cap_int')
all_regions <- c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')
#if(file=='abs_cap_int') epr_name <- ':abs_cap'
#if(file=='abs_cap') epr_name <- 'abs_cap'
use_edu <- 'noedu'
dv <- 'log_out_rate'
#dv <- 'net_out_migration'
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + urbanicity + gbd_mx_shocks + epr')
#other_fes <- paste0(' + ', iv, '*as.factor(epr_group)')
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity')
#other_fes <- paste0(' + epr + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity')
#other_fes <- paste0(' + dmean_epr + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_polity2 + dmean_urbanicity')
#other_fes <- paste0(' + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_log_arable_pc')
other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + epr_15_24')
if(use_edu=='edu') other_fes <- paste0(other_fes, ' + edu')
## Add the growth x capacity interaction, then country fixed effects.
other_fes <- paste0(other_fes, ' + ', int_iv, '*', iv)
f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + as.factor(name)'))
#f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes))
outliers_cy <- c('Yemen_1990','Guinea_1990')
#,'Bahrain_2005','Kenya_2005'
outliers <- c('Burundi','Afghanistan')
## Run all models
## Run global model and Stage1 model.
#model_data_global <- d[gbd_super_region=='Sub-Saharan Africa' & !(name %in% outliers) & !(country_year %in% outliers_cy),]
#model_data_global <- d[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
## copy() avoids data.table reference semantics mutating d below.
model_data_global <- copy(d)
model_data_global[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean','High-income','Central Europe, Eastern Europe, and Central Asia'))]
#model_data_global <- model_data_global[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
model_data_global <- model_data_global[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
## Keep the raw growth values, then mean-center both interacted variables.
model_data_global[, (paste0('original_', iv)) := get(iv)]
model_data_global[, (iv) := get(iv) - mean(get(iv), na.rm=TRUE)]
model_data_global[, (int_iv) := get(int_iv) - mean(get(int_iv), na.rm=TRUE)]
mod_global <- lm(formula = f, data = model_data_global)
#ols_plot_dfbetas(mod_global)
global_coefs <- data.table(model='All countries',
                           name=names(mod_global$coefficients),
                           coef=mod_global$coefficients,
                           se=coef(summary(mod_global))[,2],
                           p=coef(summary(mod_global))[,4],
                           r=summary(mod_global)$r.squared,
                           r_adj=summary(mod_global)$adj.r.squared)
## Interactive peek at the non-fixed-effect rows (data.table `!i` anti-subset).
global_coefs[!(grep('as.factor', name)), ]
global_coefs <- rbind(global_coefs, data.table(model='All countries', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_global)$r.squared, summary(mod_global)$adj.r.squared, dim(model_data_global)[1])), fill=TRUE)
## Figure 2: interplot of the conditional growth coefficient by level of
## log(LDI/pc), overlaid with country-year points colored by GBD super region.
#pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/stage3_ldi_interaction_with_high_income',iv,'.pdf'), width = 20, height = 10)
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Figure_2_', Sys.Date(), '.pdf'), width = 20, height = 10)
## Bin raw growth into descending upper bounds (7,4,2,0); later iterations
## overwrite earlier ones, so each row keeps the tightest bound it satisfies.
for(q in c(7,4,2,0)) {
  model_data_global[original_lag5_r_size_15_24 <= q, growth_group := as.character(round(q,0))]
}
global_interplot <- interplot(m = mod_global, var1 = iv, var2 = int_iv) +
  geom_hline(yintercept = 0, color='red') +
  geom_vline(xintercept = 0, color='red') +
  ggtitle('Figure 2. Conditional coefficient for growth on out-migration based on level of lag-distributed income per capita.') +
  ylab(paste0('Conditional coefficient of growth on log(out-migration rate)')) + xlab(paste0('Level of mean-centered log(LDI/pc)')) + theme_minimal()
## Each point's y-value is the fitted conditional slope at that row's
## log(LDI/pc): beta_iv + beta_interaction * log_ldi_pc.
global_interplot +
  geom_jitter(data=model_data_global,
              aes(x=log_ldi_pc,
                  y=log_ldi_pc * mod_global$coefficients[paste0(iv,':',int_iv)] + mod_global$coefficients[iv],
                  alpha = growth_group,
                  fill = region_f),
              size=8,
              height = 0.1,
              width = 0.1,
              shape = 21) +
  #scale_size(name='Growth rate in 15-24 (lag-5)', breaks=c(0,2,4,6,8), range=c(1,10)) +
  scale_alpha_manual(values=c(.2,.5,.8,1),name='Growth rate in 15-24 (lag-5)',labels=c('< 0','0-2','2-4','4+')) +
  scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
  guides(fill = guide_legend(override.aes = list(size = 10)))
dev.off()
## Export the interplot line data ('fake' is interplot's internal x-variable
## name) and the scatter data behind Figure 2.
coef_data <- data.table(global_interplot$data)
setnames(coef_data, 'fake', 'log_ldi_pc')
write.csv(coef_data, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Figure_2_coef_data.csv', row.names=FALSE)
scatter_data <- copy(model_data_global)
scatter_data[, predicted_coef := log_ldi_pc * mod_global$coefficients[paste0(iv,':',int_iv)] + mod_global$coefficients[iv]]
scatter_data <- scatter_data[, c('name','year','region_f','original_lag5_r_size_15_24','log_ldi_pc','predicted_coef')]
write.csv(scatter_data, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Figure_2_scatter_data.csv', row.names=FALSE)
#global_coefs <- rbind(global_coefs, data.table(model='All countries', name=paste0(epr_name,'_ref'), coef=0), fill=TRUE)
#interplot(m = mod_global, var1 = iv, var2 = 'abs_cap') + theme_minimal()
## Stage 1 model: same specification, restricted to stage1 countries.
## Run global model and Stage1 model.
model_data_stage1 <- d[stage1==1 & !(name %in% outliers) & !(country_year %in% outliers_cy),]
## Mean-center the main effect and the moderator within the stage1 sample.
model_data_stage1[, (iv) := get(iv) - mean(get(iv), na.rm=TRUE)]
model_data_stage1[, (int_iv) := get(int_iv) - mean(get(int_iv), na.rm=TRUE)]
mod_stage1 <- lm(formula = f, data = model_data_stage1)
stage1_coefs <- data.table(model='Stage1',
                           name=names(mod_stage1$coefficients),
                           coef=mod_stage1$coefficients,
                           se=coef(summary(mod_stage1))[,2],
                           p=coef(summary(mod_stage1))[,4],
                           r=summary(mod_stage1)$r.squared,
                           r_adj=summary(mod_stage1)$adj.r.squared)
## Interactive inspection of non-fixed-effect coefficients.
stage1_coefs[!(grep('as.factor', name)), ]
stage1_coefs <- rbind(stage1_coefs, data.table(model='Stage1', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_stage1)$r.squared, summary(mod_stage1)$adj.r.squared, dim(model_data_stage1)[1])), fill=TRUE)
#stage1_coefs <- rbind(stage1_coefs, data.table(model='Stage1', name=paste0(epr_name,'_ref'), coef=0), fill=TRUE)
#interplot(m = mod_stage1, var1 = iv, var2 = 'abs_cap') + theme_minimal()
## Run region models.
## Fit the country-FE interaction model within one GBD super region `n` and
## return a tidy coefficient table with the country fixed-effect dummies
## dropped, plus R^2 / adjusted R^2 / N appended as extra rows.
## Relies on `d`, `f`, `iv`, `int_iv`, `outliers`, `outliers_cy` from the
## enclosing script environment.
run_region_lm <- function(n) {
  message(n)
  reg_dt <- d[gbd_super_region==n & !(name %in% outliers) & !(country_year %in% outliers_cy),]
  ## Mean-center the main effect and the moderator within the region.
  for(v in c(iv, int_iv)) reg_dt[, (v) := get(v) - mean(get(v), na.rm=TRUE)]
  fit <- lm(formula = f, data = reg_dt)
  est <- coef(summary(fit))
  ## Drop the as.factor(name) fixed-effect rows from the reported table.
  keep_coef <- !grepl('as.factor', names(fit$coefficients))
  keep_est <- !grepl('as.factor', rownames(est))
  out <- data.table(model=n,
                    name=names(fit$coefficients)[keep_coef],
                    coef=fit$coefficients[keep_coef],
                    se=est[,2][keep_est],
                    p=est[,4][keep_est])
  ## Append fit statistics as pseudo-coefficient rows.
  out <- rbind(out,
               data.table(model=n,
                          name=c('R^2','Adj. R^2','N'),
                          coef=c(summary(fit)$r.squared, summary(fit)$adj.r.squared, nrow(reg_dt))),
               fill=TRUE)
  return(out)
}
## Fit per-region models, stack with the global and Stage1 tables, and save.
message('Fitting region LMs...')
reg_models <- rbindlist(lapply(c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'),
                               run_region_lm))
reg_models <- rbind(reg_models, global_coefs, stage1_coefs, fill=TRUE)
saveRDS(reg_models, paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/stage3_models_', iv, '_', Sys.Date(), '.RDS'))
# pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/', file, '.pdf'), width = 12, height = 8)
# plot_data <- reg_models[grep(epr_name,name), ]
# plot_data[, name := gsub('lag5_r_size_15_24','',name)]
# plot_data[, name := gsub(':','',name)]
# gg <- ggplot(plot_data) +
# geom_point(aes(x=name,
# y=coef),
# size=5) +
# geom_errorbar(aes(x=name,
# ymin=coef-(se*1.96),
# ymax=coef+(se*1.96))) +
# geom_hline(yintercept = 0, color='red', size=1) +
# labs(y='Coefficient (log out-rate)', x='EPR quantile', title='Non-linear relationship between out-migration and EPR.') +
# facet_wrap(~model) +
# theme_minimal()
# print(gg)
# dev.off()
# run_region_interplot <- function(n) {
# model_data <- d[gbd_super_region==n & !(name %in% outliers) & !(country_year %in% outliers_cy),]
# model_data[, (iv) := get(iv) - mean(get(iv))]
# model_data[, (int_iv) := get(int_iv) - mean(get(int_iv))]
# mod <- lm(formula = f, data = model_data)
# int <- interplot(m = mod, var1 = iv, var2 = int_iv) +
# geom_hline(yintercept = 0, color='red') +
# geom_vline(xintercept = 0, color='red') +
# ggtitle(paste0(n, ifelse(summary(mod)$coefficients[paste0(iv,':',int_iv),4] < 0.05, ' - SIGNIFICANT', ''))) +
# ylab(paste0('Coefficient on ', iv)) + xlab(paste0('Level of mean-centered ', int_iv)) + theme_minimal()
# return(int)
# }
# global_interplot <- interplot(m = mod_global, var1 = iv, var2 = int_iv, hist=TRUE) +
# geom_hline(yintercept = 0, color='red') +
# geom_vline(xintercept = 0, color='red') +
# ggtitle(paste0('All countries', ifelse(summary(mod_global)$coefficients[paste0(iv,':',int_iv),4] < 0.05, ' - SIGNIFICANT', ''))) +
# ylab(paste0('Coefficient on ', iv)) + xlab(paste0('Level of mean-centered ', int_iv)) + theme_minimal()
# stage1_interplot <- interplot(m = mod_stage1, var1 = iv, var2 = int_iv) +
# geom_hline(yintercept = 0, color='red') +
# geom_vline(xintercept = 0, color='red') +
# ggtitle(paste0('Stage 1', ifelse(summary(mod_stage1)$coefficients[paste0(iv,':',int_iv),4] < 0.05, ' - SIGNIFICANT', ''))) +
# ylab(paste0('Coefficient on ', iv)) + xlab(paste0('Level of mean-centered ', int_iv)) + theme_minimal()
# all_interplots <- lapply(c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'), run_region_interplot)
# all_interplots[[5]] <- global_interplot
# all_interplots[[6]] <- stage1_interplot
# int_plots <- marrangeGrob(all_interplots, nrow=2, ncol=3, top=int_iv)
# print(int_plots)
## Second save of the same object under an interaction-specific filename.
saveRDS(reg_models, paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/', iv, 'interaction_stage2_models_', int_iv, '_',Sys.Date(),'.RDS'))
#}
# dev.off()
library(rmarkdown)
## Render the results tables; all_tables.Rmd reads the RDS files saved above.
render("C:/Users/ngraetz/Documents/repos/cfr_migration/all_tables.Rmd", output_file=paste0("C:/Users/ngraetz/Documents/repos/cfr_migration/all_tables_", Sys.Date(), ".pdf"))
## Try to make effect size plots.
## Effect-size section: refit the global interaction model with an expanded
## control set (adds polity2 and urbanicity) for the decomposition below.
int_iv <- 'log_ldi_pc'
iv <- 'lag5_r_size_15_24'
message(int_iv)
d[, abs_cap := as.numeric(get(int_iv))]
all_regions <- c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')
use_edu <- 'noedu'
dv <- 'log_out_rate'
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity')
#other_fes <- paste0(' + dmean_epr + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_polity2 + dmean_urbanicity + dmean_log_arable_pc')
#other_fes <- paste0(' + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_log_arable_pc')
#other_fes <- paste0(' + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_epr_15_24')
other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + epr_15_24 + polity2 + urbanicity')
#other_fes <- ' + log_ldi_pc'
if(use_edu=='edu') other_fes <- paste0(other_fes, ' + edu')
other_fes <- paste0(other_fes, ' + ', int_iv, '*', iv)
f <- as.formula(paste0(paste0('',dv), ' ~ ', paste0('',iv,''), other_fes, ' + as.factor(name)'))
outliers_cy <- c('Yemen_1990','Guinea_1990')
outliers <- c('Burundi','Afghanistan')
## Strip the pattern `x` from both the row and column names of `matrix`
## (used to remove prefixes like 'dmean_' from correlation matrices).
clean_names <- function(x, matrix) {
  stripped_rows <- gsub(x, '', rownames(matrix))
  stripped_cols <- gsub(x, '', colnames(matrix))
  rownames(matrix) <- stripped_rows
  colnames(matrix) <- stripped_cols
  matrix
}
## Run all models
## Restrict to census years and the four developing-region super regions.
model_data_global <- copy(d[year %in% c(1990,1995,2000,2005)])
## Strip parentheses from country names so they can round-trip through
## coefficient names like 'as.factor(name)...'.
model_data_global[, name := gsub('\\(','',name)]
model_data_global[, name := gsub('\\)','',name)]
model_data_global[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean','High-income','Central Europe, Eastern Europe, and Central Asia'))]
model_data_global <- model_data_global[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
model_data_global[, (paste0('original_', iv)) := get(iv)]
model_data_global[, (iv) := get(iv) - mean(get(iv), na.rm=TRUE)]
model_data_global[, (int_iv) := get(int_iv) - mean(get(int_iv), na.rm=TRUE)]
## Correlation matrix of the demeaned covariates (diagnostics/appendix).
global_cor_matrix <- round(cor(model_data_global[, c('dmean_log_out_rate','dmean_lag5_r_size_15_24','dmean_r_ldi_pc','dmean_ldi_pc_gap','dmean_gbd_mx_shocks','dmean_polity2','dmean_urbanicity','dmean_epr','dmean_edu','dmean_log_arable_pc')], use='complete.obs'),2)
global_cor_matrix <- clean_names(x='dmean_', global_cor_matrix)
global_cor_matrix <- clean_names(x='lag5_', global_cor_matrix)
mod_global <- lm(formula = f, data = model_data_global)
global_coefs <- data.table(model='All countries',
                           name=names(mod_global$coefficients),
                           coef=mod_global$coefficients,
                           se=coef(summary(mod_global))[,2],
                           p=coef(summary(mod_global))[,4],
                           r=summary(mod_global)$r.squared,
                           r_adj=summary(mod_global)$adj.r.squared)
## Interactive inspection of non-fixed-effect coefficients.
global_coefs[!(grep('as.factor', name)), ]
global_coefs <- rbind(global_coefs, data.table(model='All countries', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_global)$r.squared, summary(mod_global)$adj.r.squared, dim(model_data_global)[1])), fill=TRUE)
## Additive decomposition: per-covariate contribution to the linear predictor.
## NOTE(review): the formula `f` uses raw covariate names (r_ldi_pc, ...) but
## this loop looks up dmean_* names in global_coefs -- confirm these lookups
## actually match fitted coefficient names before trusting the cont_* columns.
for(v in c(int_iv,paste0('dmean_',c('r_ldi_pc','ldi_pc_gap','gbd_mx_shocks','polity2','urbanicity','epr')))) {
  model_data_global[, (paste0('cont_',v)) := get(v) * global_coefs[name==v, coef]]
}
## Growth contribution uses the conditional slope at each row's log(LDI/pc).
model_data_global[, cont_interaction := get(iv) * (global_coefs[name==iv, coef] + (global_coefs[name==paste0(iv,':',int_iv), coef] * log_ldi_pc))]
## Country fixed-effect contribution; Algeria is the reference level (0).
for(c in c('Algeria',gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]))) {
  if(c!='Algeria') {
    model_data_global[name==c, cont_country := global_coefs[name==paste0('as.factor(name)', c), coef]]
  }
  if(c=='Algeria') model_data_global[name==c, cont_country := 0]
}
model_data_global[, cont_intercept := global_coefs[name=='(Intercept)', coef]]
## Predicted log(out-rate) = sum of all cont_* pieces (residual excluded here).
model_data_global[, pred_out := apply(.SD, 1, sum, na.rm=T), .SDcols=grep("^cont_", names(model_data_global))]
## Report countries that did not receive a fixed effect in the fitted model.
## BUG FIX: the inner for-loop previously had no body, which is a syntax
## error in R ("unexpected '}'"); it now prints each candidate variable name
## for the flagged country.
for(c in unique(model_data_global[, name])) {
  ## 'as.factor(name)' is 15 characters, so country names start at char 16.
  if(!(c %in% substr(global_coefs$name, 16, 50))) {
    message(paste0(c, ' is missing these variables...'))
    for(v in c(int_iv,'r_ldi_pc','ldi_pc_gap','gbd_mx_shocks','polity2','urbanicity')) {
      message(paste0('  ', v))
    }
  }
}
## Spot checks on the decomposition columns (interactive inspection).
model_data_global[108, c('name','log_out_rate','pred_out',"cont_log_ldi_pc","cont_dmean_r_ldi_pc","cont_dmean_ldi_pc_gap","cont_dmean_gbd_mx_shocks","cont_dmean_polity2","cont_dmean_urbanicity","cont_interaction","cont_country","cont_intercept")]
model_data_global[lag5_r_size_15_24 >= 2, c('log_out_rate','pred_out',"lag5_r_size_15_24","log_ldi_pc","cont_interaction")]
## Simulate uncertainty
## Draw 1000 coefficient vectors from the sampling distribution (arm::sim)
## and rebuild the growth contribution under each draw.
library(arm)
sims <- sim(mod_global, 1000)
sims <- as.data.table(sims@coef)
sims[, draw := 1:1000]
sims[, index := 1]
draws <- dcast(sims, index ~ draw, value.var=c('lag5_r_size_15_24','lag5_r_size_15_24:log_ldi_pc'))
plot_data <- copy(model_data_global)
for(draw in 1:1000) {
  plot_data[, (paste0('cont_interaction_',draw)) := get(iv) * (draws[, get(paste0(iv,'_',draw))] + (draws[, get(paste0(iv,':',int_iv,'_',draw))] * log_ldi_pc))]
}
## BUG FIX: summarize over the 1000 draw columns only. The previous pattern
## grep("^cont_interaction_") also matched the freshly created
## cont_interaction_mean (and _upper) columns, contaminating the quantiles.
draw_cols <- paste0('cont_interaction_', 1:1000)
plot_data[, cont_interaction_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=draw_cols]
plot_data[, cont_interaction_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=draw_cols]
plot_data[, cont_interaction_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=draw_cols]
#plot_data[, country_f := factor(name, levels=plot_data$name[order(plot_data[, cont_interaction])])]
## Order country-years by mean simulated contribution for plotting.
plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction_mean])])]
plot_data[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
## Figure 3: bar charts of total predicted log(out-migration) attributable to
## growth, for country-years whose conditional growth effect is positive.
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Figure_3_', Sys.Date(), '.pdf'), width = 12, height = 8)
## Panel 1: all country-years with a positive lower bound and a large point
## estimate; bars show the point estimate with 95% simulation intervals.
ggplot() +
  geom_bar(data=plot_data[cont_interaction_lower >= 0 & cont_interaction_mean >= 0.75,],
           aes(x=country_year_f,
               y=cont_interaction,
               fill=region_f),
           color='black',
           stat='identity') +
  geom_errorbar(data=plot_data[cont_interaction_lower >= 0 & cont_interaction_mean >= 0.75,],
                aes(x=country_year_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
  theme_minimal() +
  scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
  labs(y='Total predicted log(out-migration) due to growth (15-24, lag-5)', x='', title='Total predicted increase in log(out-migration) associated with growth for\nall countries where the growth coefficient conditional on log(LDI/pc) is positive.') +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
## Panel 2: same plot restricted to 2005-2010, one bar per country,
## plotting the simulation mean instead of the point estimate.
plot_data2 <- copy(plot_data[year==2005, ])
plot_data2[, country_f := factor(name, levels=plot_data2$name[order(plot_data2[, cont_interaction_mean])])]
#plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction])])]
plot_data2[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
ggplot() +
  geom_bar(data=plot_data2[cont_interaction_lower >= 0,],
           aes(x=country_f,
               y=cont_interaction_mean,
               fill=region_f),
           color='black',
           stat='identity') +
  geom_errorbar(data=plot_data2[cont_interaction_lower >= 0,],
                aes(x=country_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
  theme_minimal() +
  scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
  labs(y='Total predicted log(out-migration) due to growth (15-24, lag-5)', x='', title='2005-2010: Total predicted increase in log(out-migration) associated with growth for\nall countries where the growth coefficient conditional on log(LDI/pc) is positive.') +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
dev.off()
## Export descriptive covariates behind the models.
write.csv(model_data_global[, c('name','year','out_rate','lag5_r_size_15_24',"log_ldi_pc","r_ldi_pc","ldi_pc_gap","gbd_mx_shocks","polity2","log_arable_pc")],
          'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/descriptives.csv', row.names = FALSE)
## Full model prediction for countries that received a fixed effect.
model_data_global[name %in% gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name])
                  , full_pred := predict(mod_global, model_data_global[name %in% gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]),])]
## BUG FIX: this manual sum previously referenced cont_r_ldi_pc etc., columns
## that are never created (the contribution loop builds cont_dmean_* names);
## use the cont_dmean_* columns, consistent with the inspection line below.
model_data_global[name=='Ethiopia' & year==2005, cont_interaction + cont_log_ldi_pc + cont_dmean_r_ldi_pc + cont_dmean_ldi_pc_gap + cont_dmean_gbd_mx_shocks + cont_dmean_polity2 + cont_dmean_urbanicity + cont_intercept + cont_country]
model_data_global[name=='Ethiopia' & year==2005, c('name','log_out_rate','pred_out',"cont_log_ldi_pc","cont_dmean_r_ldi_pc","cont_dmean_ldi_pc_gap","cont_dmean_gbd_mx_shocks","cont_dmean_polity2","cont_dmean_urbanicity","cont_interaction","cont_country","cont_intercept")]
## Try Shapley decomposition for total number of migrants
## NOTE(review): '96' is a hard-coded residual row label assumed to be the
## Ethiopia-2005 observation -- fragile if the input data change; verify.
model_data_global[name=='Ethiopia' & year==2005, cont_residual := mod_global$residuals['96']]
model_data_global[name=='Ethiopia' & year==2005, c("cont_dmean_epr","cont_residual","cont_log_ldi_pc","cont_dmean_r_ldi_pc","cont_dmean_ldi_pc_gap","cont_dmean_gbd_mx_shocks","cont_dmean_polity2","cont_dmean_urbanicity","cont_interaction","cont_country","cont_intercept")]
## Recompose the observed out-rate in exp space from all pieces.
exp(model_data_global[name=='Ethiopia' & year==2005, apply(.SD, 1, sum, na.rm=T), .SDcols=c("cont_dmean_epr","cont_residual","cont_log_ldi_pc","cont_dmean_r_ldi_pc","cont_dmean_ldi_pc_gap","cont_dmean_gbd_mx_shocks","cont_dmean_polity2","cont_dmean_urbanicity","cont_interaction","cont_country","cont_intercept")])
exp(model_data_global[name=='Ethiopia' & year==2005, log_out_rate])
exp(model_data_global[name=='Ethiopia' & year==2005, pred_out])
## Baseline = intercept + country FE only; each covariate's exp-space
## contribution is measured against this baseline.
compare <- exp(model_data_global[name=='Ethiopia' & year==2005, apply(.SD, 1, sum, na.rm=T), .SDcols=c('cont_intercept','cont_country')])
conts <- c()
for(v in c("cont_residual","cont_dmean_epr","cont_log_ldi_pc","cont_dmean_r_ldi_pc","cont_dmean_ldi_pc_gap","cont_dmean_gbd_mx_shocks","cont_dmean_polity2","cont_dmean_urbanicity","cont_interaction")) {
  conts <- c(conts, exp(model_data_global[name=='Ethiopia' & year==2005, apply(.SD, 1, sum, na.rm=T), .SDcols=c(v,'cont_intercept','cont_country')]) - compare)
}
## Figure 4: world map of the growth contribution (2005-2010).
library(sp)
library(rgdal)
library(rgeos)
library(maptools)
library(raster)
map <- readOGR("C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/shapefile//ne_50m_admin_0_countries.shp")
#map_simple <- gSimplify(map, tol = 0.5, topologyPreserve = TRUE)
## Recode Natural Earth country names to match the modeling data's names.
map$plot_name <- as.character(map$name)
map$plot_name[map$plot_name=='Congo (Brazzaville)'] <- 'Congo'
map$plot_name[map$plot_name=='S. Sudan'] <- 'South Sudan'
map$plot_name[map$plot_name=='Eq. Guinea'] <- 'Equatorial Guinea'
map$plot_name[map$plot_name=='Guinea Bissau'] <- 'Guinea-Bissau'
map$plot_name[map$plot_name=='Dominican Rep.'] <- 'Dominican Republic'
map$plot_name[map$plot_name=='Central African Rep.'] <- 'Central African Republic'
map$plot_name[map$plot_name=='Tanzania'] <- "United Republic of Tanzania"
map$plot_name[map$plot_name=='Bolivia'] <- "Bolivia (Plurinational State of)"
map$plot_name[map$plot_name=='Iran'] <- "Iran (Islamic Republic of)"
map$plot_name[map$plot_name=='Ivory Coast'] <- "Cote d'Ivoire"
map$plot_name[map$plot_name=='Congo (Kinshasa)'] <- "Democratic Republic of the Congo"
map$plot_name[map$plot_name=='Laos'] <- "Lao People's Democratic Republic"
map$plot_name[map$plot_name=='Syria'] <- "Syrian Arab Republic"
map$plot_name[map$plot_name=='Venezuela'] <- "Venezuela (Bolivarian Republic of)"
map$plot_name[map$plot_name=='Vietnam'] <- "Viet Nam"
## Print any modeled countries still unmatched in the shapefile.
plot_data[!(name %in% map$plot_name), name]
plot_data[, plot_name := name]
map_sp <- merge(map, plot_data[year==2005, c('plot_name','cont_interaction')], by = 'plot_name')
map_data <- fortify(map_sp, region='plot_name')
map_data <- as.data.table(merge(map_data, map_sp@data, by.x = "id", by.y = "plot_name"))
## Truncate negative contributions to zero for the map color scale.
map_data[cont_interaction <= 0, cont_interaction := 0]
map_data <- map_data[!(name %in% c('Antarctica','Greenland')),]
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Figure_4_', Sys.Date(), '.pdf'), width = 20, height = 10)
this_gg <- ggplot() +
  geom_polygon(data = map_data,
               aes(x = long,
                   y = lat,
                   group = group,
                   fill = cont_interaction),
               alpha = 0.8,
               color = 'black') +
  ## NOTE(review): brewer.pal requires RColorBrewer, which is not loaded in
  ## this chunk -- confirm it is attached earlier in the script.
  scale_fill_gradientn(name = "Growth rate *\nconditional coefficient", na.value = 'grey', colors=rev(brewer.pal(10,'Spectral')), limits=c(0,max(map_data$cont_interaction))) +
  labs(x='',y='',title='Observed growth rate times fitted growth coefficient conditional on LDI/pc, 2005-2010.') +
  theme_minimal()
this_gg
dev.off()
## Third specification: growth interacted with a categorical LDI/pc quantile
## group instead of the continuous moderator.
message(int_iv)
d[, abs_cap := as.numeric(get(int_iv))]
all_regions <- c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean')
use_edu <- 'noedu'
dv <- 'log_out_rate'
int_iv <- 'log_ldi_pc_group'
#other_fes <- paste0(' + r_ldi_pc + ldi_pc_gap + gbd_mx_shocks + polity2 + urbanicity')
other_fes <- paste0('dmean_epr + dmean_r_ldi_pc + dmean_ldi_pc_gap + dmean_gbd_mx_shocks + dmean_polity2 + dmean_urbanicity')
#other_fes <- ' + log_ldi_pc'
if(use_edu=='edu') other_fes <- paste0(other_fes, ' + edu')
other_fes <- paste0(other_fes, ' + ', int_iv, '*', iv)
f <- as.formula(paste0(paste0('',dv), ' ~ ', other_fes, ' + as.factor(name)'))
outliers_cy <- c('Yemen_1990','Guinea_1990')
outliers <- c('Burundi','Afghanistan')
## Run all models
model_data_global <- copy(d)
model_data_global[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean','High-income','Central Europe, Eastern Europe, and Central Asia'))]
model_data_global <- model_data_global[gbd_super_region %in% all_regions & !(name %in% outliers) & !(country_year %in% outliers_cy),]
## Build quantile groups: q descends 1 -> .25, so each row ends up labeled
## with the smallest quantile cutoff it falls below.
## NOTE(review): `p=` partially matches quantile()'s `probs` argument.
for(q in c(1,.75,.5,.25)) {
  epr_q <- quantile(model_data_global[, epr], p=q)
  epr_15_24_q <- quantile(model_data_global[, epr_15_24], p=q)
  unemp_15_24_q <- quantile(model_data_global[, unemp_15_24], p=q)
  log_ldi_pc_q <- quantile(model_data_global[, log_ldi_pc], p=q)
  message(round(log_ldi_pc_q, 3))
  model_data_global[epr <= epr_q, epr_group := as.character(round(epr_q,0))]
  model_data_global[epr_15_24 <= epr_15_24_q, epr_15_24_group := as.character(round(epr_15_24_q))]
  model_data_global[unemp_15_24 <= unemp_15_24_q, unemp_15_24_group := as.character(round(unemp_15_24_q))]
  model_data_global[log_ldi_pc <= log_ldi_pc_q, log_ldi_pc_group := as.character(round(log_ldi_pc_q,2))]
}
model_data_global[, (paste0('original_', iv)) := get(iv)]
## BUG FIX: mean() here previously omitted na.rm=TRUE (unlike every other
## mean-centering step in this script), so a single missing value would turn
## the entire centered column into NA.
model_data_global[, (iv) := get(iv) - mean(get(iv), na.rm=TRUE)]
#model_data_global[, (int_iv) := get(int_iv) - mean(get(int_iv))]
mod_global <- lm(formula = f, data = model_data_global)
global_coefs <- data.table(model='All countries',
                           name=names(mod_global$coefficients),
                           coef=mod_global$coefficients,
                           se=coef(summary(mod_global))[,2],
                           p=coef(summary(mod_global))[,4],
                           r=summary(mod_global)$r.squared,
                           r_adj=summary(mod_global)$adj.r.squared)
## Interactive inspection of non-fixed-effect coefficients.
global_coefs[!(grep('as.factor', name)), ]
global_coefs <- rbind(global_coefs, data.table(model='All countries', name=c('R^2','Adj. R^2','N'), coef=c(summary(mod_global)$r.squared, summary(mod_global)$adj.r.squared, dim(model_data_global)[1])), fill=TRUE)
## Country fixed-effect values; Algeria is the reference level (0).
for(c in c('Algeria',gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]))) {
  if(c!='Algeria') {
    model_data_global[name==c, country := global_coefs[name==paste0('as.factor(name)', c), coef]]
  }
  if(c=='Algeria') model_data_global[name==c, country := 0]
}
model_data_global[, intercept := global_coefs[name=='(Intercept)', coef]]
## Map each residual back to its source row (residual names are row indices).
for(i in names(mod_global$residuals)) model_data_global[as.numeric(i), residual := mod_global$residuals[i]]
for(v in c('log_ldi_pc','lag5_r_size_15_24')) {
  model_data_global[, (paste0('cont_',v)) := get(v) * global_coefs[name==v, coef]]
}
## Sanity check: the pieces should re-compose the observed out-rate for Niger.
model_data_global[name=='Niger' & year==2005, c('country','residual','cont_lag5_r_size_15_24','cont_log_ldi_pc','intercept')]
exp(model_data_global[name=='Niger' & year==2005, country + residual + cont_lag5_r_size_15_24 + cont_log_ldi_pc + intercept])
model_data_global[name=='Niger' & year==2005, out_rate]
## Make permutations
#fes <- c("cont_country","cont_residual","log_ldi_pc","lag5_r_size_15_24","dmean_epr","dmean_r_ldi_pc","dmean_ldi_pc_gap","dmean_gbd_mx_shocks","dmean_polity2","dmean_urbanicity")
#fes <- c('country','residual','log_ldi_pc','lag5_r_size_15_24','intercept')
## Effects over which the Shapley decomposition below averages.
fes <- c('country','log_ldi_pc','lag5_r_size_15_24','ldi_pc_gap','r_ldi_pc','gbd_mx_shocks','epr_15_24')
## Build every on/off combination of the effects in `fes`: a data.table with
## one column per effect, each cell either `start_year` or `end_year`
## (2^length(fes) rows).
make_permutations <- function(fes, start_year, end_year) {
  choice_sets <- lapply(seq_along(fes), function(i) c(start_year, end_year))
  perm_dt <- as.data.table(expand.grid(choice_sets))
  setnames(perm_dt, names(perm_dt), fes)
  return(perm_dt)
}
## Enumerate all on(1)/off(0) states for the decomposition effects.
permutations <- make_permutations(fes=fes, start_year=0, end_year=1)
#permutations <- permutations[country==1 & intercept==1, ]
## Map each residual back to its source row (residual names are row indices).
for(i in names(mod_global$residuals)) model_data_global[as.numeric(i), residual := mod_global$residuals[i]]
## BUG FIX: the following inspection line referenced `this_dt`, which only
## exists inside calc_cont()'s permutation helper and is undefined at top
## level, so it errored when run. Kept for reference, commented out:
#this_dt[name=='Benin', c('name','year','cont_country','cont_residual','cont_log_ldi_pc','cont_lag5_r_size_15_24','cont_intercept', 'lag5_r_size_15_24','with_change','without_change','diff')]
## Calculate contribution
## Shapley-style contribution of one effect `fe`: average, over all
## permutations of the other effects being switched on/off, of the change in
## exp(prediction) from including vs. excluding `fe`.
## Relies on `permutations`, `fes`, and `global_coefs` from the enclosing
## script environment; `d` is the modeling data.table.
calc_cont <- function(fe, start_year, end_year, d) {
  message(fe)
  fe_permutations <- permutations[get(fe)==end_year, ]
  other_fes <- fes[fes!=fe]
  calculate_permutation <- function(p, full_dt) {
    ## Select permutation.
    this_dt <- copy(full_dt)
    p_option <- fe_permutations[p,]
    ## Calculate contribution of this FE under this permutation: zero out any
    ## covariate that is "off", then rebuild its cont_ column.
    for(v in fes[!(fes %in% c('country','residual','intercept'))]) {
      if(p_option[, get(v)]==0) this_dt[, (v) := 0]
      this_dt[, (paste0('cont_',v)) := get(v) * global_coefs[name==v, coef]]
    }
    this_dt[, cont_intercept := global_coefs[grep('Intercept',name), coef]]
    ## BUG FIX: match country fixed effects exactly. The previous
    ## grep(c, name) lookup could match multiple coefficients when one country
    ## name is a substring of another (e.g. 'Niger' also matches 'Nigeria').
    for(c in c('Algeria',gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]))) {
      if(c!='Algeria') this_dt[name==c, cont_country := global_coefs[name==paste0('as.factor(name)', c), coef]]
      if(c=='Algeria') this_dt[name==c, cont_country := 0]
    }
    this_dt[, cont_residual := residual]
    ## Fold residual and intercept into the country term so they toggle together.
    this_dt[, cont_country := cont_country + cont_residual + cont_intercept]
    #for(v in c('country','residual','intercept')) {
    for(v in c('country')) {
      if(p_option[, get(v)]==0) this_dt[, (paste0('cont_', v)) := 0]
    }
    iv <- 'lag5_r_size_15_24'
    int_iv <- 'log_ldi_pc'
    ## Growth contribution uses the conditional slope at each row's log(LDI/pc).
    this_dt[, cont_lag5_r_size_15_24 := get(iv) * (global_coefs[name==iv, coef] + (global_coefs[name==paste0(iv,':',int_iv), coef] * log_ldi_pc))]
    #this_dt[, cont_interaction := 0]
    #this_dt[, cont_interaction_nochange := cont_interaction]
    #if(fe=='lag5_r_size_15_24') this_dt[, cont_interaction_nochange := 0]
    #if(fe=='log_ldi_pc') this_dt[, cont_interaction_nochange := get(iv) * (global_coefs[name==iv, coef])]
    ## Calculate total difference in prediction (exp space) attributable to
    ## this covariate under this permutation.
    this_dt[, with_change := apply(.SD, 1, sum, na.rm=T), .SDcols=c(paste0('cont_',fe),paste0('cont_',other_fes))]
    this_dt[, without_change := apply(.SD, 1, sum, na.rm=T), .SDcols=c(paste0('cont_',other_fes))]
    this_dt[, diff := exp(with_change) - exp(without_change)]
    return(this_dt)
  }
  #total_cont <- mean(unlist(lapply(1:dim(fe_permutations)[1], calculate_permutation, full_dt=d)))
  ## NOTE(review): rbind.fill is from plyr -- confirm it is loaded upstream.
  all_diffs <- as.data.table(rbind.fill(lapply(1:dim(fe_permutations)[1], calculate_permutation, full_dt=d)))
  #total_cont <- data.table(cov=fe, cont=total_cont)
  ## Shapley averaging step: mean over permutations, per country-year.
  all_diffs <- all_diffs[, list(cont=mean(diff)), by=c('name','year')]
  all_diffs[, fe := fe]
  return(all_diffs)
}
## Run the decomposition for every effect and compare the re-composed rate to
## the observed out-rate.
all_conts <- rbindlist(lapply(c(fes), calc_cont, start_year=0, end_year=1, d=model_data_global))
test <- all_conts[, list(decomp_out_rate=sum(cont)), by=c('name','year')]
## Drop any stale column before merging the fresh decomposition back on.
model_data_global$decomp_out_rate <- NULL
model_data_global <- merge(model_data_global, test, by=c('name','year'))
model_data_global[name=='Ethiopia', c('name','year','out_rate','decomp_out_rate')]
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Shapley_decomp_comparison.pdf'), width = 12, height = 8)
## Scatter decomposed vs. observed rates; extreme values are clipped from view
## and the title reports how many country-years are shown.
ggplot() +
  geom_point(data=model_data_global[decomp_out_rate<100 & decomp_out_rate>-5,],
             aes(x=decomp_out_rate,
                 y=out_rate)) +
  geom_abline(slope=1, intercept=0) +
  ggtitle(paste0('Showing ', length(model_data_global[decomp_out_rate<100 & decomp_out_rate>-5, decomp_out_rate]), '/', length(model_data_global[, decomp_out_rate]), ' country-years.')) +
  theme_minimal()
dev.off()
## Predict on all permutations.
## Shapley contribution of one effect `fe` to the change in exp(prediction)
## between start_year and end_year, averaged over all permutations of the
## other effects' start/end states. Expects `d` reshaped wide with
## `<effect>_<year>` columns (e.g. log_ldi_pc_0 / log_ldi_pc_1, residual_0 /
## residual_1); uses `permutations` from the enclosing environment.
calculate_contribution <- function(fe, fes, start_year, end_year, coefs, d) {
  ## Grab all permutations where this fixed effect is changing (2010) and calculate difference vs. difference if it had not changed (1990).
  ## The difference of these two differences is the "contribution" of change in that fixed effect WITHIN this permutation (though this
  ## difference seems to be identical across permutations).
  message(paste0('Calculating contribution from ', fe, '...'))
  fe_permutations <- permutations[get(fe)==end_year, ]
  other_fes <- fes
  other_fes <- other_fes[other_fes!=fe]
  calculate_permutation <- function(p, dt, coefs) {
    ## Select permutation.
    this_dt <- copy(dt)
    p_option <- fe_permutations[p,]
    ## Assign values to be added from all effects besides the target effect.
    for(other_fe in other_fes) {
      if(!(other_fe %in% c('residual'))) {
        ## Assign relevant fixed effect values.
        if(p_option[, get(other_fe)]==end_year) this_dt[, (paste0('p_',other_fe)) := get(paste0(other_fe,'_', end_year)) * coefs[name==other_fe, coef]]
        if(p_option[, get(other_fe)]==start_year) this_dt[, (paste0('p_',other_fe)) := get(paste0(other_fe,'_', start_year)) * coefs[name==other_fe, coef]]
      }
      if(other_fe=='residual') {
        ## Assign relevant value for residual.
        if(p_option[, residual]==end_year) this_dt[, p_residual := get(paste0('residual_',end_year))] ## MAKE SURE RESIDUAL IS IN LOGIT SPACE - it comes in normal space from INLA object.
        if(p_option[, residual]==start_year) this_dt[, p_residual := get(paste0('residual_',start_year))]
      }
    }
    ## Generate full prediction for this permutation based on whether FE value stayed the same or changed over the period.
    ## Assign target FE value based on change (2010 value) or no change (1990 value), and then add in all other effects.
    if(!(fe %in% c('residual'))) {
      this_dt[, (paste0('p_with_change_',p)) := (get(paste0(fe,'_',end_year)) * coefs[name==fe, coef])]
      this_dt[, (paste0('p_without_change_',p)) := (get(paste0(fe,'_',start_year)) * coefs[name==fe, coef])]
    }
    if(fe=='residual') {
      this_dt[, (paste0('p_with_change_',p)) := get(paste0('residual_',end_year))]
      this_dt[, (paste0('p_without_change_',p)) := get(paste0('residual_',start_year))]
    }
    ## Add intercept and random effects. Algeria is the reference country (0).
    for(c in c('Algeria',gsub('as.factor\\(name\\)','',coefs[grep('as.factor',name), name]))) {
      if(c=='Algeria') this_dt[name==c, country_int := 0]
      if(c!='Algeria') this_dt[name==c, country_int := coefs[name==paste0('as.factor(name)',c), coef]]
    }
    this_dt[, (paste0('p_with_change_',p)) := get((paste0('p_with_change_',p))) + coefs[name=='(Intercept)', coef] + country_int]
    this_dt[, (paste0('p_without_change_',p)) := get((paste0('p_without_change_',p))) + coefs[name=='(Intercept)', coef] + country_int]
    ## This change (2010 prediction based just on FE of interest - 1990 prediction) is the same across all permutations...
    ## But then we are just adding a constant to both sides derived from this specific permutation of all other effects...?
    #message(this_dt[, get((paste0('p_with_change_',p)))][1])
    for(other_fe in other_fes) {
      this_dt[, (paste0('p_with_change_',p)) := get((paste0('p_with_change_',p))) + get(paste0('p_',other_fe))]
      this_dt[, (paste0('p_without_change_',p)) := get((paste0('p_without_change_',p))) + get(paste0('p_',other_fe))]
    }
    ## Generate difference in full prediction for this permutation attributable to change in this FE value over the period.
    ## The difference attributable to this effect in this permutation needs to be calculated in normal space. This is how we handle non-linearities.
    ## If we decompose life expectancy, here is where you would want to convert both scenarios in this permutation to normal space,
    ## calculate life expectancies, and return the difference.
    this_dt[, diff := exp(get(paste0('p_with_change_',p))) - exp(get(paste0('p_without_change_',p)))]
    this_dt[, p_with_change := get(paste0('p_with_change_',p))]
    this_dt[, p_without_change := get(paste0('p_without_change_',p))]
    this_dt <- this_dt[, c('name', 'p_with_change', 'p_without_change', 'diff')]
    this_dt[, p := p]
    return(this_dt)
  }
  message('Calculating all permutations...')
  ## NOTE(review): rbind.fill is from plyr -- confirm it is loaded upstream.
  all_diffs <- as.data.table(rbind.fill(lapply(1:dim(fe_permutations)[1], calculate_permutation, dt=d, coefs=coefs)))
  ## As this is a Shapley decomposition, here is where we "average over" potential path dependencies (i.e. all the different permutations).
  ## A more complex generalized decomposition could be used, such as g-computation (actually estimate all the path dependencies, decompose
  ## direct/indirect change attributable via bootstrap and stochastic simulation through periods.
  all_diffs <- all_diffs[, list(cont=mean(diff)), by=c('name')]
  all_diffs[, fe := fe]
  all_diffs[, year := end_year]
  return(all_diffs)
}
## Reshape data and make all changes
# Names of the effects to decompose; these must match column names in
# model_data_global (they are used as value.var in dcast below) and the
# coefficient names in global_coefs, plus the model residual.
fes <- c('residual','log_ldi_pc','lag5_r_size_15_24','ldi_pc_gap','r_ldi_pc','gbd_mx_shocks','epr_15_24')
#' Build all 2^length(fes) start/end-year assignments for a Shapley decomposition.
#'
#' Each fixed effect can independently take either its start_year or its
#' end_year value; the result is the full cartesian product of those choices.
#'
#' @param fes Character vector of fixed-effect names.
#' @param start_year First year of the decomposition period.
#' @param end_year Last year of the decomposition period.
#' @return A data.table with one column per element of `fes` and one row per
#'   permutation; every cell is either `start_year` or `end_year`.
make_permutations <- function(fes, start_year, end_year) {
  # Preallocate and use seq_along(): the original `1:length(fes)` would
  # iterate over c(1, 0) if `fes` were ever empty.
  permutations <- vector("list", length(fes))
  for (fe in seq_along(fes)) permutations[[fe]] <- c(start_year, end_year)
  permutations <- as.data.table(expand.grid(permutations))
  setnames(permutations, names(permutations), fes)
  return(permutations)
}
## Run the Shapley decomposition for 2000-2005, then compare the summed
## decomposed change per country against the observed change in out_rate.
permutations <- make_permutations(fes=fes, start_year=2000, end_year=2005)
#permutations <- permutations[country==1 & intercept==1, ]
# Attach model residuals by row index; residual names are assumed to be row
# indices into model_data_global -- TODO confirm this mapping holds after any
# row filtering upstream.
for(i in names(mod_global$residuals)) model_data_global[as.numeric(i), residual := mod_global$residuals[i]]
# Wide reshape: one row per country, one column per covariate-year combination
# (e.g. out_rate_2000, out_rate_2005, log_ldi_pc_2000, ...).
decomp_data <- dcast(model_data_global, name ~ year, value.var=c('out_rate',fes))
# One contribution table per fixed effect, stacked.
all_contributions <- rbindlist(lapply(fes, calculate_contribution,
                                      fes=fes,
                                      start_year=2000,
                                      end_year=2005,
                                      coefs=global_coefs,
                                      d=decomp_data))
# Total decomposed change per country = sum of all per-covariate contributions.
test <- all_contributions[, list(decomp_change=sum(cont)), by=c('name','year')]
decomp_data[, obs_change := out_rate_2005 - out_rate_2000]
decomp_data <- merge(decomp_data, test, by=c('name'))
# Keep only countries with a country fixed effect (plus reference Algeria).
# NOTE(review): this uses bare `coefs`, but the decomposition above was run
# with `global_coefs` -- confirm `coefs` is defined at top level here.
decomp_data <- decomp_data[name %in% c('Algeria',gsub('as.factor\\(name\\)','',coefs[grep('as.factor',name), name]))]
# Sanity scatter: points on the 45-degree line mean the decomposition fully
# accounts for the observed change.
# NOTE(review): relies on auto-printing of the ggplot object; wrap in print()
# if this script is ever run via source().
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Shapley_change_2000_2005_decomp_comparison.pdf'), width = 12, height = 8)
ggplot() +
  geom_point(data=decomp_data,
             aes(x=decomp_change,
                 y=obs_change)) +
  geom_abline(slope=1, intercept=0) +
  theme_minimal()
dev.off()
## Figure: stacked per-covariate contributions to the 2000-2005 change in
## out-migration, by country-year and then aggregated to GBD region
## (population-weighted). Observed change is overlaid as points.
plot_data <- copy(all_contributions)
plot_data <- merge(plot_data, unique(model_data_global[, c('name','gbd_super_region')]), by='name')
plot_data <- merge(plot_data, decomp_data[, c('name','obs_change')])
plot_data[, country_year := paste0(name,'_',year)]
plot_data_obs <- unique(plot_data[, c('country_year','obs_change')])
# Order the x-axis by observed change; the same factor levels are applied to
# both the bar data and the point data so they line up.
plot_data_obs[, country_year_f := factor(country_year, levels=plot_data_obs$country_year[order(plot_data_obs[, obs_change])])]
plot_data[, country_year_f := factor(country_year, levels=plot_data_obs$country_year[order(plot_data_obs[, obs_change])])]
#plot_data_obs[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Shapley_change_2000_2005_test_Figure_3_', Sys.Date(), '.pdf'), width = 20, height = 12)
# NOTE(review): fill is the covariate `fe`, but the legend is titled
# 'GBD Super Region' -- the legend title looks copy-pasted from another plot.
# brewer.pal() requires RColorBrewer, which is not loaded in this excerpt;
# confirm it is attached upstream.
ggplot() +
  geom_bar(data=plot_data[!is.na(obs_change),],
           aes(x=country_year_f,
               y=cont,
               fill=fe),
           color='black',
           stat='identity',
           width=0.7) +
  geom_point(data=plot_data_obs[!is.na(obs_change),],
             aes(x=country_year_f,
                 y=obs_change),
             size=3) +
  # geom_errorbar(data=plot_data[cont_interaction_lower >= 0 & cont_interaction_mean >= 0.75,],
  #               aes(x=country_year_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
  theme_minimal() +
  scale_fill_manual(name='GBD Super Region', values=brewer.pal(7,'Spectral')) +
  labs(y='Total change in out-migration/thousand', x='', title='Total change in out-migration/thousand attributable to each covariate, 2000-2005.') +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
# Aggregate contributions to GBD region, weighting countries by 2005
# population.
plot_data_regions <- merge(plot_data, model_data_global[year==2005, c('name','total_pop','gbd_region')])
plot_data_regions <- plot_data_regions[, list(cont=weighted.mean(cont, total_pop, na.rm=TRUE)), by=c('gbd_region','fe')]
plot_data_regions_obs <- merge(plot_data, model_data_global[year==2005, c('name','total_pop','gbd_region')])
plot_data_regions_obs <- plot_data_regions_obs[, list(obs_change=weighted.mean(obs_change, total_pop, na.rm=TRUE)), by=c('gbd_region')]
# Order regions by observed change, mirroring the country-level plot.
plot_data_regions_obs[, region_f := factor(gbd_region, levels=plot_data_regions_obs$gbd_region[order(plot_data_regions_obs[, obs_change])])]
plot_data_regions[, region_f := factor(gbd_region, levels=plot_data_regions_obs$gbd_region[order(plot_data_regions_obs[, obs_change])])]
ggplot() +
  geom_bar(data=plot_data_regions,
           aes(x=region_f,
               y=cont,
               fill=fe),
           color='black',
           stat='identity',
           width=0.7) +
  geom_point(data=plot_data_regions_obs[!is.na(obs_change),],
             aes(x=region_f,
                 y=obs_change),
             size=3) +
  # geom_errorbar(data=plot_data[cont_interaction_lower >= 0 & cont_interaction_mean >= 0.75,],
  #               aes(x=country_year_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
  theme_minimal() +
  scale_fill_manual(name='GBD Super Region', values=brewer.pal(7,'Set1')) +
  labs(y='Total change in out-migration/thousand', x='', title='Total change in out-migration/thousand attributable to each covariate, 2000-2005.') +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
dev.off()
## Just do exp(growth + stuff) - exp(stuff)
## Alternative (non-Shapley) contribution: difference between the full
## prediction in normal space and the prediction with the growth interaction
## removed, with uncertainty from simulated coefficient draws.
# Country fixed effects; Algeria is the reference category (effect 0).
for(c in c('Algeria',gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]))) {
  if(c=='Algeria') model_data_global[name==c, country_int := 0]
  if(c!='Algeria') model_data_global[name==c, country_int := global_coefs[name==paste0('as.factor(name)',c), coef]]
}
model_data_global[, intercept := global_coefs[name=='(Intercept)', coef]]
for(i in names(mod_global$residuals)) model_data_global[as.numeric(i), residual := mod_global$residuals[i]]
# NOTE(review): the assignment inside this `for` silently REASSIGNS the
# top-level `fes` (dropping 'residual' and 'lag5_r_size_15_24'); any later
# code that reads `fes` sees this shorter vector.
for(v in fes <- c('log_ldi_pc','ldi_pc_gap','r_ldi_pc','gbd_mx_shocks','epr_15_24')) {
  model_data_global[, (paste0('cont_',v)) := get(v) * global_coefs[name==v, coef]]
}
# `iv` and `int_iv` are not defined in this excerpt; from the draw columns
# below they are presumably 'lag5_r_size_15_24' and 'log_ldi_pc' -- confirm.
model_data_global[, cont_interaction := get(iv) * (global_coefs[name==iv, coef] + (global_coefs[name==paste0(iv,':',int_iv), coef] * log_ldi_pc))]
# NOTE(review): this loop recomputes several cont_* columns already set above
# (plus polity2/urbanicity) -- the duplication looks unintentional.
for(v in c(int_iv,paste0('',c('r_ldi_pc','ldi_pc_gap','gbd_mx_shocks','polity2','urbanicity','epr_15_24','log_ldi_pc')))) {
  model_data_global[, (paste0('cont_',v)) := get(v) * global_coefs[name==v, coef]]
}
for(c in c('Algeria',gsub('as.factor\\(name\\)','',global_coefs[grep('as.factor',name), name]))) {
  if(c!='Algeria') {
    model_data_global[name==c, cont_country := global_coefs[name==paste0('as.factor(name)', c), coef]]
  }
  if(c=='Algeria') model_data_global[name==c, cont_country := 0]
}
model_data_global[, cont_intercept := global_coefs[name=='(Intercept)', coef]]
for(i in names(mod_global$residuals)) model_data_global[as.numeric(i), residual := mod_global$residuals[i]]
plot_data <- copy(model_data_global)
# Simulate coefficient draws for the interaction terms (arm::sim).
library(arm)
sims <- sim(mod_global, 1000)
sims <- as.data.table(sims@coef)
sims[, draw := 1:1000]
sims[, index := 1]
draws <- dcast(sims, index ~ draw, value.var=c('lag5_r_size_15_24','lag5_r_size_15_24:log_ldi_pc'))
# NOTE(review): 1000 draws are simulated but only the first 500 are used here
# (the relative-figure section below uses 1:1000) -- confirm intentional.
for(draw in 1:500) {
  plot_data[, (paste0('cont_interaction_',draw)) := get(iv) * (draws[, get(paste0(iv,'_',draw))] + (draws[, get(paste0(iv,':',int_iv,'_',draw))] * log_ldi_pc))]
  # Contribution in normal space: exp(full linear predictor) minus
  # exp(linear predictor without the growth interaction term).
  plot_data[, (paste0('total_growth_contribution_',draw)) := exp(cont_country + cont_intercept + residual + get(paste0('cont_interaction_',draw)) + cont_log_ldi_pc +
                                                                   cont_ldi_pc_gap + cont_r_ldi_pc + cont_gbd_mx_shocks + cont_epr_15_24) -
              exp(cont_country + cont_intercept + residual + cont_log_ldi_pc +
                    cont_ldi_pc_gap + cont_r_ldi_pc + cont_gbd_mx_shocks + cont_epr_15_24)]
  # Convert rate (per thousand) to absolute number of migrants.
  plot_data[, (paste0('abs_total_growth_contribution_',draw)) := (get(paste0('total_growth_contribution_',draw))/1000) * total_pop]
}
# Summaries across draws.
# NOTE(review): the grep pattern "^total_growth_contribution_" also matches
# the just-created _mean column when computing _upper, and _mean/_upper when
# computing _lower, slightly biasing the quantiles; consider a stricter
# pattern such as "^total_growth_contribution_[0-9]+$".
plot_data[, total_growth_contribution_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_pop_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_pop_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_pop_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data))]
plot_data[, total_growth_contribution_pop := (total_growth_contribution_mean/1000) * total_pop]
plot_data[, country_year := paste0(name,'_',year)]
plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, total_growth_contribution_mean])])]
plot_data[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
# Sanity scatter: population-scaled mean vs mean of population-scaled draws
# (should lie on the 45-degree line up to Jensen-type differences).
ggplot() +
  geom_point(data=plot_data,
             aes(x=total_growth_contribution_pop,
                 y=total_growth_contribution_pop_mean)) + geom_abline(slope = 1, intercept = 0) + theme_minimal()
## Per-year figures: growth-attributable out-migration by country, as a rate
## (this_gg) and as an absolute number of migrants (this_gg2). Only countries
## whose lower uncertainty bound is positive are drawn.
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/by_country_year_absolute_Figure_3_', Sys.Date(), '.pdf'), width = 12, height = 8)
for(y in c(1990,1995,2000,2005)) {
  plot_data2 <- copy(plot_data[year==y, ])
  #plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction])])]
  plot_data2[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
  #plot_data2[, total_growth_contribution_pop := (total_growth_contribution/1000) * total_pop]
  plot_data2[, country_f := factor(name, levels=plot_data2$name[order(plot_data2[, total_growth_contribution_pop])])]
  plot_data3 <- copy(plot_data2)
  # NOTE(review): bare `total_growth_contribution` (no suffix) is not created
  # in this excerpt (only per-draw and _mean/_upper/_lower columns) -- confirm
  # it exists upstream, otherwise these references will error.
  plot_data3[, country_f := factor(name, levels=plot_data3$name[order(plot_data3[, total_growth_contribution])])]
  # plot_data3 <- plot_data3[total_growth_contribution >= 0, ]
  # plot_data3[, plot_diff := out_rate - total_growth_contribution]
  # plot_data3 <- melt(plot_data3, id.vars = c('country_f','region_f','out_rate'), measure.vars = c('plot_diff','total_growth_contribution'), variable.name = 'bar_vals', value.name = 'est_out')
  this_gg <- ggplot() +
    geom_bar(data=plot_data3[!is.na(total_growth_contribution) & total_growth_contribution_lower >= 0],
             aes(x=country_f,
                 y=total_growth_contribution,
                 fill=region_f),
             color='black',
             stat='identity') +
    # geom_point(data=plot_data3[!is.na(est_out) & est_out >= 0],
    #            aes(x=country_f,
    #                y=out_rate),
    #            size=3) +
    geom_errorbar(data=plot_data3[total_growth_contribution_lower >= 0,],
                  aes(x=country_f, ymin=total_growth_contribution_lower, ymax=total_growth_contribution_upper), width=.4) +
    theme_minimal() +
    scale_alpha_discrete(guide=FALSE) +
    scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
    labs(y='Total out-migration/thousand attributable to growth', x='', title=paste0('Total out-migration/thousand attributable to growth for all countries where this contribution is positive, ', y, '.')) +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))
  print(this_gg)
  # plot_data2 <- plot_data2[total_growth_contribution_pop >= 0, ]
  # plot_data2[, plot_diff := (out_rate/1000)*total_pop - total_growth_contribution_pop]
  # plot_data2 <- melt(plot_data2, id.vars = c('country_f','region_f','out_rate'), measure.vars = c('plot_diff','total_growth_contribution_pop'), variable.name = 'bar_vals', value.name = 'est_out')
  this_gg2 <- ggplot() +
    geom_bar(data=plot_data2[!is.na(total_growth_contribution_pop) & total_growth_contribution_pop_lower >= 0],
             aes(x=country_f,
                 y=total_growth_contribution_pop,
                 fill=region_f),
             color='black',
             stat='identity') +
    # geom_point(data=plot_data2[name != 'Pakistan' & !is.na(total_growth_contribution) & total_growth_contribution >= 0],
    #            aes(x=country_f,
    #                y=(out_rate/1000)*total_pop),
    #            size=3) +
    # NOTE(review): the errorbar data are filtered on the RATE-scale lower
    # bound but plot POPULATION-scale bounds; the bar filter above uses the
    # pop-scale lower bound -- confirm which filter is intended.
    geom_errorbar(data=plot_data2[total_growth_contribution_lower >= 0,],
                  aes(x=country_f, ymin=total_growth_contribution_pop_lower, ymax=total_growth_contribution_pop_upper), width=.4) +
    theme_minimal() +
    scale_alpha_discrete(guide=FALSE) +
    scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
    labs(y='Total out-migrants attributable to growth', x='', title=paste0('Total number of out-migrants attributable to growth for all countries where this contribution is positive, ', y,'.')) +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))
  print(this_gg2)
}
dev.off()
## Aggregated absolute figures over the full period: growth-attributable
## out-migrants summed over 1990-2005 by country, GBD region, and LDI/pc
## decile group. Draws are summed first, then quantiles are taken over the
## summed draws, so uncertainty propagates correctly through the sums.
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/1990_2010_aggregated_absolute_Figure_3_', Sys.Date(), '.pdf'), width = 12, height = 8)
target_period <- c(1990,1995,2000,2005)
year_title <- '1990-2010'
plot_drops <- ''
# Label each row with an LDI/pc decile cutoff. Iterating q from 1.0 down to
# 0.1 and overwriting means each row ends up labeled with the SMALLEST decile
# cutoff that is >= its log_ldi_pc value.
# NOTE(review): `p=` partially matches quantile()'s `probs` argument; spell
# it out for clarity.
for(q in rev(seq(0.1,1,0.1))) {
  log_ldi_pc_q <- quantile(plot_data[, log_ldi_pc], p=q, na.rm=TRUE)
  message(round(log_ldi_pc_q, 3))
  plot_data[log_ldi_pc <= log_ldi_pc_q, log_ldi_pc_group := as.character(round(log_ldi_pc_q,2))]
}
# --- Country-level sums across the period ---
plot_data2 <- copy(plot_data[year %in% target_period & !(name %in% plot_drops), ])
plot_data2 <- plot_data2[, lapply(.SD, sum, na.rm=TRUE), by=c('name','gbd_region'), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2)) ]
plot_data2[, total_growth_contribution_pop_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, region_f := factor(gbd_region)]
plot_data2[, country_f := factor(name, levels=plot_data2$name[order(plot_data2[, total_growth_contribution_pop_mean])])]
this_gg2 <- ggplot() +
  geom_bar(data=plot_data2[!is.na(total_growth_contribution_pop_mean) & total_growth_contribution_pop_lower >= 0],
           aes(x=country_f,
               y=total_growth_contribution_pop_mean,
               fill=region_f),
           color='black',
           stat='identity') +
  # geom_point(data=plot_data2[name != 'Pakistan' & !is.na(total_growth_contribution) & total_growth_contribution >= 0],
  #            aes(x=country_f,
  #                y=(out_rate/1000)*total_pop),
  #            size=3) +
  geom_errorbar(data=plot_data2[total_growth_contribution_pop_lower >= 0,],
                aes(x=country_f, ymin=total_growth_contribution_pop_lower, ymax=total_growth_contribution_pop_upper), width=.4) +
  theme_minimal() +
  scale_alpha_discrete(guide=FALSE) +
  scale_fill_manual(name='GBD Super Region', values=brewer.pal(12,'Set3')) +
  labs(y='Total out-migrants attributable to growth', x='', title=paste0('Total number of out-migrants attributable to growth for all countries where this contribution is positive, ', year_title,'.')) +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
print(this_gg2)
# --- Region-level sums across the period ---
plot_data2 <- copy(plot_data[year %in% target_period & !(name %in% plot_drops), ])
plot_data2 <- plot_data2[, lapply(.SD, sum, na.rm=TRUE), by=c('gbd_region'), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2)) ]
plot_data2[, total_growth_contribution_pop_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, region_f := factor(gbd_region, levels=plot_data2$gbd_region[order(plot_data2[, total_growth_contribution_pop_mean])])]
this_gg2 <- ggplot() +
  geom_bar(data=plot_data2[!is.na(total_growth_contribution_pop_mean)],
           aes(x=region_f,
               y=total_growth_contribution_pop_mean,
               fill=region_f),
           color='black',
           stat='identity') +
  # geom_point(data=plot_data2[name != 'Pakistan' & !is.na(total_growth_contribution) & total_growth_contribution >= 0],
  #            aes(x=country_f,
  #                y=(out_rate/1000)*total_pop),
  #            size=3) +
  geom_errorbar(data=plot_data2,
                aes(x=region_f, ymin=total_growth_contribution_pop_lower, ymax=total_growth_contribution_pop_upper), width=.4) +
  theme_minimal() +
  scale_alpha_discrete(guide=FALSE) +
  scale_fill_manual(name='GBD Super Region', values=brewer.pal(12,'Set3')) +
  labs(y='Total out-migrants attributable to growth', x='', title=paste0('Total number of out-migrants attributable to growth for all GBD regions, ', year_title,'.')) +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
print(this_gg2)
## By LDI quantile
plot_data2 <- copy(plot_data[!is.na(log_ldi_pc_group) & year %in% target_period & !(name %in% plot_drops), ])
plot_data2 <- plot_data2[, lapply(.SD, sum, na.rm=TRUE), by=c('log_ldi_pc_group'), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2)) ]
plot_data2[, total_growth_contribution_pop_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, region_f := factor(log_ldi_pc_group, levels=plot_data2$log_ldi_pc_group[order(plot_data2[, as.numeric(log_ldi_pc_group)])])]
this_gg2 <- ggplot() +
  geom_bar(data=plot_data2[!is.na(total_growth_contribution_pop_mean)],
           aes(x=region_f,
               y=total_growth_contribution_pop_mean,
               fill=region_f),
           color='black',
           stat='identity') +
  # geom_point(data=plot_data2[name != 'Pakistan' & !is.na(total_growth_contribution) & total_growth_contribution >= 0],
  #            aes(x=country_f,
  #                y=(out_rate/1000)*total_pop),
  #            size=3) +
  geom_errorbar(data=plot_data2,
                aes(x=region_f, ymin=total_growth_contribution_pop_lower, ymax=total_growth_contribution_pop_upper), width=.4) +
  theme_minimal() +
  scale_alpha_discrete(guide=FALSE) +
  scale_fill_manual(name='LDI/pc groups', values=brewer.pal(12,'Set3')) +
  labs(y='Total out-migrants attributable to growth', x='', title=paste0('Total number of out-migrants attributable to growth for all LDI/pc groups, ', year_title,'.')) +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
print(this_gg2)
dev.off()
## Make absolute table
## Region-level sums of absolute growth-attributable migration (same
## computation as the region figure above, kept for a results table).
plot_data2 <- copy(plot_data[year %in% target_period & !(name %in% plot_drops), ])
plot_data2 <- plot_data2[, lapply(.SD, sum, na.rm=TRUE), by=c('gbd_region'), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2)) ]
plot_data2[, total_growth_contribution_pop_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, total_growth_contribution_pop_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^abs_total_growth_contribution_", names(plot_data2))]
plot_data2[, region_f := factor(gbd_region, levels=plot_data2$gbd_region[order(plot_data2[, total_growth_contribution_pop_mean])])]
# NOTE(review): wb_ids is loaded but not used anywhere in this excerpt.
wb_ids <- fread("C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/wb_gbd_ids.csv")
## Relative figure: percent increase in out-migration attributable to the
## growth interaction, per coefficient draw, transformed to percent via
## (exp(x)-1)*100. Note all 1000 draws are used here (vs 500 above).
plot_data <- copy(model_data_global)
for(draw in 1:1000) {
  plot_data[, (paste0('cont_interaction_',draw)) := (exp(get(iv) * (draws[, get(paste0(iv,'_',draw))] + (draws[, get(paste0(iv,':',int_iv,'_',draw))] * log_ldi_pc)))-1)*100]
}
# NOTE(review): same grep self-inclusion issue as above -- the pattern
# "^cont_interaction_" matches the _mean column when computing _upper/_lower.
plot_data[, cont_interaction_mean := apply(.SD, 1, mean, na.rm=T), .SDcols=grep("^cont_interaction_", names(plot_data))]
plot_data[, cont_interaction_upper := apply(.SD, 1, quantile, c(.975), na.rm=T), .SDcols=grep("^cont_interaction_", names(plot_data))]
plot_data[, cont_interaction_lower := apply(.SD, 1, quantile, c(.025), na.rm=T), .SDcols=grep("^cont_interaction_", names(plot_data))]
plot_data[, country_year := paste0(name,'_',year)]
plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction])])]
plot_data[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
library(dplyr)
library(forcats)
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/v2_relative_Figure_3_', Sys.Date(), '.pdf'), width = 12, height = 8)
for(y in c(1990,1995,2000,2005)) {
  plot_data2 <- copy(plot_data[year==y, ])
  plot_data2[, country_f := factor(name, levels=plot_data2$name[order(plot_data2[, cont_interaction])])]
  #plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction])])]
  plot_data2[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
  # Bars: percent increase from the point-estimate cont_interaction (log
  # scale, transformed here); error bars: draw-based percent quantiles.
  this_gg <- ggplot() +
    geom_bar(data=plot_data2[!is.na(cont_interaction_mean) & cont_interaction_lower >= 0,],
             aes(x=country_f,
                 y=(exp(cont_interaction)-1)*100,
                 fill=region_f),
             color='black',
             stat='identity') +
    geom_errorbar(data=plot_data2[!is.na(cont_interaction) & cont_interaction_lower >= 0,],
                  aes(x=country_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
    theme_minimal() +
    scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
    labs(y='Percent increase in out-migration/thousand attributable to growth', x='', title=paste0('Percent increase in out-migration/thousand attributable to growth for all countries where this contribution is positive, ', y, '.')) +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))
  print(this_gg)
}
dev.off()
## Figure from the Shapley decomposition results for the growth covariate
## only, plus manual spot-check computations for Ethiopia 2005.
# NOTE(review): `all_conts` is not defined in this excerpt -- this is likely
# a typo for `all_contributions` created above; confirm.
plot_data <- copy(all_conts[fe=='lag5_r_size_15_24',])
plot_data <- merge(plot_data, unique(model_data_global[, c('name','gbd_super_region')]), by='name')
# No draws here: "mean" is simply an alias of the Shapley point contribution.
plot_data[, cont_interaction_mean := cont]
plot_data[, country_year := paste0(name,'_',year)]
plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction_mean])])]
plot_data[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/Shapley_test_Figure_3_', Sys.Date(), '.pdf'), width = 12, height = 8)
# ggplot() +
#   geom_bar(data=plot_data[cont >= 10 & cont < 100,],
#            aes(x=country_year_f,
#                y=cont,
#                fill=region_f),
#            color='black',
#            stat='identity') +
#   # geom_errorbar(data=plot_data[cont_interaction_lower >= 0 & cont_interaction_mean >= 0.75,],
#   #               aes(x=country_year_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
#   theme_minimal() +
#   scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
#   labs(y='Total out-migration/thousand attributable to growth', x='', title='Total out-migration/thousand attributable to growth for all countries where this contribution is positive, 2005.') +
#   theme(axis.text.x = element_text(angle = 60, hjust = 1))
plot_data2 <- copy(plot_data[year==2005, ])
plot_data2[, country_f := factor(name, levels=plot_data2$name[order(plot_data2[, cont_interaction_mean])])]
#plot_data[, country_year_f := factor(country_year, levels=plot_data$country_year[order(plot_data[, cont_interaction])])]
plot_data2[, region_f := factor(gbd_super_region, levels = c('North Africa and Middle East','Sub-Saharan Africa','Asia','Latin America and Caribbean'))]
# 2005 contributions in [0, 100); the upper cap presumably drops outliers.
ggplot() +
  geom_bar(data=plot_data2[cont >= 0 & cont < 100,],
           aes(x=country_f,
               y=cont,
               fill=region_f),
           color='black',
           stat='identity') +
  # geom_errorbar(data=plot_data2[cont_interaction_lower >= 0,],
  #               aes(x=country_f, ymin=cont_interaction_lower, ymax=cont_interaction_upper), width=.4) +
  theme_minimal() +
  scale_fill_manual(name='GBD Super Region', values=c('#253494','#2ca25f','#bdbdbd','#de2d26', '#ff7f00', '#ffff33')) +
  labs(y='Total out-migration/thousand attributable to growth', x='', title='Total out-migration/thousand attributable to growth for all countries where this contribution is positive, 2005.') +
  theme(axis.text.x = element_text(angle = 60, hjust = 1))
dev.off()
# Manual spot checks for Ethiopia 2005 (printed to console): the component
# columns, the prediction without the interaction, with the interaction, and
# the effective interaction coefficient at Ethiopia's log_ldi_pc.
model_data_global[name=='Ethiopia' & year==2005, c('country_int','intercept','residual','cont_interaction',
                                                   'cont_log_ldi_pc','cont_ldi_pc_gap','cont_r_ldi_pc','cont_gbd_mx_shocks',
                                                   'cont_epr_15_24')]
model_data_global[name=='Ethiopia' & year==2005, exp(country_int + intercept + residual + 0 + cont_log_ldi_pc +
                                                       cont_ldi_pc_gap + cont_r_ldi_pc + cont_gbd_mx_shocks + cont_epr_15_24)]
model_data_global[name=='Ethiopia' & year==2005, exp(country_int + intercept + residual + cont_interaction + cont_log_ldi_pc +
                                                       cont_ldi_pc_gap + cont_r_ldi_pc + cont_gbd_mx_shocks + cont_epr_15_24)]
model_data_global[name=='Ethiopia' & year==2005, (global_coefs[name==iv, coef] + (global_coefs[name==paste0(iv,':',int_iv), coef] * log_ldi_pc))]
|
a598423237ca934af14a428b9266a93195b19028 | cb297ed62881e7ec3b2fd2734ef830fa1f2018f4 | /R/krw_mafa_helpers.R | 6c97b20aacdb2714d50f5049ceeb51c44452c794 | [
"MIT"
] | permissive | RedTent/krw | 0c72d57e360c211072ec5bc7a6bcc34183a312b9 | ec190548d5d5e74f446cd59003f2ba6aaa90fddb | refs/heads/master | 2021-12-27T14:29:22.274431 | 2021-11-12T14:03:07 | 2021-11-12T14:03:07 | 195,770,080 | 0 | 0 | NOASSERTION | 2019-08-12T17:33:50 | 2019-07-08T08:33:08 | R | UTF-8 | R | false | false | 1,598 | r | krw_mafa_helpers.R | # omrekenfunctie van aantallen naar abundanties
#' Convert counts to abundance classes.
#'
#' Computes floor(log(x) + 1.5) (natural log) and caps the result at 9.
#' This reimplements the original magrittr pipeline in base R: the original
#' piped into the unary expression `+1.5`, which only works through
#' magrittr's argument insertion and breaks under the native |> pipe.
#'
#' @param x Numeric vector of counts. Values > 0 expected: log(0) yields
#'   -Inf and negative input yields NaN, both propagated unchanged.
#' @return Numeric vector of abundance classes, capped at 9.
n_to_abund <- function(x) {
  abund <- floor(log(x) + 1.5)
  # ifelse() reproduces the original's NA/NaN propagation exactly.
  ifelse(abund > 9, 9, abund)
}
#' EPT correction factor for KRW water types R7 and R16.
#'
#' For any other water type the factor is 1. For R7/R16 the factor starts at
#' 0.6 and increases by 0.1 for each family from `krw::ept_families` present
#' among the taxa, capped at 1.
#'
#' @param taxa Character vector of taxon names.
#' @param krwwatertype.code Single KRW water-type code (e.g. "R7").
#' @return A single numeric factor in [0.6, 1] for R7/R16, otherwise 1.
ept_factor <- function(taxa, krwwatertype.code) {
  if (!krwwatertype.code %in% c("R7", "R16")) return(1)
  # Roll taxa up to family level before counting EPT families.
  families <- unique(twn::increase_taxonlevel(taxa, "Familia"))
  aantal_ept_families <- sum(families %in% krw::ept_families)
  # Return the value directly: the original assigned it to a local named
  # after the function, which made the return value invisible.
  min(1, 0.6 + aantal_ept_families * 0.1)
}
# pt: number of positive taxa
# dn_perc: 100 * abundance of negative taxa divided by total abundance
# pt_perc: 100 * positive taxa divided by total number of taxa
# km_perc: 100 * characteristic taxa divided by total number of taxa
#   (the original comment repeated "pos-taxa" here, presumably a copy-paste slip)
#' Compute an EKR (ecological quality ratio) score for one sample.
#'
#' Two scoring branches, selected by which maximum is available:
#' * Artificial waters (ptmax supplied and > 0; "M1 t/m M10" per the original
#'   comment): score from the positive-taxa fraction and the relative
#'   abundance of negative taxa.
#' * Natural waters (kmmax supplied and > 0): score from the
#'   characteristic-species percentage, negative-taxa abundance and
#'   positive/characteristic abundance, with negative taxa weighted twice as
#'   heavily for most flowing (R) water types, scaled by the EPT factor.
#' Returns NA_real_ when neither maximum is available. All inputs are
#' treated as scalars.
#'
#' @param pt Number of positive taxa.
#' @param ptmax Maximum positive taxa (artificial-water branch trigger).
#' @param dn_abund_perc Percent abundance of negative taxa.
#' @param dnmax Maximum negative-taxa abundance percentage.
#' @param pt_km_abund_perc Percent abundance of positive/characteristic taxa.
#' @param km_perc Percent characteristic taxa.
#' @param kmmax Maximum characteristic-taxa percentage (natural-water branch
#'   trigger).
#' @param krwwatertype.code KRW water-type code (e.g. "R5", "M14").
#' @param ept_factor EPT correction factor (see ept_factor()).
#' @param ... Ignored; allows calling with extra columns via do.call().
#' @return A single EKR score clamped at 0, or NA_real_.
ekr_formule <- function(pt, ptmax, dn_abund_perc, dnmax, pt_km_abund_perc, km_perc, kmmax, krwwatertype.code, ept_factor, ...) {
  # Artificial waters. `&&` (scalar, short-circuiting) replaces the
  # original's vectorized `&` inside a scalar `if`; behavior is unchanged.
  if (!is.na(ptmax) && ptmax > 0) {
    pt_frac <- pt / ptmax
    dn_frac <- dn_abund_perc / dnmax
    ekr <- (2 * pt_frac + 1 - dn_frac) / 3
    return(max(0, ekr))  # clamp negative scores to 0
  }
  # Natural waters.
  if (!is.na(kmmax) && kmmax > 0) {
    # Most flowing-water types weigh negative taxa twice as heavily.
    stromende_wateren_fact <- c("R4", "R5", "R6", "R7", "R12", "R13", "R14", "R15", "R16", "R17", "R18")
    # Scalar if/else replaces ifelse() on a scalar condition.
    R_fact <- if (krwwatertype.code %in% stromende_wateren_fact) 1 else 0
    km_frac <- min(km_perc / kmmax, 1)  # cap the characteristic fraction at 1
    ekr <- ept_factor * ((200 * km_frac) + (1 + R_fact) * (100 - dn_abund_perc) + pt_km_abund_perc) / (400 + R_fact * 100)
    return(max(0, ekr))
  }
  NA_real_
}
|
0dfc2e9153a4269c6a65b19bf089b646f4e06558 | 7cbc27673d06901a43b65ebf5100c16c78781524 | /extras/MultiAnalysesVignetteDataFetch.R | 27b66a6b141445e50ea974502ff09092599b27f5 | [
"Apache-2.0"
] | permissive | krfeeney89/CaseControl | 4deece643e3970ca0d4a04fb43e957a0e2505649 | a8312bc464c685a43ed2e43884dc377821d8e5bd | refs/heads/master | 2021-01-23T12:44:08.287611 | 2017-05-30T09:52:45 | 2017-05-30T09:52:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,577 | r | MultiAnalysesVignetteDataFetch.R | # @file MultiAnalysesVignetteDataFetch.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of CaseControl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code should be used to fetch the data that is used in the vignettes.
library(SqlRender)
library(DatabaseConnector)
library(CaseControl)
options(fftempdir = "s:/fftemp")
pw <- NULL
dbms <- "pdw"
user <- NULL
server <- "JRDUSAPSCTL01"
cdmDatabaseSchema <- "CDM_Truven_MDCD_V417.dbo"
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "mschuemi_sccs_vignette"
oracleTempSchema <- NULL
outputFolder <- "s:/temp/vignetteCaseControl2"
port <- 17001
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
server = server,
user = user,
password = pw,
port = port)
connection <- DatabaseConnector::connect(connectionDetails)
sql <- SqlRender::loadRenderTranslateSql("vignette.sql",
packageName = "CaseControl",
dbms = dbms,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable)
DatabaseConnector::executeSql(connection, sql)
# Check number of subjects per cohort:
sql <- "SELECT cohort_definition_id, COUNT(*) AS count FROM @cohortDatabaseSchema.@cohortTable GROUP BY cohort_definition_id"
sql <- SqlRender::renderSql(sql,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable)$sql
sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
DatabaseConnector::querySql(connection, sql)
# Concept IDs used as negative-control exposures (drugs presumed to have no
# causal relation to the outcome -- TODO confirm against the vignette's
# negative-control list).
negativeControls <- c(705178,
                      705944,
                      710650,
                      714785,
                      719174,
                      719311,
                      735340,
                      742185,
                      780369,
                      781182,
                      924724,
                      990760,
                      1110942,
                      1111706,
                      1136601,
                      1317967,
                      1501309,
                      1505346,
                      1551673,
                      1560278,
                      1584910,
                      19010309,
                      19044727,
                      40163731)
# Exposure of interest (diclofenac concept ID) and the cohort definition IDs
# created by vignette.sql above: 1 = GI bleed outcome, 2 = rheumatoid
# arthritis nesting cohort.
diclofenac <- 1124300
giBleed <- 1
rheumatoidArthritis <- 2
# Build one exposure-outcome-nesting triple per exposure (diclofenac plus all
# negative controls), all sharing the GI-bleed outcome nested in the
# rheumatoid arthritis cohort.
exposureOutcomeNcList <- list()
for (exposureId in c(diclofenac, negativeControls)) {
  exposureOutcomeNc <- createExposureOutcomeNestingCohort(exposureId = exposureId,
                                                          outcomeId = giBleed,
                                                          nestingCohortId = rheumatoidArthritis)
  exposureOutcomeNcList[[length(exposureOutcomeNcList) + 1]] <- exposureOutcomeNc
}
# ---- Analysis 1: matching on age and gender only ------------------------
# Case data pulled without nesting in an indication and without visit data.
getDbCaseDataArgs1 <- createGetDbCaseDataArgs(useNestingCohort = FALSE, getVisits = FALSE)
# Two controls per case, matched on age (caliper of 2 years) and gender.
selectControlsArgs1 <- createSelectControlsArgs(firstOutcomeOnly = FALSE,
                                                washoutPeriod = 180,
                                                controlsPerCase = 2,
                                                matchOnAge = TRUE,
                                                ageCaliper = 2,
                                                matchOnGender = TRUE,
                                                matchOnProvider = FALSE,
                                                matchOnVisitDate = FALSE)
getDbExposureDataArgs1 <- createGetDbExposureDataArgs()
# Risk window of a single day: exposure must overlap the index date.
createCaseControlDataArgs1 <- createCreateCaseControlDataArgs(firstExposureOnly = FALSE,
                                                              riskWindowStart = 0,
                                                              riskWindowEnd = 0)
fitCaseControlModelArgs1 <- createFitCaseControlModelArgs()
ccAnalysis1 <- createCcAnalysis(analysisId = 1,
                                description = "Matching on age and gender",
                                getDbCaseDataArgs = getDbCaseDataArgs1,
                                selectControlsArgs = selectControlsArgs1,
                                getDbExposureDataArgs = getDbExposureDataArgs1,
                                createCaseControlDataArgs = createCaseControlDataArgs1,
                                fitCaseControlModelArgs = fitCaseControlModelArgs1)
# ---- Analysis 2: as analysis 1, but nested in the indication cohort -----
# Also fetches visit data (needed for analysis 4 below).
getDbCaseDataArgs2 <- createGetDbCaseDataArgs(useNestingCohort = TRUE, getVisits = TRUE)
ccAnalysis2 <- createCcAnalysis(analysisId = 2,
                                description = "Matching on age and gender, nesting in indication",
                                getDbCaseDataArgs = getDbCaseDataArgs2,
                                selectControlsArgs = selectControlsArgs1,
                                getDbExposureDataArgs = getDbExposureDataArgs1,
                                createCaseControlDataArgs = createCaseControlDataArgs1,
                                fitCaseControlModelArgs = fitCaseControlModelArgs1)
# ---- Analysis 3: additionally adjust for comorbidity risk scores --------
covariateSettings <- createCovariateSettings(useCovariateRiskScores = TRUE,
                                             useCovariateRiskScoresCharlson = TRUE,
                                             useCovariateRiskScoresDCSI = TRUE,
                                             useCovariateRiskScoresCHADS2 = TRUE)
getDbExposureDataArgs2 <- createGetDbExposureDataArgs(covariateSettings = covariateSettings)
# Fit with covariates and no regularization prior.
fitCaseControlModelArgs2 <- createFitCaseControlModelArgs(useCovariates = TRUE,
                                                          prior = createPrior("none"))
ccAnalysis3 <- createCcAnalysis(analysisId = 3,
                                description = "Matching on age and gender, nesting in indication, using covars",
                                getDbCaseDataArgs = getDbCaseDataArgs2,
                                selectControlsArgs = selectControlsArgs1,
                                getDbExposureDataArgs = getDbExposureDataArgs2,
                                createCaseControlDataArgs = createCaseControlDataArgs1,
                                fitCaseControlModelArgs = fitCaseControlModelArgs2)
# ---- Analysis 4: additionally match on visit date (caliper of 30 days) --
selectControlsArgs2 <- createSelectControlsArgs(firstOutcomeOnly = FALSE,
                                                washoutPeriod = 180,
                                                controlsPerCase = 2,
                                                matchOnAge = TRUE,
                                                ageCaliper = 2,
                                                matchOnGender = TRUE,
                                                matchOnProvider = FALSE,
                                                matchOnVisitDate = TRUE,
                                                visitDateCaliper = 30)
ccAnalysis4 <- createCcAnalysis(analysisId = 4,
                                description = "Matching on age, gender and visit, nesting in indication, using covars",
                                getDbCaseDataArgs = getDbCaseDataArgs2,
                                selectControlsArgs = selectControlsArgs2,
                                getDbExposureDataArgs = getDbExposureDataArgs2,
                                createCaseControlDataArgs = createCaseControlDataArgs1,
                                fitCaseControlModelArgs = fitCaseControlModelArgs2)
# All four analysis definitions, executed together by runCcAnalyses below.
ccAnalysisList <- list(ccAnalysis1, ccAnalysis2, ccAnalysis3, ccAnalysis4)
# ---- Persist the analysis specifications --------------------------------
saveExposureOutcomeNestingCohortList(exposureOutcomeNcList,
                                     "s:/temp/vignetteCaseControl2/exposureOutcomeNestingCohortList.txt")
saveCcAnalysisList(ccAnalysisList, "s:/temp/vignetteCaseControl2/ccAnalysisList.txt")
# exposureOutcomeNcList <-
# loadExposureOutcomeNestingCohortList('s:/temp/vignetteCaseControl2/exposureOutcomeNestingCohortList.txt')
# ccAnalysisList <- loadCcAnalysisList('s:/temp/vignetteCaseControl2/ccAnalysisList.txt')
# NOTE(review): the twelve assignments below are never used -- runCcAnalyses()
# is called with literal values further down.  Presumably left over from an
# earlier version of the script; confirm before removing.
outcomeDatabaseSchema <- cohortDatabaseSchema
outcomeTable <- cohortTable
nestingCohortDatabaseSchema <- cohortDatabaseSchema
nestingCohortTable <- cohortTable
exposureDatabaseSchema <- cdmDatabaseSchema
exposureTable <- "drug_era"
getDbCaseDataThreads <- 1
selectControlsThreads <- 1
getDbExposureDataThreads <- 1
createCaseControlDataThreads <- 1
fitCaseControlModelThreads <- 1
exposureOutcomeNestingCohortList <- exposureOutcomeNcList
# ---- Run all analyses for every exposure-outcome pair -------------------
result <- runCcAnalyses(connectionDetails = connectionDetails,
                        cdmDatabaseSchema = cdmDatabaseSchema,
                        oracleTempSchema = cdmDatabaseSchema,
                        exposureDatabaseSchema = cdmDatabaseSchema,
                        exposureTable = "drug_era",
                        outcomeDatabaseSchema = cohortDatabaseSchema,
                        outcomeTable = cohortTable,
                        nestingCohortDatabaseSchema = cohortDatabaseSchema,
                        nestingCohortTable = cohortTable,
                        outputFolder = outputFolder,
                        exposureOutcomeNestingCohortList = exposureOutcomeNcList,
                        ccAnalysisList = ccAnalysisList,
                        getDbCaseDataThreads = 1,
                        selectControlsThreads = 4,
                        getDbExposureDataThreads = 3,
                        createCaseControlDataThreads = 4,
                        fitCaseControlModelThreads = 4,
                        cvThreads = 10)
# result <- readRDS('s:/temp/sccsVignette2/outcomeModelReference.rds')
# NOTE(review): the summary is saved under s:/temp/sccsVignette2 while the
# specifications above went to s:/temp/vignetteCaseControl2 -- looks like a
# copy-paste from the SCCS vignette; confirm the intended output folder.
analysisSum <- summarizeCcAnalyses(result)
saveRDS(analysisSum, "s:/temp/sccsVignette2/analysisSummary.rds")
# Inspect the first fitted model.
x <- readRDS(result$modelFile[1])
summary(x)
max(x$exposed)
|
cf63d221167aedd113e7d5534226f5b179a90e47 | 7679f049a406b760ce93e31068a784c33276941b | /R/Extract.mcmc.list.dc.R | 0a7b6a37a46c8bfdbaaedb2b3ecfa70d5b334545 | [] | no_license | datacloning/dclone | e5ec61ea3d3cc484bd4df1a8e3efa0cae882fc57 | b17d9a663e92ff935ce6b130ce6c1c30ed6e2909 | refs/heads/master | 2023-07-10T10:09:29.324647 | 2023-07-03T17:52:36 | 2023-07-03T17:52:36 | 25,499,525 | 6 | 2 | null | 2018-02-27T06:27:27 | 2014-10-21T03:14:14 | R | UTF-8 | R | false | false | 312 | r | Extract.mcmc.list.dc.R | "[.mcmc.list.dc" <-
function (x, i, j, drop = TRUE)
{
    ## Subset method for "mcmc.list.dc" objects (data-cloned MCMC output).
    ## Delegates the actual subsetting to the standard "mcmc.list" method,
    ## then restores the attributes/class that the parent method drops.
    class(x) <- "mcmc.list"
    #out <- coda:::"[.mcmc.list"(x=x, i=i, j=j, drop=drop)
    out <- as(x, "mcmc.list")[i=i, j=j, drop=drop]
    ## carry over the number-of-clones attribute from the input
    attr(out, "n.clones") <- attr(x, "n.clones")
    ## reinstate the subclass so later method dispatch stays on mcmc.list.dc
    class(out) <- c("mcmc.list.dc", "mcmc.list")
    out
}
|
7f9367c916743e64b30257d53ea9a43a5171d71c | 057ac9d20c897349eacfb47405136bdb8e0e071f | /man/simResultSinglePeriod-class.Rd | ff96e5e316789a772b37503ed4824722d865f138 | [] | no_license | cran/portfolioSim | a588b186a93b60474b2b24c50d2c3887d4cde85a | 30dd4328db735ab0226e9ddd04f9b70f0df0a2e8 | refs/heads/master | 2020-05-17T11:25:32.863031 | 2013-07-08T00:00:00 | 2013-07-08T00:00:00 | 17,698,675 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,625 | rd | simResultSinglePeriod-class.Rd | \name{simResultSinglePeriod-class}
\docType{class}
\alias{simResultSinglePeriod-class}
\alias{loadIn,simResultSinglePeriod,character,missing-method}
\alias{saveOut,simResultSinglePeriod,character,missing,character,missing,logical-method}
\title{Class "simResultSinglePeriod"}
\description{Contains simulation result data for a single period. }
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("simResultSinglePeriod", ...)}.
}
\section{Slots}{
\describe{
\item{\code{start.data}:}{Object of class \code{"instantData"} that
contains cross-sectional data as of the start of the period.}
\item{\code{end.data}:}{Object of class \code{"instantData"} that
contains cross-sectional data as of the end of the period.}
\item{\code{period.data}:}{Object of class \code{"periodData"} that
contains data for the period involving the passage of time. }
}
}
\section{Methods}{
\describe{
\item{loadIn}{\code{signature(object = "simResultSinglePeriod",
in.loc = "character", fmt = "missing")}: load in the simulation data
stored in \code{in.loc}. Currently only one format, binary
.RData, is available, and so the \code{fmt} parameter is missing
here.
}
\item{saveOut}{\code{signature(object = "simResultSinglePeriod",
type = "character", fmt = "missing", out.loc = "character", name =
"missing", verbose = "logical")}: save this object. Currently
only one format, binary .RData, is available, and so the
\code{fmt} parameter is missing here.}
}
}
\author{Jeff Enos \email{jeff@kanecap.com}}
\keyword{classes}
|
0f5c79444bd86007bbbd8f08f6e5c58793616a18 | 47ac09d7c2a1dd0546ef1c4afa5f53ebc03ce472 | /R_to_Word.R | 1438ac017da7bac0065fd9acd2d7a1606ad127eb | [] | no_license | sbraddaughdrill/WRDS_to_R | 444f3ad261bb2e1022f474b1161d01b680f9e096 | e15bd84996db0a54247a9131232c7ad3fb0b9667 | refs/heads/master | 2020-05-30T17:56:06.096689 | 2015-02-05T20:13:22 | 2015-02-05T20:13:22 | 29,551,981 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,910 | r | R_to_Word.R | # install.packages("R2wd")
# library(help=R2wd)
require(R2wd)
wdGet(T) # If no word file is open, it will start a new one - can set if to have the file visiable or not
wdNewDoc("c:\\This.doc") # this creates a new file with "this.doc" name
wdApplyTemplate("c:\\This.dot") # this applies a template
wdTitle("Examples of R2wd (a package to write Word documents from R)") # adds a title to the file
wdSection("Example 1 - adding text", newpage = T) # This can also create a header
wdHeading(level = 2, "Header 2")
wdBody("This is the first example we will show")
wdBody("(Notice how, by using two different lines in wdBody, we got two different paragraphs)")
wdBody("(Notice how I can use this: '\ n' (without the space), to \n go to the next
line)")
wdBody("?????? ???? ???????? ???????????? ?")
wdBody("It doesn't work with Hebrew...")
wdBody("O.k, let's move to the next page (and the next example)")
wdSection("Example 2 - adding tables", newpage = T)
wdBody("Table using 'format'")
wdTable(format(head(mtcars)))
wdBody("Table without using 'format'")
wdTable(head(mtcars))
wdSection("Example 3 - adding lm summary", newpage = T)
## Example from ?lm
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2,10,20, labels=c("Ctl","Trt"))
weight <- c(ctl, trt)
# This wouldn't work!
# temp <- summary(lm(weight ~ group))
# wdBody(temp)
# Here is a solution for how to implent the summary.lm output to word
# Write an arbitrary R object to the Word document, line by line.
# The object's printed representation is captured with capture.output() and
# each line is sent to wdBody().  For aligned output (e.g. model summaries)
# switch the document font to a monospaced face such as Courier New.
wdBody.anything <- function(output) {
    captured <- capture.output(output)
    for (line in captured) {
        wdBody(format(line))
    }
}
# Fit the example model and write its summary using the helper defined above.
temp <- summary(lm(weight ~ group))
wdBody.anything(temp)
# ---- Example 4: embedding plots -----------------------------------------
wdSection("Example 4 - Inserting some plots", newpage = T)
wdPlot(rnorm(100), plotfun = plot, height = 10, width =20, pointsize = 20)
wdPlot(rnorm(100), plotfun = plot, height = 10, width =20, pointsize = 20)
wdPlot(rnorm(100), plotfun = plot, height = 10, width =20, pointsize = 50)
# wdPageBreak()
wdSave("c:\\This.doc") # save current file (can say what file name to use)
wdQuit() # close the word file
#############################################################################################
###########################################################
###########################################################
## Demo - Reporting Life in Transition Survey 2011 using
## R and MS Word - 30.10.2012 by Markus Kainu
###########################################################
###########################################################
###########################################################
###------------------ PART 1 ---------------------------###
#---- LOAD DATA FROM EBRD AND CLEAN IT FOR ANALYSIS ----- #
###########################################################
###########################################################
###########################################################
## Load Required Packages ##
library(car)       # recode()
library(reshape2)
library(foreign)   # read.dta() for Stata files
library(stringr)   # str_trim(), str_replace()
###########################################################
## Load Data ##
# Download the Life in Transition Survey II microdata (Stata .dta, a binary
# format) to a temporary file and read it.  mode = "wb" is required:
# without it download.file() uses text mode on Windows and corrupts
# binary files.
temp <- tempfile()
download.file("http://www.ebrd.com/downloads/research/surveys/lits2.dta", temp,
              mode = "wb")
lits2 <- read.dta(temp)
###########################################################
## Clean data ##
# remove whitespaces from country
lits2$cntry <- str_trim(lits2$country_, side="right")
lits2$cntry <- as.factor(lits2$cntry)
# Recode "don't know" -> "dont know" (drop the apostrophe so the later
# car::recode specification strings are easier to write)
lits2$crise <- as.factor(str_replace(lits2$q801, "Don't know", "Dont know"))
# Rename Sex and Age
lits2$sex <- lits2$q102_1 # Sex
lits2$age <- lits2$q104a_1 # Age
lits2$income <- lits2$q227 # income
## Merge classes
# Useless values into Not Applicable (recoded to NA)
lits2$crise <- recode(lits2$crise, "c('Filtered','Not applicable','Not stated','Refused')=NA")
# Prefix the answer categories with letters so they sort in a meaningful order
lits2$crise <- recode(lits2$crise, "'A GREAT DEAL'='a) A GREAT DEAL';
                      'A FAIR AMOUNT'='b) A FAIR AMOUNT';
                      'JUST A LITTLE'='c) JUST A LITTLE';
                      'NOT AT ALL'='d) NOT AT ALL';
                      'Dont know'='e) DONT KNOW'")
## Recode numerical values into factors
# Perceived income (1-10 self-assessment collapsed into three classes)
lits2$income2 <- recode(lits2$income, "1:3='a) low income';
                        4:7='b) middle income';
                        8:10='c) high income';
                        else=NA")
lits2$income2 <- as.factor(lits2$income2)
# Level of education
lits2$education <- recode(lits2$q515, "1:2='a) no or primary';
                          3:4='b) Secondary';
                          5:7='c) Tertiary or higher'")
lits2$education <- as.factor(lits2$education)
# Remove NA cases
# NOTE: comparing against the string 'NA' yields NA for missing values,
# and which() drops those, so rows with real NAs are removed here;
# !is.na() would express the intent more directly.
lits2 <- lits2[ which(lits2$crise!= 'NA'), ]
lits2 <- lits2[ which(lits2$income2!= 'NA'), ]
lits2 <- lits2[ which(lits2$education!= 'NA'), ]
### Subset the final dataset ###
# Keep only the identifier, demographics, weights and the derived columns.
df2 <- subset(lits2, select=c("SerialID", "cntry",
                              "sex","age", # sex and age
                              "weight","XCweight", # weights
                              "income","income2", # incomes
                              "crise","education"))
### Select 6 countries ###
df <- subset(df2, cntry %in% c("Russia","Sweden","Italy",
                               "Mongolia","Turkey","Poland"))
df$cntry <- factor(df$cntry) # drop the unused country levels
#####################################################
### Let's explore the data ###
#####################################################
head(df)
summary(df)
str(df)
########################################################
########################################################
### ------------- PART 2 ----------------------------###
#---------- Creation of the Word Document -------------#
########################################################
########################################################
## --------- begin preparations ------- ##
# In case you don't have "R2wd" and "rcom" -packages available, install them running following three rows.
# Command "installstatconnDCOM()" will also install the current version of statconnDCOM that required for
# R to be able to interact with MS Word
## --- install packages "R2wd" and "rcom". --- ##
# install.packages("R2wd")
# install.packages("rcom")
# installstatconnDCOM()
# install.packages("statconnDCOM")
## --------- end preparations ------- ##
#####################################################
## --------- Creating the document ------- ##
## OPEN AN EMPTY DOCUMENT IN MS WORD !!!!! ###
## THEN RUN THE CODE BELOW ##
# Packages for the report: R2wd drives Word, rcom provides the COM bridge,
# the rest are used for the plots and survey analyses below.
library(R2wd)
library(rcom)
library(ggplot2)
library(reshape2)
library(car)
library(survey)
library(arm)
wdGet() # attach to the Word document opened by hand (see note above)
#####################################################
# Set page margins and orientation
wdPageSetup(orientation="portrait",
            margins=c(1,1,1,1),scope="all")
#####################################################
wdTitle("Reproducible reports using R and MS Word",
        label="R2wd")
#####################################################
#####################################################
#####################################################
#####################################################
#####################################################
# First section of the report.
wdSection("Introduction")
#####################################################
#####################################################
##
wdBody("This is an example on how to use the R2wd package for writing tables and graphs directly from microdata.")
##
#####################################################
wdSubsection("Plotting the age distribution using ggplot2")
#####################################################
# wdPlot() applies plotfun() to its first argument, so the data to plot
# must be supplied there.  The original passed the undefined object `t`
# (it is only created later in the script), raising "object 't' not
# found"; pass the data frame `df` that the plot actually uses, and plot
# the function argument instead of relying on the global.
plotfun <- function(dat)
    print(ggplot(dat, aes(x = age, color = sex)) +
              geom_density() +
              theme(legend.position = "top") +
              facet_wrap(~cntry))
wdPlot(df, plotfun = plotfun,
       caption = "Unweighted distributions of age by sex",
       height = 6, width = 5)
#####################################################
#####################################################
wdSection("Statistical tables and graphs", newpage = TRUE)
#####################################################
#####################################################
##
wdBody("Then we made a section break and started from new page.")
##
#####################################################
wdSubsection("Mean age of respondents")
#####################################################
##
wdBody("First, we need a table presenting means and standard errors of age by sex and country in weighted scheme.")
##
#####################################################
### Survey design ###
#####################################################
library(survey)
# Survey design object: one record per respondent, weighted by the
# individual sampling weight.
d.df <- svydesign(id = ~SerialID,
                  weights = ~weight,
                  data = df)
# Means and standard errors of age by Sex and Country
t <- data.frame(svyby(~age, ~sex+cntry, design=d.df, svymean, na.rm=T))
names(t) <- c("Sex","Country","mean_age","SE")
t <- t[c(2,1,3,4)] # reorder columns so Country comes first
# lets round the numbers
t$mean_age <- round(t$mean_age, 1)
t$SE <- round(t$SE, 3)
##
wdBody("First, a table with no formatting.")
##
# table
wdTable(t)
##
wdBody("Then, on new page, a table with some formatting.")
##
# table
wdTable(t, caption = "Means and standard errors of age by Sex and Country",
        # caption.pos="above", bookmark = NULL,
        pointsize = 9, padding = 5, autoformat = 2,
        row.names=FALSE)
##
wdBody("At last, we also want a graphical illustrations of the age with errorbars showing the standard errors" )
##
# errorbars for plot (+/- one standard error around the mean)
errorbar <- aes(ymax = mean_age + SE, ymin=mean_age - SE)
# plot with errorbars
plotfun <- function(t)
    print(ggplot(t, aes(x=Country, y=mean_age, fill=Sex)) +
              geom_bar(position="dodge", stat="identity") +
              geom_errorbar(errorbar, position=(position_dodge(width=0.9)), width=0.25) +
              coord_cartesian(ylim=c(35,65)) +
              theme(legend.position="top"))
wdPlot(t,plotfun=plotfun,
       caption="Means and standard errors of age by Sex and Country",
       height=4,width=5)
#####################################################
wdSubsection("Perceived income by countries", newpage = T)
#####################################################
## shares of the perceived-income classes within each country
## (prop.table over margin 2 = column proportions), as a data.frame
t <- prop.table(svytable(~income2+cntry, d.df),2)
t2 <- data.frame(t)
t2 <- t2[c(2,1,3)] # reorder columns so country comes first
# for percentages and round at 2 decimal points
t2$Freq <- round((t2$Freq *100), 2)
wdTable(t2, caption.pos="above",bookmark = NULL,
        pointsize = 9, padding = 5, autoformat = 3,
        row.names=FALSE)
# Grouped bar chart of the income-class shares, labelled with the values.
plotfun <- function(t2)
    print(ggplot(t2, aes(x=cntry, y=Freq, fill=income2, label=Freq)) +
              geom_bar(stat="identity", position="dodge") +
              theme(legend.position="top") +
              geom_text(position = position_dodge(width=1), vjust=-0.5, size=3))
wdPlot(t2,plotfun=plotfun,
       caption="Percentage distribution of income classes",
       height=3,width=6)
#####################################################
# Start a new (continuous) Word section and switch it to landscape so the
# wide faceted plot below fits on the page.
wdSectionBreak( continuous = TRUE,
                bookmark = NULL,wdapp = .R2wd)
wdPageSetup(orientation="landscape" ,scope="section")
#####################################################
#####################################################
#####################################################
wdSubsection("Perceived effect of financial crisis by countries",
             newpage = FALSE)
#####################################################
##
wdBody("And then one wide plot on a horizontal page" )
##
## quantities by country and crise and income
## (prop.table over margin 3 = shares within each country)
t <- prop.table(svytable(~crise+income2+cntry, d.df),3)
t2 <- data.frame(t)
# for percentages and round at 2 decimal points
t2$Freq <- round((t2$Freq *100), 2)
# Plot
# The original defined plotfun with argument `t3` but plotted the global
# `t2`, and then passed the undefined object `t3` to wdPlot(), which
# raised "object 't3' not found".  Pass `t2` (the table built just above)
# and plot the function argument.
plotfun <- function(dat)
    print(ggplot(dat, aes(x = cntry, y = Freq, fill = crise, label = Freq)) +
              geom_bar(stat = "identity", position = "dodge") + coord_flip() +
              facet_wrap(~income2) + theme(legend.position = "top") +
              geom_text(position = position_dodge(width = 1), hjust = -0.2, size = 2))
wdPlot(t2, plotfun = plotfun,
       caption = "Percentage distribution of household suffered a great deal of financial crisis",
       height = 4, width = 8)
#####################################################
# Back to portrait for the remaining sections.
wdSectionBreak( continuous = FALSE,
                bookmark = NULL,wdapp = .R2wd)
wdPageSetup(orientation="portrait", scope="section")
#####################################################
#####################################################
wdSubsection("Linear Regression", newpage = FALSE)
#####################################################
#####################################################
# Regression: perceived income explained by crisis impact, country and age,
# fitted with the survey design (weights) defined above.
mod <- svyglm(income~crise+cntry+age, design=d.df)
regr_tab <- data.frame(summary(mod)$coefficients)
colnames(regr_tab) <- colnames(summary(mod)$coefficients)
# Pretty-print the p-value column: "< 0.001" / "< 0.01" / rounded value.
regr_tab[ ,4] <- ifelse(regr_tab[ ,4] < .001, "< 0.001",
                        ifelse(regr_tab[ ,4] < .01, "< 0.01",
                               round(regr_tab[ ,4], 3)))
# print table to doc in word-default format:
wdTable(format(regr_tab),
        caption = "Linear regression of perceived income",
        caption.pos="above",bookmark = NULL,
        pointsize = 9, padding = 5, autoformat = 3,
        row.names=FALSE)
#####################################################
#####################################################
wdSubsection("Conclusions", newpage = FALSE)
#####################################################
##
wdBody("Works pretty well. For more information, google: R + R2wd!" )
##
# Save the document (relative name -> current working directory) and
# release the Word instance.
wdSave("demo_report")
##
wdQuit()
#####################################################
#####################################################
lits2 <- read.csv("E:/workspace/lits/lits2/lits2.csv") |
0db9b3a905a68d1def7c296891d8e980a1835faf | b170752e9e937098aa5e551189ebd876e8d4c774 | /R/Introducao/07-Matriz.R | e362332a1f968ccd467d860935f5342aa2d23508 | [] | no_license | DeividWillyan/Data-Science | e9a33ce6fa108ffbebf3debd2df91a466bb3990a | 557d610eb099fa02c69a9c72a49cef48482fbcb2 | refs/heads/master | 2020-04-07T23:39:15.166138 | 2018-11-27T18:50:48 | 2018-11-27T18:50:48 | 158,820,473 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,670 | r | 07-Matriz.R | # Matrizes
# Creating matrices
# Number of rows
matrix (c(1,2,3,4,5,6), nrow = 2)
matrix (c(1,2,3,4,5,6), nrow = 3)
matrix (c(1,2,3,4,5,6), nrow = 6)
# Number of columns
matrix ( c ( 1,2,3,4,5,6), ncol = 2)
# Help
?matrix
# The number of values should be a multiple of the number of rows/columns
# (this call recycles the data and emits a warning)
matrix( c (1,2,3,4,5), ncol = 2)
# Creating matrices from vectors and filling by row
meus_dados = c(1:10)
matrix(data = meus_dados, nrow = 5, ncol = 2, byrow = T)
# Slicing the matrix (indexing is [row, column])
mat <- matrix(c(2,3,4,5), nrow = 2)
mat
mat[1,2]
mat[2,2]
mat[1,3]  # out of bounds (only 2 columns) -- raises a subscript error
mat[,2]
# Creating a diagonal matrix
matriz = 1:3
diag (matriz)
# Extracting the diagonal vector from a diagonal matrix
vetor = diag(matriz)
diag(vetor)
# Matrix transpose
W <- matrix(c(2,4,8,12), nrow = 2, ncol = 2)
W
t(W)
U <- t(W)
U
# Obtaining the inverse of a matrix
solve(W)
# Element-wise matrix arithmetic
# (note: '*' is element-wise; true matrix multiplication is '%*%')
mat1 <- matrix(c(2,3,4,5), nrow = 2)
mat1
mat2 <- matrix(c(6,7,8,9), nrow = 2)
mat2
mat1 * mat2
mat1 / mat2
mat1 + mat2
mat1 - mat2
# Multiplying a matrix by a vector (element-wise, vector recycled)
x = c(1:4)
x
y <- matrix(c(2:5), nrow = 2)
x * y
# Naming matrix rows and columns
mat3 <- matrix(c('Futebol', 'Natação', 'Campo', 'Piscina'), nrow = 2)
mat3
dimnames(mat3) = (list(c("Linha1", "Linha2"), c("Coluna1", "Coluna2")))
mat3
# Setting row and column names at creation time
matrix (c(1,2,3,4), nrow = 2, ncol = 2, dimnames = list(c("Linha 1", "Linha 2"), c("Coluna 1", "Coluna 2")))
# Combining matrices (column-wise and row-wise)
mat4 <- matrix(c(2,3,4,5), nrow = 2)
mat4
mat5 <- matrix(c(6,7,8,9), nrow = 2)
cbind(mat4, mat5)
rbind(mat4, mat5)
# Flattening the matrix back into a vector
c(mat4)
|
9961b8cb65464dbbc792ee7ba4dbdbe426fd5081 | f86ebf6ac4278139f8eee3a714ad3aa1c88dbdc9 | /leaflet/app.R | 7a81581effa4723d750b53ebb8f59a46fc2aade4 | [] | no_license | nara907/mapPE | a1f2405115eaf33492ad8a9a859c14b72c167c77 | d05b4100b28f95938840b7951d32ec7572377cb5 | refs/heads/master | 2020-03-14T08:36:05.844893 | 2018-05-01T20:43:55 | 2018-05-01T20:43:55 | 131,528,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,884 | r | app.R | library("googlesheets")
# ---- Packages -----------------------------------------------------------
library("leaflet")
library("shiny")
library("rgdal")
library("raster")
library("sp")
library("devtools")
library("roxygen2")
library('mapPE')
library("mapview")
library(shinymaterial)
library(shinydashboard)
# NOTE(review): the original called install.packages("mapPE") and left
# getwd()/bare-fnm1 debugging lines here.  Installing packages from inside
# an app script re-installs on every run -- install 'mapPE' once,
# interactively, before deploying.

# ---- Data sources -------------------------------------------------------
# Google Sheet holding the achievement data (one worksheet per topic).
sheet<- gs_key('1hT9JHKGhKR1QcUDB8ylylURmgxoIkylLd4SF9zqdTVo')
# Kebele (district) boundary polygons: prefer the copy bundled with the
# installed mapPE package (system.file returns "" when not found), falling
# back to the in-repo path so the app still works from a source checkout.
fnm1 <- system.file("extdata", "kebeles.shp", package = "mapPE")
kebele <- shapefile(if (nzchar(fnm1)) fnm1 else "inst/extdata/kebeles.shp")
# Creating School points: worksheet 1 has one row per school (coordinates,
# facility counts, photo URL).
Schoolpoints<- sheet %>% gs_read(ws = 1, range = "A1:R18")
# Adding data to shapefile: university-transition awards per kebele
UniT<- sheet %>% gs_read(ws = 2, range = "A1:C34")
UniT<- as.data.frame(UniT)
# merge to kebeles (joined on the shared "id" column)
oo <- merge(kebele,UniT, by="id")
# Healthy Villages worksheet: home-improvement counts per kebele
HV<- sheet %>% gs_read(ws = 3, range = "A1:L34")
HV<- as.data.frame(HV)
kebeleS <- merge(oo,HV)
#Adding economic opportunities data
EconOpp<- sheet %>% gs_read(ws = 4, range = "A1:E34")
EconOpp<- as.data.frame(EconOpp)
kebeles <- merge(kebeleS,EconOpp)
# Binned colour scales for the three choropleth layers.
bins1 <- c(0, 1, 2, 3, 5, Inf)
palUniT <- colorBin(
  palette = "YlOrRd",
  domain = kebeles$UniT2012,
  bins=bins1
)
bins2<- c(0, 300, 450, 600, 750, Inf)
palHV <- colorBin(
  palette = "BuPu",
  domain = kebeles$Homes,
  bins = bins2
)
bins3<- c(0, 6, 30, 60, Inf)
palEO <- colorBin(
  palette = "YlGn",
  domain = kebeles$`Farmer's Association members assisted`,
  bins = bins3
)
# Shiny server: renders one leaflet map per theme -- education awards,
# healthy-villages home improvements, and economic opportunity.
server <-function(input,output) {
  # NOTE(review): 'data' is never read or updated after this line --
  # presumably left over from an earlier click handler; confirm before
  # removing.
  data <- reactiveValues(clickedMarker=NULL)
  # Education map: kebele choropleth of university-transition awards, plus
  # one circle marker per school with facility details in the popup.
  output$map<- renderLeaflet({
    m <- leaflet()
    m <- m %>%
      addTiles()%>%
      addProviderTiles(providers$OpenStreetMap.DE)%>%
      setView(36.776, 11.242, zoom = 10) %>%
      addLegend("bottomright", pal = palUniT, values = kebeles$UniT2012,
                title = "Number of Awards (2012)",
                opacity = 1
      )
    m <- addPolygons(m,
                     data = kebeles,
                     color = "#444444",
                     weight = 1,
                     smoothFactor = 0.5,
                     opacity = 1.0,
                     fillOpacity = 0.7,
                     highlightOptions = highlightOptions(color = "white", weight = 2,
                                                         bringToFront = FALSE),
                     fillColor = ~palUniT(kebeles$UniT2012),
                     popup = paste("Number of University Transition Awards: ", kebeles$UniT2012, sep="")
    ) %>%
      addCircles(data=Schoolpoints,
                 lat = ~Lat, lng = ~Lng,
                 radius = 60,
                 color = '#191970',
                 label = Schoolpoints$`School Name`,
                 labelOptions = labelOptions(
                   style = list(
                     "color"= "black",
                     "font-size" = "12px",
                     "border-color" = "rgba(0,0,0,0.5)")),
                 popup = paste('<h7 style="color:white;">', "Name:", "<b>", Schoolpoints$`School Name`, "</b>", '</h7>', "<br>",
                               '<h8 style="color:white;">',"New Buildings:", Schoolpoints$`New Buildings`,'</h8>', "<br>",
                               '<h8 style="color:white;">', "New Classrooms:", Schoolpoints$`New Classrooms`, '</h8>', "<br>",
                               '<h8 style="color:white;">', "Wells:", Schoolpoints$Wells, '</h8>', "<br>",
                               '<h8 style="color:white;">', "Piped Water:", Schoolpoints$`piped water system`, '</h8>', "<br>",
                               '<h8 style="color:white;">', "Latrines:", Schoolpoints$` Latrines `, '</h8>', "<br>",
                               popupImage(Schoolpoints$photos)))
  })
  # Healthy Villages map: choropleth of homes improved per kebele.
  output$mapHV<- renderLeaflet({
    HV<- leaflet()
    HV <- HV %>%
      addTiles()%>%
      addProviderTiles(providers$OpenStreetMap.DE)%>%
      setView(36.776, 11.242, zoom = 10)%>%
      addLegend("bottomright", pal = palHV, values = kebeles$Homes,
                title = "Homes Improved",
                opacity = 1
      )
    HV <- addPolygons(HV,
                      data = kebeles,
                      color = "#444444",
                      weight = 1,
                      smoothFactor = 0.5,
                      opacity = 1.0,
                      fillOpacity = 0.7,
                      label = kebeles$Kebele,
                      labelOptions = labelOptions(
                        style = list(
                          "color"= "black",
                          "font-size" = "12px",
                          "border-color" = "rgba(0,0,0,0.5)")),
                      highlightOptions = highlightOptions(color = "white", weight = 2,
                                                          bringToFront = TRUE),
                      fillColor = ~palHV(kebeles$Homes),
                      # NOTE(review): the label/column pairings below look
                      # shifted -- "Piped Water" shows kebele$Wells (from
                      # the un-merged shapefile), "Cement Floors" shows the
                      # `Piped water` column, and "Solar Lanterns" shows
                      # `Cement Floors`.  Verify against the worksheet
                      # columns and realign.
                      popup = paste('<h7 style="color:white;">', "Name:", "<b>", kebeles$Kebele, "</b>", '</h7>', "<br>",
                                    '<h8 style="color:white;">',"Total Homes Improved:", kebeles$Homes,'</h8>', "<br>",
                                    '<h8 style="color:white;">', "Wells:", kebeles$Wells, '</h8>', "<br>",
                                    '<h8 style="color:white;">', "Piped Water:", kebele$Wells, '</h8>', "<br>",
                                    '<h8 style="color:white;">', "Cement Floors:", kebeles$`Piped water`, '</h8>', "<br>",
                                    '<h8 style="color:white;">', "Solar Lanterns:", kebeles$`Cement Floors`, '</h8>', "<br>",
                                    '<h8 style="color:white;">', "Latrines:", kebeles$`Latrines`, '</h8>', "<br>",
                                    popupImage(kebeles$HVphotos)))
  })
  # Economic Opportunity map: choropleth of farmers assisted per kebele.
  output$mapEO<- renderLeaflet({
    EO<- leaflet()
    EO <- EO %>%
      addTiles()%>%
      addProviderTiles(providers$OpenStreetMap.DE)%>%
      setView(36.776, 11.242, zoom = 10)%>%
      addLegend("bottomright", pal = palEO, values = kebeles$`Farmer's Association members assisted`,
                title = "Farmers Assisted",
                opacity = 1
      )
    EO <- addPolygons(EO,
                      data = kebeles,
                      color = "#444444",
                      weight = 1,
                      smoothFactor = 0.5,
                      opacity = 1.0,
                      fillOpacity = 0.7,
                      label = kebeles$Kebele,
                      labelOptions = labelOptions(
                        style = list(
                          "color"= "black",
                          "font-size" = "12px",
                          "border-color" = "rgba(0,0,0,0.5)")),
                      highlightOptions = highlightOptions(color = "white", weight = 2,
                                                          bringToFront = TRUE),
                      fillColor = ~palEO(kebeles$`Farmer's Association members assisted`),
                      popup = paste('<h7 style="color:white;">', "Name:", "<b>", kebeles$Kebele, "</b>", '</h7>', "<br>",
                                    '<h8 style="color:white;">',"Microloans Distributed:", kebeles$Microloans,'</h8>', "<br>",
                                    '<h8 style="color:white;">', "Farmers Assisted:", kebeles$`Farmer's Association members assisted`, '</h8>', "<br>",
                                    popupImage(kebeles$EOphotos)))
  })
}
# UI: a material-design page with one tab (and one leaflet map) per theme.
# Tab ids must match the material_tab_content blocks below.
ui<- material_page(
  title = "Project Ethiopia Achievement Map",
  nav_bar_color = "green darken-2",
  material_tabs(
    tabs = c(
      "Education"= "Education_Tab",
      "Healthy Villages"= "HV_Tab",
      "Economic Opportunity"= "EO_Tab"),
    color= "green"),
  material_tab_content(
    tab_id = "Education_Tab",
    fluidRow(leafletOutput("map", height= 600))
  ),
  material_tab_content(
    tab_id= "HV_Tab",
    fluidRow(leafletOutput("mapHV", height= 600))
  ),
  material_tab_content(
    tab_id= "EO_Tab",
    fluidRow(leafletOutput("mapEO", height= 600))
  )
)
# Launch the app in the system browser.
runApp(shinyApp(ui, server), launch.browser = TRUE)
|
ba09c0bd8aa01e63da85a52f760e43f8e8925932 | 607d3cbb96e05c489cd5e9e939488d0f9de59e82 | /man/lengthNorm.limma.createRmd.Rd | 894c5d6ab91effaf761dea59d31f98aaa5e58ccf | [] | no_license | csoneson/compcodeR | 95fa5f8867af7fc8c034dacffa91642a5a4506d0 | e7b809e889789bf5e9b627f8a136cb4089fc5f78 | refs/heads/devel | 2023-07-06T13:36:51.779149 | 2023-07-03T14:21:36 | 2023-07-03T14:21:36 | 18,625,797 | 9 | 3 | null | 2023-07-03T14:14:37 | 2014-04-10T06:03:04 | HTML | UTF-8 | R | false | true | 6,827 | rd | lengthNorm.limma.createRmd.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateRmdCodeDiffExpPhylo.R
\name{lengthNorm.limma.createRmd}
\alias{lengthNorm.limma.createRmd}
\title{Generate a \code{.Rmd} file containing code to perform differential expression analysis with length normalized counts + limma}
\usage{
lengthNorm.limma.createRmd(
data.path,
result.path,
codefile,
norm.method,
extra.design.covariates = NULL,
length.normalization = "RPKM",
data.transformation = "log2",
trend = FALSE,
block.factor = NULL
)
}
\arguments{
\item{data.path}{The path to a .rds file containing the \code{phyloCompData} object that will be used for the differential expression analysis.}
\item{result.path}{The path to the file where the result object will be saved.}
\item{codefile}{The path to the file where the code will be written.}
\item{norm.method}{The between-sample normalization method used to compensate for varying library sizes and composition in the differential expression analysis. The normalization factors are calculated using the \code{calcNormFactors} of the \code{edgeR} package. Possible values are \code{"TMM"}, \code{"RLE"}, \code{"upperquartile"} and \code{"none"}}
\item{extra.design.covariates}{A vector containing the names of extra control variables to be passed to the design matrix of \code{limma}. All the covariates need to be a column of the \code{sample.annotations} data frame from the \code{\link{phyloCompData}} object, with a matching column name. The covariates can be a numeric vector, or a factor. Note that "condition" factor column is always included, and should not be added here. See Details.}
\item{length.normalization}{one of "none" (no length correction), "TPM", or "RPKM" (default). See details.}
\item{data.transformation}{one of "log2", "asin(sqrt)" or "sqrt". Data transformation to apply to the normalized data.}
\item{trend}{Should an intensity-trend be allowed for the prior variance? Defaults to \code{FALSE}.}
\item{block.factor}{Name of the factor specifying a blocking variable, to be passed to the \code{\link[limma]{duplicateCorrelation}} function of the \code{limma} package. The factor needs to be a column of \code{sample.annotations} from the \code{\link{phyloCompData}} object. Defaults to \code{NULL} (no block structure).}
}
\value{
The function generates a \code{.Rmd} file containing the code for performing the differential expression analysis. This file can be executed using e.g. the \code{knitr} package.
}
\description{
A function to generate code that can be run to perform differential expression analysis of RNAseq data (comparing two conditions) by applying a length normalizing transformation followed by differential expression analysis with limma. The code is written to a \code{.Rmd} file. This function is generally not called by the user, the main interface for performing differential expression analysis is the \code{\link{runDiffExp}} function.
}
\details{
For more information about the methods and the interpretation of the parameters, see the \code{limma} package and the corresponding publications.
The \code{length.matrix} field of the \code{phyloCompData} object
is used to normalize the counts, using one of the following formulas:
\itemize{
\item \code{length.normalization="none"} : \eqn{CPM_{gi} = \frac{N_{gi} + 0.5}{NF_i \times \sum_{g} N_{gi} + 1} \times 10^6}
\item \code{length.normalization="TPM"} : \eqn{TPM_{gi} = \frac{(N_{gi} + 0.5) / L_{gi}}{NF_i \times \sum_{g} N_{gi}/L_{gi} + 1} \times 10^6}
\item \code{length.normalization="RPKM"} : \eqn{RPKM_{gi} = \frac{(N_{gi} + 0.5) / L_{gi}}{NF_i \times \sum_{g} N_{gi} + 1} \times 10^9}
}
where \eqn{N_{gi}} is the count for gene g and sample i,
where \eqn{L_{gi}} is the length of gene g in sample i,
and \eqn{NF_i} is the normalization for sample i,
normalized using \code{calcNormFactors} of the \code{edgeR} package.
The function specified by the \code{data.transformation} is then applied
to the normalized count matrix.
The "\eqn{+0.5}" and "\eqn{+1}" are taken from Law et al 2014,
and dropped from the normalization
when the transformation is something else than \code{log2}.
The "\eqn{\times 10^6}" and "\eqn{\times 10^9}" factors are omitted when
the \code{asin(sqrt)} transformation is taken, as \eqn{asin} is only
defined for real numbers in \eqn{[-1, 1]}, and the scaling factors would
push the transformed proportions outside this interval.
The \code{design} model used in the \code{\link[limma]{lmFit}}
uses the "condition" column of the \code{sample.annotations} data frame from the \code{\link{phyloCompData}} object
as well as all the covariates named in \code{extra.design.covariates}.
For example, if \code{extra.design.covariates = c("var1", "var2")}, then
\code{sample.annotations} must have two columns named "var1" and "var2", and the design formula
in the \code{\link[limma]{lmFit}} function will be:
\code{~ condition + var1 + var2}.
}
\examples{
try(
if (require(limma)) {
tmpdir <- normalizePath(tempdir(), winslash = "/")
## Simulate data
mydata.obj <- generateSyntheticData(dataset = "mydata", n.vars = 1000,
samples.per.cond = 5, n.diffexp = 100,
id.species = factor(1:10),
lengths.relmeans = rpois(1000, 1000),
lengths.dispersions = rgamma(1000, 1, 1),
output.file = file.path(tmpdir, "mydata.rds"))
## Add covariates
## Model fitted is count.matrix ~ condition + test_factor + test_reg
sample.annotations(mydata.obj)$test_factor <- factor(rep(1:2, each = 5))
sample.annotations(mydata.obj)$test_reg <- rnorm(10, 0, 1)
saveRDS(mydata.obj, file.path(tmpdir, "mydata.rds"))
## Diff Exp
runDiffExp(data.file = file.path(tmpdir, "mydata.rds"), result.extent = "length.limma",
Rmdfunction = "lengthNorm.limma.createRmd",
output.directory = tmpdir, norm.method = "TMM",
extra.design.covariates = c("test_factor", "test_reg"))
})
}
\references{
Smyth GK (2005): Limma: linear models for microarray data. In: 'Bioinformatics and Computational Biology Solutions using R and Bioconductor'. R. Gentleman, V. Carey, S. Dudoit, R. Irizarry, W. Huber (eds), Springer, New York, pages 397-420
Smyth, G. K., Michaud, J., and Scott, H. (2005). The use of within-array replicate spots for assessing differential expression in microarray experiments. Bioinformatics 21(9), 2067-2075.
Law, C.W., Chen, Y., Shi, W. et al. (2014) voom: precision weights unlock linear model analysis tools for RNA-seq read counts. Genome Biol 15, R29.
Musser, JM, Wagner, GP. (2015): Character trees from transcriptome data: Origin and individuation of morphological characters and the so-called "species signal". J. Exp. Zool. (Mol. Dev. Evol.) 324B: 588-604.
}
\author{
Charlotte Soneson, Paul Bastide, Mélina Gallopin
}
|
ae13037dfe1949dda2f3ba6f0eb6a781e2ee56d3 | e4d7ab3d8da49cba570277133631c8d7ae134b93 | /R/update_version_control.r | 8c109327258c1f9fec69d6a88adc2fbed992b83f | [
"MIT"
] | permissive | williamlief/DataVersionControl | dd5cc156b7c90408a981487df012bb45f32fb205 | d876fc76f57549f1f224649464ba2d7b15280e95 | refs/heads/master | 2020-08-22T14:46:07.893544 | 2020-01-05T01:14:34 | 2020-01-05T01:14:34 | 216,418,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,386 | r | update_version_control.r | #' Update Version Control
#'
#' @description DATA_VC is a file that tracks which data files are currently
#' used in the program. This is used because git is not tracking changes in the data
#' files. Whenever a new file is saved or an old file updated, the record in DATA_VC
#' will be updated to include the current stamp. This function is used inside the
#' save wrapper functions (e.g. saveRDS_vc)
#'
#' @param file name of file being written
#' @param stamp value of stamp
#' @param version_control version control file
#' @param verbose logical, show record update values?
#'
#' @return invisibly updates
#' @keywords internal
update_version_control <- function(file,
                                   stamp,
                                   version_control,
                                   verbose = FALSE) {
  # The tracking file must already exist; it is created by make_data_vc().
  if (!file.exists(version_control)) {
    stop(paste("version control file:", version_control, "does not exist. Have you run make_data_vc?"))
  }
  # Fresh one-row record for the file that was just written.
  update <- data.frame(file = file,
                       stamp = stamp,
                       stringsAsFactors = FALSE)
  # Read the current records ("cc" = two character columns), drop any stale
  # record for this file, then append the fresh one.
  current <- readr::read_csv(version_control, col_types = "cc")
  current <- current[current$file != file, ]
  data_vc <- rbind(current, update)
  # 'file' replaces the 'path' argument, which readr deprecated in 1.4.0.
  readr::write_csv(x = data_vc, file = version_control)
  if (verbose) {
    # message() (rather than print(paste(...))) is the idiomatic channel
    # for status output and can be silenced with suppressMessages().
    message("Record updated for: ", file, " with stamp: ", stamp)
  }
  # Called for its side effect of updating the version control file.
  invisible(NULL)
}
|
15b2088dd502ee3cbe939794e60e80d7f59804cd | 770b14ae44e4991d444f0a0b1af124396bf2960f | /pkg/R/aaa.R | 1497cf52b49eb92da324429be1b321bfeea5750d | [] | no_license | melff/memisc | db5e2d685e44f3e2f2fa3d50e0986c1131a1448c | b5b03f75e6fe311911a552041ff5c573bb3515df | refs/heads/master | 2023-07-24T19:44:10.092063 | 2023-07-07T23:09:11 | 2023-07-07T23:09:11 | 29,761,100 | 40 | 10 | null | 2022-08-19T19:19:13 | 2015-01-24T01:21:55 | R | UTF-8 | R | false | false | 5,321 | r | aaa.R |
## Private environment in which the template tables below are published
## (see the assign() calls at the end of this file); other memisc
## functions look them up here at run time.
.memiscEnv <- new.env()
## Per-model-class templates for summary statistics, keyed by class
## ("lm", "glm", "default", ...); filled in below.
.SummaryTemplates <- list()
## Per-model-class selection of which summary statistics are shown by default.
.SummaryStats <- list()
## Layout templates for coefficient displays.  Each entry is a character
## vector (or, for the "horizontal" layouts, a matrix) of format strings:
## "($name:#)" interpolates a quantity at default precision, "($p:*)"
## renders significance stars, "((...))" parenthesizes a value, and
## "["/"]"-delimited entries bracket confidence limits.
.CoefTemplates <- list()
## Default layout: estimate with stars, standard error beneath it.
.CoefTemplates$default <- c(est="($est:#)($p:*)",
                            se="(($se:#))")
## Estimate with stars over the test statistic.
.CoefTemplates$stat <- c(est="($est:#)($p:*)",
                         stat="(($stat:#))")
## Everything: estimate, std. error, statistic and p-value.
.CoefTemplates$all <- c(est="($est:#)($p:*)",
                        se="(($se:#))",
                        stat="(($stat:#))",
                        p="(($p:#))"
                        )
## As "all", but without significance stars on the estimate.
.CoefTemplates$all.nostar <- c(est="($est:#)",
                               se="(($se:#))",
                               stat="(($stat:#))",
                               p="(($p:#))"
                               )
## One-row layout: estimate and std. error side by side.
.CoefTemplates$horizontal <- t(c(est="($est:#)($p:*)",
                                 se="(($se:#))"))
## Retained for reference: an earlier single-cell interval layout.
# .CoefTemplates$ci.se <- c(est="($est:3)",
#                           se="(($se:#))",
#                           ci="[($lwr:#);($upr:#)]")
## Estimate over the confidence limits.
.CoefTemplates$ci <- c(est="($est:#)",
                       lwr="[($lwr:#)",
                       upr="($upr:#)]"
                       )
## Estimate, std. error, then the confidence limits.
.CoefTemplates$ci.se <- c(est="($est:#)",
                          se="(($se:#))",
                          lwr="[($lwr:#)",
                          upr="($upr:#)]"
                          )
## 2 x 2 layout: row 1 holds estimate and std. error, row 2 the interval.
.CoefTemplates$ci.se.horizontal<- matrix(c(est="($est:#)",
                                           se="(($se:#))",
                                           lwr="[($lwr:#)",
                                           upr="($upr:#)]"
                                           ),ncol=2,nrow=2,byrow=TRUE,
                                         dimnames=list(
                                           c("est","ci"),
                                           c("est","se")
                                         ))
## 1 x 3 layout: estimate, lower and upper limit in a single row.
.CoefTemplates$ci.horizontal<- matrix(c(est="($est:#)",
                                        lwr="[($lwr:#)",
                                        upr="($upr:#)]"),
                                      ncol=3,nrow=1,byrow=TRUE,
                                      dimnames=list(
                                        c("est"),
                                        c("est","lwr","upr")
                                      ))
## Estimate, p-value, then the confidence limits.
.CoefTemplates$ci.p <- c(est="($est:#)",
                         p="(($p:#))",
                         lwr="[($lwr:#)",
                         upr="($upr:#)]"
                         )
## 2 x 2 layout: row 1 holds estimate and p-value, row 2 the interval.
.CoefTemplates$ci.p.horizontal<- matrix(c(est="($est:#)",
                                          p="(($p:#))",
                                          lwr="[($lwr:#)",
                                          upr="($upr:#)]"
                                          ),ncol=2,nrow=2,byrow=TRUE,
                                        dimnames=list(
                                          c("est","ci"),
                                          c("est","se")
                                        ))
## Summary statistics available for linear models ("lm").
.SummaryTemplates$lm <-
  c(
    "R-squared" = "($r.squared:f#)",
    "adj. R-squared" = "($adj.r.squared:f#)",
    sigma = "($sigma:#)",
    F = "($F:f#)",
    p = "($p:f#)",
    "Log-likelihood" = "($logLik:f#)",
    Deviance = "($deviance:f#)",
    AIC = "($AIC:f#)",
    BIC = "($BIC:f#)",
    N = "($N:d)"
  )
## By default only adjusted R-squared and N are shown for "lm" objects.
.SummaryStats$lm <- c("adj. R-squared","N")
## Summary statistics for generalized linear models ("glm"), including
## several pseudo-R-squared measures and the dispersion parameter phi.
.SummaryTemplates$glm <-
  c(
    "Aldrich-Nelson R-sq." = "($Aldrich.Nelson:f#)",
    "McFadden R-sq." = "($McFadden:f#)",
    "Cox-Snell R-sq." = "($Cox.Snell:f#)",
    "Nagelkerke R-sq." = "($Nagelkerke:f#)",
    phi = "($phi:#)",
    "Likelihood-ratio" = "($LR:f#)",
    p = "($p:#)",
    "Log-likelihood" = "($logLik:f#)",
    Deviance = "($deviance:f#)",
    AIC = "($AIC:f#)",
    BIC = "($BIC:f#)",
    N = "($N:d)"
  )
## By default only the log-likelihood and N are shown for "glm" objects.
.SummaryStats$glm <- c("Log-likelihood","N")
## Fallback template for model classes without a specific entry; like the
## "glm" template but without the dispersion parameter phi.
.SummaryTemplates$default <-
  c(
    "Aldrich-Nelson R-sq." = "($Aldrich.Nelson:f#)",
    "McFadden R-sq." = "($McFadden:f#)",
    "Cox-Snell R-sq." = "($Cox.Snell:f#)",
    "Nagelkerke R-sq." = "($Nagelkerke:f#)",
    "Likelihood-ratio" = "($LR:f#)",
    p = "($p:#)",
    "Log-likelihood" = "($logLik:f#)",
    Deviance = "($deviance:f#)",
    AIC = "($AIC:f#)",
    BIC = "($BIC:f#)",
    N = "($N:d)"
  )
## Default selection for the fallback template.
.SummaryStats$default <- c("Log-likelihood","N")
## Summary template shared by mixed models fitted with lme4 ("lmer"/"mer").
## NOTE(fix): the original code read
##     .SummaryTemplates$mer <- .SummaryTemplates$lmer
## followed by a free-standing c(...) expression, so '$lmer' (undefined at
## that point) left both entries NULL and the template vector was silently
## discarded.  Restoring the missing '<-' makes the chained assignment give
## both entries the intended template.
.SummaryTemplates$mer <- .SummaryTemplates$lmer <-
  c(
    "Log-likelihood" = "($logLik:f#)",
    Deviance = "($deviance:f#)",
    AIC = "($AIC:f#)",
    BIC = "($BIC:f#)",
    N = "($N:d)"
  )
## Publish the completed template tables in the package's private
## environment so other memisc functions can retrieve them at run time.
assign("SummaryTemplates",.SummaryTemplates, envir=.memiscEnv)
assign("CoefTemplates",.CoefTemplates, envir=.memiscEnv)
|
8da2d832ccb771e5c4fb416acb9c36fe1905abd1 | 9d2b24865162fe4020d0ba2a769961b781eba894 | /R/getDataValueSets.R | 90ed96f70a0382c36d53aac5783ad20cc357a53f | [
"CC0-1.0"
] | permissive | pepfar-datim/datimutils | a127e7617b580f2a0af400aab152ad804b5a16b8 | 31c0be49b3e7d224394c54f1578a0d901b93d8e0 | refs/heads/master | 2023-08-31T07:50:09.729702 | 2023-07-18T15:12:16 | 2023-07-18T15:12:16 | 202,563,797 | 8 | 11 | CC0-1.0 | 2023-09-13T14:50:05 | 2019-08-15T15:14:43 | R | UTF-8 | R | false | false | 3,866 | r | getDataValueSets.R | #' @export
#' @title getDataValueSets
#'
#' @description Used to read DHIS 2 data using the data value set endpoint
#' @param variable_keys character vector - data value set parameter keys
#' (e.g. "dataSet", "period")
#' @param variable_values character vector - values matching, position by
#' position, the keys in \code{variable_keys} (e.g. "Abcde123456", "2019Q1")
#' @param d2_session R6 datimutils object which handles authentication
#' with DATIM
#' @param retry number of times to retry
#' @param timeout number of seconds to wait during call
#' @param verbose return raw content with data
#' @param quiet Echo the URL which is called to the console if TRUE.
#' @return Data frame with the data requested
#'
getDataValueSets <- function(variable_keys = NULL,
                             variable_values = NULL,
                             d2_session = dynGet("d2_default_session",
                                                 inherits = TRUE),
                             retry = 1, timeout = 180,
                             verbose = FALSE,
                             quiet = TRUE) {
  # Every key must be paired with exactly one value.
  assertthat::assert_that(
    length(variable_keys) == length(variable_values),
    msg = paste("The number of keys provided is not equal to the number of",
                "values provided. Please ensure these match and try again."))
  # Example call:
  # /api/dataValueSets.json?dataSet=pBOMPrpg1QX&period=201401&orgUnit=DiszpKrYNg8
  #
  # Constraints on the dataValueSets resource (DHIS2 developer docs):
  # 1. At least one data set OR one data element group must be specified.
  if (!any(c("dataSet", "dataElementGroup") %in% variable_keys)) {
    stop("At least one data set or data element group must be specified.")
  }
  # 2. Either at least one period, or both a start date and an end date.
  has_date_range <- all(c("startDate", "endDate") %in% variable_keys)
  if (!has_date_range && !("period" %in% variable_keys)) {
    stop("Either at least one period or a start date and end date must be specified.")
  }
  # 3. At least one organisation unit or organisation unit group.
  if (!any(c("orgUnit", "orgUnitGroup") %in% variable_keys)) {
    stop("At least one organisation unit or organisation unit group must be specified.")
  }
  # 4. Organisation units must be within the hierarchy of the authenticated
  #    user; this is handled by loginToDatim / server side, so not re-checked.
  # 5. "Limit cannot be less than zero" does not apply: no limit is sent.
  # Build the "key=value&key=value" query string.  paste0() is vectorized,
  # so the element-wise mapply() of the original is unnecessary.
  variable_k_v_pairs <- paste0(variable_keys, "=", variable_values,
                               collapse = "&")
  # Assemble the URL path; paging is disabled so the full result is returned.
  path <- paste0("api/dataValueSets.json?",
                 variable_k_v_pairs,
                 "&paging=false")
  resp <- api_get(
    path = path,
    d2_session = d2_session,
    retry = retry,
    timeout = timeout,
    verbose = verbose,
    quiet = quiet
  )
  # In verbose mode api_get() returns both the parsed data and the raw API
  # responses; keep the latter for the return value.
  if (verbose) {
    meta_data <- resp$api_responses
    resp <- resp$data
  }
  # Flatten the "dataValues" element of the JSON payload into a data frame.
  resp <- as.data.frame(resp$dataValues, stringsAsFactors = FALSE)
  if (verbose) {
    return(list("data" = resp, "api_responses" = meta_data))
  }
  resp
}
|
91795cac652b35cc5cd42df05794ec6a935ebe8c | 5ba49d899e7c1cbea1f4968891d61a2d5e18a1ea | /byAreaMapCompare.r | f4ab753d753a8d84f2dbaa3ee87df26ed36372cc | [] | no_license | vppatil/YRBscripts | 1545fe266989e73e97f25b224feb7f66526fd0d5 | 2f27426d0febd464d939379ba5169b7e0b823455 | refs/heads/master | 2021-01-10T20:39:32.118092 | 2012-07-23T00:11:31 | 2012-07-23T00:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | byAreaMapCompare.r | library(RODBC)
# Reference / sanity-check query kept from development:
# sqlQuery(yrb,"select TransectID,VegEndEasting,VegEndNorthing from tblTransects where TransectID like '%0_32_1'")

# Open an ODBC connection to the YRB biodiversity Access database.
yrb <- odbcConnectAccess2007('c:/users/vppatil/desktop/yrb/YRBiodiversity.accdb')

# Transect records with complete, positive start/end coordinates.
# (Renamed from 't' so base::t() is not masked; '<-' used for assignment.)
transects <- sqlQuery(yrb,"select LakeID,TransectID,VegStartEasting,VegStartNorthing,VegEndEasting,VegEndNorthing from tblTransects where VegEndEasting is not Null and VegStartEasting > 0 and VegStartNorthing > 0 and VegEndNorthing > 0 and VegEndEasting > 0")

# Habitat segments with a measured (positive) length on each transect.
cover <- sqlQuery(yrb,"select TransectID,HabitatType,HabitatTypeLength from tblVegTransectPctCover where HabitatTypeLength > 0 ")

# Join segment lengths to their transect/lake identifiers.
hab <- base::merge(transects, cover, by="TransectID")

# Mean habitat-type length for every habitat x lake combination
# (rows = habitat types, columns = lakes).
means <- tapply(hab$HabitatTypeLength, list(hab$HabitatType, hab$LakeID), mean)

# Same per-lake / per-habitat averages computed directly in SQL, for
# comparison against 'means'.
query <- "select LakeID,HabitatType,avg(HabitatTypeLength) from (select tblTransects.LakeID,tblVegTransectPctCover.* from tblTransects inner join tblVegTransectPctCover on tblTransects.TransectID = tblVegTransectPctCover.TransectID) group by LakeID,HabitatType"
a <- sqlQuery(yrb, query)
|
6c083a8281c5dddf87b60c34465f9389419af076 | d4b16f7f36b2a3a32d4cc9841424d680cf0cac2f | /.vim/templates/template.R | 0c2d88a5787c23f36df2cdc1969abe43322cca97 | [] | no_license | vsbuffalo/dotfiles | 5a724741229b1744b1f3de8a768656db8cae5161 | 5d7c4584d8ce42743bae9a37dee368bd30ff9a10 | refs/heads/master | 2023-02-05T09:37:28.289282 | 2023-01-27T00:28:00 | 2023-01-27T00:28:00 | 13,942,607 | 51 | 16 | null | null | null | null | UTF-8 | R | false | false | 111 | r | template.R | # %FFILE% -- %HERE%
# Copyright (C) %YEAR% %USER% <%MAIL%>
# Distributed under terms of the %LICENSE% license.
|
550454808685c235b4a97b7022bc24fa119348ad | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802636-test.R | 507bf7b136836f090576ed755a2ea3aa643eafe3 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 686 | r | 1612802636-test.R | testlist <- list(bytes1 = c(-1L, -30199L, -822148992L, 0L, 33751253L, -587076224L, 8443357L, 32342528L, 32768L, 32768L, 33023L, -13910017L, -11298L, 1595159295L, -16702465L, -213L, -1L, -740401408L, 262143L, 706871060L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, 352261140L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, 336860180L, 336860415L, -1567724L, 336860415L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 255L, 0L, 0L, 9046336L, -1979068879L, 2097152255L, -65281L, -13959169L, -50256902L, NA, -1L, NA, 131840L, 1L, 2105376035L), pmutation = -8.15880834098229e+298)
# Replay the libFuzzer-captured argument list (built above) against the
# internal ByteCodeMutation routine; the pathological 'pmutation' value is
# kept on purpose from the fuzzing corpus.
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
4fd65613b48c532cb7006caf3d8796fe96e2716d | cad1cf40be29ccd980b0871bb6c79ed1838b3e04 | /man/oadmin.installed_package.Rd | fefedd27037daa0955c2f8e01b121d32d940a914 | [] | no_license | StuartWheater/opaladmin | b42c2487c99a04db29369b940ddd8a50f14cce19 | a0161f7ff36c20916ae8b743ddf8683008d62eef | refs/heads/master | 2020-03-25T21:12:53.604158 | 2018-11-23T13:16:21 | 2018-11-23T13:16:21 | 144,165,262 | 0 | 0 | null | 2018-08-09T14:38:33 | 2018-08-09T14:38:33 | null | UTF-8 | R | false | false | 353 | rd | oadmin.installed_package.Rd | \name{oadmin.installed_package}
\alias{oadmin.installed_package}
\title{Check if a package is installed in Opal(s).}
\usage{
oadmin.installed_package(opal, pkg)
}
\arguments{
\item{opal}{Opal object or list of opal objects.}
\item{pkg}{Package name.}
}
\value{
TRUE if installed
}
\description{
Check if a package is installed in Opal(s).
}
|
80fdea871be89eff59a9c0a8c5cd55337efde48c | b99a692b325e6d2e6419172fc37fd3107ffb79c2 | /tests/testthat/test_ops.R | bffce5c5fc7fdcbc4d6588fa6a90df400b401229 | [] | no_license | RobinHankin/stokes | 64a464d291cc1d53aa6478fe9986dd69cf65ad1e | 0a90b91b6492911328bad63f88084d7e865f70a9 | refs/heads/master | 2023-08-17T07:46:50.818411 | 2023-08-15T01:56:56 | 2023-08-15T01:56:56 | 177,894,352 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,188 | r | test_ops.R |
## Sanity checks on unary and scalar Ops for a single form S: unary plus,
## negation, subtraction, and scalar multiplication/division must all be
## mutually consistent.
foo1 <- function(S){
    # Unary plus is the identity; negation is an involution.
    expect_true(S == +S, info = S)
    expect_true(S == -(-S), info = S)
    # Subtracting a form from itself yields the zero form.
    expect_true(S - S == S*0, info = S)
    # Adding a form to itself agrees with scalar multiplication,
    # with the scalar on either side.
    expect_true(S + S == 2*S, info = S)
    expect_true(S + S == S*2, info = S)
    # Two halves recombine exactly.
    expect_true(S/2 + S/2 == S, info = S)
}
## Sanity checks on binary Ops for a pair of forms: addition must be
## commutative and invertible under subtraction.
foo2 <- function(S1, S2){
    expect_true(S2 + S1 - S2 == S1, info = list(S1, S2))
    expect_true(S1 + S2 == S2 + S1, info = list(S1, S2))
}
test_that("Cross product Ops behave", {
  ## Single-form identities on randomly generated forms.
  for (i in seq_len(10)) {
    nterms <- rpois(1, 20)
    k <- sample(3:10, 1)
    n <- k + sample(3:10, 1)
    S <- rform(nterms, k, n, sample(nterms))
    foo1(S)
  }
  ## Two-form identities; both forms share the same k and n.
  for (i in seq_len(10)) {
    nterms <- rpois(1, 20)
    k <- sample(3:10, 1)
    n <- k + sample(3:10, 1)
    S1 <- rform(nterms, k, n, sample(nterms))
    S2 <- rform(nterms, k, n, sample(nterms))
    foo2(S1, S2)
  }
})
test_that("Wedge product Ops behave", {
  ## Single-form identities on randomly generated forms.
  for (i in seq_len(10)) {
    nterms <- rpois(1, 20)
    k <- sample(3:10, 1)
    n <- k + sample(3:10, 1)
    K <- rform(nterms, k, n, sample(nterms))
    foo1(K)
  }
  ## Two-form identities; both forms share the same k and n.
  for (i in seq_len(10)) {
    nterms <- rpois(1, 20)
    k <- sample(3:10, 1)
    n <- k + sample(3:10, 1)
    K1 <- rform(nterms, k, n, sample(nterms))
    K2 <- rform(nterms, k, n, sample(nterms))
    foo2(K1, K2)
  }
})
|
9381f35d70fff7b4e5b60b34d7a46d23a2779990 | 8327aedc9fca9c1d5f11c160d440ecc082fb915d | /man/layerBiom.Rd | 169d0084c0642198a4420f8922d659ba57c1d806 | [] | no_license | SESjo/SES | f741a26e9e819eca8f37fab71c095a4310f14ed3 | e0eb9a13f1846832db58fe246c45f107743dff49 | refs/heads/master | 2020-05-17T14:41:01.774764 | 2014-04-17T09:48:14 | 2014-04-17T09:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 826 | rd | layerBiom.Rd | \name{layerBiom}
\alias{layerBiom}
\title{Compute the biomass in each layer during the day and night periods.}
\usage{
layerBiom(grp, all.col = FALSE, layers = NULL, is.day = NULL)
}
\arguments{
\item{grp}{Atomic vector giving the functional groups
biomass in the following order: \code{c(epi, meso, mmeso,
bathy, mbathy, hmbathy)}.}
\item{all.col}{Should the function return all columns:
\code{Layer} and \code{is.Day}}
\item{layers}{Should the function focus on a specific
layer (to choose in \code{c("Bathy", "Epi", "Meso")}).
Default is all layers.}
\item{is.day}{Should the function focus on a specific
period (to choose in \code{c(TRUE, FALSE)}).}
}
\description{
Compute the biomass in each layer during the day and night
periods.
}
\examples{
layerBiom(1:6) # Should be c(4, 10, 7, 15, 1, 5)
}
|
28d1011189edeb7fa20c3bde4cab68f3b42b2660 | f4ce32716a7a0051c0b15ec1e0bbc664298418c0 | /MechaCarChallenge.R | 49b5172698bdf303a40c17e6613641b125928798 | [] | no_license | kimango/MechaCar_Statistical_Analysis | 4ae39c2d4ba2594e627a24516a0178a28ea048b0 | 9ce1bf5a4313a3df643ef5c34fcb429403cfb1dd | refs/heads/main | 2023-08-13T01:50:35.989174 | 2021-09-15T21:04:57 | 2021-09-15T21:04:57 | 406,581,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,783 | r | MechaCarChallenge.R | > library(readr)
> library(tidyverse)
> MechaCar_mpg <- read_csv("MechaCar_Statistical_Analysis/MechaCar_mpg.csv")
Rows: 50 Columns: 6
0s-- Column specification ------------------------------------------------------------------------------------------
Delimiter: ","
dbl (6): vehicle_length, vehicle_weight, spoiler_angle, ground_clearance, AWD, mpg
i Use `spec()` to retrieve the full column specification for this data.
i Specify the column types or set `show_col_types = FALSE` to quiet this message.
> View(MechaCar_mpg)
> head(MechaCar_mpg)
# A tibble: 6 x 6
vehicle_length vehicle_weight spoiler_angle ground_clearance AWD mpg
<dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 14.7 6408. 48.8 14.6 1 49.0
2 12.5 5182. 90 14.4 1 36.8
3 20 8338. 78.6 12.3 0 80
4 13.4 9420. 55.9 13.0 1 18.9
5 15.4 3773. 26.1 15.1 1 63.8
6 14.5 7287. 30.6 13.1 0 48.5
> ## Linear Regression to Predict MPG
> lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + AWD + ground_clearance,data=MechaCar_mpg)
Call:
lm(formula = mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
AWD + ground_clearance, data = MechaCar_mpg)
Coefficients:
(Intercept) vehicle_length vehicle_weight spoiler_angle AWD ground_clearance
-1.040e+02 6.267e+00 1.245e-03 6.877e-02 -3.411e+00 3.546e+00
> # Generate Summary
> summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + AWD + ground_clearance,data=MechaCar_mpg))
Call:
lm(formula = mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
AWD + ground_clearance, data = MechaCar_mpg)
Residuals:
Min 1Q Median 3Q Max
-19.4701 -4.4994 -0.0692 5.4433 18.5849
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -1.040e+02 1.585e+01 -6.559 5.08e-08 ***
vehicle_length 6.267e+00 6.553e-01 9.563 2.60e-12 ***
vehicle_weight 1.245e-03 6.890e-04 1.807 0.0776 .
spoiler_angle 6.877e-02 6.653e-02 1.034 0.3069
AWD -3.411e+00 2.535e+00 -1.346 0.1852
ground_clearance 3.546e+00 5.412e-01 6.551 5.21e-08 ***
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: 8.774 on 44 degrees of freedom
Multiple R-squared: 0.7149, Adjusted R-squared: 0.6825
F-statistic: 22.07 on 5 and 44 DF, p-value: 5.35e-11
> library(readr)
> Suspension_Coil <- read_csv("MechaCar_Statistical_Analysis/Suspension_Coil.csv")
Rows: 150 Columns: 3
-- Column specification -------------------------------------------------------------------------------------------
Delimiter: ","
chr (2): VehicleID, Manufacturing_Lot
dbl (1): PSI
i Use `spec()` to retrieve the full column specification for this data.
i Specify the column types or set `show_col_types = FALSE` to quiet this message.
> View(Suspension_Coil)
> head(Suspension_Coil)
# A tibble: 6 x 3
VehicleID Manufacturing_Lot PSI
<chr> <chr> <dbl>
1 V40858 Lot1 1499
2 V40607 Lot1 1500
3 V31443 Lot1 1500
4 V6004 Lot1 1500
5 V7000 Lot1 1501
6 V17344 Lot1 1501
> # Total Summary Table
> total_summary <- Suspension_Coil %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep')
> total_summary
# A tibble: 1 x 4
Mean Median Variance SD
<dbl> <dbl> <dbl> <dbl>
1 1499. 1500 62.3 7.89
> lot_summary <- Suspension_Coil %>% group_by(Manufacturing_Lot) %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep')
> lot_summary
# A tibble: 3 x 5
# Groups: Manufacturing_Lot [3]
Manufacturing_Lot Mean Median Variance SD
<chr> <dbl> <dbl> <dbl> <dbl>
1 Lot1 1500 1500 0.980 0.990
2 Lot2 1500. 1500 7.47 2.73
3 Lot3 1496. 1498. 170. 13.0
> global_sample_table <- Suspension_Coil %>% sample_n(50)
> plt <- ggplot(Suspension_Coil,aes(x=PSI))
> plt + geom_density()
> plt <- ggplot(global_sample_table,aes(x=PSI))
> plt + geom_density()
> plt <- ggplot(global_sample_table,aes(x=log10(PSI)))
> plt + geom_density()
> plt <- ggplot(global_sample_table,aes(x=log10(PSI)))
> plt + geom_density()
> t.test(global_sample_table$PSI,mu=mean(Suspension_Coil$PSI))
One Sample t-test
data: global_sample_table$PSI
t = 0.89414, df = 49, p-value = 0.3756
alternative hypothesis: true mean is not equal to 1498.78
95 percent confidence interval:
1497.557 1501.963
sample estimates:
mean of x
1499.76
> psi_lot1_sample <- Suspension_Coil %>% subset(Manufacturing_Lot=='Lot1') %>% sample_n(25)
> force(mean_cl_boot)
function (x, ...)
{
check_installed("Hmisc")
fun <- getExportedValue("Hmisc", fun)
result <- do.call(fun, list(x = quote(x), ...))
rename(new_data_frame(as.list(result)), c(Median = "y",
Mean = "y", Lower = "ymin", Upper = "ymax"))
}
<bytecode: 0x000001a625e87540>
<environment: 0x000001a626253f60>
> t.test(psi_lot1_sample$PSI,mu=mean(Suspension_Coil$PSI))
One Sample t-test
data: psi_lot1_sample$PSI
t = 7.6825, df = 24, p-value = 6.414e-08
alternative hypothesis: true mean is not equal to 1498.78
95 percent confidence interval:
1499.789 1500.531
sample estimates:
mean of x
1500.16
> total_summary <- Suspension_Coil %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep')
> total_summary <- Suspension_Coil %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep')
> t.test(psi_lot1_sample$PSI,mu=mean(Suspension_Coil$PSI))
One Sample t-test
data: psi_lot1_sample$PSI
t = 7.6825, df = 24, p-value = 6.414e-08
alternative hypothesis: true mean is not equal to 1498.78
95 percent confidence interval:
1499.789 1500.531
sample estimates:
mean of x
1500.16
> psi_lot2_sample <- Suspension_Coil %>% subset(Manufacturing_Lot=='Lot2') %>% sample_n(25)
> psi_lot2_sample <- suspension_coil_data %>% subset(Manufacturing_Lot=='Lot2') %>% sample_n(25)
Error in subset(., Manufacturing_Lot == "Lot2") :
object 'suspension_coil_data' not found
> psi_lot2_sample <- Suspension_Coil %>% subset(Manufacturing_Lot=='Lot2') %>% sample_n(25)
> t.test(psi_lot2_sample$PSI,mu=mean(Suspension_Coil$PSI))
One Sample t-test
data: psi_lot2_sample$PSI
t = 2.9254, df = 24, p-value = 0.007404
alternative hypothesis: true mean is not equal to 1498.78
95 percent confidence interval:
1499.257 1501.543
sample estimates:
mean of x
1500.4
> psi_lot3_sample <- Suspension_Coil %>% subset(Manufacturing_Lot=='Lot3') %>% sample_n(25)
> t.test(psi_lot3_sample$PSI,mu=mean(Suspension_Coil$PSI))
One Sample t-test
data: psi_lot3_sample$PSI
t = -1.4598, df = 24, p-value = 0.1573
alternative hypothesis: true mean is not equal to 1498.78
95 percent confidence interval:
1490.235 1500.245
sample estimates:
mean of x
1495.24
|
87f0ad7ed10c416f6e959fecb64c55924fe905a7 | 2def8d9222401713e5d173968cda06ab149df781 | /tests/R6Unit/test_resources/example_packages/SignaturesMinimalWithOutdatedAutoComments/man/G1-numeric-method.Rd | e83f4d20ce2df2764ba3c8699dbb4939a718b8fc | [] | no_license | mamueller/roxygen2 | 5d25d131fc70cc7cedaef70a8a230961dd6cd535 | 0eb82a4c6719c0d6bdc8ecc5ff89a9a659d7dc2b | refs/heads/master | 2021-08-17T08:54:52.383731 | 2020-08-05T20:12:33 | 2020-08-05T20:12:33 | 211,916,767 | 0 | 0 | null | 2019-09-30T17:32:47 | 2019-09-30T17:32:47 | null | UTF-8 | R | false | true | 410 | rd | G1-numeric-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.R
\name{G1,numeric-method}
\alias{G1,numeric-method}
\title{Automatic description: G1,numeric,ANY-method}
\usage{
\S4method{G1}{numeric}(a1, a2)
}
\arguments{
\item{a1}{object of class:\code{numeric}, no manual documentation}
\item{a2}{no manual documentation}
}
\description{
Automatic description: G1,numeric,ANY-method
}
|
1688ff8d2d525d02cf519d56b4f8eca0a38199e2 | a04d6571573a33959ddc1e8285b74867097a1254 | /Election_Map_Work.R | 45c9aa7659c7c341c187a09ae1eb5e4187c6fa47 | [] | no_license | robwebby/robwebby.github.io | 76e5eaf5aba203fd4d130387fdb6b3ec11d6d5fe | 28b209d8bc0fe242c13ba00ed8506fdb949aac7c | refs/heads/master | 2021-01-23T01:46:24.086332 | 2019-08-25T17:25:11 | 2019-08-25T17:25:11 | 92,885,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,856 | r | Election_Map_Work.R | library(sp)
library(rgdal)
library(raster)
library(plyr)
library(rgeos)
library(leaflet)
library(rmapshaper)
library(htmlwidgets)
# NOTE(review): `mydir` is not defined anywhere in this script; it must already
# exist in the calling environment before the script is run -- confirm where it
# is set.
setwd(mydir)
# Read the constituency cartogram polygons and the attribute tables.
Constituencies <- readOGR("Cartogram_GE.shp")
Ref_Votes <- read.csv("Estimates_Leave_Vote.csv")
GE_2015 <- read.csv("GE2015_Results.csv")
GE_2010 <- read.csv("GE2010_Results.csv")
# Proj4 string for WGS84; defined here but apparently unused below.
latlong = "+init=epsg:4326"
# Join each attribute table onto the polygons by the constituency code.
Referendum_WGS <- merge(Constituencies,Ref_Votes, by = "CODE")
GE2015_WGS <- merge(Constituencies,GE_2015, by = "CODE")
GE2010_WGS <- merge(Constituencies,GE_2010, by = "CODE")
# Combined 2015 + 2010 attributes on one spatial object.
Combo_WGS <- merge(GE2015_WGS,GE2010_WGS, by = "CODE")
# Ten-point bins with a blue-to-orange diverging palette for the Leave share.
altbin <- c(0,10,20,30,40,50,60,70,80,90,100)
altpal <- colorBin(c('#08519c','#3182bd','#6baed6','#9ecae1','#c6dbef','#fdd0a2', '#fdae6b','#fd8d3c','#e6550d','#a63603'), domain = Combo_WGS$Estimated.Leave.vote, bins = altbin)
# Categorical palettes keyed on the winning party in each election.
Elec15pal <- colorFactor(c("darkblue","chartreuse","firebrick2","goldenrod3","dimgrey","darkgreen","gold","darkorchid1"), GE2015_WGS$Winner.15)
Elec10pal <- colorFactor(palette = c("darkblue","chartreuse","firebrick2","goldenrod3","dimgrey","darkgreen","gold"), domain = Combo_WGS$Winner.10)
# HTML hover labels (one per constituency) for each of the three maps below.
labelleave <- sprintf(
  "<strong>%s</strong><br/> <strong> %g </strong> percentage votes for Leave <br /> <strong> %g </strong> percentage votes for Remain", Combo_WGS$NAME,Combo_WGS$Estimated.Leave.vote,Combo_WGS$Estimated.Remain
) %>% lapply(htmltools::HTML)
labelelec2015 <- sprintf(
  "<strong>%s</strong><br/>Winner: <strong>%s</strong><br/> Lab Vote Share <strong> %g (%g) </strong> <br /> Con Vote Share <strong> %g (%g) </strong> <br />Lib Dem Vote Share <strong> %g (%g)</strong> <br />UKIP Vote Share <strong> %g (%g) </strong> <br />SNP Vote Share <strong> %g (%g) </strong> <br />Green Vote Share <strong> %g (%g) </strong> <br />Other Vote Share <strong> %g (%g) </strong> <br />", GE2015_WGS$Constituency.Name,GE2015_WGS$Winner.15,GE2015_WGS$Labour.Vote.Share.15,GE2015_WGS$Labour.Vote.Share.Change.15,GE2015_WGS$Conservative.Vote.Share.15,GE2015_WGS$Conservative.Vote.Share.Change.15,GE2015_WGS$Lib.Dems.Vote.Share.15,GE2015_WGS$Lib.Dems.Vote.Share.Change.15,GE2015_WGS$UKIP.Vote.Share.15,GE2015_WGS$UKIP.Vote.Share.Change.15,GE2015_WGS$SNP.Vote.Share.15,GE2015_WGS$SNP.Vote.Share.Change.15,GE2015_WGS$Green.Vote.Share.15,GE2015_WGS$Green.Vote.Share.Change.15,GE2015_WGS$Other.Vote.Share.15,GE2015_WGS$Other.Vote.Share.Change.15
) %>% lapply(htmltools::HTML)
labelelec2010 <- sprintf(
  "<strong>%s</strong><br/> Lab Vote Share <strong> %g </strong> <br /> Con Vote Share <strong> %g </strong> <br />Lib Dem Vote Share <strong> %g </strong> <br />UKIP Vote Share <strong> %g </strong> <br />SNP Vote Share <strong> %g </strong> <br />Green Vote Share <strong> %g </strong> <br />Other Vote Share <strong> %g </strong> <br />", Combo_WGS$NAME,Combo_WGS$Labour.Vote.Share.10,Combo_WGS$Conservative.Vote.Share.10,Combo_WGS$Lib.Dems.Vote.Share.10,Combo_WGS$UKIP.Vote.Share.10,Combo_WGS$SNP.Vote.Share.10,Combo_WGS$Green.Vote.Share.10,Combo_WGS$Other.Vote.Share.10
) %>% lapply(htmltools::HTML)
# Choropleth of 2015 general-election winners: polygons coloured by winning
# party, hover labels with full vote shares, and a party-colour legend.
# NOTE(review): `group` is supplied inside labelOptions(); leaflet normally
# takes `group` as an argument of addPolygons() itself -- confirm whether
# layer grouping was actually intended here.
GE2015_Leaflet <- leaflet(GE2015_WGS) %>%
  fitBounds(-10.02,49.67,2.09,58.06) %>%
  addPolygons(stroke = FALSE, smoothFactor = 0.2, fillOpacity = 1,
              color = ~Elec15pal(Winner.15),
              highlight = highlightOptions(
                weight = 5,
                color = "#666",
                dashArray = "",
                fillOpacity = 0.7,
                bringToFront = TRUE),
              label = labelelec2015,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "1px 1px"),
                textsize = "9px",
                direction = "auto", group = "Election 15")
  ) %>% addLegend("bottomright", pal = Elec15pal, values = ~Winner.15,
                  title = "Winner GE 2015",
                  labFormat = labelFormat(prefix = ""),
                  opacity = 1)
# Same style of choropleth for the 2010 winners, drawn from the combined
# 2015+2010 spatial data set (note the wider bounding box used here).
GE2010_Leaflet <- leaflet(Combo_WGS) %>%
  fitBounds(-14.02,49.67,2.09,61.06) %>%
  addPolygons(fill = TRUE,
              stroke = FALSE, smoothFactor = 0.2, fillOpacity = 1,
              color = ~Elec10pal(Winner.10),
              highlight = highlightOptions(
                weight = 5,
                color = "#666",
                dashArray = "",
                fillOpacity = 0.7,
                bringToFront = TRUE),
              label = labelelec2010,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "1px 1px"),
                textsize = "9px",
                direction = "auto", group = "Election 10")
  ) %>% addLegend("bottomright", pal = Elec10pal, values = ~Winner.10,
                  title = "Winner GE 2010",
                  labFormat = labelFormat(prefix = ""),
                  opacity = 1)
# EU referendum map: continuous Leave-vote palette over an OSM base layer,
# dashed highlight on hover, legend bottom-left.
Referendum_Leaflet <- leaflet(Combo_WGS) %>%
  addTiles(group = "OSM (default)") %>%
  fitBounds(-14.02,49.67,2.09,61.06) %>%
  addPolygons(stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.9,
              color = ~altpal(Estimated.Leave.vote),
              highlight = highlightOptions(
                weight = 3,
                color = "#666",
                dashArray = "5,5",
                fillOpacity = 0.9,
                bringToFront = TRUE),
              label = labelleave,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "1px 1px"),
                textsize = "9px",
                direction = "auto", group = "Referendum")
  ) %>% addLegend("bottomleft", pal = altpal, values = ~Estimated.Leave.vote,
                  title = "Leave Vote",
                  labFormat = labelFormat(prefix = ""),
                  opacity = 1)
# Export each interactive map as a standalone HTML widget.
saveWidget(GE2015_Leaflet,file = "Election_2015.html")
saveWidget(GE2010_Leaflet,file = "Election_2010.html")
saveWidget(Referendum_Leaflet,file = "EU_Referendum.html")
|
66eb1bad9b8a626b502fe686ff0af18be67d29f2 | 9c077831aaa80a56cff9e78303e3b923ff9c66d3 | /R/app_server.R | eb74b0110b07e5c2a3592027f264c4cf0dc379e2 | [
"MIT"
] | permissive | MaryleneH/Exo_activity | ae5df7c3ef80d4a211a8491c4cad3169d8e1707b | 18a132862e3ed8fd57c9f3e3eb8154042eee8bb2 | refs/heads/main | 2023-03-08T10:53:56.720355 | 2021-02-24T14:36:15 | 2021-02-24T14:36:15 | 340,514,066 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 688 | r | app_server.R | #' The application server-side
#'
#' @param input,output,session Internal parameters for {shiny}.
#' DO NOT REMOVE.
#' @import shiny
#' @importFrom skimr skim
#' @importFrom readr read_csv
#' @importFrom DT DTOutput renderDT datatable updateCaption
#' @importFrom dplyr sample_n filter
#' @importFrom ggplot2 ggplot aes geom_point geom_boxplot geom_col labs theme_minimal
#' @importFrom lubridate month
#' @noRd
app_server <- function( input, output, session ) {
  # List the first level callModules here.
  # A single reactiveValues object `r` is handed to every module so they can
  # share state across the upload, data and visualisation modules.
  r <- reactiveValues()
  callModule(mod_upload_server, "upload_ui_1", r = r)
  callModule(mod_data_server, "data_ui_1",r = r)
  callModule(mod_visu_server, "visu_ui_1",r=r)
}
|
70d37557e8419e90ae261ee250591b987af6293c | 1f74a31dce7c679d3ef4507335e2f6e763987ff1 | /stockassessment/man/is.whole.positive.number.Rd | 67c6b4b6ee799d24e2092434ab16f7f0dae5fdba | [] | no_license | fishfollower/SAM | 5b684c0a54d6e69f05300ebb7629829b2a003692 | a1f1c5b17505a7a73da28736f0077805a7606b30 | refs/heads/master | 2023-07-22T00:50:48.411745 | 2023-04-21T10:25:20 | 2023-04-21T10:25:20 | 67,597,583 | 55 | 35 | null | 2023-02-22T08:42:23 | 2016-09-07T10:39:25 | R | UTF-8 | R | false | true | 374 | rd | is.whole.positive.number.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reading.R
\name{is.whole.positive.number}
\alias{is.whole.positive.number}
\title{Function to test if x is a whole positive number}
\usage{
is.whole.positive.number(x, tol = .Machine$double.eps^0.5)
}
\arguments{
\item{x}{number}
\item{tol}{precision}
}
\description{
Tests whether \code{x} is a whole (integer-valued), positive number, up to
the numerical tolerance \code{tol}.
}
\details{
...
}
|
1792ab3b115e136b1b19fbd9b9ebd1da32563202 | 14fe2c3d47b44d1f65ea25dddc0b5ffbf977f2fb | /R/limma-voom.R | 3311620c9f82ac869d2bfa73514addbe96c2506d | [] | no_license | zhanghfd/contamDE | 7cea396eb43712099fb19ef4998cadafc6860c62 | 3712abcfabf93b7093858671d090dd936bd0e1a2 | refs/heads/master | 2021-01-13T12:27:57.351062 | 2019-11-26T05:43:43 | 2019-11-26T05:43:43 | 72,605,127 | 6 | 6 | null | null | null | null | UTF-8 | R | false | false | 735 | r | limma-voom.R |
limma_voom <- function(counts){
  # Differential expression for paired two-condition RNA-seq counts using the
  # limma-voom pipeline with quantile normalization.
  #
  # Args:
  #   counts: numeric count matrix with 2*N columns; the first N columns are
  #           condition 0 and the last N are condition 1, with column i and
  #           column N+i coming from the same patient.
  #
  # Returns:
  #   A list with the input counts, the effective library sizes scaled to
  #   mean 1, and the limma p-values and log2 fold changes for the
  #   condition1 - condition0 contrast.
  d <- DGEList(counts)
  d <- calcNormFactors(d)
  # Effective library size = lib.size * norm.factors (columns 2 and 3 of the
  # DGEList $samples table). BUG FIX: the original `d$sample[,3]` only worked
  # through R's partial matching of `$`; use the full component name.
  size <- d$samples[,2]*d$samples[,3]
  size <- size/mean(size)
  N <- ncol(counts)/2; # number of patients
  pair <- as.factor(rep(1:N,2))
  condition <- as.factor(c(rep(0,N),rep(1,N)))
  # Paired design: one coefficient per condition (no intercept) plus patient
  # blocking factors.
  design <- model.matrix(~0+condition+pair)
  d <- voom(d, design, normalize.method="quantile")
  d <- lmFit(d, design)
  contr <- 'condition1-condition0'
  contrs <- makeContrasts(contrasts= contr,levels=colnames(design))
  d <- contrasts.fit(d,contrs)
  d <- eBayes(d,trend=TRUE)
  p.limma <- d$p.value
  # BUG FIX: the MArrayLM component is `coefficients`; `d$coefficient`
  # relied on partial matching.
  log2FC.limma <- d$coefficients
  res <- list(counts=counts,size=size,p.limma=p.limma,log2FC.limma=log2FC.limma)
  return(res)
}
|
867d02322143f602e6ea6a9daa5dae8bc77d5de6 | 591f92914707a8c233795a8b7e6178760449db8a | /man/data_scraping.Rd | 86c9df7e6d49040b486f25f3a64d5002a6a6737d | [] | no_license | nnguyen2496/gradinfo | afd0dc025badd014b339dc7bf5482cab42a66f2c | 37b490fc8f1fbda2de59dcaab4007da517fe9370 | refs/heads/master | 2020-12-31T07:11:35.962035 | 2017-02-01T10:48:49 | 2017-02-01T10:48:49 | 80,595,514 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 602 | rd | data_scraping.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_scraping.R
\name{data_scraping}
\alias{data_scraping}
\title{Read in data on all available graduating classes}
\usage{
data_scraping()
}
\value{
A dataframe object containing information about all available graduating classes
}
\description{
Read in data on all available graduating classes. The information of interest
includes student names, Latin honors, department honors, Clark fellowship,
Phi Beta Kappa membership and Sigma Xi membership.
}
\keyword{StudentInfo}
\keyword{readStudent}
|
c832683c6cd4a970c9b5cba9265e89307ffe42a6 | 1e9072fb96c3f5cb47e156fed8ede092d6e56d14 | /man/PPQcxLat2.Rd | ff2059a450b3bd271914560367e9d86faf7f9ba8 | [] | no_license | cran/pgs | 1102955328ea60e733e611148f852088d91b05c9 | 4154077e52a8814f979775bfddb604e30d50cb32 | refs/heads/master | 2020-04-06T03:53:31.051960 | 2013-11-29T00:00:00 | 2013-11-29T00:00:00 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,745 | rd | PPQcxLat2.Rd | % Documentation file for PGS package
% Version: 0.2-0
% Authors: Kien Kieu & Marianne Mora
% License: CeCILL
% Copyright © 2009 INRA, Université Paris Ouest Nanterre la Défense
\name{PPQcxLat2}
\alias{PPQcxLat2}
\title{Generator of 2D quincunx lattices of point patterns}
\description{
Create a \code{FigLat-class} object representing a 2D quincunx
lattice of point patterns.
}
\usage{
PPQcxLat2(d=1,dx=sqrt(2)*d,n=1,hp=dx/5,vp=hp,h3=TRUE)
}
\arguments{
\item{d}{the distance between two neighbour diagonal locations. Default: 1.}
\item{dx}{the distance between two neighbour horizontal locations. Default value: quincunx inside a square.}
\item{n}{the number of points in each point pattern. Valid values for
\code{n}: 1, 4, 5, 6, 7, 8 or 9 (see \code{\link{PP2}}
documentation). Default: 1 (lattice of points).}
\item{hp}{the horizontal side length of the rectangle bounding the point
pattern. Default: dx/5.}
\item{vp}{the vertical side length of the rectangle bounding the point
pattern. Default: hp (square bounding box for each point pattern).}
\item{h3}{determines the orientation of the point pattern when
\code{n}=6, 7 or 8, see \code{\link{PP2}} documentation.}
}
\value{
A \code{\link{FigLat-class}} object.
}
\seealso{Generators \code{\link{FigLat}}, \code{\link{PointPattern}},
\code{\link{PP2}}, \code{\link{QcxLat2}}, other generators of 2D figure
lattices
\code{\link{PPHexLat2}}, \code{\link{PPRectLat2}},
\code{\link{QHexLat2}}, \code{\link{QQcxLat2}}, \code{\link{QRectLat2}},
\code{\link{SHexLat2}}, \code{\link{SQcxLat2}},
\code{\link{SRectLat2}}, \code{\link{LLat2}}.
}
\examples{
plot(PPQcxLat2(n=6,h3=FALSE),xlim=c(0,3),ylim=c(0,3))
}
\keyword{spatial}
|
e89f029c92efd45d9e6a9f2d8b3bd0d07eb5aeb8 | 0bae1a0fee545d466ea074db52e18693caa7324b | /ZTest/TwoSample.R | 76b142f98505dd4bbc709c99e93c8e503ff1bf2a | [] | no_license | anillimaye/R-codes | 3d0827a76f1c80875f63e89e6cc5921ceb743385 | 78ea38bb17b515a59b9785a927abe0536a71d04e | refs/heads/master | 2022-12-14T22:39:09.680482 | 2022-12-04T10:04:55 | 2022-12-04T10:04:55 | 204,685,493 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 635 | r | TwoSample.R | # This code is for two sample z test. The sample sizes are large.
# Takes two columns of values as input and computes mean.
# Performs a two sample two-tailed z test to find whether means and significantly different
Data <- read.table(file.choose(), header=TRUE, sep="\t")
mG1 = mean(Data$Group1); mG2 = mean(Data$Group2)
vG1 = var(Data$Group1); vG2 = var(Data$Group2)
lG1 = length(Data$Group1); lG2 = length(Data$Group2)
d = (mG1-mG2)/sqrt((vG1/lG1)+(vG2/lG2))
p = 2*pnorm(d, mean=0, sd=1, lower.tail=FALSE)
# Final conclusion
if (p < 0.05) {
print ("significantly different")
} else {
print("not significantly different")
}
|
94e9cc9010eca6fd2682ecb8e91e0433709f38d3 | cbb8de54c2b49bb7b61c435b909c4b6bcf3e2ca5 | /cachematrix.R | b83dfb4fb3f72cc52f53b1e3d560aac5ce1f51e9 | [] | no_license | pizzaqq/ProgrammingAssignment2 | 949665029999faa52255a08c5120e51389a9b1e4 | 80fea6cab783101a0999fb07b2ff91ef05a73b4f | refs/heads/master | 2021-01-15T09:42:23.721311 | 2015-07-20T10:36:57 | 2015-07-20T10:36:57 | 39,298,556 | 0 | 0 | null | 2015-07-18T13:06:42 | 2015-07-18T13:06:42 | null | UTF-8 | R | false | false | 1,351 | r | cachematrix.R | ## It is often costly to calculate the inverse of matrices so we want to
## cache the inverse of a matrix rather than compute it repeatedly.Below
## are two functions that will
## 1) create a special "matrix" object that can cache its inverse
## 2) computes the inverse or retrieve the inverse from cache
# The makeCacheMatrix below is a list of four functions
# 1) set function sets the value for matrix
# 2) get function gets the value for matrix
# 3) setinverse function sets the inverse value for matrix
# 4) getinverse function gets the inverse value for matrix
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of four functions:
##   set(y)        - store a new matrix and drop any cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - cache the inverse
##   getinverse()  - return the cached inverse (NULL if not yet computed)
makeCacheMatrix<-function(x=matrix()){
        # BUG FIX: initialise the cache. Without this line, getinverse()
        # errored with "object 'inver' not found" until set() had been called.
        inver<-NULL
        set<-function(y){
                x<<-y
                inver<<-NULL  # a new matrix invalidates the cached inverse
        }
        get<-function()x
        setinverse<-function(inverse) inver<<-inverse
        getinverse<-function()inver
        list(set = set, get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
# The second function below returns the inverse of matrix.
# If the inverse has been computes already, it gets the result.
# If not, it computes the inverse and sets the vlaue in cache by the function setinverse
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse is already cached it is returned directly
## (with a message); otherwise it is computed with solve(), stored in the
## cache, and returned. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        inver <- x$getinverse()
        if(!is.null(inver)) {
                message("getting cached data.")
                return(inver)
        }
        data <- x$get()
        # BUG FIX: removed leftover git merge-conflict markers
        # (<<<<<<< / ======= / >>>>>>>) that made this file unparseable,
        # and forward `...` to solve() as the signature advertises.
        inver <- solve(data, ...)
        x$setinverse(inver)
        inver
}
|
d4458f49de5993b47f28edc4d93aa5364427d590 | f7a70d93b566c9f866ef4934e6c0886f9261cb3b | /Lab_6/ChiassonK_201255916_Lab6.R | a42ab8f7286a6f68a3c26dcb4aea67c7119d142e | [] | no_license | pitoucc/geog3222 | f7ff580e685c989de127b9166a5b40fa39234319 | 00d27d768929a3c4438702e47ed5a1ed0a2d4642 | refs/heads/master | 2020-04-19T04:33:16.250977 | 2019-03-21T13:18:22 | 2019-03-21T13:18:22 | 167,965,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,675 | r | ChiassonK_201255916_Lab6.R | # Geog 3222 - Lab 6
# Karl Chiasson
# 201255916
#Set working directory
setwd('D:\\Class\\geog_3222\\Labs\\Lab_6')
#Load data/scripts/libraries
library(extRemes)
library(MASS)
load('Dataset3.Rdata')
source('decluster.g3222.R')
source('event.decluster.R')
source('pareto.thresh.decluster.R')
source('grouping.ts.R')
#Applies grouping.ts to get daily maxima
nl_daily_maxima <- grouping.ts(data,period='Day',operation='max')
#Declusters wind data to identify "wind storms" (threshold 50, 2-day lull, 5-day lag);
#event.list = TRUE returns individual events, FALSE returns annual groupings
wind_storm_eventlist <- decluster.g3222(nl_daily_maxima, thresh = 50, lull = 2, vlag = 5, event.list = TRUE)
wind_storm_eventlist
wind_storms <- decluster.g3222(nl_daily_maxima, thresh = 50, lull = 2, vlag = 5, event.list = FALSE)
wind_storms
#Prints a paragraph explaining the output from decluster
# NOTE(review): "\n" inside a print() string is shown literally as \n;
# cat() would be needed to render actual line breaks.
print("When event.list is true it returns a set of wind data, for a given threshold, that shows us the date of when a wind event has\n
      has occured, the duration of that wind event, and how long the occurance took place. When event.list is false it returns a set\n
      of wind data which groups events together and shows the counts and duration of these events.")
#Applies fitdistr to identify the parameters of best fitting Poisson and NB distributions
param_poi <- fitdistr(wind_storms$Count,densfun="Poisson")
param_nb <- fitdistr(wind_storms$Count,densfun="negative binomial")
#Prints the parameters and the statistics of annual count data.
wsmean <- mean(wind_storms$Count)
wssd <-sd(wind_storms$Count)
wsmean
wssd
param_poi
param_nb
paste("Based on the parameters and the statistics that the values between the mean:",wsmean,", lambda",param_poi$estimate, "and mu:", param_nb$estimate[[2]],"are approximately equal also that\n
      the standard deviation:",wssd," and the negative binomial size:",param_nb$estimate[[1]], " are approximately equal, leaving us to conclude that these distributions\n
      may be suitable for this data.")
#Plots a histogram of annual windstorm counts (written to PDF)
pdf("Wind_storms_hist_poi_nb_distributions.pdf",width = 11, height = 8)
hist(wind_storms$Count, ylim = c(0,0.3), breaks=10, prob =T,xlab="Number of Wind Storms per Year", main="Histogram of Wind Storm Frequency")
#Uses parameters to add lines to show Poisson and NB distribution
range_pnb <- min(wind_storms$Count):max(wind_storms$Count)
lines(dpois(range_pnb,lambda = param_poi$estimate), type = "p", col = "red")
lines(dnbinom(range_pnb,mu = param_nb$estimate[[2]], size = param_nb$estimate[[1]]), type = "p", col = "blue")
legend(7.5,0.3, legend = c("Poisson", "Negative Binomial"), col=c("red","blue"), cex=0.8, pch=1, title = "Distributions")
dev.off()
#Calculates the probability of annual windstorm counts using Poisson and NB distributions
#(upper-tail probabilities, i.e. P(X > x))
ppois(range_pnb,lambda = param_poi$estimate,lower.tail =F)
pnbinom(range_pnb,mu = param_nb$estimate[[2]], size = param_nb$estimate[[1]],lower.tail = F)
#Prints a paragraph discussing the suitability of the distributions for the data
print("From the appearance of both histograms it may seem that the distributions may not be suitable for the data sicnce the\n
      distributions do not always line up with the counts shown in the historgrams. There are some sections where the points for the\n
      distributions do match but if we refer to the probabilities, we see that as count of wind storms increases the probabilities of\n
      of those counts decrease, thus fitting to the distributions. In this case the negative binomial may be slightly more suited given\n
      the shape of the data.")
#Identifies the parameters of best fitting NB and geometric distributions of windstorm duration data
param_nb2 <- fitdistr(wind_storm_eventlist$events$Dur,densfun="negative binomial")
param_geo <- fitdistr(wind_storm_eventlist$events$Dur,densfun="geometric")
#Prints the parameters
param_nb2
param_geo
#Plots a histogram of wind storm durations (written to PDF)
hist(wind_storm_eventlist$events$Dur, ylim=c(0,1), breaks = 5, prob =T, xlab="Length of Wind Storms (days)", main="Histogram of Wind Storm Duration")
#Uses the parameters to add lines to the histogram for geometric and NB distributions
range_gnb <- min(wind_storm_eventlist$events$Dur):max(wind_storm_eventlist$events$Dur)
lines(dgeom(range_gnb, p=param_geo$estimate), type = "p", col = "red")
lines(dnbinom(range_gnb,mu = param_nb2$estimate[[2]], size = param_nb2$estimate[[1]]), type = "p", col = "blue")
legend(4,1, legend = c("Geometric", "Negative Binomial"), col=c("red","blue"), cex=0.8, pch=1,title = "Distributions")
dev.off()
#Calculates the probability of the windstorm counts using geometric and NB distributions
pgeom(range_gnb, p=param_geo$estimate,lower.tail = F)
pnbinom(range_gnb,mu = param_nb2$estimate[[2]], size = param_nb2$estimate[[1]],lower.tail = F)
#Prints a paragraph discussing the suitability of the distributions for the data
print("These distributions are farily similar to their fit to the data, though the low amount of counts makes it hard for\n
      this data work well with any distribution, there is not enough spread. The negative binomal distrubition may be better\n
      suited to deal with the large spike of single occurances.")
#Prints a paragraph discussing the probabilities
print("The parameter p is the maximum likelyhood that the storm will be around for another day, if it will persist. As each day\n
      passes the likelyhood will reduce, given the law of multiplicity, but never exceed the maximum likelyhood. The persistance\n
      measured through the geometric distribution is more worrying since it shows a higher likelyhood of a storm persisting.")
72231d2d3047c5d0c36e122c65c2bbc5e5dbb0d1 | edebce025e08bfb9a4a427465298ae4fd3ede569 | /code/deloitte.R | 359e5798b2e5aef45d2f321ac77c8d5282db71e1 | [] | no_license | pssguy/worldSoccer | 50b60db5e18ce85406b8cdb3814dd27a8641ef45 | 22a3f7be911c55bf4eee9640e2cd59d2b4c551aa | refs/heads/master | 2021-01-10T04:40:49.708917 | 2016-02-16T17:30:39 | 2016-02-16T17:30:39 | 36,517,392 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,384 | r | deloitte.R | ## tables from https://en.wikipedia.org/wiki/Deloitte_Football_Money_League
## compiled into data.frame
# Full Deloitte money-league table, newest season first, with revenue rounded
# to whole millions of euros.
output$deloitteData <- DT::renderDataTable({
  allYears %>%
    ungroup() %>%
    arrange(desc(Year),Rank) %>%
    mutate(Revenue=round(Revenue)) %>%
    select(Rank,Year,Club,Country,Revenue) %>%
    DT::datatable(rownames=F,class='compact stripe hover row-border',
                  colnames=c("Annual Rank","Season Commencing","Club","Country","Revenue(mill Euro)"),
                  # centre-align the season column
                  options=list(columnDefs = list(list(className = 'dt-center', targets = 1)))
    )
})
# Stacked bar chart of how many clubs from each country appear in the chosen
# slice of the ranking. The Top 20 list only exists from 2004 and the Top 30
# only from 2009, hence the extra Year filters.
observe({
  if(input$delCount=="Top 10") {
    temp <- allYears %>%
      ungroup() %>%
      filter(Rank<11)
  } else if(input$delCount=="Top 20") {
    temp <- allYears %>%
      ungroup() %>%
      filter(Rank<21&Year>=2004)
  } else {
    temp <- allYears %>%
      ungroup() %>%
      filter(Rank<31&Year>=2009)
  }
  temp %>%
    group_by(Country,Year) %>%
    tally() %>%
    rename(Count=n) %>%
    ggvis(~Year,~Count) %>%
    layer_bars(fill=~Country) %>%
    add_axis("x", format='d', title="Season Commencing") %>%
    add_axis("y", title="Countries in Top 10") %>%
    add_legend("fill", title="") %>%
    bind_shiny("delByCountry")
})
# Table of how many seasons each club has spent in the selected slice of the
# ranking (same subsetting rules as the delByCountry observer above).
output$delTeamCount <- DT::renderDataTable({
  if(input$delCount=="Top 10") {
    df <- allYears %>%
      ungroup() %>%
      filter(Rank<11)
  } else if(input$delCount=="Top 20") {
    df <- allYears %>%
      ungroup() %>%
      filter(Rank<21&Year>=2004)
  } else {
    df <- allYears %>%
      ungroup() %>%
      filter(Rank<31&Year>=2009)
  }
  df %>%
    group_by(Club) %>%
    tally() %>%
    rename(Years=n) %>%
    ungroup() %>%
    arrange(desc(Years)) %>%
    DT::datatable(rownames=F,class='compact stripe hover row-border',
                  options = list(searching= FALSE, info=FALSE, paging=TRUE))
})
# Line chart for the clubs in `topTen` (defined elsewhere), switching between
# annual rank, raw revenue, and an indexed revenue series.
observe({
  if (input$delCategory=="Rank") {
    # Rank: reversed y axis so rank 1 sits at the top.
    allYears %>%
      filter(Club %in% topTen) %>%
      ggvis(~Year,~Rank) %>%
      group_by(Club) %>%
      layer_lines() %>%
      layer_points(fill=~Club) %>%
      add_axis("x", format='d', title="Season Commencing") %>%
      scale_numeric("y", reverse=T) %>%
      add_legend("fill",values=topTen, title="") %>%
      bind_shiny("delTopTen")
  } else if (input$delCategory=="Revenue") {
    allYears %>%
      filter(Club %in% topTen) %>%
      ggvis(~Year,~Revenue) %>%
      group_by(Club) %>%
      layer_lines() %>%
      layer_points(fill=~Club) %>%
      add_axis("x", format='d', title="Season Commencing") %>%
      add_axis("y", format='d', title="Revenue (mill. Euro)") %>%
      add_legend("fill",values=topTen, title="") %>%
      bind_shiny("delTopTen")
  } else {
    # Index each club's revenue to its own value in the base season.
    # NOTE(review): the base season filtered here is 2011 but the axis title
    # says "(2004=100" -- confirm which base year is intended.
    index <- allYears %>%
      filter(Club %in% topTen&Year==2011) %>%
      mutate(baseRev=Revenue) %>%
      select(Club,baseRev)
    allYears %>%
      filter(Club %in% topTen) %>%
      left_join(index) %>%
      mutate(index=round(100*Revenue/baseRev)) %>%
      ggvis(~Year,~index) %>%
      group_by(Club) %>%
      layer_lines() %>%
      layer_points(fill=~Club) %>%
      add_axis("x", format='d', title="Season Commencing") %>%
      add_axis("y", format='d', title="Revenue Index (2004=100") %>%
      scale_numeric("y") %>%
      add_legend("fill",values=topTen, title="") %>%
      bind_shiny("delTopTen")
  }
})
7991e8cd2fa2b37f62274cbca61fa7367cd1a707 | 06aa6ef719c8d1de4e9520043760a0153aced8e8 | /understanding_stats_using_r/Chap07c_HypothesisTest.R | 18fae72aa4592ee41ffe738739a9777242247d6a | [] | no_license | alexmerk/courses | 8be9e85330c768ced99857766ad7254e14ca6c8b | e0a0fb3fa4067084e93fbc3b3984c2337607b2e7 | refs/heads/master | 2022-11-23T14:42:27.775464 | 2020-07-31T11:32:11 | 2020-07-31T11:32:11 | 112,396,700 | 1 | 0 | null | 2017-11-28T22:29:46 | 2017-11-28T22:29:46 | null | UTF-8 | R | false | false | 3,078 | r | Chap07c_HypothesisTest.R | # HYPOTHESIS TEST program - Chapter 7
# Set popMean and popVar to the population mean and variance
# Set nullMean to the mean of the null hypothesis
# Set tails for the type of test being conducted
# Set sampleSize to the size that each sample should be
# Set alpha to the desired confidence level
# Set varUse to 0 for population variance and 1 for sample variance
# Set numSamples to the number of samples to test
popMean <- 10.5    # true population mean used to generate the samples
popVar <- 2        # true population variance
nullMean <- 10     # mean under the null hypothesis
tails <- 1 # 0=one-tailed less than null, 1=one-tailed greater than null, 2=two-tailed
sampleSize <- 36   # observations per sample
alpha <- .05       # significance level of the test
varUse <- 1 # 0=population standard deviation, 1=sample standard deviation
numSamples <- 10   # number of samples to draw and test
options(width=90)  # widen console output so the results table fits on one line
# p-value for a test of the mean against `nullMean`, given the variance used
# for the standard error, the observed sample mean and the sample size.
#   tails: 0 = lower-tailed, 1 = upper-tailed, 2 = two-tailed
pValue <- function(nullMean,popVar,sampleMean,sampleSize,tails) {
  se <- sqrt(popVar)/sqrt(sampleSize)
  lowerTail <- pnorm(sampleMean,nullMean,se)
  if (tails==0) {
    pVal <- lowerTail
  } else if (tails==1) {
    pVal <- 1-lowerTail
  } else if (tails==2) {
    # fold onto the smaller tail and double it
    pVal <- (.5-abs(.5-lowerTail))*2
  }
  pVal
}
# Draws `numSamples` random samples from N(popMean, popVar) and, for each one,
# tests H0: mean == nullMean at level `alpha`, then prints a table of sample
# statistics, test statistics and reject/retain decisions.
# Relies on the globals defined above (popMean, popVar, nullMean, tails,
# sampleSize, alpha, varUse, numSamples) and on pValue().
chap07 <- function() {
  outputMatrix1 <- NULL   # accumulates one result row per sample
  decision <- NULL
  for (i in 1:numSamples) {
    sampleData <- rnorm(sampleSize,popMean,sqrt(popVar))
    outputVector1 <- NULL
    if (varUse==0) {
      # z test: standard error uses the known population variance
      probability <- pValue(nullMean,popVar,mean(sampleData),sampleSize,tails)
      if (probability < alpha) {
        decision <- "REJECT NULL"
      }
      else
      {
        decision <- "RETAIN NULL"
      }
      zstat <- (mean(sampleData)-nullMean)/(sqrt(popVar)/sqrt(sampleSize))
      outputVector1 <- cbind(outputVector1,round(mean(sampleData),digits=3),round(sqrt(popVar),digits=3),round(zstat,digits=3),decision,format(ceiling(probability*1000)/1000,nsmall=3))
    }
    else
    {
      # t-style statistic: standard error uses the sample variance
      probability <- pValue(nullMean,var(sampleData),mean(sampleData),sampleSize,tails)
      if (probability < alpha) {
        decision <- "REJECT NULL"
      }
      else
      {
        decision <- "RETAIN NULL"
      }
      tstat <- (mean(sampleData)-nullMean)/(sqrt(var(sampleData))/sqrt(sampleSize))
      outputVector1 <- cbind(outputVector1,round(mean(sampleData),digits=3),round(sqrt(var(sampleData)),digits=3),round(tstat,digits=3),decision,format(ceiling(probability*1000)/1000,nsmall=3))
    }
    outputMatrix1 <- rbind(outputMatrix1,outputVector1)
  }
  # Header summarising the simulation settings, then the results table.
  cat("\n\n")
  cat("Pop. Mean =",popMean,"Pop. Variance =",popVar,"Null Mean =",nullMean,"\n")
  cat("Sample Size =",sampleSize,"Alpha =",alpha,"Number of Samples",numSamples,"\n\n")
  cat("Variance type =",varUse,"(0 = population; 1 = sample)","\n")
  cat("Hypothesis direction =",tails,"(0 < Null, 1 > Null, 2 = two-tailed)")
  cat("\n\n")
  if (varUse==0) {
    dimnames(outputMatrix1) <- list(rep("",numSamples),c("Sample Mean","Pop. SD","z-statistic","Decision","p-value"))
    print(outputMatrix1,min.colwidth=14,prefix.width=0,quote=F)
  }
  else
  {
    dimnames(outputMatrix1) <- list(rep("",numSamples),c("Sample Mean","Sample SD","t-statistic","Decision","p-value"))
    print(outputMatrix1,min.colwidth=14,prefix.width=0,quote=F)
  }
}
chap07 ()
6892508b3c668d945514095765d2bbee696d20d1 | 818b023db0c8273606ae8e046bb8a829b8c859e8 | /mycode.R | a204bd804bab1162efe619e4cec3c61620e85a98 | [] | no_license | yashwanth104/datasciencecoursera | c3c98ab019112a4980392ab536700fbbd2f0be61 | d23a72b6c97fb5991a180cca244e41ccc1afb389 | refs/heads/master | 2021-04-15T07:57:22.448004 | 2018-06-04T05:17:26 | 2018-06-04T05:17:26 | 126,745,106 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 97 | r | mycode.R | myfunction <- function(){
        x <- 1:4
        y <- 2:3
        # y is recycled to length 4 (2,3,2,3), giving c(3L, 5L, 5L, 7L);
        # the lengths divide evenly, so no recycling warning is issued
        x + y
}
# Adds independent standard-normal noise to each element of x.
# Not deterministic: the result depends on the current RNG state.
second <- function(x){
        noise <- rnorm(length(x))
        x + noise
}
51d15a2aebceb8be858de8d6fb741efcefdd67dc | b7b83e40e803452dfdd904a76291d244d51d4a6a | /2stagemodelbyleire/functions_loglik.r | f0ceaaa31b5a686d0dc2687123889d90f99447be | [] | no_license | a4a/model-dev | 0996bbba2ce6d47b2230ccf18628d36304b50b27 | a1fbcb6bbb37321ac2516e7f389f4ecf68616d25 | refs/heads/master | 2021-01-09T22:39:06.925439 | 2015-11-25T10:42:54 | 2015-11-25T10:42:54 | 14,422,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,364 | r | functions_loglik.r | ################################################################################
################################################################################
# functions to compute the log likelihood to be maximised for the CBBM model
# version with and without closed-form solutions for some of the parameters
# leire ibaibarriaga, preparation of stecf ewg 2014-03
################################################################################
################################################################################
# function to compute the log likelihood, without closed-forms solutions
loglik.f <- function(param, bio.dat){
  # Log-likelihood of the CBBM two-stage model, version WITHOUT closed-form
  # parameter solutions: every parameter is supplied in `param` and the full
  # likelihood is evaluated directly.
  #
  # param   : numeric vector of length 3*Y + 16 (Y = number of years), packed
  #           as [logqdepm, logqac, logqrobs, logkrobs, logpsidepm, logpsiac,
  #           logpsirobs, xidepm, xiac, xicatch, logB0, logR[1:Y],
  #           logsage1sem1, logsage1sem2, logfsem1[1:Y], logfsem2[1:Y],
  #           logG1, logG2, logpsig].
  # bio.dat : data.frame with one row per year; uses columns BP.depm,
  #           Btot.depm, psidepm, BP.ac, Btot.ac, psiac, CPsem1, Ctotsem1,
  #           CPsem2, Ctotsem2, Robs, G1 and G2 (NA observations are skipped).
  #
  # Returns the scalar log-likelihood (to be maximised).
  # Depends on calc.pop() being defined in the calling environment.
  #
  # Change vs. the loop version: each observation block is now a single
  # vectorised sum of log-densities. This fixes a real bug: the old
  # `for (i in 1:length(year.X))` loops iterated over c(1, 0) when an index
  # set was empty (e.g. a column of all NAs), indexing with NA and then with
  # [0], which corrupted `ll` into NA or a zero-length vector. An empty index
  # set now simply contributes 0 to the likelihood.
  Y <- nrow(bio.dat)
  if (length(param) != (3*Y + 16)) {
    warning("Dimensions do not match")
  }
  # unpack the parameter vector (fixed layout, see docstring above)
  logqdepm <- param[1]
  logqac <- param[2]
  logqrobs <- param[3]
  logkrobs <- param[4]
  logpsidepm <- param[5]
  logpsiac <- param[6]
  logpsirobs <- param[7]
  xidepm <- param[8]
  xiac <- param[9]
  xicatch <- param[10]
  logB0 <- param[11]
  logR <- param[(11+1):(11+Y)]
  logsage1sem1 <- param[11+Y+1]
  logsage1sem2 <- param[11+Y+2]
  logfsem1 <- param[(13+Y+1):(13+Y+Y)]
  logfsem2 <- param[(13+2*Y+1):(13+2*Y+Y)]
  logG1 <- param[13+3*Y+1]
  logG2 <- param[13+3*Y+2]
  logpsig <- param[13+3*Y+3]
  # fixed quantities: survey timing, semester durations, natural mortalities
  dsurv <- 0.375
  dsem1 <- 0.5
  dsem2 <- 0.5
  M1 <- 0.8
  M2 <- 1.2
  # back-transform to the natural scale
  psidepm <- exp(logpsidepm)
  psiac <- exp(logpsiac)
  psirobs <- exp(logpsirobs)
  psig <- exp(logpsig)
  sage1sem1 <- exp(logsage1sem1)
  sage1sem2 <- exp(logsage1sem2)
  R <- exp(logR)
  G1 <- exp(logG1)
  G2 <- exp(logG2)
  fsem1 <- exp(logfsem1)
  fsem2 <- exp(logfsem2)
  fsem1[bio.dat$Ctotsem1 == 0] <- 0  # no catches => fishing mortality fixed at 0
  fsem2[bio.dat$Ctotsem2 == 0] <- 0
  # project the population (external helper calc.pop)
  mat <- c(fsem1, fsem2, logB0, logR, sage1sem1, sage1sem2, log(M1), log(M2), log(G1), log(G2))
  names(mat) <- c(paste("fsem1[",1:Y,"]",sep=""), paste("fsem2[",1:Y,"]",sep=""), "logB0", paste("logR[",1:Y,"]",sep=""),
                  "sage1[1]", "sage1[2]", "logM1", "logM2", "logG1", "logG2")
  pop <- calc.pop(mat, Y=Y, dsurv=dsurv, d=c(dsem1, dsem2))
  B1 <- pop[match(paste("B1[",1:Y,"]",sep=""), names(pop))]
  Btot <- pop[match(paste("Btot[",1:Y,"]",sep=""), names(pop))]
  C1sem1 <- pop[match(paste("C1sem1[",1:Y,"]",sep=""), names(pop))]
  Ctotsem1 <- pop[match(paste("Ctotsem1[",1:Y,"]",sep=""), names(pop))]
  C1sem2 <- pop[match(paste("C1sem2[",1:Y,"]",sep=""), names(pop))]
  Ctotsem2 <- pop[match(paste("Ctotsem2[",1:Y,"]",sep=""), names(pop))]
  # years with usable observations for each data source;
  # which() returns integer(0) when nothing is observed
  year.BP.depm <- which(!is.na(bio.dat$BP.depm))
  year.Btot.depm <- which(!is.na(bio.dat$Btot.depm))
  year.BP.ac <- which(!is.na(bio.dat$BP.ac))
  year.Btot.ac <- which(!is.na(bio.dat$Btot.ac))
  year.Robs <- which(!is.na(bio.dat$Robs))
  year.Ctotsem1 <- which(!is.na(bio.dat$Ctotsem1) & bio.dat$Ctotsem1 > 0)
  year.Ctotsem2 <- which(!is.na(bio.dat$Ctotsem2) & bio.dat$Ctotsem2 > 0)
  year.CPsem1 <- which(!is.na(bio.dat$CPsem1))
  year.CPsem2 <- which(!is.na(bio.dat$CPsem2))
  year.G1 <- which(!is.na(bio.dat$G1))
  year.G2 <- which(!is.na(bio.dat$G2))
  ll <- 0
  # DEPM age-1 biomass proportion: beta observation model
  p <- B1[year.BP.depm] / Btot[year.BP.depm]
  ll <- ll + sum(dbeta(bio.dat$BP.depm[year.BP.depm],
                       shape1 = exp(xidepm)*p, shape2 = exp(xidepm)*(1 - p), log = TRUE))
  # DEPM total biomass: lognormal, combining model and survey precisions
  sdl <- sqrt((psidepm + bio.dat$psidepm[year.Btot.depm]) / (psidepm*bio.dat$psidepm[year.Btot.depm]))
  ll <- ll + sum(dlnorm(bio.dat$Btot.depm[year.Btot.depm],
                        meanlog = logqdepm + log(Btot[year.Btot.depm]), sdlog = sdl, log = TRUE))
  # acoustic age-1 biomass proportion
  p <- B1[year.BP.ac] / Btot[year.BP.ac]
  ll <- ll + sum(dbeta(bio.dat$BP.ac[year.BP.ac],
                       shape1 = exp(xiac)*p, shape2 = exp(xiac)*(1 - p), log = TRUE))
  # acoustic total biomass
  sdl <- sqrt((psiac + bio.dat$psiac[year.Btot.ac]) / (psiac*bio.dat$psiac[year.Btot.ac]))
  ll <- ll + sum(dlnorm(bio.dat$Btot.ac[year.Btot.ac],
                        meanlog = logqac + log(Btot[year.Btot.ac]), sdlog = sdl, log = TRUE))
  # semester catch proportions at age 1 (beta) and total catches
  # (lognormal with fixed precision 400, i.e. sdlog = 0.05)
  p <- C1sem1[year.CPsem1] / Ctotsem1[year.CPsem1]
  ll <- ll + sum(dbeta(bio.dat$CPsem1[year.CPsem1],
                       shape1 = exp(xicatch)*p, shape2 = exp(xicatch)*(1 - p), log = TRUE))
  ll <- ll + sum(dlnorm(bio.dat$Ctotsem1[year.Ctotsem1],
                        meanlog = log(Ctotsem1[year.Ctotsem1]), sdlog = 1/sqrt(400), log = TRUE))
  p <- C1sem2[year.CPsem2] / Ctotsem2[year.CPsem2]
  ll <- ll + sum(dbeta(bio.dat$CPsem2[year.CPsem2],
                       shape1 = exp(xicatch)*p, shape2 = exp(xicatch)*(1 - p), log = TRUE))
  ll <- ll + sum(dlnorm(bio.dat$Ctotsem2[year.Ctotsem2],
                        meanlog = log(Ctotsem2[year.Ctotsem2]), sdlog = 1/sqrt(400), log = TRUE))
  # recruitment index: power relationship on the log scale
  ll <- ll + sum(dlnorm(bio.dat$Robs[year.Robs],
                        meanlog = logqrobs + exp(logkrobs)*log(R[year.Robs]),
                        sdlog = 1/sqrt(psirobs), log = TRUE))
  # growth rates: normal around the common G1/G2 with shared precision psig
  ll <- ll + sum(dnorm(bio.dat$G1[year.G1], mean = G1, sd = 1/sqrt(psig), log = TRUE))
  ll <- ll + sum(dnorm(bio.dat$G2[year.G2], mean = G2, sd = 1/sqrt(psig), log = TRUE))
  ll
}
################################################################################
################################################################################
# function to derive closed-form of some parameters given all the rest
cf <- function(Btot, R, G1, G2, psidepm, psiac, bio.dat){
  # Closed-form (conditional) estimates of the observation parameters given
  # the population quantities: the survey log-catchabilities are
  # precision-weighted means of log-ratios, (logqrobs, krobs) are the
  # ordinary least-squares intercept/slope of log(Robs) on log(R), and
  # psirobs/psig are the corresponding inverse mean squared residuals.
  # Returns a named numeric vector
  # c(logqdepm, logqac, krobs, logqrobs, psirobs, psig).
  Y <- nrow(bio.dat)
  # years with observations for each data source
  obs.depm <- which(!is.na(bio.dat$Btot.depm))
  obs.ac   <- which(!is.na(bio.dat$Btot.ac))
  obs.rec  <- which(!is.na(bio.dat$Robs))
  obs.g1   <- which(!is.na(bio.dat$G1))
  obs.g2   <- which(!is.na(bio.dat$G2))
  # DEPM catchability: precision-weighted mean of log(obs/model)
  w.depm <- psidepm * bio.dat$psidepm[obs.depm] / (psidepm + bio.dat$psidepm[obs.depm])
  logqdepm <- weighted.mean(log(bio.dat$Btot.depm[obs.depm]) - log(Btot[obs.depm]), w.depm)
  # acoustic catchability: same construction
  w.ac <- psiac * bio.dat$psiac[obs.ac] / (psiac + bio.dat$psiac[obs.ac])
  logqac <- weighted.mean(log(bio.dat$Btot.ac[obs.ac]) - log(Btot[obs.ac]), w.ac)
  # OLS fit of log(Robs) on log(R) written via the usual 2x2 normal equations
  yreg <- log(bio.dat$Robs[obs.rec])
  xreg <- log(R[obs.rec])
  a <- -sum(yreg)
  b <- length(obs.rec)
  cc <- sum(xreg)
  d <- sum(yreg * xreg)
  e <- -cc
  f <- -sum(xreg^2)
  krobs <- (a*e - d*b) / (f*b - cc*e)
  logqrobs <- -cc/b * krobs - a/b
  # precision of the recruitment index: n over the residual sum of squares
  psirobs <- b / sum((yreg - logqrobs - krobs*xreg)^2)
  # shared growth precision: pooled residuals of both growth series
  res.g <- c(bio.dat$G1[obs.g1] - G1, bio.dat$G2[obs.g2] - G2)
  psig <- length(res.g) / sum(res.g^2)
  c(logqdepm = logqdepm, logqac = logqac, krobs = krobs,
    logqrobs = logqrobs, psirobs = psirobs, psig = psig)
}
################################################################################
################################################################################
# function to compute the log likelihood, with closed-forms solutions
loglikcf.f <- function(param, bio.dat){
  # Log-likelihood of the CBBM two-stage model, version WITH closed-form
  # solutions: the observation parameters (logqdepm, logqac, krobs, logqrobs,
  # psirobs, psig) are profiled out via cf() given the remaining parameters.
  #
  # param   : numeric vector of length 3*Y + 10 (Y = number of years), packed
  #           as [logpsidepm, logpsiac, xidepm, xiac, xicatch, logB0,
  #           logR[1:Y], logsage1sem1, logsage1sem2, logfsem1[1:Y],
  #           logfsem2[1:Y], logG1, logG2].
  # bio.dat : data.frame with one row per year; same columns as loglik.f.
  #
  # Returns the scalar (profile) log-likelihood.
  # Depends on calc.pop() and cf() being defined in the calling environment.
  #
  # Changes vs. the loop version:
  # * each observation block is a vectorised sum of log-densities, which also
  #   fixes the `1:length()` empty-index bug that corrupted ll when a data
  #   column had no observations;
  # * the closed-form slope krobs is used directly instead of exp(log(krobs)),
  #   which produced NaN (plus a warning) whenever the fitted slope was <= 0.
  Y <- nrow(bio.dat)
  if (length(param) != (3*Y + 10)) {
    warning("Dimensions do not match")
  }
  # unpack the parameter vector (fixed layout, see docstring above)
  logpsidepm <- param[1]
  logpsiac <- param[2]
  xidepm <- param[3]
  xiac <- param[4]
  xicatch <- param[5]
  logB0 <- param[6]
  logR <- param[(6+1):(6+Y)]
  logsage1sem1 <- param[6+Y+1]
  logsage1sem2 <- param[6+Y+2]
  logfsem1 <- param[(8+Y+1):(8+Y+Y)]
  logfsem2 <- param[(8+2*Y+1):(8+2*Y+Y)]
  logG1 <- param[8+3*Y+1]
  logG2 <- param[8+3*Y+2]
  # fixed quantities: survey timing, semester durations, natural mortalities
  dsurv <- 0.375
  dsem1 <- 0.5
  dsem2 <- 0.5
  M1 <- 0.8
  M2 <- 1.2
  # back-transform to the natural scale
  psidepm <- exp(logpsidepm)
  psiac <- exp(logpsiac)
  sage1sem1 <- exp(logsage1sem1)
  sage1sem2 <- exp(logsage1sem2)
  R <- exp(logR)
  G1 <- exp(logG1)
  G2 <- exp(logG2)
  fsem1 <- exp(logfsem1)
  fsem2 <- exp(logfsem2)
  fsem1[bio.dat$Ctotsem1 == 0] <- 0  # no catches => fishing mortality fixed at 0
  fsem2[bio.dat$Ctotsem2 == 0] <- 0
  # project the population (external helper calc.pop)
  mat <- c(fsem1, fsem2, logB0, logR, sage1sem1, sage1sem2, log(M1), log(M2), log(G1), log(G2))
  names(mat) <- c(paste("fsem1[",1:Y,"]",sep=""), paste("fsem2[",1:Y,"]",sep=""), "logB0", paste("logR[",1:Y,"]",sep=""),
                  "sage1[1]", "sage1[2]", "logM1", "logM2", "logG1", "logG2")
  pop <- calc.pop(mat, Y=Y, dsurv=dsurv, d=c(dsem1, dsem2))
  B1 <- pop[match(paste("B1[",1:Y,"]",sep=""), names(pop))]
  Btot <- pop[match(paste("Btot[",1:Y,"]",sep=""), names(pop))]
  C1sem1 <- pop[match(paste("C1sem1[",1:Y,"]",sep=""), names(pop))]
  Ctotsem1 <- pop[match(paste("Ctotsem1[",1:Y,"]",sep=""), names(pop))]
  C1sem2 <- pop[match(paste("C1sem2[",1:Y,"]",sep=""), names(pop))]
  Ctotsem2 <- pop[match(paste("Ctotsem2[",1:Y,"]",sep=""), names(pop))]
  # profile out the observation parameters with their closed-form solutions
  rest <- cf(Btot, R, G1, G2, psidepm, psiac, bio.dat)
  logqdepm <- rest[["logqdepm"]]
  logqac <- rest[["logqac"]]
  krobs <- rest[["krobs"]]
  logqrobs <- rest[["logqrobs"]]
  psirobs <- rest[["psirobs"]]
  psig <- rest[["psig"]]
  # years with usable observations for each data source;
  # which() returns integer(0) when nothing is observed
  year.BP.depm <- which(!is.na(bio.dat$BP.depm))
  year.Btot.depm <- which(!is.na(bio.dat$Btot.depm))
  year.BP.ac <- which(!is.na(bio.dat$BP.ac))
  year.Btot.ac <- which(!is.na(bio.dat$Btot.ac))
  year.Robs <- which(!is.na(bio.dat$Robs))
  year.Ctotsem1 <- which(!is.na(bio.dat$Ctotsem1) & bio.dat$Ctotsem1 > 0)
  year.Ctotsem2 <- which(!is.na(bio.dat$Ctotsem2) & bio.dat$Ctotsem2 > 0)
  year.CPsem1 <- which(!is.na(bio.dat$CPsem1))
  year.CPsem2 <- which(!is.na(bio.dat$CPsem2))
  year.G1 <- which(!is.na(bio.dat$G1))
  year.G2 <- which(!is.na(bio.dat$G2))
  ll <- 0
  # DEPM age-1 biomass proportion: beta observation model
  p <- B1[year.BP.depm] / Btot[year.BP.depm]
  ll <- ll + sum(dbeta(bio.dat$BP.depm[year.BP.depm],
                       shape1 = exp(xidepm)*p, shape2 = exp(xidepm)*(1 - p), log = TRUE))
  # DEPM total biomass: lognormal, combining model and survey precisions
  sdl <- sqrt((psidepm + bio.dat$psidepm[year.Btot.depm]) / (psidepm*bio.dat$psidepm[year.Btot.depm]))
  ll <- ll + sum(dlnorm(bio.dat$Btot.depm[year.Btot.depm],
                        meanlog = logqdepm + log(Btot[year.Btot.depm]), sdlog = sdl, log = TRUE))
  # acoustic age-1 biomass proportion
  p <- B1[year.BP.ac] / Btot[year.BP.ac]
  ll <- ll + sum(dbeta(bio.dat$BP.ac[year.BP.ac],
                       shape1 = exp(xiac)*p, shape2 = exp(xiac)*(1 - p), log = TRUE))
  # acoustic total biomass
  sdl <- sqrt((psiac + bio.dat$psiac[year.Btot.ac]) / (psiac*bio.dat$psiac[year.Btot.ac]))
  ll <- ll + sum(dlnorm(bio.dat$Btot.ac[year.Btot.ac],
                        meanlog = logqac + log(Btot[year.Btot.ac]), sdlog = sdl, log = TRUE))
  # semester catch proportions at age 1 (beta) and total catches
  # (lognormal with fixed precision 400, i.e. sdlog = 0.05)
  p <- C1sem1[year.CPsem1] / Ctotsem1[year.CPsem1]
  ll <- ll + sum(dbeta(bio.dat$CPsem1[year.CPsem1],
                       shape1 = exp(xicatch)*p, shape2 = exp(xicatch)*(1 - p), log = TRUE))
  ll <- ll + sum(dlnorm(bio.dat$Ctotsem1[year.Ctotsem1],
                        meanlog = log(Ctotsem1[year.Ctotsem1]), sdlog = 1/sqrt(400), log = TRUE))
  p <- C1sem2[year.CPsem2] / Ctotsem2[year.CPsem2]
  ll <- ll + sum(dbeta(bio.dat$CPsem2[year.CPsem2],
                       shape1 = exp(xicatch)*p, shape2 = exp(xicatch)*(1 - p), log = TRUE))
  ll <- ll + sum(dlnorm(bio.dat$Ctotsem2[year.Ctotsem2],
                        meanlog = log(Ctotsem2[year.Ctotsem2]), sdlog = 1/sqrt(400), log = TRUE))
  # recruitment index: power relationship on the log scale (slope = krobs)
  ll <- ll + sum(dlnorm(bio.dat$Robs[year.Robs],
                        meanlog = logqrobs + krobs*log(R[year.Robs]),
                        sdlog = 1/sqrt(psirobs), log = TRUE))
  # growth rates: normal around the common G1/G2 with shared precision psig
  ll <- ll + sum(dnorm(bio.dat$G1[year.G1], mean = G1, sd = 1/sqrt(psig), log = TRUE))
  ll <- ll + sum(dnorm(bio.dat$G2[year.G2], mean = G2, sd = 1/sqrt(psig), log = TRUE))
  ll
}
################################################################################
################################################################################
|
7a22041403a4824c92fe57c9ccf5278d7898ba82 | caabc947bd905f4b17d4c0c0de9ff1db532c14c5 | /man/Merging.Rd | f72dcb9874e8b6ab4c19f48ba64388f7dbd7e451 | [] | no_license | tallulandrews/TreeOfCells | a97b38eaa5839dcddf9c30761b7250b8d104382a | 309fb1fa892ed02d2de0acce74ae270d01b3b1fc | refs/heads/master | 2021-05-12T01:51:36.727639 | 2020-04-14T17:13:43 | 2020-04-14T17:13:43 | 117,569,510 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,549 | rd | Merging.Rd | \name{merge_lists}
\alias{merge_lists}
\alias{merge_profiles}
\alias{merge_across_protocols}
\title{Merge Profiles}
\description{
Methods for merging profiles across cell-types, and experimental protocols.
}
\usage{
merge_across_protocols(merged_profile_list, norm_func=scaled_regression, fs_func="none")
merge_profiles(list_of_profiles)
merge_lists(list1, list2, name1="A", name2="B")
}
\arguments{
\item{merged_profile_list}{a named list of output from merge_profiles, one item for each different experimental protocol (e.g. Chromium, Smartseq, Cellseq).}
\item{list_of_profiles}{a named list of output from ZINB fitting, one item for each cell-type.}
\item{norm_func}{normalization function to use}
\item{fs_func}{feature selection function to use}
\item{list1}{first list}
\item{list2}{second list}
\item{name1}{the name of the first list, used as a prefix to avoid duplicate items.}
\item{name2}{the name of the second list, used as a prefix to avoid duplicate items.}
}
\details{
\code{merge_lists} merges together two lists ensuring all contents of each list are present in the final list.
\code{merge_profiles} reorganizes fitted ZINB parameters into a table of mus, rs, and ds where the columns are each cell-type, and rows are each gene.
\code{merge_across_protocols} performs normalization and feature selection on profiles from each protocol separately then merges them together into a single set of mus, rs, and ds matrices. Reported features are detected in at least two of the protocols.
}
\value{
\code{merge_lists} a list containing all contents of each input list.
\code{merge_profiles} a list of mus, rs, ds and Ns, the parameters of the ZINB across all genes (rows) and cell-types (columns), Ns are the number of cells in each cell-type.
\code{merge_across_protocols} a list of two items:
merged_profiles (list of parameters across all cell-types & protocols)
features (consensus features from at least 2 protocols).
}
\examples{
set.seed(1010)
counts <- matrix(rnbinom(5000*100, mu=30, size=0.5), ncol=100);
fits1 <- fit_NB_to_matrix(counts[,1:50]);
fits2 <- fit_NB_to_matrix(counts[,51:75]);
fits3 <- fit_NB_to_matrix(counts[,76:100]);
merged_profiles <- merge_profiles(list(celltype1=fits1, celltype2=fits2, celltype3=fits3));
cross_protocol <- merge_across_protocols(list(X10=merged_profiles, SS2=merged_profiles))
l1 <- list("hat"=1, "cat"=2, "pat"=3)
l2 <- list("dog"=3, "cat"=10, "horse"=4)
merge_l <- merge_lists(l1, l2, name1="rhyme", name2="animal")
}
\keyword{profiles, merging}
|
64d19caa37208eb3b45e1a4f568d0b5213b52b00 | 5788da1d6726711a21a5b710835e739c337248b7 | /pymontecarlo_casino2/casino2/int/RPO.R | c2e094842fa2d4e6cf3a797ae3c78cf8dadf3cd7 | [
"Apache-2.0"
] | permissive | pymontecarlo/pymontecarlo-casino2 | 23df0911da1161cea7e9394528faab85927fd696 | ccb9505a49ec2b8c108691a5e930441b1a9ddcb4 | refs/heads/master | 2022-01-14T00:31:53.346316 | 2022-01-03T14:23:31 | 2022-01-03T14:23:31 | 41,909,147 | 0 | 2 | NOASSERTION | 2021-12-29T16:35:26 | 2015-09-04T10:01:05 | Python | UTF-8 | R | false | false | 130 | r | RPO.R | version https://git-lfs.github.com/spec/v1
oid sha256:3740ce407a20e211da38576bf4a1915be1555e353729a5233a5eca6053dbc6fa
size 20800
|
02908f59b9d834d4beb6e84e841635ab1d64fbef | 4d848e9a52f6efa7c5d34fe414116cc9d7847924 | /simkim.R | a7e0d552dcd732bf6f9439d0ce81e2d0d8f4d931 | [] | no_license | kimkimroll/Polio-environmental-surveillance-algorithm-simulation | 0c9c399facb424aec8fd5492e5cc2ea12e457a10 | 689030367e72a2dce2edc8852bc39d1e25bef217 | refs/heads/main | 2023-04-01T14:06:10.230421 | 2021-04-08T15:47:03 | 2021-04-08T15:47:03 | 355,964,680 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,488 | r | simkim.R | setwd("//cdc.gov/project/CCID_NCIRD_DVD_PPLB/_PMDDL/Kim/R/simulation")
library(tidyverse)
library(readr)
library(janitor)
# Simulate environmental-surveillance cell culture for n.sims samples:
# three serial passages of six flasks each (positive results may stay positive
# on the next passage; negatives stay negative), then an ITD flag per flask
# for passage-3 positives, and per-sample totals of ITD-positive flasks.
# Returns a data frame with one row per sample (DASH id), the per-passage and
# ITD results, and the overall/per-flask positive counts.
# NOTE(review): the argument `rw` is never used inside the function; results
# depend only on the RNG state. Requires dplyr to be attached.
envs<-function(rw){
#number of simulations
n.sims<-10
#randomly assigning positive or negative results to each assay
# Passage 1: flasks 1-5 are positive with probability 0.3; flask 6 uses
# prob = c(0.7, 0.3), i.e. positive with probability 0.7 — presumably a
# deliberately "hotter" flask, but TODO confirm it is not a typo.
p1f1<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.3, 0.7))
p1f2<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.3, 0.7))
p1f3<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.3, 0.7))
p1f4<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.3, 0.7))
p1f5<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.3, 0.7))
p1f6<-sample(c("positive", "negative"), n.sims, replace = TRUE, prob = c(0.7, 0.3))
#bind all results together by column
# cbind() of character vectors + as.data.frame gives character columns
new1<-as.data.frame(cbind(p1f1, p1f2, p1f3, p1f4, p1f5, p1f6))
print(new1)
#conditioning based on pos/neg flasks from 1st passage
#if NEG
# Passage 2: a flask that was negative at passage 1 stays negative.
new1$p2f1[new1$p1f1 == "negative"]<-"negative"
new1$p2f2[new1$p1f2 == "negative"]<-"negative"
new1$p2f3[new1$p1f3 == "negative"]<-"negative"
new1$p2f4[new1$p1f4 == "negative"]<-"negative"
new1$p2f5[new1$p1f5 == "negative"]<-"negative"
new1$p2f6[new1$p1f6 == "negative"]<-"negative"
# A passage-1-positive flask stays positive with probability 0.7.
# NOTE(review): sample(..., 1, ...) draws a SINGLE value that is recycled
# across every positive row, so all samples share the same passage-2 outcome
# for a given flask — confirm this is intended (independent per-sample draws
# would need n = number of positive rows instead of 1).
new1$p2f1[new1$p1f1 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new1$p2f2[new1$p1f2 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new1$p2f3[new1$p1f3 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new1$p2f4[new1$p1f4 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new1$p2f5[new1$p1f5 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new1$p2f6[new1$p1f6 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2<-as.data.frame(cbind(new1))
print(new2)
# Passage 3: same carry-forward rule as passage 2 (negatives stay negative,
# positives remain positive with one shared 0.7-probability draw per flask).
new2$p3f1[new2$p2f1 == "negative"]<-"negative"
new2$p3f2[new2$p2f2 == "negative"]<-"negative"
new2$p3f3[new2$p2f3 == "negative"]<-"negative"
new2$p3f4[new2$p2f4 == "negative"]<-"negative"
new2$p3f5[new2$p2f5 == "negative"]<-"negative"
new2$p3f6[new2$p2f6 == "negative"]<-"negative"
new2$p3f1[new2$p2f1 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2$p3f2[new2$p2f2 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2$p3f3[new2$p2f3 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2$p3f4[new2$p2f4 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2$p3f5[new2$p2f5 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new2$p3f6[new2$p2f6 == "positive"]<-sample(c("positive", "negative"), 1, replace = TRUE, prob = c(0.7, 0.3))
new3<-as.data.frame(cbind(new2))
print(new3)
#if ITD
# ITD flag: a flask goes to ITD ("yes") iff it is still positive after
# passage 3.
new3$itd1[new2$p3f1 == "positive"]<-"yes"
new3$itd2[new2$p3f2 == "positive"]<-"yes"
new3$itd3[new2$p3f3 == "positive"]<-"yes"
new3$itd4[new2$p3f4 == "positive"]<-"yes"
new3$itd5[new2$p3f5 == "positive"]<-"yes"
new3$itd6[new2$p3f6 == "positive"]<-"yes"
new3$itd1[new2$p3f1 == "negative"]<-"no"
new3$itd2[new2$p3f2 == "negative"]<-"no"
new3$itd3[new2$p3f3 == "negative"]<-"no"
new3$itd4[new2$p3f4 == "negative"]<-"no"
new3$itd5[new2$p3f5 == "negative"]<-"no"
new3$itd6[new2$p3f6 == "negative"]<-"no"
#1 is negative
#2 is positive
#add DASH column to data
# DASH = character sample identifier (row number) used as the join key below
new4<-new3 %>% mutate(DASH = row_number(),
DASH = as.character(DASH))
# Per-sample counts: total ITD-positive flasks plus a 0/1 indicator per flask
total_itd <- new4 %>% group_by(DASH) %>%
mutate(itd1 = as.character(itd1),
itd2 = as.character(itd2),
itd3 = as.character(itd3),
itd4 = as.character(itd4),
itd5 = as.character(itd5),
itd6 = as.character(itd6)) %>%
summarise("overall" = sum(itd1 == 'yes',
itd2 == 'yes',
itd3 == 'yes',
itd4 == 'yes',
itd5 == 'yes',
itd6 == 'yes', na.rm = TRUE),
"flask1" = sum(itd1 == 'yes', na.rm = TRUE),
"flask2" = sum(itd2 == 'yes', na.rm = TRUE),
"flask3" = sum(itd3 == 'yes', na.rm = TRUE),
"flask4" = sum(itd4 == 'yes', na.rm = TRUE),
"flask5" = sum(itd5 == 'yes', na.rm = TRUE),
"flask6" = sum(itd6 == 'yes', na.rm = TRUE),
)
# attach the counts back onto the per-sample results
new5<-left_join(new4, total_itd, by = "DASH")
print(new5)
output<-as.data.frame(cbind(new5))
print(output)
return(output)
}
# Run simulation 1 and save the per-sample cell-culture/ITD results.
# NOTE(review): `rw` is never defined; this only works because envs() never
# evaluates its argument (R's lazy evaluation). The first call's result is
# discarded (printed side effects only) and the second call re-runs the
# simulation with fresh random draws.
envs(rw)
rw1<-envs(rw)
#save sim1
write.csv(rw1, "//cdc.gov/project/CCID_NCIRD_DVD_PPLB/_PMDDL/Kim/R/simulation/rw1.csv", row.names = FALSE)
#SIM 2 for ITD ct values
#grab POS flask from cell culture
#sanity check totals
# `total` is the grand total of ITD-positive flasks across all samples
print(rw1)
total = sum(rw1$overall)
# Simulation 2: for every ITD-positive flask from simulation 1, simulate
# rRT-PCR assay outcomes (Sabin 1/2/3, WPV1/3, Qb control, pan-PV, PV2 and
# their repeat/secondary runs) and assign random Ct values to positives.
# Returns one row per positive flask with the assay results and Ct values.
# NOTE(review): the argument `rw2` is immediately overwritten and the
# function actually reads the GLOBAL `rw1` produced by envs() — the
# parameter has no effect; confirm whether it should be used instead.
# Requires dplyr/tidyr to be attached.
ct<-function(rw2){
#number of simulations by positive flasks
# Reshape rw1 to long format and keep only ITD-positive flasks (flask == 1)
rw2 <- rw1 %>%
group_by(DASH) %>%
select(DASH, flask1, flask2, flask3, flask4, flask5, flask6) %>%
gather(rw1, "flask", flask1:flask6) %>%
mutate(flask = as.numeric(flask)) %>%
filter(flask == '1')# %>%
# n.flasks = number of positive flasks = number of assay draws below
n.flasks<-nrow(rw2[rw2$flask == '1',])
n.flasks<-as.numeric(n.flasks)
rw2 <- rw2 %>% select(-flask)
#randomly assigning positive or negative results to each assay
# NOTE(review): sabin3 uses prob = c(0.3, 0.3); sample() normalises the
# weights, so this is effectively 50/50 — likely a typo for c(0.3, 0.7),
# TODO confirm. WPV3's c(0.0, 1) makes it always negative.
rw2$sabin1<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.7, 0.3))
rw2$sabin2<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.035, 0.965))
rw2$sabin3<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.3, 0.3))
rw2$WPV1<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.018, 0.982))
rw2$Qb<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.999, 0.001))
rw2$WPV3<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.0, 1))
rw2$PVsec<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.7, 0.3))
rw2$PV2sec<-sample(c("positive", "negative"), n.flasks, replace = TRUE, prob = c(0.0001, 0.9999))
#bind all results together by column
#new1<-as.data.frame(cbind(rw2, rw1, sabin1, sabin2, sabin3, WPV1, Qb, WPV3, PVsec, PV2sec))
print(rw2)
new1<-rw2
#define ct values for positive results. randomly generated by assay
# Each assay gets a uniform random integer Ct in an assay-specific range;
# negatives get the STRING "NA", which coerces the whole column to character.
new1$Sabin1_ct<-sample(17:37, size = n.flasks, replace = TRUE)
new1$Sabin1_ct[new1$sabin1 == "negative"]<-"NA"
new1$Sabin2_ct<-sample(20:34, size = n.flasks, replace = TRUE)
new1$Sabin2_ct[new1$sabin2 == "negative"]<-"NA"
new1$Sabin3_ct<-sample(16:35, size = n.flasks, replace = TRUE)
new1$Sabin3_ct[new1$sabin3 == "negative"]<-"NA"
new1$WPV1_ct<-sample(15:33, size = n.flasks, replace = TRUE)
new1$WPV1_ct[new1$WPV1 == "negative"]<-"NA"
new1$QbCt<-sample(16:32, size = n.flasks, replace = TRUE)
new1$QbCt[new1$Qb == "negative"]<-"NA"
new1$WPV3_ct<-sample(20:31, size = n.flasks, replace = TRUE)
new1$WPV3_ct[new1$WPV3 == "negative"]<-"NA"
#conditioning PanPV based on each assay's result
# pan-PV call: positive if any typing assay has Ct <= 32, else negative.
# NOTE(review): because the *_ct columns were coerced to character above,
# these `> 32` / `<= 32` comparisons are lexicographic STRING comparisons
# (e.g. "9" > "32" is TRUE) — almost certainly not intended; verify against
# the real algorithm (columns should stay numeric with NA_real_).
new1$PV[new1$WPV1_ct > 32 | new1$Sabin1_ct > 32 | new1$Sabin2_ct > 32 | new1$Sabin3_ct > 32 | new1$WPV3_ct > 32]<-"negative"
new1$PV[new1$WPV1_ct <= 32 | new1$Sabin1_ct <= 32 | new1$Sabin2_ct <= 32 | new1$Sabin3_ct <= 32 | new1$WPV3_ct <= 32]<-"positive"
# Final pan-PV call: positive if either the first run or the secondary run
# is positive (logical OR of PV and PVsec)
new1$PVthir[new1$PV == "negative" & new1$PVsec == "negative"]<-"negative"
new1$PVthir[new1$PV == "negative" & new1$PVsec == "positive"]<-"positive"
new1$PVthir[new1$PV == "positive" & new1$PVsec == "positive"]<-"positive"
new1$PVthir[new1$PV == "positive" & new1$PVsec == "negative"]<-"positive"
#1 is negative
#2 is positive
#if PanPV positive, then assign Ct value
new1$panPV_ct<-sample(17:39, size = n.flasks, replace = TRUE)
new1$panPV_ct[new1$PVthir == "negative"]<-"NA"
#conditioning PV2 based on Sabin 2 Ct and result
# PV2 mirrors the Sabin 2 result; final PV2 call is the OR of PV2 and PV2sec
new1$PV2[new1$sabin2 == "negative"]<-"negative"
new1$PV2[new1$sabin2 == "positive"]<-"positive"
new1$PV2thir[new1$PV2sec == "negative" & new1$PV2 == "negative"]<-"negative"
new1$PV2thir[new1$PV2sec == "positive" & new1$PV2 == "positive"]<-"positive"
new1$PV2thir[new1$PV2sec == "positive" & new1$PV2 == "negative"]<-"positive"
new1$PV2thir[new1$PV2sec == "negative" & new1$PV2 == "positive"]<-"positive"
#if Sabin 2 positive, assign Ct value to PV2
new1$PV2_ct<-sample(20:31, size = n.flasks, replace = TRUE)
new1$PV2_ct[new1$PV2thir == "negative"]<-"NA"
#add DASH column to data
new2<-new1 #%>% mutate(DASH = row_number())
print(new2)
# Assemble the output table; cbind on mixed types yields character columns
output<-as.data.frame(cbind(new2$DASH, new2$rw1, new2$QbCt, new2$Sabin1_ct, new2$Sabin2_ct, new2$Sabin3_ct, new2$WPV1_ct, new2$PV2_ct, new2$WPV3_ct, new2$PV, new2$PVsec, new2$PVthir, new2$panPV_ct, new2$PV2, new2$PV2sec, new2$PV2thir, new2$PV2_ct))
colnames(output)<-c("DASH", "Flask", "QbCt", "Sabin1_ct", "Sabin2_ct", "Sabin3_ct", "WPV1_ct", "PV2_ct", "WPV3_ct", "PV1st", "PVsec", "PVthir", "panPV_ct", "PV2", "PV2sec", "PV2thir", "PV2_ct")
print(output)
return(output)
}
# Run simulation 2 (assay Ct values per positive flask) and save the result.
# NOTE(review): as with envs(), the first call's result is discarded and the
# argument `rw2` is ignored inside ct() (it reads global rw1); the comment
# below says "sim1" but this writes the simulation-2 output.
ct(rw2)
rw2<-ct(rw2)
#save sim1
write.csv(rw2, "//cdc.gov/project/CCID_NCIRD_DVD_PPLB/_PMDDL/Kim/R/simulation/rw2.csv", row.names = FALSE)
|
815963371e4e5c045d08c99215def24a4928ff50 | 70dd6507c11fa1ecabe84d781e56dbd4ee93097b | /R/t_test.R | ae150022d29b2fcbdb34ee40c50ef9062ee63107 | [] | no_license | jrpriceUPS/Math160UPS | 4604d17f810be65ccd1d2cd03e294da9c345ce4f | d0ff7cec6eda8d8da860d7d4b8d6c6fe9185683d | refs/heads/master | 2023-06-09T11:55:19.386088 | 2023-06-08T18:11:25 | 2023-06-08T18:11:25 | 190,257,509 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,020 | r | t_test.R | #' t-test
#'
#' This function asks you a sequence of questions in order to execute a t-test. It finds a confidence interval and a p-value, produces a plot, and indicates how this could be queried directly from R.
#' @export
#' @examples
#'
#'
#'
#'
#' > x = c(1, 2, 3, 4, 5, 6, 7)
#' > t_test()
#' Do you have a single population or are you comparing populations?
#' Possible answers are 'single' and 'comparing'.
#' single
#' Do you have the whole dataset or do you just have the statistics (mean, standard deviation)?
#' Possible answers are 'whole' or 'stats'.
#' whole
#' What is the name of your variable?
#' x
#' The statistics for your dataset are:
#' xbar = 4
#' s = 2.160247
#' n = 7
#' df = 7 - 1 = 6
#'
#' What is the theoretical mean you are testing against (called mu_0)?
#' (If you only want a confidence interval, type 'NA')
#' 3
#' What is your desired confidence level?
#' .95
#' Your t-statistic is:
#' t = (4-3)/(2.160247/sqrt(7)) = 1.224745
#'
#' The probability of getting this result or more extreme for xbar
#' if mu really is 3 is
#' p = 0.2665697
#'
#' You can get this result by typing:
#' 2*(1-pt(1.22474487139159,6))
#'
#'
#' The 95% confidence interval for the population mean is
#' 2.002105 < mu < 5.997895
#'
#' You can get this result by finding:
#' tstar = 1-qt((1-0.95)/2,6) = 2.446912
#'
#' and then calculating:
#' 4 - 2.446912 x 2.160247/sqrt(7) and 4 + 2.446912 x 2.160247/sqrt(7)
#'
#'
#' Or, since you have the whole dataset, you could just type:
#' t.test(x,mu = 3,conf.level = 0.95)
#'
#'
#'
#'> t_test()
#'Do you have a single population or are you comparing populations?
#' Possible answers are 'single' and 'comparing'.
#'single
#'Do you have the whole dataset or do you just have the statistics (mean, standard deviation)?
#' Possible answers are 'whole' or 'stats'.
#'stats
#'What is your sample mean?
#' 4
#'What is your sample standard deviation?
#' 2.16
#'What is your sample size?
#' 7
#'What is the theoretical mean you are testing against (called mu_0)?
#' (If you only want a confidence interval, type 'NA')
#'3
#'What is your desired confidence level?
#' .95
#'Your t-statistic is:
#' t = (4-3)/(2.16/sqrt(7)) = 1.224885
#'
#'The probability of getting this result or more extreme for xbar
#'if mu really is 3 is
#'p = 0.2665206
#'
#'You can get this result by typing:
#' 2*(1-pt(1.22488486623361,6))
#'
#'
#'The 95% confidence interval for the population mean is
#'2.002333 < mu < 5.997667
#'
#'You can get this result by finding:
#' tstar = 1-qt((1-0.95)/2,6) = 2.446912
#'
#'and then calculating:
#' 4 - 2.446912 x 2.16/sqrt(7) and 4 + 2.446912 x 2.16/sqrt(7)
#'
#'
#'
#'
#'
#'
#' > t_test()
#' Do you have a single population or are you comparing populations?
#' Possible answers are 'single' and 'comparing'.
#' comparing
#' Do you have the whole dataset or do you just have the statistics (mean, standard deviation)?
#' Possible answers are 'whole' or 'stats'.
#' whole
#' Is this a matched-pairs comparison in which the same subjects are measured twice?
#' yes
#' What is the name of the variable for the first set of measurements?
#' x
#' What is the name of the variable for the second set of measurements?
#' y
#' The statistics for your datasets are:
#' n = 7
#' xbar1 = 4
#' s1 = 2.160247
#'
#' xbar2 = 8.842857
#' s2 = 4.595236
#'
#' The statistics for the difference are:
#' xbar = 4.842857
#' s = 2.445988
#' n = 7
#' df = 7 - 1 = 6
#'
#' What is your desired confidence level?
#' .95
#' Your t-statistic is:
#' t = 4.842857/(2.445988/sqrt(7)) = 5.238372
#'
#' The probability of getting this result or more extreme for xbar2 - xbar1 if there really is no difference is
#' p = 0.001941435
#'
#' You can get this result by typing:
#' 2*(1-pt(5.23837230565063,6))
#'
#'
#' The 95% confidence interval for the difference in population means is
#' 2.580696 < mu2 - mu1 < 7.105019
#'
#' You can get this result by finding:
#' tstar = 1-qt((1-0.95)/2,6) = 2.446912
#'
#' and then calculating:
#' (8.84285714285714-4) - 2.446912 x 2.445988/sqrt(7) and (8.84285714285714-4) + 2.446912 x 2.445988/sqrt(7)
#'
#'
#' Or, since you have the whole dataset, you could just type:
#' t.test(y,x, paired = TRUE, conf.level = 0.95)
#'
#'
#'
#'
#'
#'
#' > t_test()
#' Do you have a single population or are you comparing populations?
#' Possible answers are 'single' and 'comparing'.
#' comparing
#' Do you have the whole dataset or do you just have the statistics (mean, standard deviation)?
#' Possible answers are 'whole' or 'stats'.
#' whole
#' Is this a matched-pairs comparison in which the same subjects are measured twice?
#' no
#' What is the name of the variable for the first set of measurements?
#' x
#' What is the name of the variable for the second set of measurements?
#' y
#' The statistics for your datasets are:
#' n1 = 7
#' xbar1 = 4
#' s1 = 2.160247
#'
#' n2 = 7
#' xbar2 = 8.842857
#' s2 = 4.595236
#'
#' The statistics for the difference are:
#' xbar = 4.842857
#' s = sqrt(2.160247^2/7 + 4.595236^2/7) = 1.919183
#' df = 8.5285
#'
#' What is your desired confidence level?
#' .95
#' Your t-statistic is:
#' t = (4.84285714285714)/(1.919183) = 2.523395
#'
#' The probability of getting this result or more extreme for xbar2 - xbar1 if there really is no difference is
#' p = 0.03391985
#'
#' You can get this result by typing:
#' 2*(1-pt(2.52339452856832,8.52849965837585))
#'
#'
#' The 95% confidence interval for the difference in population means is
#' 0.4644978 < mu2 - mu1 < 9.221216
#'
#' You can get this result by finding:
#' tstar = 1-qt((1-0.95)/2,8.5285) = 2.281366
#'
#' and then calculating:
#' (8.84285714285714-4) - 2.281366 x 1.919183 and (8.84285714285714-4) + 2.281366 x 1.919183
#'
#'
#' Or, since you have the whole dataset, you could just type:
#' t.test(y,x, conf.level = 0.95)
# Interactive walkthrough of a one-sample or two-sample t-test.
#
# Prompts the user (via readline) for either raw data or summary statistics,
# then prints the t statistic, the two-sided p-value, the confidence interval,
# and the R commands that reproduce each result by hand.  Only two-sided tests
# and intervals are produced (sidedness is fixed to "both" below).
# Relies on conduct_t_test(), defined elsewhere in this file, to compute and
# describe the p-value.
t_test <- function(){
  # ---- one-sample vs two-sample -------------------------------------------
  cat("Are you executing a one-sample t-test or a two-sample t-test?\nPossible answers are 'one' and 'two'.\n")
  compare = readline()
  while(!(compare %in% c('one','1','One','two','2','Two'))){
    cat("Please choose either 'one' or 'two'.\n")
    compare = readline()
  }
  if(compare %in% c('one','1','One')){compare = "single"}
  if(compare %in% c('two','2','Two')){compare = "comparing"}
  # ---- raw data vs summary statistics -------------------------------------
  cat("Do you have the whole dataset or do you just have the statistics (mean, standard deviation)?\nPossible answers are 'whole' or 'stats'.\n")
  vec = readline()
  if(compare=="comparing"){
    if(vec=="stats"){
      # Two-sample test from user-supplied summary statistics.
      cat("What is the sample mean of the first set of measurements? \n")
      xbar1 = as.numeric(readline())
      cat("What is the sample mean of the second set of measurements? \n")
      xbar2 = as.numeric(readline())
      cat("What is the sample standard deviation of the first set of measurements? \n")
      s1 = as.numeric(readline())
      cat("What is the sample standard deviation of the second set of measurements? \n")
      s2 = as.numeric(readline())
      cat("What is the sample size of the first set of measurements? \n")
      n1 = as.numeric(readline())
      cat("What is the sample size of the second set of measurements? \n")
      n2 = as.numeric(readline())
      xbar = xbar2-xbar1
      # Pooled-style degrees of freedom; in the "whole" branch below the
      # Welch df from t.test() is used instead.
      df = n1+n2-2
      # Standard error of the difference in means (unpooled variances).
      s = sqrt(s1^2/n1+s2^2/n2)
      cat("The statistics for the difference are: ")
      cat("\n")
      cat(paste("xbar = ",format(xbar,scientific=FALSE)))
      cat("\n")
      cat(paste("s = sqrt(",format(s1,scientific=FALSE),"^2/",format(n1,scientific=FALSE)," + ",format(s2,scientific=FALSE),"^2/",format(n2,scientific=FALSE),") = ",format(s,scientific=FALSE),sep=""))
      cat("\n")
      cat(paste("df = ",format(n1,scientific=FALSE),"+",format(n2,scientific=FALSE),"- 2 = ",format(n1+n2-2,scientific=FALSE)))
      cat("\n")
      cat("\n")
    }
    if(vec=="whole"){
      # Two-sample test from the raw data; variables are looked up by name,
      # optionally in "dataframe$column" form.
      cat("What is the name of the variable for the first set of measurements? \n")
      varname1 = readline()
      cat("What is the name of the variable for the second set of measurements? \n")
      varname2 = readline()
      if(grepl("$", varname1, fixed=TRUE)){
        names = strsplit(varname1,"\\$")
        frame = get(names[[1]][1])
        data1 = frame[[names[[1]][2]]]
      } else{
        data1 = get(varname1)}
      data1 = data1[!is.na(data1)]
      xbar1 = mean(data1)
      s1 = sd(data1)
      n1 = length(data1)
      if(grepl("$", varname2, fixed=TRUE)){
        names = strsplit(varname2,"\\$")
        # fix: was get(names[[1]]), which passed the full c(frame, column)
        # character vector to get() and broke "dataframe$column" input here
        frame = get(names[[1]][1])
        data2 = frame[[names[[1]][2]]]
      } else{
        data2 = get(varname2)}
      data2 = data2[!is.na(data2)]
      xbar2 = mean(data2)
      s2 = sd(data2)
      n2 = length(data2)
      xbar = mean(data2)-mean(data1)
      s = sqrt(sd(data1)^2/n1 + sd(data2)^2/n2)
      # Use t.test() only to obtain the Welch-Satterthwaite df.
      test_result = t.test(data1,data2)
      df = as.numeric(test_result$parameter)
      cat("The statistics for your datasets are: ")
      cat("\n")
      cat(paste("n1 = ",format(n1,scientific=FALSE)))
      cat("\n")
      cat(paste("xbar1 = ",format(xbar1,scientific=FALSE)))
      cat("\n")
      cat(paste("s1 = ",format(s1,scientific=FALSE)))
      cat("\n")
      cat("\n")
      cat(paste("n2 = ",format(n2,scientific=FALSE)))
      cat("\n")
      cat(paste("xbar2 = ",format(xbar2,scientific=FALSE)))
      cat("\n")
      cat(paste("s2 = ",format(s2,scientific=FALSE)))
      cat("\n")
      cat("\n")
      cat("The statistics for the difference are: ")
      cat("\n")
      cat(paste("xbar = ",format(xbar,scientific=FALSE)))
      cat("\n")
      cat(paste("s = sqrt(",format(s1,scientific=FALSE),"^2/",format(n1,scientific=FALSE)," + ",format(s2,scientific=FALSE),"^2/",format(n2,scientific=FALSE),") = ",format(s,scientific=FALSE),sep=""))
      cat("\n")
      cat(paste("df = ",format(df,scientific=FALSE),sep=""))
      cat("\n")
      cat("\n")
    }
    cat("What is your desired confidence level? \n")
    conf_level = as.numeric(readline())
    while(conf_level<0 | conf_level>1){cat('Please choose a confidence level between 0 and 1\n')
      cat("What is your desired confidence level? \n")
      # fix: was readline("What is your desired confidence level? "), which
      # printed the question twice; now matches the one-sample branch below
      conf_level = as.numeric(readline())
    }
    # Only two-sided tests are supported.
    sidedness = "both"
    if(sidedness!="NA"){
      t = xbar/s
      out = conduct_t_test(t,df,sidedness)}
    # new version with no one-sided confidence intervals:
    tstar = -qt((1-conf_level)/2,df)
    thing_to_type2 = paste("tstar = 1-qt((1-",format(conf_level,scientific=FALSE),")/2,",format(df,scientific=FALSE),") = ",format(tstar,scientific=FALSE),sep="")
    thing_to_type3 = paste("(",toString(xbar2),"-",toString(xbar1),") - ",format(tstar,scientific=FALSE)," x ",format(s,scientific=FALSE),sep="")
    thing_to_type4 = paste("(",toString(xbar2),"-",toString(xbar1),") + ",format(tstar,scientific=FALSE)," x ",format(s,scientific=FALSE),sep="")
    lower = xbar - tstar*s
    upper = xbar + tstar*s
    if(sidedness!="NA"){
      sidedness_type = paste("The probability of getting this result or more extreme for xbar2 - xbar1 if there really is no difference is",sep="")
      cat("Your t-statistic is:")
      cat("\n")
      cat(paste("t = (",toString(xbar),")/(",format(s,scientific=FALSE),") = ",format(t,scientific=FALSE),sep=""))
      cat("\n")
      cat("\n")
      cat(sidedness_type)
      cat("\n")
      cat(paste("p = ",format(out$prob,scientific=FALSE)))
      cat("\n")
      cat("\n")
      cat("You can get this result by typing:")
      cat("\n")
      cat(out$p_value_type)
      cat("\n")
      cat("\n")
      cat("\n")}
    cat(paste("The ",toString(conf_level*100),"% confidence interval for the difference in population means is",sep=""))
    cat("\n")
    cat(paste(format(lower,scientific=FALSE)," < mu2 - mu1 < ",format(upper,scientific=FALSE)))
    cat("\n")
    cat("\n")
    cat("You can get this result by finding:")
    cat("\n")
    cat(thing_to_type2)
    cat("\n")
    cat("\n")
    cat("and then calculating:")
    cat("\n")
    cat(paste(thing_to_type3," and ",thing_to_type4))
    if(vec=="whole"){
      cat("\n")
      cat("\n")
      cat("\n")
      cat("Or, since you have the whole dataset, you could just type:")
      cat("\n")
      cat(paste("t.test(",varname2,",",varname1,", conf.level = ", toString(conf_level), ")",sep=""))
    }
  }
  if(compare=="single"){
    if(vec=="stats"){
      # One-sample test from user-supplied summary statistics.
      cat("What is your sample mean? \n")
      xbar = as.numeric(readline())
      cat("What is your sample standard deviation? \n")
      s = as.numeric(readline())
      cat("What is your sample size? \n")
      n = as.numeric(readline())
      df = n-1
    }
    if(vec=="whole"){
      # One-sample test from the raw data, looked up by name as above.
      cat("What is the name of your variable? \n")
      varname = readline()
      if(grepl("$", varname, fixed=TRUE)){
        names = strsplit(varname,"\\$")
        frame = get(names[[1]][1])
        data = frame[[names[[1]][2]]]
      } else{
        data = get(varname)}
      data = data[!is.na(data)]
      xbar = mean(data)
      s = sd(data)
      n = length(data)
      # fix: df was previously only set inside the mu_0 != "NA" branch below,
      # so asking for just a confidence interval from raw data failed at qt()
      df = n-1
      cat("The statistics for your dataset are: ")
      cat("\n")
      cat(paste("xbar = ",format(xbar,scientific=FALSE)))
      cat("\n")
      cat(paste("s = ",format(s,scientific=FALSE)))
      cat("\n")
      cat(paste("n = ",format(n,scientific=FALSE)))
      cat("\n")
      cat(paste("df = ",format(n,scientific=FALSE),"- 1 = ",format(n-1,scientific=FALSE)))
      cat("\n")
      cat("\n")
    }
    cat("What is the theoretical mean you are testing against (called mu_0)?\n")
    cat("(If you only want a confidence interval, type 'NA')\n")
    mu_0 = readline()
    if(mu_0!="NA"){mu_0 = as.numeric(mu_0)}
    cat("What is your desired confidence level? \n")
    conf_level = as.numeric(readline())
    while(conf_level<0 | conf_level>1){cat('Please choose a confidence level between 0 and 1\n')
      cat("What is your desired confidence level? \n")
      conf_level = as.numeric(readline())
    }
    # Only two-sided tests are supported.
    sidedness = "both"
    if(mu_0!="NA"){
      t = (xbar - mu_0)/(s/sqrt(n))
      df = n-1
      out = conduct_t_test(t,df,sidedness)}
    # new version with no one-sided confidence intervals
    # (the old commented-out one-sided interval code was removed here)
    tstar = -qt((1-conf_level)/2,df)
    thing_to_type2 = paste("tstar = 1-qt((1-",format(conf_level,scientific=FALSE),")/2,",format(df,scientific=FALSE),") = ",format(tstar,scientific=FALSE),sep="")
    thing_to_type3 = paste(toString(xbar)," - ",format(tstar,scientific=FALSE)," x ",format(s,scientific=FALSE),"/sqrt(",toString(n),")",sep="")
    thing_to_type4 = paste(toString(xbar)," + ",format(tstar,scientific=FALSE)," x ",format(s,scientific=FALSE),"/sqrt(",toString(n),")",sep="")
    lower = xbar - tstar*s/sqrt(n)
    upper = xbar + tstar*s/sqrt(n)
    if(mu_0!="NA"){
      cat("Your t-statistic is:")
      cat("\n")
      cat(paste("t = (",format(xbar,scientific=FALSE),"-",format(mu_0,scientific=FALSE),")/(",format(s,scientific=FALSE),"/sqrt(",format(n,scientific=FALSE),")) = ",format(t,scientific=FALSE),sep=""))
      cat("\n")
      cat("\n")
      cat(paste("The probability of getting this result or more extreme for xbar\nif mu really is ",toString(mu_0)," is",sep=""))
      cat("\n")
      cat(paste("p = ",format(out$prob,scientific=FALSE)))
      cat("\n")
      cat("\n")
      cat("You can get this result by typing:")
      cat("\n")
      cat(out$p_value_type)
      cat("\n")
      cat("\n")
      cat("\n")}
    cat(paste("The ",toString(conf_level*100),"% confidence interval for the population mean is",sep=""))
    cat("\n")
    cat(paste(format(lower,scientific=FALSE)," < mu < ",format(upper,scientific=FALSE)))
    cat("\n")
    cat("\n")
    cat("You can get this result by finding:")
    cat("\n")
    cat(thing_to_type2)
    cat("\n")
    cat("\n")
    cat("and then calculating:")
    cat("\n")
    cat(paste(thing_to_type3," and ",thing_to_type4))
    if(vec=="whole"){
      cat("\n")
      cat("\n")
      cat("\n")
      cat("Or, since you have the whole dataset, you could just type:")
      cat("\n")
      if(sidedness=="both"){
        cat(paste("t.test(",varname,",mu = ",format(mu_0,scientific=FALSE),",conf.level = ",toString(conf_level),")",sep=""))
      }
    }
  }
}
|
dafb7ff95ab29783bebb20c792b0dc74cd793778 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ahnr/examples/fit.Rd.R | 19d32370cf40fdfa29218ca5d6dbcc08456743c4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 428 | r | fit.Rd.R | library(ahnr)
### Name: fit
### Title: fit
### Aliases: fit
### ** Examples

# Create data: a noisy piecewise signal (atan / sin / cos segments) on [-1, 1].
x <- 2 * runif(1000) - 1
x <- sort(x)
# fix: the first segment used runif(100), which silently recycled a 100-value
# noise vector across the 1000 points; use runif(1000) like the other two
# segments so every point gets independent noise.
y <- (x < 0.1) * (0.05 * runif(1000) + atan(pi*x)) +
  (x >= 0.1 & x < 0.6) * (0.05 * runif(1000) + sin(pi*x)) +
  (x >= 0.6) * (0.05 * runif(1000) + cos(pi*x))
# Create Sigma list: inputs X and outputs Y as single-column data frames.
Sigma <- list(X = data.frame(x = x), Y = data.frame(y = y))
# Train AHN (artificial hydrocarbon network); see ?ahnr::fit for the meaning
# of the three numeric arguments -- TODO confirm against the package docs.
ahn <- fit(Sigma, 5, 0.01, 500)
|
5c52f7a8059bfe91119a2af7199ea85e62eb075f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NominalLogisticBiplot/examples/plot.nominal.logistic.biplot.Rd.R | ae5a312cd3548ee57914689997cbc060fa07f835 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 780 | r | plot.nominal.logistic.biplot.Rd.R | library(NominalLogisticBiplot)
### Name: plot.nominal.logistic.biplot
### Title: Graphical representation of a Nominal Logistic Biplot.
### Aliases: plot.nominal.logistic.biplot
### Keywords: plot
### ** Examples

# Fit a nominal logistic biplot to the HairColor data (EM estimation with a
# small ridge penalization), then plot it on the first factorial plane.
data(HairColor)

nlbo <- NominalLogisticBiplot(
  HairColor,
  sFormula = NULL,
  numFactors = 2,
  method = "EM",
  penalization = 0.2,
  show = FALSE
)

# Per-individual and per-variable plotting sizes, symbols and colours.
ind_cex <- c(0.6, 0.7, 0.5, 0.4, 0.5, 0.6, 0.7)
ind_pch <- c(1, 2, 3, 4, 5, 6, 7)
var_pch <- c(1, 2, 3, 4, 5)
var_col <- c("red", "black", "yellow", "blue", "green")

plot(
  nlbo,
  QuitNotPredicted = TRUE, ReestimateInFocusPlane = TRUE,
  planex = 1, planey = 2, proofMode = TRUE, LabelInd = TRUE,
  AtLeastR2 = 0.01, xlimi = -1.5, xlimu = 1.5, ylimi = -1.5, ylimu = 1.5,
  linesVoronoi = TRUE, SmartLabels = FALSE,
  PlotInd = TRUE, CexInd = ind_cex, PchInd = ind_pch, ColorInd = "black",
  PlotVars = TRUE, LabelVar = TRUE, PchVar = var_pch, ColorVar = var_col,
  ShowResults = TRUE
)
|
903efbf8fe81aa24abaef55b7e9d563da31a5378 | bf3b28de48464cb180928a2333b805d3ea8d97aa | /man/read_sea_csv.Rd | 5cea20c92b1f106d12729b02a82bc15f5e35d24e | [
"MIT"
] | permissive | e-leib/aceR | afb39e862862cf8486fb3412ba2a9607e2f95058 | f056aeae68a15a7489c5fe2d00ec539142396b8c | refs/heads/master | 2020-05-18T02:25:06.892011 | 2019-05-05T18:05:27 | 2019-05-05T18:05:27 | 183,947,083 | 0 | 0 | MIT | 2019-04-28T18:57:06 | 2019-04-28T18:57:06 | null | UTF-8 | R | false | true | 608 | rd | read_sea_csv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load-sea.R
\name{read_sea_csv}
\alias{read_sea_csv}
\title{Because the "Reading Fluency" module has some cells with unquoted commas,
the usual read.csv delimiter guessing would split
one cell into two, creating too many columns for just a few rows}
\usage{
read_sea_csv(file)
}
\description{
Because the "Reading Fluency" module has some cells with unquoted commas,
the usual read.csv delimiter guessing would split
one cell into two, creating too many columns for just a few rows
}
\keyword{internal}
|
db59f868a0db8fe92cfe26b4bfc09a747a21a081 | 5dddaf7f6939fd7e257753847cc1ebdfc169af0d | /R/ChapmanMR.R | f33e599479372c19526457e53b6c5ddba3ce2638 | [] | no_license | KevinSee/SiteAbundances | 57c2359a135b23e14681438eab1e288dce88f452 | 483dc6c2402c368e259f2bc0528084cedb752b9e | refs/heads/master | 2021-01-22T14:33:10.084752 | 2015-07-01T18:51:48 | 2015-07-01T18:51:48 | 11,206,391 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 891 | r | ChapmanMR.R | ###############################################################
# Modified Lincoln-Petersen (Chapman) estimator: abundance from a single
# mark / single recapture mark-recapture experiment.
#
# data      - data.frame (or object coercible to one) whose first three
#             columns are, in order:
#               M = number marked on the first occasion,
#               C = number captured on the second occasion,
#               R = number of recaptures (marked animals among C).
# rmInvalid - if TRUE (default), set N.hat / N.hat.SE to NA for rows that
#             fail the Robson & Regier validity criterion M * C > 4 * N.hat.
#
# Returns a data.frame with one row per input row:
#   N.hat    - Chapman's bias-corrected abundance estimate,
#   N.hat.SE - standard error of N.hat,
#   p.hat    - estimated capture probability R / M,
#   Valid    - logical, Robson & Regier criterion.
ChapmanMR <- function(data, rmInvalid = TRUE) {
  if (!is.data.frame(data)) data <- as.data.frame(data)
  names(data) <- c("M", "C", "R")
  # Chapman's estimator (M+1)(C+1)/(R+1) - 1 stays finite even when R = 0.
  N.hat <- with(data, ((M + 1) * (C + 1)) / (R + 1) - 1)
  N.hat.SE <- with(data, sqrt(((M + 1) * (C + 1) * (M - R) * (C - R)) / ((R + 1)^2 * (R + 2))))
  p.hat <- with(data, R / M)
  # Robson & Regier criterion for a valid abundance estimate.
  Valid <- with(data, (M * C)) > (N.hat * 4)
  # Alternative criterion considered previously:
  # Valid = (data$M * data$C) > (N.hat * 4) | data$R >= 7
  if (rmInvalid) {
    N.hat[!Valid] <- NA
    N.hat.SE[!Valid] <- NA
  }
  data.frame(N.hat = N.hat, N.hat.SE = N.hat.SE, p.hat = p.hat, Valid = Valid)
}
55de20c44931eeac83c70bb878b85c2aaf85ec42 | 87d68604d034086d1dc2582e4c618f5e8c729748 | /script/ancestorPopulationStructure/db2SNP.R | ef5e689f4abbfc01aab4da6eab492179c0faf176 | [
"MIT"
] | permissive | naturalis/changing-invaders | 1618edb5b274cb0e6a8d8f6a3c48b213eadfb255 | c0a848991d32a76d75807694fd9aa23ef501b72a | refs/heads/master | 2023-04-07T01:53:12.213808 | 2022-06-23T15:53:26 | 2022-06-23T15:53:26 | 206,013,632 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,642 | r | db2SNP.R | #!/usr/bin/env Rscript
# changing invaders
# by david
# create 0.3k SNP bcf file
# so a structure analysis could be done over that data
#
# Reads a list of pre-selected SNP positions (SNPOUT, produced by one of the
# sed one-liners below), joins it against the EXULANS genotype table in a
# SQLite database, and writes the matching records out as "merge0.3k.vcf"
# (VCF text, despite the "bcf" in the header comment -- confirm intent).
library(RSQLite)
library(dplyr, warn.conflicts = FALSE)
# Shell commands (not R) that were used to generate the SNPOUT selection file:
# sed -En '/>.*voor/s/.(.*) .*/\1/p' SNP_V3.fasta > SNPOUT
# sed -En '1!s/,([^,]+).*/-\1/p' SNP_Vfinal.csv > SNPOUT
# sed -En '1!s/,([^,]+).*/-\1/p' SNP.csv > SNPOUT
# SNPOUT lines look like "<chromosome>-<position>"; split on "-".
SNPs <- read.table("SNP-files/SNPOUT", sep = "-", header = FALSE, col.names = c("CHR", "POS"))
# Open the genotype database (path resolved by glob) and stage the chosen
# SNPs as a table, so the join below is pushed down into SQLite.
eightnucleotide <- dbConnect(SQLite(), Sys.glob("/h*/d*n*/onenucleotide_eight.db"))
dbWriteTable(eightnucleotide, "CHOSEN", SNPs, overwrite = TRUE)
exulans <- tbl(eightnucleotide, "EXULANS")
about0.3kSNPsdb <- tbl(eightnucleotide, "CHOSEN")
# Keep only EXULANS rows whose CHROM/POS match a chosen SNP, then pull into R.
about0.3kSNPs <- inner_join(exulans, about0.3kSNPsdb, c("CHROM" = "CHR", "POS" = "POS")) %>% collect()
# Start the output file with the "##" meta lines of an existing VCF header.
writeLines(grep("##", readLines("vcfheader"), value = TRUE), "merge0.3k.vcf")
# Fill in the fixed VCF columns.
about0.3kSNPs$ID <- paste0("SNP_", 1:nrow(about0.3kSNPs))
about0.3kSNPs$FILTER <- "."
about0.3kSNPs$FORMAT <- "GT:PL"
# Reorder: the nine fixed VCF columns first, then the per-sample EXUL* columns.
about0.3kSNPs <- about0.3kSNPs[,c("CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT",
                                  grep("^EXUL", colnames(about0.3kSNPs), value = TRUE))]
# For every sample, paste its GT and PL columns into a single "GT:PL" field
# (assumes each *PL column has a matching *GT column -- TODO confirm schema).
about0.3kSNPs <- cbind(about0.3kSNPs[,-grep("^EXUL", colnames(about0.3kSNPs))], sapply(grep("PL", colnames(about0.3kSNPs), value = TRUE), function(x) do.call(function(...)paste(..., sep = ":"), about0.3kSNPs[,c(sub("PL$", "GT", x), x)])))
# Strip the "PL" suffix from sample names and rename CHROM -> #CHROM as the
# VCF column header requires.
colnames(about0.3kSNPs) <- sub("PL$", "", sub("CHROM", "#CHROM", colnames(about0.3kSNPs)))
# Append the data body below the header written above.
suppressWarnings(write.table(about0.3kSNPs, "merge0.3k.vcf", sep = "\t", quote = FALSE, append = TRUE, row.names = FALSE))
|
fe19eff309fea41ac817f3ab683ff29d973a291c | fdfc22afa8f51ac83096fc8dbb145909d9c61edc | /man/get_us_regional_cases_only_level_1.Rd | 55ba8f03251c0e1452904e5df62e29042ce38fc3 | [
"MIT"
] | permissive | GuilhermeShinobe/covidregionaldata | 1581cc8fbc92123646f934854a25e5452710bfe0 | c1f0b5c6ab5284ac6cf9608b64e002c757bd1da7 | refs/heads/master | 2022-12-05T20:36:09.681272 | 2020-09-02T12:41:17 | 2020-09-02T12:41:17 | 292,276,446 | 0 | 0 | NOASSERTION | 2020-09-02T12:27:20 | 2020-09-02T12:27:19 | null | UTF-8 | R | false | true | 605 | rd | get_us_regional_cases_only_level_1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/usa.R
\name{get_us_regional_cases_only_level_1}
\alias{get_us_regional_cases_only_level_1}
\title{US Regional Daily COVID-19 Count Data - States}
\usage{
get_us_regional_cases_only_level_1()
}
\value{
A data frame of daily COVID cases for the US by state, to be further processed by \code{get_regional_data()}.
}
\description{
Extracts daily COVID-19 data for the USA, stratified by state.
Data available at \url{https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv}.
It is loaded and then sanitised.
}
|
285321135b005871b8734908d88fb06da47e31c7 | 80b660989fecff2b947aaf930ef54abcb30b3119 | /R_Practice/2_tree_classification.R | 8c6463871c447e93baa3f7cf17889adec18d3722 | [] | no_license | jasonkwak190/works | 9f1847238cf40de23bf10477eb236d40e675febf | 9fcfcee721d2b578dfd6cd91749ee0f9d3dbb13b | refs/heads/main | 2023-06-18T16:01:26.434404 | 2021-07-19T13:19:23 | 2021-07-19T13:19:23 | 377,873,941 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,009 | r | 2_tree_classification.R | # 1) (20 points Modified Exercise 14.4 in ESL)
# Cluster the marketing data of Table 14.1 (ESL) using a classification tree. This
# data is in the ISLR package, and also available on UB learns.
# Specifically, generate a reference sample of the same size of the training set.
# This can be done in a couple of ways, e.g., (i) sample uniformly for each variable,
# or (ii) by randomly permuting the values within each variable independently. Build
# a classification tree to the training sample (class 1) and the reference sample
# (class 0) and describe the terminal nodes having highest estimated class 1
# probability. Compare the results to the results near Table 14.1 (ESL), which were
# derived using PRIM.
#install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
# NOTE(review): `marketing` is used below without an explicit library() call;
# it must already be attached (the header says ISLR -- confirm; this dataset
# also ships with the ElemStatLearn package).
marketing
head(marketing)
dim(marketing)
summary(marketing)
#Split the data
set.seed(1)
my_marketing <- na.omit(marketing) #omit NAs
my_marketing <- as.data.frame(my_marketing)
# Class 0 marks the real observations.
# NOTE(review): this is flipped relative to the header text, which asks for
# training sample = class 1 and reference sample = class 0.
my_marketing$Class <- 0
my_marketing
# Marginal distributions of each raw (integer-coded) variable.
hist(my_marketing$Income)
hist(my_marketing$Sex)
hist(my_marketing$Marital)
hist(my_marketing$Age)
hist(my_marketing$Edu)
hist(my_marketing$Occupation)
hist(my_marketing$Lived)
hist(my_marketing$DualIncome)
hist(my_marketing$Household)
hist(my_marketing$Householdu18)
hist(my_marketing$Status)
hist(my_marketing$HomeType)
hist(my_marketing$Ethnic)
hist(my_marketing$Language)
##############################################################################################
#name each variables
#Income/Sex/ Marital/Age/Edu/Occupation/Lived/Dual_Income/Household/Householdu18/Status/Home_Type/Ethnic
# marketing$Income #1~9
# marketing$Sex #1~2
# marketing$Marital #1~5
# marketing$Age #1~6
# marketing$Edu #1~6
# marketing$Occupation #1~9
# marketing$Lived #1~5
# marketing$Dual_Income #1~3
# marketing$Household #1~9
# marketing$Householdu18 #0~9
# marketing$Status #1~3
# marketing$Home_Type #1~5
# marketing$Ethnic #1~8
# marketing$Language #1~3
# Collapse each integer-coded variable into a few ordered levels via cut()
# (intervals are (a, b]); variables assigned NULL are dropped entirely.
my_marketing[["Income"]] <- ordered(cut(my_marketing[["Income"]], c(0,3,6,9), labels = c("low", "med", "high")))
my_marketing[["Sex"]] <- ordered(cut(my_marketing[["Sex"]], c(0,1,2), labels = c("man", "woman")))
my_marketing[["Marital"]] <- ordered(cut(my_marketing[["Marital"]], c(0, 2, 4, 5), labels = c("not married", "solo", "married")))
my_marketing[["Age"]] <- ordered(cut(my_marketing[["Age"]], c(0,3,5,7), labels = c("young", "med", "old")))
my_marketing[["Edu"]] <- ordered(cut(my_marketing[["Edu"]], c(0,2,4,6), labels = c("low", "med", "high")))
my_marketing[["Occupation"]] <- ordered(cut(my_marketing[["Occupation"]], c(0,3,6,9), labels = c("no job", "have job", "retired")))
my_marketing[["Lived"]] <- ordered(cut(my_marketing[["Lived"]], c(0,2,4,5), labels = c("low", "med", "high")))
my_marketing[["DualIncome"]] <- NULL
my_marketing[["Household"]] <- ordered(cut(my_marketing[["Household"]], c(0,3,6,9), labels = c("low", "med", "high")))
my_marketing[["Householdu18"]] <- NULL
my_marketing[["Status"]] <- ordered(cut(my_marketing[["Status"]], c(0,1,2,3), labels = c("low", "med", "high")))
my_marketing[["HomeType"]] <- NULL
my_marketing[["Ethnic"]] <- ordered(cut(my_marketing[["Ethnic"]], c(0,3,6,9), labels = c("low", "med", "high")))
my_marketing[["Language"]] <- NULL
# Build the reference sample by resampling every column independently.
# NOTE(review): sample(..., replace = T) draws WITH replacement (a bootstrap
# of each column's marginal), not the pure permutation of variant (ii) in the
# header; replace = FALSE would give a permutation -- confirm intent.
# The Class column is resampled too, but it is all zeros so this is harmless.
sample0 <- my_marketing
sample1 <- my_marketing
for(i in 1:ncol(sample1)){
  sample1[,i] = sample(sample1[,i], nrow(sample1), replace = T)
}
# Class 1 marks the synthetic reference sample.
sample1$Class <- 1
#sample1
combine_data <- rbind(sample0, sample1)
dim(combine_data)
# rpart classification needs factor columns; convert everything, including
# the 0/1 Class target, to factors.
for(i in 1:ncol(combine_data)){
  combine_data[,i] = as.factor(as.character(combine_data[,i]))
}
# Fit the classification tree real-vs-reference; cp = 0 grows the tree fully
# up to maxdepth, with 10-fold cross-validation recorded in cptable.
tree <- rpart(Class~., data = combine_data, method = "class", control = rpart.control(maxdepth = 4, minsplit = 9, xval = 10, cp = 0))
tree
tree$cptable
tree$control
plot(tree, uniform = T, compress = T, margin = 0.02)
text(tree, use.n = T)
plotcp(tree)
prp(tree)
|
12d29392e92754feed9e946447da3bb023c930db | 5a7e12e77006ddd46c9cd69bbb3985945138894b | /man/bru_model.Rd | 0067c89963317277ba72e95bc82b4efbb168b22c | [] | no_license | cran/inlabru | ba002f0eb10ba85144dfbfeb1f3af4755b9d8acb | 77e5590164955a652e9af2d6a814fdf2c8d9a1f2 | refs/heads/master | 2023-07-12T01:38:40.674227 | 2023-06-20T13:10:02 | 2023-06-20T13:10:02 | 110,278,992 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,655 | rd | bru_model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{bru_model}
\alias{bru_model}
\alias{summary.bru_model}
\alias{print.summary_bru_model}
\title{Create an inlabru model object from model components}
\usage{
bru_model(components, lhoods)
\method{summary}{bru_model}(object, ...)
\method{print}{summary_bru_model}(x, ...)
}
\arguments{
\item{components}{A \link{component_list} object}
\item{lhoods}{A list of one or more \code{lhood} objects}
\item{object}{Object to operate on}
\item{\dots}{Arguments passed on to other methods}
\item{x}{A \code{summary_bru_model} object to be printed}
}
\value{
A \link{bru_model} object
}
\description{
The \link{inlabru} syntax for model formulae is different from what
\code{INLA::inla} considers valid.
In inla most of the effects are defined by adding an \code{f(...)} expression to the formula.
In \link{inlabru} the \code{f} is replaced by an arbitrary (exceptions: \code{const} and \code{offset})
string that will determine the label of the effect. See Details for further information.
}
\details{
For instance
\code{y ~ f(myspde, ...)}
in INLA is equivalent to
\code{y ~ myspde(...)}
in inlabru.
A disadvantage of the inla way is that there is no clear separation between the name of the covariate
and the label of the effect. Furthermore, for some models like SPDE it is much more natural to
use spatial coordinates as covariates rather than an index into the SPDE vertices. For this purpose
\link{inlabru} provides the new \code{main} argument. For convenience, the \code{main} argument can be used
like the first argument of the \code{f} function, and it is the first argument of the component definition.
\code{y ~ f(temperature, model = 'linear')}
is equivalent to
\code{y ~ temperature(temperature, model = 'linear')}
and
\code{y ~ temperature(main = temperature, model = 'linear')}
as well as
\code{y ~ temperature(model = 'linear')}
which sets \code{main = temperature}.
On the other hand, \code{map} can also be a function mapping, e.g. the \link{coordinates} function of the
\link{sp} package:
\code{y ~ mySPDE(coordinates, ...)}
This extracts the coordinates from the data object and maps them to the latent
field via the information given in the \code{mapper}, which by default is
extracted from the \code{model} object, in the case of \code{spde} model
objects.
Moreover, \code{main} can be any expression that evaluates within your data as an environment.
For instance, if your data has columns 'a' and 'b', you can create a fixed effect of 'sin(a+b)' by
setting \code{map} in the following way:
\code{y ~ myEffect(sin(a+b))}
}
\keyword{internal}
|
95ac4c73ceda902b73b408a4c45691fa681cc5cc | 26730e472b6ee4892b4e1dd93c85933797d3356a | /meta_analysis_stuff.R | 988d2ef988e03cd0aca0344483556ac43eb84549 | [] | no_license | NKalavros/thyroid-cancer-a-bioinformatics-approach | ba6f37aa876ed30e3210d21c60c1509df4fb171c | 4f5b07e10abb1610f8c3e184ca9ceb7ba9368d97 | refs/heads/master | 2020-08-04T08:58:17.691065 | 2019-10-08T02:10:59 | 2019-10-08T02:10:59 | 212,081,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,359 | r | meta_analysis_stuff.R | library("devtools")
library(arrayConnector)
# Scratch meta-analysis script: downloads five GEO expression series,
# harmonises their scales/annotations, runs MetaQC, and then follows the
# virtualArray vignette (Rnw chunk numbers kept below) for batch-effect
# removal, hierarchical clustering and PCA.
install_github("https://github.com/metaOmics/MetaQC")
library("MetaQC")
library("GEOquery")
library("arrayQualityMetrics")
BiocManager::install("hgu133a.db")
BiocManager::install("hgu133a2.db")
# Download the series matrices (first platform of each series).
GSE58545 <- getGEO("GSE58545",GSEMatrix=T,AnnotGPL=FALSE)[[1]]
arrayQualityMetrics::arrayQualityMetrics(GSE58545)
GSE33630 <- getGEO("GSE33630",GSEMatrix=T,AnnotGPL=FALSE)[[1]]
GSE29265 <- getGEO("GSE29265",GSEMatrix=T,AnnotGPL=FALSE)[[1]]
GSE27155 <- getGEO("GSE27155",GSEMatrix=T,AnnotGPL=FALSE)[[1]]
GSE3678 <- getGEO("GSE3678",GSEMatrix=T,AnnotGPL=FALSE)[[1]]
# Inspect per-sample maxima to judge which series are on a log scale.
mean(apply(exprs(GSE58545),2,max,na.rm = TRUE))
mean(apply(exprs(GSE33630),2,max,na.rm = TRUE))
mean(apply(exprs(GSE29265),2,max,na.rm = TRUE))
mean(apply(exprs(GSE27155),2,max,na.rm = TRUE))
mean(apply(exprs(GSE3678),2,max,na.rm = TRUE))
# Rescale GSE27155 and log2-transform GSE3678 so the series are comparable.
# NOTE(review): the /4*14 rescaling factor looks ad hoc -- confirm.
exprs(GSE27155) <- exprs(GSE27155)/4*14
exprs(GSE3678) <- log2(exprs(GSE3678))
# Assign the chip annotation package names (hgu133a / hgu133a2).
annotation(GSE58545) <- "hgu133a"
annotation(GSE33630) <- "hgu133a2"
annotation(GSE29265) <- "hgu133a2"
annotation(GSE27155) <- "hgu133a"
annotation(GSE3678) <- "hgu133a2"
#Get only the samples we care for
# NOTE(review): 11:ncol(GSE33630)-1 parses as (11:ncol(...)) - 1, i.e.
# columns 10..(ncol-1); if columns 11..(ncol-1) were intended, this needs
# parentheses: 11:(ncol(GSE33630)-1). Same precedence applies on the
# GSE27155 line (38:ncol(...) - 17 subtracts 17 from every index).
GSE33630 <- GSE33630[,11:ncol(GSE33630)-1]
GSE29265 <- GSE29265[,10:ncol(GSE29265)]
GSE27155 <- GSE27155[,c(14:17,38:ncol(GSE27155) - 17)]
# MetaQC inputs: pathway gene lists and filtering settings.
data(pathway)
# NOTE(review): `i` is not defined anywhere in this script; this only works
# with a leftover `i` in the workspace -- pick an explicit index.
GList <- pathway[[i]]
filterGenes <- TRUE
cutRatioByMean <- 0.3
cutRatioByVar <- 0.3
all_datasets <- list()
all_datasets$data <- list(GSE58545,GSE33630,GSE27155,GSE29265,GSE3678)
names(all_datasets$data) <- c("GSE58545","GSE33630","GSE27155","GSE29265","GSE3678")
all_datasets$data <- lapply(all_datasets$data,exprs)
# Build all-zero label vectors, one 0 per sample in each dataset.
samples <- lapply(all_datasets$data,ncol)
reversed_rep <- function(integer){return(rep(0,integer))}
samples <- lapply(samples,reversed_rep)
all_datasets$dataLabel <- samples
QCresult <- MetaQC(all_datasets$data, all_datasets$dataLabel, GList,filterGenes,cutRatioByMean,cutRatioByVar)
# Merge the datasets onto a virtual array with batch-effect removal.
my_virtualArrays <- NULL
my_virtualArrays$noBatchEffect <- virtualArrayExpressionSets(covars = "all", supervised = TRUE)
arrayQualityMetrics(my_virtualArrays$noBatchEffect)
group <- as.factor(my_virtualArrays$noBatchEffect@phenoData@data$Covariate.1)
# Everything below is copied from the virtualArray vignette.
# NOTE(review): the vignette's element name iPSC_hESC_noBatchEffect does not
# match the noBatchEffect element created above, and GSE23402/GSE26428 are
# vignette datasets never downloaded here -- this section will not run as-is.
###################################################
### code chunk number 12: virtualArray.Rnw:246-250
###################################################
pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[5] <-
  c(as.character(pData(GSE23402)[,8]),as.character(pData(GSE26428)[,1]))
pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[6] <-
  c(rep("red",24),rep("blue1",3))
###################################################
### code chunk number 13: virtualArray.Rnw:268-271
###################################################
dist_iPSC_hESC_noBatchEffect <-
  dist(t(exprs(my_virtualArrays$iPSC_hESC_noBatchEffect)),
       method="euclidian")
###################################################
### code chunk number 14: virtualArray.Rnw:283-286
###################################################
hc_iPSC_hESC_noBatchEffect <-
  hclust(dist_iPSC_hESC_noBatchEffect, method="average")
hc_iPSC_hESC_noBatchEffect$call <- NULL
###################################################
### code chunk number 15: virtualArray.Rnw:309-314
###################################################
virtualArrayHclust(hc_iPSC_hESC_noBatchEffect,
  lab.col=pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[,6],
  lab=pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[,5],
  main="batch effect removed",cex=0.7,
  xlab="sample names")
###################################################
### code chunk number 16: virtualArray.Rnw:371-372 (eval = FALSE)
###################################################
## my_virtualArrays$iPSC_hESC_supervised <- virtualArrayExpressionSets(supervised=TRUE)
###################################################
### code chunk number 17: virtualArray.Rnw:375-376
###################################################
my_virtualArrays$iPSC_hESC_supervised <- virtualArrayExpressionSets(supervised=TRUE,sampleinfo=system.file("extdata","sample_info_mod.txt",package="virtualArray"))
###################################################
### code chunk number 18: virtualArray.Rnw:379-382
###################################################
dist_iPSC_hESC_supervised <-
  dist(t(exprs(my_virtualArrays$iPSC_hESC_supervised)),
       method="euclidian")
###################################################
### code chunk number 19: virtualArray.Rnw:384-387
###################################################
# NOTE(review): <<- (superassignment) is carried over from the vignette;
# plain <- would do at top level.
hc_iPSC_hESC_supervised <<-
  hclust(dist_iPSC_hESC_supervised, method="average")
hc_iPSC_hESC_supervised$call <- NULL
###################################################
### code chunk number 20: virtualArray.Rnw:390-395
###################################################
virtualArrayHclust(hc_iPSC_hESC_supervised,
  lab.col=pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[,6],
  lab=pData(my_virtualArrays$iPSC_hESC_noBatchEffect)[,5],
  main="batch effect removed - supervised mode",cex=0.7,
  xlab="sample names")
###################################################
### code chunk number 21: virtualArray.Rnw:400-403
###################################################
pca_supervised <- prcomp(t(exprs(my_virtualArrays$iPSC_hESC_supervised)))
# NOTE(review): pch=17 sits inside the col=c(...) vector; it was probably
# meant as a separate pch argument -- confirm against the vignette.
plot(pca_supervised$x, pch=19, cex=2, col=c(rep("red",24),rep("blue",3),pch=17))
legend("topleft",c("GSE23402","GSE26428"),col=c("red","blue"),pch=19,cex=1)
|
0bf582ca340f211d386ed38ceaf0a9453249ec6b | a7c95f93e964ce75eaaeeb4adbb5acdeac2a442e | /Assignment/Untitled.R | 248ceb27974489188d4ceb01413e89701001418b | [] | no_license | NielsKrarup/DemMorCourse | effae7d81f84fd1093e4a3dc6def80929e04e81d | d4de8e3386e3259e15ee01f5d4dd5d1f28042e55 | refs/heads/master | 2023-08-14T23:05:37.056570 | 2021-10-05T16:01:24 | 2021-10-05T16:01:24 | 403,335,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,060 | r | Untitled.R | tmp <-
surf_obj$mu[,,dimnames(surf_obj$mu)$Year %in% as.character(1836:2020)] -
surf_obj$mu[,,dimnames(surf_obj$mu)$Year %in% as.character(1835:2019)]
## 5th slice of the year-over-year change array computed above.
tmp[,,5]
# Sum the occurrence array over its 2nd dimension, keeping margins 1 and 3.
apply(X = OEdata$O, MARGIN = c(1, 3), FUN = sum)
# Long format: one row per (Age, Group, Year) cell of the rate surface.
arr_long <- reshape2::melt(surf_obj$mu) %>% arrange(Year, Group, Age)
arr_long %>% mutate(age_mod = Age %% 10)
arr_long %>% filter(Year == 1839)
0:100 %% 10
## Rates over time before 1900: one line per age, coloured by the last
## digit of the age, one panel per group.
ggplot(arr_long %>% mutate(age_mod = Age %% 10) %>% filter(Year < 1900),
       aes(x = Year, y = value, col = Age)) +
  geom_line(aes(group = Age, col = factor(age_mod)), alpha = 0.4) +
  facet_wrap(vars(Group), scales = "free") +
  #scale_y_continuous(trans = "log10") +
  guides(col = "none")
## Age-0 rates over time.
## Bug fix: the original geom_line() had mismatched parentheses -- `col`
## was swallowed into factor(Age, ...) and the call was never closed.
ggplot(arr_long %>% filter(Age == 0) %>% mutate(Age_mod = Age %% 10),
       aes(x = Year, y = value)) +
  geom_line(aes(group = factor(Group):factor(Age), col = factor(Age_mod)),
            alpha = 0.4) +
  # NOTE(review): arr_long carries no Age_grp column in this script --
  # confirm where it is created, or facet on Group as in the plot above.
  facet_wrap(vars(Age_grp), scales = "free") +
  #facet_wrap(vars(Group), scales = "free") +
  #scale_y_continuous(trans = "log10") +
  guides(col = "none")
surf_obj$mu[,1,1:2]
|
04c2536ea326a9583473acaec35e34ce3a7df745 | 97e8ef7f8220ed512c0b7270f38158df702168e5 | /R/pxtable.R | 91d6e4de208d39a85c10a527d2c5e7bd1e28d6b8 | [
"MIT"
] | permissive | HanOostdijk/HOQCutil | 9441048f2c637b1b0ce1e2c038ce16d18971ad8c | 2021c2cb2dac1ad181b570db6fe2408e02328c43 | refs/heads/master | 2023-08-22T15:51:42.185724 | 2023-07-21T20:11:56 | 2023-07-21T20:11:56 | 140,326,813 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,275 | r | pxtable.R | #' print data.frame in LaTeX
#'
#' This function adds to the print function of the `xtable` package by providing predefined `add.to.row` code for the `longtable` LaTeX package: 'continued on/from ...' and 'end of table' lines that can switched on or off. This functionality can only be used with the `longtable` environment. When `scalebox` is specified the `tabular` environment is used and `add.to.row` code is not available. If the `longtable` package is not available then set `scalebox=1` or another appropriate value.
#'
#' @name pxtable
#' @param df data.frame that will be printed.
#' @param tablabel Character string that can be used to reference the table as `table:tablabel`. Default: ''
#' @param tabcap Character string that is used as the caption for the table. Default: ''
#' @param adef Character string with the default alignment. Default: "l"
#' @param ap Integer vector with the positions of the columns for which an alignment specification is given in `av`. Default: c()
#' @param av Character vector with alignment specifications for the positions in `ap`, e.g. with values such as 'c', 'r', 'p{3cm}'. Default: c()
#' @param ddef Integer with the default number of digits to display. Default: 0
#' @param dp Integer vector with the positions of the columns for which an digit specification is given in `dv`. Default: c()
#' @param dv Integer vector with digit specifications for the positions in `dp`. Default: c()
#' @param tco Boolean indicating if 'continued on' should be displayed at the bottom of a page(only) when using the `longtable` package. Default: T
#' @param tcf Boolean indicating if 'continued from' should be displayed at the top of a page(only) when using the `longtable` package. Default: `tco`
#' @param te Boolean indicating if 'end of table' should be displayed at the end of the table (only) when using the `longtable` package. Default: F
#' @param newpage Boolean indicating if the table should start on a new page. Default: F
#' @param scalebox Positive number for scaling the table. Forces the `tabular` environment. See `xtable::print.xtable`. Default: NULL
#' @param include.colnames Boolean indicating if column names will be printed. See `xtable::print.xtable`. Default: T
#' @param colnames Character vector with column headers to use instead of field names. Default: NA
#' @param rotate.colnames Boolean indicating if column names will be printed vertically. See `xtable::print.xtable`. Default: T
#' @param include.rownames Boolean indicating if row names will be printed. See `xtable::print.xtable`. Default: F
#' @param sanitize.colnames.function Function used for nicer printing of column names. See `xtable::print.xtable`. Default: xtable::sanitize
#' @param booktabs Boolean indicating if LaTeX package `booktabs` will be used for formatting horizontal lines. See `xtable::print.xtable`. Default: F
#' @param ... Additional arguments that are passed to `xtable::print.xtable`.
#' @return See `xtable::print.xtable` as this function prepares only its function arguments. Using this function in a `knitr` chunk with `results ='markup'` will show the generated LaTeX text. With `results ='asis'` the text is transfered to the intermediate output file and interpreted as LaTeX in the final pdf document.
#' @export
#' @examples
#' \dontrun{
#' df1 = data.frame(
#' v1 = rep(c(1,NA),50),
#' v2 = rep(c('R version 3.5.0', 'R is free software'),50),
#' v3 = rep(c('Copyright (C) 2019', 'You are welcome'),50),
#' v4 = rep(c('a','b'),50),
#' v5 = rep(c(pi,NA),50),
#' stringsAsFactors = F
#' )
#'
#' pxtable (df1,tablabel='tab1',tabcap='mycaption',
#' ap = 1:4, av = c("r", "p{6cm}", "p{6cm}", "c"), adef = "l",
#' dp = c(1, 5), dv = c(2, 3), ddef = 0, te = F,
#' include.rownames = T,
#' rotate.rownames = T # example of an argument that will be passed to xtable::print.xtable
#' )
#' }
pxtable <- function(df,
                    tablabel = '',
                    tabcap = '',
                    adef = 'l',
                    ap = c(),
                    av = c(),
                    ddef = 0,
                    dp = c(),
                    dv = c(),
                    tco = T,
                    tcf = tco,
                    te = F,
                    newpage = F,
                    scalebox = NULL,
                    include.colnames = T,
                    colnames = NA,
                    rotate.colnames = T,
                    include.rownames = F,
                    sanitize.colnames.function = xtable::sanitize,
                    booktabs = F,
                    ...) {
  ## Per-column alignment and digit vectors: start from the defaults and
  ## overwrite the positions listed in ap / dp.
  my_align <- rep(adef, dim(df)[2])
  my_align[ap] <- av
  my_digits <- rep(ddef, dim(df)[2])
  my_digits[dp] <- dv
  my_caption <- HOQCutil::def_tab(tablabel, tabcap)
  if (!knitr::is_latex_output()) {
    ## Non-LaTeX output: fall back to knitr::kable. kable only understands
    ## 'l'/'r'/'c', so LaTeX-style specs such as "p{3cm}" are mapped to 'l'.
    ao <- my_align %in% c('r', 'l', 'c')
    if (!all(ao)) {
      #warning('one or more alignment specifications changed!')
      my_align[!ao] <- 'l'
    }
    print(
      knitr::kable(
        df,
        digits = my_digits,
        col.names = colnames,
        row.names = include.rownames,
        align = my_align,
        caption = tabcap
      )
    )
    invisible(NULL)
  } else {
    sanitize.colnames.function0 <- sanitize.colnames.function
    if (!is.null(scalebox)) {
      ## scalebox forces the plain tabular environment; the longtable
      ## add.to.row machinery is not available there.
      floating <- TRUE
      tenv <- "tabular"
      my_hline.after <- c(0, nrow(df))
      add.to.row <- NULL
    } else {
      floating <- FALSE
      tenv <- "longtable"
      my_hline.after <- 0 # hline at nrow(df) handled by add.to.row
      nms <- names(df)
      ## Replaces the field names by the user-supplied headers; composed
      ## with the sanitize function below.
      sanitize.colnames.function1 <- function(x) {
        colnames
      }
      ## Bug fix: `!is.na(colnames)` is a vector whenever user-supplied
      ## headers are given, and `&&` errors on conditions of length > 1
      ## (R >= 4.3). Test "any header provided" via all(is.na(...)) instead.
      if (!all(is.na(colnames)) && length(colnames) == length(nms))
        sanitize.colnames.function0 <- purrr::compose(
          sanitize.colnames.function, sanitize.colnames.function1
        )
      add.to.row <-
        format_addtorow(sanitize.colnames.function0(nms),
                        dim(df)[1],
                        include.colnames,
                        include.rownames,
                        rotate.colnames,
                        booktabs,
                        tcf,
                        tco,
                        te)
    }
    if (newpage) {
      cat('\\newpage')
    }
    print(
      x = xtable::xtable(
        df,
        caption = my_caption,
        align = c('l', my_align),
        digits = c(0, my_digits)
      ),
      add.to.row = add.to.row,
      hline.after = my_hline.after,
      include.colnames = include.colnames,
      include.rownames = include.rownames,
      rotate.colnames = rotate.colnames,
      floating = floating,
      sanitize.colnames.function = sanitize.colnames.function0,
      tabular.environment = tenv,
      scalebox = scalebox,
      booktabs = booktabs,
      ...
    )
    invisible(NULL)
  }
}
format_header <- function(names,
                          include.colnames = TRUE,
                          include.rownames = TRUE,
                          sideways = FALSE) {
  # Build the LaTeX header row for a (long)table: the column names joined
  # by " & ", each optionally wrapped in a sideways (rotated) environment,
  # with an empty leading cell when row names occupy the first column.
  # Returns "" when column names are suppressed.
  #
  # The original used glue::glue()/glue_collapse(); base paste() produces
  # the identical strings (one space on each side of every name) with one
  # less dependency.
  g <- ''
  if (include.colnames) {
    if (sideways) {
      b <- '\\begin{sideways}'
      e <- '\\end{sideways}'
    } else {
      b <- ''
      e <- ''
    }
    g <- paste(paste(b, names, e), collapse = ' & ')
    if (include.rownames) {
      g <- paste0(' & ', g)
    }
  }
  g
}
format_addtorow <-
  function(nms,
           nr,
           include.colnames = T,
           include.rownames = F,
           rotate.colnames = include.colnames,
           booktabs = F,
           tcf = T,
           tco = tcf,
           te = tcf) {
    ## Build the add.to.row list (positions + LaTeX code) that gives a
    ## longtable its repeated page header plus the optional
    ## "continued from" / "continued on" / "end of table" marker lines.
    ##
    ## Args:
    ##   nms : sanitized column names
    ##   nr  : number of data rows (position where the closing code goes)
    ##   include.colnames / include.rownames / rotate.colnames / booktabs:
    ##         as in pxtable()
    ##   tcf / tco / te : switch the three marker lines on or off
    nrc <- length(nms)
    if (include.rownames) {
      nrc <- nrc + 1   # the row-name column occupies an extra table column
    }
    if (booktabs) {
      top <- '\\toprule'
      mid <- '\\midrule'
      bot <- '\\bottomrule'
    } else {
      top <- '\\hline'
      mid <- '\\hline'
      bot <- '\\hline'
    }
    ## The marker lines only interpolate `nrc`, so plain paste0() replaces
    ## the original glue::glue() calls (custom <> delimiters) exactly,
    ## with one less dependency.
    if (tcf == T) {
      # format 'continued from previous page'
      tcf1 <- paste0("\\multicolumn{", nrc, "}{l}",
                     "{\\footnotesize \\tablename\\ \\thetable{} -- continued from previous page} \\\\")
    } else {
      tcf1 <- ''
    }
    if (tco == T) {
      # format 'continued on next page'
      tco1 <- paste0("\\multicolumn{", nrc, "}{l}",
                     "{\\footnotesize \\tablename\\ \\thetable{} -- continued on next page} \\\\")
    } else {
      tco1 <- ''
    }
    if (te == T) {
      # format 'end of table'
      te1 <- paste0("\\multicolumn{", nrc, "}{l}",
                    "{\\footnotesize end of \\tablename\\ \\thetable{} } \\\\")
    } else {
      te1 <- ''
    }
    ## Code inserted at position 0: first-page head, repeated page head and
    ## the page foot of the longtable.
    command1 <- paste0(
      "\\endfirsthead\n",
      tcf1,
      " \n",
      format_header(
        nms,
        include.colnames = include.colnames,
        include.rownames = include.rownames,
        sideways = rotate.colnames
      ),
      " \\\\ \n",
      mid,
      " \n",
      "\\endhead\n",
      bot,
      " \n",
      tco1,
      " \n",
      "\\endfoot\n",
      "\\endlastfoot\n"
    )
    ## Code inserted after the last data row.
    command2 <- paste0(te1, "\n", bot, "\\\\", "\n")
    if (!booktabs) {
      # remove the extra \hline when booktabs is not used
      command2 <- paste0(command2, "%")
    }
    add.to.row <- list()
    add.to.row$pos <- list(0, nr)
    add.to.row$command <- c(command1, command2)
    add.to.row
  }
|
8d3c5d783d166ea5d28eeb653dfc9049a8e4166e | 8ff8c54737289ee29e1e9dbee6153e7e8be2d17a | /cachematrix.R | 0e5c8d5285099343d5ef4ced060cc91fdd6c2580 | [] | no_license | mturle/ProgrammingAssignment2 | 3e1b43057901da85296caf1780f03f6d359fcb7f | 9e1c5edb79474d7d053b57452481d354b5d8d55a | refs/heads/master | 2021-01-15T20:14:12.772526 | 2016-05-18T08:03:25 | 2016-05-18T08:03:25 | 58,883,731 | 0 | 0 | null | 2016-05-15T20:50:19 | 2016-05-15T20:50:19 | null | UTF-8 | R | false | false | 1,793 | r | cachematrix.R | # Programmatic Assignement 3
# This function returns a list containing functions to store (set) and read (get)
# the value of a matrix, and later to store the inverted matrix or retrieve it
# from the cache
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based cache: pairs a matrix with a lazily filled slot for its
  # inverse. The returned accessor functions all share this environment.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
# This function returns an inverted matrix via the solve() function, but first
# checks whether an inverted matrix is already stored in the cache. If
# so, the inverted matrix is read from the cache instead of being recalculated
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held by 'x' (a cache object built by
  # makeCacheMatrix). The inverse is computed at most once; later calls
  # return the cached value and announce it with a message.
  #
  # Args:
  #   x:   cache object created by makeCacheMatrix()
  #   ...: further arguments passed on to solve()
  minv <- x$getinverse()
  if (!is.null(minv)) {
    message("getting cached data.")
    return(minv)
  }
  data <- x$get()
  # Bug fix: '...' was accepted in the signature but never forwarded.
  minv <- solve(data, ...)
  x$setinverse(minv)
  minv
}
# Test
# A simple matrix x (3x3)
# > x <- matrix(c(1,3,3,1,4,3,1,3,4), nrow=3, ncol=3)
# Storing the matrix
## > m <- makeCacheMatrix(x)
# Retrieve the original matrix from m variable
## > m$get()
# [,1] [,2] [,3]
# [1,] 1 1 1
# [2,] 3 4 3
# [3,] 3 3 4
# Test the cacheSolve function for the first run (the inverse of a matrix
# is calculated)
# > cacheSolve(m)
# [,1] [,2] [,3]
# [1,] 7 -1.000000e+00 -1
# [2,] -3 1.000000e+00 0
# [3,] -3 1.665335e-16 1
# > cacheSolve(m)
# Test the cacheSolve function for the second run (the inverse of a matrix
# is read from cache)
# > cacheSolve(m)
# > getting cached data.
# > [,1] [,2] [,3]
# > [1,] 7 -1.000000e+00 -1
# > [2,] -3 1.000000e+00 0
# > [3,] -3 1.665335e-16 1
|
a3a27e10038788c149b85c9bcd0d967de6f0e3b2 | e5c43a31a082bbfec5ebbc20b34d373896721579 | /R/functions/reptile_juv_survival.R | f82288e9209161c659c2a14ece78c5da5afeba77 | [] | no_license | geryan/rfst | 3dde3a499651f3a1ccc736f8c6597c5972f0e17c | 0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca | refs/heads/master | 2023-05-02T12:32:51.743467 | 2021-04-27T01:26:47 | 2021-04-27T01:26:47 | 164,573,310 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 447 | r | reptile_juv_survival.R | reptile_juv_survival <- function(
a, # age at first reproduction
Sa, # adult survival
n, # number of clutches per year
c # number per clutch
){
Sj = (2*(1- Sa)/(n*c))^(1/a)
return(Sj)
}
# Formula from:
# Pike, D.A., Pizzatto, L., Pike, B.A. and Shine, R., 2008.
# Estimating survival rates of uncatchable animals: the myth of high juvenile mortality in reptiles.
# Ecology, 89(3), pp.607-611.
# https://doi.org/10.1890/06-2162.1 |
d549daee7909193731b447419f0828f6267a17f9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sampSurf/examples/bboxCheck.Rd.R | 75bd37da6c8a4fcf7d12a9387525adb80b61e3dd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | bboxCheck.Rd.R | library(sampSurf)
### Name: bboxCheck
### Title: Function to Check Spatial Bounding Boxes
### Aliases: bboxCheck
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Build a 20 x 20 tract with 0.5-unit grid cells, extract its spatial
# bounding box, and run sampSurf's bounding-box check on it.
tract = Tract(c(x=20,y=20), cellSize=0.5)
bb = bbox(tract)
bboxCheck(bb)
|
825a85cc31d2cc2ee674308b087df821505f2715 | bf48edd26c053b4ce61dd3e70e1db40588af1b7e | /r/init.R | 347461d9110ec2f2c8fd304803083a66c32908ad | [] | no_license | syncShan/kjm | 1df3509232370c876cb537d7d6e9ff0c0102f925 | 7f5bc9e65720370f91757150155a454b6b146042 | refs/heads/master | 2021-01-15T15:43:05.974886 | 2018-03-03T09:14:25 | 2018-03-03T09:14:25 | 44,855,385 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 416 | r | init.R | source("r/mongoFunc.R")
source("conf/parameter.R")
source("r/restore.R")
library(rjson)
library(quantmod)
# Today's date formatted as "YYYYMMDD"; used as the end of both the update
# and the restore windows below.
date = as.character(as.Date(Sys.time()),"%Y%m%d")
#idList = readLines("conf/etf.conf")
# One instrument id per line; etfInd.conf is the active universe.
idList = readLines("conf/etfInd.conf")
# Refresh raw data for every id from 2010-01-01 up to today.
# NOTE(review): updateSingleStock, mongodb and rawTable are presumably
# defined by the source()d files above -- confirm before refactoring.
for( id in idList){
stockdf = updateSingleStock(id,"20100101",date,mongodb,rawTable)
}
#restore key metrics from certain date for running
restoreAll(idList,"20110101",date,mongodb)
|
5e9bd6490182fc3fb3829f93d7695e214ecd3886 | 9db1042b59eccaa4729882c619631599426b6f14 | /data_cleaning_ea.R | 8415f5682e2933efc844c9be76b564a77048c8f3 | [] | no_license | sreifeis/maRkov-chain-gang | 4ef242c22922888d302eb4d1f81981f5a8b93479 | 6a09359ddf17fdf92b8d95396b52fa3e19002201 | refs/heads/master | 2020-05-01T19:09:04.733684 | 2019-04-26T22:50:24 | 2019-04-26T22:50:24 | 177,640,699 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,345 | r | data_cleaning_ea.R | remove(list = ls())
############################################################################
## Programmer: Ethan Alt
## Last updated: 03/28/2019
##
## Description of file:
##
## This file inputs the data into R. Performs some basic cleaning, and runs
## latent dirichlet allocation (LDA) on the data set.
############################################################################
##################################################################
## Put all packages to be loaded in pkglist. Script will check if
## package is installed. If not, it will install and then load.
##################################################################
# Packages required below (data.table for fread, tm/topicmodels for the
# text processing and LDA). NOTE(review): text2vec, magrittr and Matrix
# appear unused in the visible code -- confirm before removing.
pkglist <- c("data.table", "text2vec", "magrittr", "topicmodels", "tm", "Matrix")
## Function to check if package is installed. Will install
## the package if it is not, and then load
install.pkg <- function(pkgname, character.only = TRUE) {
  # Install 'pkgname' if it is not found in any library, then attach it.
  # Returns the result of require(): TRUE when the attach succeeded.
  already_installed <- pkgname %in% rownames(installed.packages())
  if (!already_installed) {
    install.packages(pkgname)
  }
  require(pkgname, character.only = character.only)
}
## This applies the install.pkg function over the length
## of the pkglist vector
# NOTE(review): called for its side effect of attaching each package; the
# list of TRUE/FALSE results it returns is printed at the top level.
lapply(pkglist, install.pkg, character.only = TRUE)
###############################################
## Load the data from CSV
###############################################
# NOTE(review): hard-coded absolute Windows path -- this script only runs
# on the author's machine; consider a relative path instead.
setwd("C:/Users/ethanalt/Google Drive/BIOS 735/Data")
amzn.df <- fread("1429_1.csv")
######################################################
## Subset the data to only include the review message
######################################################
# Keep the review id plus the two free-text fields (assumes the CSV has
# columns id, reviews.text and reviews.title -- confirm against the file).
id <- amzn.df$id
review <- amzn.df$reviews.text
title <- amzn.df$reviews.title
#######################################
## Get corpus of reviews and titles
#######################################
source.review <- VectorSource(review)
corpus.review <- Corpus(source.review)
source.title <- VectorSource(title)
corpus.title <- Corpus(source.title)
###################################
## Put the corpora in a list for looping
###################################
corpus.list <- list(corpus.review, corpus.title)
n.topics <- c(20, 5)   # LDA topics per corpus: 20 for reviews, 5 for titles
type = c("review", "title")
## Process each corpus (reviews, then titles): clean the text, fit LDA and
## write the per-document topic assignments + posterior probabilities to CSV.
## The loop index was renamed from `c` (which shadowed base::c) to `ci`.
for (ci in seq_along(corpus.list)) {
  corpus <- corpus.list[[ci]]
  ## Normalise case FIRST so that capitalised stopwords ("The", "I", ...)
  ## are caught below. (Bug fix: the original removed stopwords before
  ## lowercasing, and then a second time with a smaller word list -- the
  ## duplicated pass was a copy/paste accident.)
  corpus <- tm_map(corpus, content_transformer(tolower))
  ## Remove stopwords plus domain words that do not convey sentiment
  ## (e.g. "amazon", "alexa", "kindle", family words).
  exclude <- c("amazon", "alexa", "echo", "tablet", "kindle", "fire", "alexia",
               "read", "reading", "ipad", "husband", "son", "daughter", "kids", "kid")
  stopwords <- c(stopwords("en"), exclude)
  corpus <- tm_map(corpus, removeWords, stopwords)
  ## Remove punctuation, digits and redundant whitespace
  corpus <- tm_map(corpus, removePunctuation)
  corpus <- tm_map(corpus, removeNumbers)
  corpus <- tm_map(corpus, stripWhitespace)
  ## Create a document-term matrix
  dtm <- DocumentTermMatrix(corpus)
  ## Cleaning leaves some documents empty; keep only non-empty ones.
  ## Bug fix: record the kept row indices BEFORE subsetting -- after
  ## `dtm[unique(dtm$i), ]` the $i slot is renumbered 1..n, so the original
  ## `id[unique(dtm$i)]` selected the first n ids, not the kept documents.
  keep <- unique(dtm$i)
  dtm <- dtm[keep, ]
  id2 <- id[keep]
  #########################################
  ## Latent Dirichlet Allocation (LDA)
  #########################################
  ## Number of topics (20 for reviews, 5 for titles)
  k <- n.topics[ci]
  ## Fit LDA with the variational EM algorithm.
  ## (The Gibbs-sampling controls from earlier drafts -- burnin, iter,
  ## thin, seed, nstart, best -- were unused with method = "VEM" and have
  ## been removed.)
  ldaOut <- topicmodels::LDA(dtm, k, method = "VEM")
  ## Most likely topic per document
  ldaOut.topics <- as.matrix(topics(ldaOut))
  ## Top 50 terms in each topic
  terms.list[[ci]] <- as.matrix(terms(ldaOut, 50))
  ## Posterior topic probabilities per document
  topicProbabilities <- as.data.frame(ldaOut@gamma)
  names(topicProbabilities) <- paste0("p_topic", seq_len(ncol(topicProbabilities)))
  ## Output latent topic and posterior probabilities
  setwd("C:/Users/ethanalt/Google Drive/BIOS 735/Data")
  filename <- paste0(type[ci], "_topics.csv")
  df <- data.frame(id = id2, topic = ldaOut.topics, topicProbabilities)
  names(df)[-1] <- paste0(type[ci], "_", names(df)[-1])
  write.csv(df, file = filename, row.names = FALSE)
}
#
# ## remove whitespace
# corpus <- tm_map(corpus, stripWhitespace)
# ## convert to lowercase
# corpus <- tm_map(corpus, content_transformer(tolower))
#
# ## Remove stopwords (common words that don't convey sentiment e.g., "i", "me", "they")
# exclude <- c("amazon", "alexa", "echo", "tablet", "kindle", "fire", "alexia",
# "read", "reading", "ipad")
# stopwords <- c(stopwords("en"), exclude)
# corpus <- tm_map(corpus, removeWords, stopwords)
#
# ## remove punctuation
# corpus <- tm_map(corpus, removePunctuation)
# ## Strip digits
# corpus <- tm_map(corpus, removeNumbers)
#
#
#
# ## Crate a document term matrix
# dtm <- DocumentTermMatrix(corpus)
#
#
# ## Cleaning up the reviews resulted in some of
# ## the reviews being empty. Remove these
# dtm <- dtm[unique(dtm$i), ]
#
# #########################################
# ## Latent Dirichlet Allocation (LDA)
# #########################################
#
# ## Set parameters for Gibbs sampling
# burnin <- 4000
# iter <- 2000
# thin <- 500
# seed <- list(2003, 5, 63, 100001, 765)
# nstart <- 5
# best <- TRUE
#
# ## Number of topics
# k <- 50
#
#
# ## Run LDA using Variational Bayes EM algorithm
# ldaOut <- topicmodels::LDA(dtm,
# k,
# method = "VEM"
# )
#
#
# ## write out results
# ## docs to topics
# ldaOut.topics <- as.matrix(topics(ldaOut))
#
#
#
# # top 6 terms in each topic
# ldaOut.terms <- as.matrix(terms(ldaOut,50))
#
#
# # probabilities associated with each topic assignment
# topicProbabilities <- as.data.frame(ldaOut@gamma)
|
c66046dd2002561b677cf447090a184b516b387c | 3f312cabe37e69f3a2a8c2c96b53e4c5b7700f82 | /ver_devel/bio3d/R/read.dcd.R | 255b9b043dd5594174dbfd05949d055b56ffd2fc | [] | no_license | Grantlab/bio3d | 41aa8252dd1c86d1ee0aec2b4a93929ba9fbc3bf | 9686c49cf36d6639b51708d18c378c8ed2ca3c3e | refs/heads/master | 2023-05-29T10:56:22.958679 | 2023-04-30T23:17:59 | 2023-04-30T23:17:59 | 31,440,847 | 16 | 8 | null | null | null | null | UTF-8 | R | false | false | 10,624 | r | read.dcd.R | "read.dcd" <-
function(trjfile, big=FALSE, verbose=TRUE, cell = FALSE){
  # Version 0.2 ... Tue Jan 18 14:20:12 PST 2011
  # Version 0.1 ... Thu Mar 9 21:18:54 PST 2005
  #
  # Args:
  #   trjfile : path to the DCD trajectory file
  #   big     : store coordinates in a bigmemory::big.matrix (large files)
  #   verbose : print header details and a progress bar
  #   cell    : return the 6 per-frame unit-cell parameters instead of coords
  #
  # Description:
  # Reads a CHARMM or X-PLOR/NAMD binary
  # trajectory file with either big- or
  # little-endian storage formats
  #
  # Details:
  # Reading is accomplished with two different
  # functions.
  # 1. 'dcd.header' which reads header info
  # 2. 'dcd.frame' takes the header info and
  # reads frame by frame producing a
  # nframes x natom*3 matrix of Cartesian
  # coordinates
  #===DCD=FORMAT==============================================
  #HDR NSET ISTRT NSAVC 5-ZEROS NATOM-NFREAT DELTA 9-ZEROS
  #CORD files step1 step zeroes (zero) timestep zeroes
  #C*4 INT INT INT 5INT INT DOUBLE 9INT
  # [CHARACTER*20]
  #===========================================================
  #NTITLE TITLE
  #INT C*MAXTITL
  #C*2 C*80
  #===========================================================
  #NATOM
  #INT
  #===========================================================
  #CELL(I), I=1,6 (DOUBLE)
  #===========================================================
  #X(I), I=1,NATOM (SINGLE)
  #Y(I), I=1,NATOM
  #Z(I), I=1,NATOM
  #===========================================================
  dcd.header <- function(trj,...) {
    # Read DCD Header section; returns a list of header fields plus the
    # detected endianness and the end-of-file position.
    end = .Platform$endian # Check endianness of this platform
    check <- readBin(trj,"integer",1,endian=end)
    # first thing in file should be an '84' header
    if (check != 84) {
      # if not we have the wrong endianness: flip and re-decode the bytes
      if (end == "little") { end="big" } else { end="little" }
      check <- readBin(writeBin(check, raw()), "integer", 1, endian = end)
      if (check != 84) {
        close(trj)
        stop("PROBLEM with endian detection")
      }
    }
    hdr <- readChar(trj, nchars=4) # data => CORD or VELD
    # how big is the file 'end.pos'
    cur.pos <- seek(trj, where=1, origin = "end") # pos ?
    end.pos <- seek(trj, where=cur.pos, origin= "start")
    icntrl <- readBin(trj,"integer", 20, endian=end) # data => header info
    # header information:
    nframe = icntrl[1] # number of frames
    first = icntrl[2] # number of previous steps
    step = icntrl[3] # frequency of saving
    nstep = icntrl[4] # total number of steps
    nfile <- nstep/step # number of files
    last <- first + (step * nframe) # last step
    # 5 zeros
    ndegf = icntrl[8] # number of degrees of freedom
    nfixed = icntrl[9] # number of fixed atoms
    delta = icntrl[10] # coded time step
    cryst = icntrl[11] # crystallographic group
    block = icntrl[12] # extra block?
    # 9 zeros
    vers = icntrl[20]
    # flush to end of line
    a<-readBin(trj,"integer",1, endian=end) # should be '84' line tail
    ## cur.pos<-seek(trj, where=92, origin= "start") # position 92
    rm(icntrl) # tidy up
    # Are we CHARMM or X-PLOR format
    charmm=FALSE; extrablock=FALSE; four.dims=FALSE
    if (vers != 0) {
      charmm=TRUE # charmm version number
      if (cryst == 1) { # check for
        extrablock = TRUE # extra free
      } # atom block &
      if (block == 1) { # extra four
        four.dims=TRUE # dimensions
      }
    } else {
      # re-read X-PLOR delta as a double
      cur.pos <- seek(trj, where=44, origin= "start")
      delta = readBin(trj,"double", 1, endian=end)
      seek(trj, where=cur.pos, origin= "start")
    }
    #=======#
    # Title #
    a<-readBin(trj,"integer",1, endian=end) # flush FORTRAN header
    ntitle <- readBin(trj,"integer",1, endian=end) # data => Num title lines
    title<-NULL # store title & date
    cur.pos <- seek(trj, where=NA) ## 100
    for (i in 1:ntitle) {
      ### ==> !!!nasty hack due to invalid UTF-8 input (Jun 5th 07) !!! <=== ###
      ll<-try(title<-c( title, suppressWarnings(readChar(trj,80)) ),silent=TRUE)
    }
    # OR: title<- readChar(trj, (ntitle*80))
    if(inherits(ll, "try-error")) {
      # Title could not be read: warn and skip past the title block.
      warning("Check DCD header data is correct, particulary natom")
      ##cur.pos <- seek(trj, where=260, origin= "start") # pos 260
      cur.pos <- seek(trj, where=(80*ntitle+cur.pos), origin= "start")
    }
    ### == end hack
    a<-readBin(trj,"integer",1, endian=end) # flush FORTRAN tail
    #=======#
    # Natom #
    a<-readBin(trj,"integer",1, endian=end) # flush FORTRAN header
    natom <- readBin(trj,"integer",1, endian=end) # number of atoms
    a<-readBin(trj,"integer",1, endian=end) # flush FORTRAN tail
    ##cur.pos <- seek(trj, where=276, origin= "start") # pos 276
    #=============#
    # Freeindexes #
    if (nfixed != 0) {
      # Free (movable) atom indexes if nfixed > 0
      a <- readBin(trj,"integer",1, endian=end) # flush FORTRAN header
      free.ind <- readBin(trj,"integer", (natom-nfixed), endian=end )
      a <- readBin(trj,"integer",1, endian=end) # flush FORTRAN tail
      print("FIXED ATOMS IN SIMULATION => CAN'T READ YET")
    }
    if (verbose) {
      ## EDIT ## R version 2.11.0 does not like "\0", just remove for now - Apr 12 2010
      ## cat( sub(" +$","",gsub(pattern="\0", replacement="", x=title)),sep="\n" )
      cat(" NATOM =",natom,"\n")
      cat(" NFRAME=",nframe,"\n")
      cat(" ISTART=",first,"\n")
      cat(" last =",last,"\n")
      cat(" nstep =",nstep,"\n")
      cat(" nfile =",nfile,"\n")
      cat(" NSAVE =",step,"\n")
      cat(" NDEGF =",ndegf,"\n")
      cat(" version",vers,"\n")
    }
    # Done with Header :-)
    header <- list(natom=natom,
                   nframe=nframe,
                   first=first,
                   last=last,
                   nstep=nstep,
                   nfile=nfile,
                   step=step,
                   ndegf=ndegf,
                   nfixed=nfixed,
                   charmm=charmm,
                   extrablock=extrablock,
                   four.dims=four.dims,
                   end.pos=end.pos,
                   end=end)
  }
  dcd.frame <- function(trj, head, cell) {
    # DCD step/frame data
    # read one frame from the current connection 'trj'
    # which should have been already through
    # 'dcd.header' so the "where" position is at
    # the start of the coordinate section
    #============#
    # Free atoms #
    # Uncomment the next two lines if reading cell
    # parameters only works with CHARMM DCD files
    # if(!head$charmm && cell)
    # stop("Cell parameters can only be read from CHARMM dcd files.")
    if ( head$charmm && head$extrablock) {
      # CHARMM files may contain lattice parameters
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN header
      u <- readBin(trj, "numeric", size = 8, n = (a/8),endian = head$end)
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN tail
    }
    ##cur.pos <- seek(trj, where=332, origin= "start") # pos 332
    #========#
    # Coords #
    if (head$nfixed == 0) {
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN header
      x <- readBin(trj,"numeric", # read x coords
                   size=4, n=(a/4), endian=head$end)
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN tail
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN header
      y <- readBin(trj,"numeric", # read y coords
                   size=4, n=(a/4), endian=head$end)
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN tail
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN header
      z <- readBin(trj,"numeric", # read z coords
                   size=4, n=(a/4), endian=head$end)
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN tail
    } else {
      # not implemented yet! => cant cope with fixed atoms
    }
    #===============#
    # 4th dimension #
    if (head$charmm && head$four.dims) {
      # CHARMM files may contain an extra block?
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN header
      seek(trj, where=a, origin= "current") # skip this block
      a <- readBin(trj,"integer",1, endian=head$end) # flush FORTRAN tail
    }
    # Done with coord frame :-)
    #coords <- list(x=x,
    # y=y,
    # z=z)
    # cell=TRUE: lengths u[1,3,6] plus the three angles (degrees);
    # otherwise interleave as x1,y1,z1,x2,y2,z2,...
    if(cell) to.return <- c( u[c(1,3,6)], (180/pi)*acos(u[c(5,4,2)]))
    else to.return <- as.vector(rbind(x,y,z))
    class(to.return) = "xyz"
    return(to.return)
  }
  # Check if file exists
  if( !file.exists(trjfile) ) {
    stop(paste("No input DCD file found with name:", trjfile))
  }
  # Open file connection
  trj <- file(trjfile, "rb")
  #verbose=T
  head<-dcd.header(trj,verbose)
  nframes = head$nframe
  natoms = head$natom
  # blank xyz data structures
  # format: rows => nframes, cols => natoms
  ### ==> !!! Insert to read big dcd files (Sep 29th 08) !!! <=== ###
  ###xyz <- matrix(NA, nrow=nframes,ncol=natoms*3)
  if(!big) {
    if(cell) to.return <- matrix(NA, nrow=nframes,ncol=6)
    else to.return <- matrix(NA, nrow=nframes,ncol=natoms*3)
  } else {
    ##-! Insert to read big dcd files (Sep 29th 08)
    oops <- requireNamespace("bigmemory", quietly = TRUE)
    if(!oops)
      stop("Please install the bigmemory package from CRAN")
    if(cell) to.return <- bigmemory::big.matrix(nrow=nframes,ncol=6, init = NA, type = "double")
    else to.return <- bigmemory::big.matrix(nrow=nframes,ncol=natoms*3, init = NA, type = "double")
  }
  ### ==> !!! end big.matrix insert
  if(verbose){ cat("Reading (x100)") }
  store <- NULL
  # fill xyz with frame coords
  if(verbose) pb <- txtProgressBar(1, nframes, style=3)
  # NOTE(review): 1:nframes misbehaves if the header reports 0 frames
  # (yields c(1, 0)); seq_len(nframes) would be safer.
  for(i in 1:nframes) {
    curr.pos <- seek(trj, where=0, origin= "current")
    if (curr.pos <= head$end.pos) {
      to.return[i,]<- as.vector( dcd.frame(trj,head,cell) )
      if (verbose) {
        setTxtProgressBar(pb, i)
        # if(i %% 100==0) { cat(".") }
      }
      # print(paste("frame:",i,"pos:",curr.pos))
      store<-cbind(store,curr.pos)
    } else {
      # header promised more frames than the file contains
      print("Premature end of file")
      print(paste(" last frame:",i,
                  "nframe:",head$nframe ))
      break
    }
  }
  # if(verbose) { cat("done",sep="\n") }
  if(verbose)
    cat("\n")
  close(trj)
  ##class(to.return) = "xyz"
  if(big) {
    warning("Returned a 'big.memory' matrix that is not fully supported by some Bio3D functions.")
    return(to.return)
  }
  else {
    return( as.xyz(to.return) )
  }
}
|
c7ee5c9d451cb3b1a6a70f1af5d2cfdc51bdc69e | c0ca17c30d8739deacd4f246bb2fc0ceb3dce183 | /plot1.R | 991b67a9722ae3c5d3ba1e38c500033f1e4c821f | [] | no_license | jackelincl/ExData_Plotting1 | 9d3a237566e0df8801814982951fabbdca5fa03b | e1187e40327da17eabfafb0397020bea4ed9f5f2 | refs/heads/master | 2022-12-17T07:34:27.216005 | 2020-09-27T22:19:36 | 2020-09-27T22:19:36 | 299,112,859 | 0 | 0 | null | 2020-09-27T20:24:31 | 2020-09-27T20:24:30 | null | UTF-8 | R | false | false | 779 | r | plot1.R | #Downloading the file, unzipping and saving the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# NOTE(review): method="curl" requires the curl binary; the default
# method="auto" is more portable across platforms.
download.file(fileUrl, destfile = "electricpower.zip", method="curl")
unzip("electricpower.zip")
rawData <- "household_power_consumption.txt"
#Subsetting the data
# Semicolon-separated file; "?" marks missing values. quote/comment are
# disabled so embedded characters cannot break the parse.
data <- read.table(rawData, header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?",
quote="",comment.char="")
# Keep only the rows for the two target dates (Date compared as raw text).
data <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
#Opening the graphic device, creating the histogram and saving it in png
png(file="plot1.png", width = 480, height = 480)
hist(data$Global_active_power, col="red",
xlab="Global Active Power (kilowatts)",
ylab="Frequency", main="Global Active Power")
dev.off()
b32f94576ffdf1c124192d545783ab0f2757006b | cd76594c45fd529e00000ba6f436913c3a31b40a | /copula.R | 7e0642360c8742f3884692d40f1e49d383887b20 | [] | no_license | irudnyts/CopulaMLE | 0de514371089835e05dcc99f51e82e98ca413dc4 | f5af3c0ecd4afcefdc385bb457c873490f430e74 | refs/heads/master | 2016-09-05T22:07:45.087277 | 2014-12-11T10:23:06 | 2014-12-11T10:23:06 | 27,123,897 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,701 | r | copula.R | #install.packages("copula")
library("copula")
set.seed(1)
n <- 1000 # number of observation
p <- 0.3 # mixing parameter
a <- 5 # parameter of 1st copula
b <- 10 # parameter of second copula
I <- rbinom(n = n, size = 1, prob = p) # Bernoili r.v.
#_________________________________Gumbel copula_________________________________
data <- I * rCopula(n = n, copula = gumbelCopula(param = a, dim = 2)) +
(1 - I) * rCopula(n = n, copula = gumbelCopula(param = b, dim = 2))
density <- function(arg) {
sum(log(arg[1] * dCopula(u = data, copula =
gumbelCopula(param = arg[2], dim = 2))
+ (1 - arg[1]) * dCopula(u = data, copula =
gumbelCopula(param = arg[3], dim = 2))))
}
optim(par = c(0, 1, 1), fn = density, control = list(fnscale = -1),
lower = c(0,1,1), upper = c(1, Inf, Inf))
#_________________________________Frank copula__________________________________
# Simulate a two-component Frank mixture with parameters a and b.
data <- I * rCopula(n = n, copula = frankCopula(param = a, dim = 2)) +
    (1 - I) * rCopula(n = n, copula = frankCopula(param = b, dim = 2))
# Log-likelihood of the mixture; arg = c(p, alpha, beta)
density <- function(arg) {
  sum(log(arg[1] * dCopula(u = data, copula =
                             frankCopula(param = arg[2], dim = 2))
          + (1 - arg[1]) * dCopula(u = data, copula =
                                     frankCopula(param = arg[3], dim = 2))))
}
# Maximize the log-likelihood; explicit method = "L-BFGS-B" avoids the
# warning optim() emits when bounds are supplied with the default method.
optim(par = c(0.1, 1, 1), fn = density, control = list(fnscale = -1),
      method = "L-BFGS-B", lower = c(0, 1, 1), upper = c(1, Inf, Inf))
#________________________________Clayton copula_________________________________
# Simulate a two-component Clayton mixture with parameters a and b.
data <- I * rCopula(n = n, copula = claytonCopula(param = a, dim = 2)) +
    (1 - I) * rCopula(n = n, copula = claytonCopula(param = b, dim = 2))
# Log-likelihood of the mixture; arg = c(p, alpha, beta)
density <- function(arg) {
  sum(log(arg[1] * dCopula(u = data, copula =
                             claytonCopula(param = arg[2], dim = 2))
          + (1 - arg[1]) * dCopula(u = data, copula =
                                     claytonCopula(param = arg[3], dim = 2))))
}
# Maximize the log-likelihood; explicit method = "L-BFGS-B" avoids the
# warning optim() emits when bounds are supplied with the default method.
optim(par = c(0.1, 1, 1), fn = density, control = list(fnscale = -1),
      method = "L-BFGS-B", lower = c(0, 1, 1), upper = c(1, Inf, Inf))
#________________________________Mixed 2 copulas________________________________
# Simulate a Clayton(a)/Gumbel(b) mixture: the two components now belong to
# different copula families.
data <- I * rCopula(n = n, copula = claytonCopula(param = a, dim = 2)) +
    (1 - I) * rCopula(n = n, copula = gumbelCopula(param = b, dim = 2))
# Log-likelihood of the mixture; arg = c(p, alpha, beta)
density <- function(arg) {
  sum(log(arg[1] * dCopula(u = data, copula =
                             claytonCopula(param = arg[2], dim = 2))
          + (1 - arg[1]) * dCopula(u = data, copula =
                                     gumbelCopula(param = arg[3], dim = 2))))
}
# Maximize the log-likelihood; explicit method = "L-BFGS-B" avoids the
# warning optim() emits when bounds are supplied with the default method.
optim(par = c(0.1, 1, 1), fn = density, control = list(fnscale = -1),
      method = "L-BFGS-B", lower = c(0, 1, 1), upper = c(1, Inf, Inf))
#_______________________________General function________________________________
copula.mle <- function(sample, copula1, copula2, lower, upper,
                       start = c(0, 1, 1)) {
  # Maximum likelihood estimator for a two-component mixed copula
  #
  # Args:
  #   sample: a matrix of 2-dimensional random samples from the copula (i.e.
  #     values between 0 and 1; columns are univariate random samples from
  #     the uniform distribution)
  #   copula1: the constructor of the first copula (gumbelCopula etc.)
  #   copula2: the constructor of the second copula
  #   lower: the vector of lower boundaries of the parameters for copula1 and
  #     copula2 respectively
  #   upper: the vector of upper boundaries of the parameters for copula1 and
  #     copula2 respectively
  #   start: starting values c(p, alpha, beta) for the optimizer (defaults to
  #     the values used previously, so existing callers are unaffected)
  #
  # Returns:
  #   the vector of estimated parameters p, alpha and beta respectively

  # definition of the mixed copula log-likelihood, arg = c(p, alpha, beta)
  density <- function(arg) {
    sum(log(arg[1] * dCopula(u = sample, copula =
                               copula1(param = arg[2], dim = 2))
            + (1 - arg[1]) * dCopula(u = sample, copula =
                                       copula2(param = arg[3], dim = 2))))
  }
  # Maximize the log-likelihood and return the estimated parameters.
  # method = "L-BFGS-B" is the only optim() method that honours box
  # constraints; naming it explicitly suppresses the method-switch warning.
  optim(par = start, fn = density, control = list(fnscale = -1),
        method = "L-BFGS-B",
        lower = c(0, lower), upper = c(1, upper))$par
}
# Example: Gumbel-Gumbel mixture (true parameters: p = 0.3, a = 5, b = 10)
data <- I * rCopula(n = n, copula = gumbelCopula(param = a, dim = 2)) +
  (1 - I) * rCopula(n = n, copula = gumbelCopula(param = b, dim = 2))
copula.mle(sample = data, copula1 = gumbelCopula, copula2 = gumbelCopula,
           lower = c(1, 1), upper = c(Inf, Inf))
# Example: Frank-Gumbel mixture (first component Frank(a), second Gumbel(b))
data <- I * rCopula(n = n, copula = frankCopula(param = a, dim = 2)) +
  (1 - I) * rCopula(n = n, copula = gumbelCopula(param = b, dim = 2))
copula.mle(sample = data, copula1 = frankCopula, copula2 = gumbelCopula,
           lower = c(1, 1), upper = c(Inf, Inf))
b9d1353c01a54162974e6410b2908b031c4cfa34 | 388d7a62bbbd144f243438f9e6a5a456eb2cce3c | /R/getDayNight.R | 9e7e04641e00dc40c61a84a45c1afb70f0c1b3fe | [] | no_license | aspillaga/fishtrack3d | 64c7dcb2a97a833ef830d845e8bfbc3aaf387827 | 2be695e0f88d97e095f074acd17240cb8878dbbc | refs/heads/master | 2022-01-18T10:50:53.776454 | 2019-05-23T15:09:18 | 2019-05-23T15:09:18 | 118,634,135 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,293 | r | getDayNight.R | #' Classify the time stamps of a track into 'day' and 'night' categories
#'
#' This function classifies the time stamps of a track into day and night
#' categories according to the local sunrise and sunset times.
#'
#' @param time.stamp vector with the date and time of the detections, in
#' \code{POSIXt} format.
#' @param coord a \code{data.frame} or \code{matrix} object with a unique pair
#' of coordinates or with a pair of coordinates for each time stamp ('x'
#' and 'y' columns with the coordinates).
#' @param proj \link[rgdal]{CRS} object with the reference system of the
#' coordinates, by default longlat WGS84.
#'
#' @return a character vector with a 'D' (day) or 'N' (night) label for each
#' time stamp.
#'
#' @export
#'
#' @examples
#'
#' times <- Sys.time() + seq(-48, 48, 12) * 3600
#' coord <- cbind(x = 42.04818, y = 3.19555)
#' daynight <- getDayNight(times, coord)
#' daynight
#'
#'
getDayNight <- function(time.stamp, coord,
                        proj = sp::CRS("+proj=longlat +datum=WGS84")) {

  # This function needs the 'maptools' package to work
  if (!requireNamespace("maptools", quietly = TRUE)) {
    stop(paste("Package 'maptools' needed for this function to work. Please",
               "install it."), call. = FALSE)
  }

  # Check if arguments are correct =============================================
  # inherits() with short-circuiting '||' is used so a NULL or wrongly-classed
  # argument raises the intended message (the old 'class(x)[2]' and '|' checks
  # could produce an NA or zero-length condition and crash cryptically).
  if (is.null(time.stamp) || !inherits(time.stamp, "POSIXt")) {
    stop("Time stamps must be in 'POSIXt' format.", call. = FALSE)
  }
  if (is.null(coord) || !inherits(coord, c("data.frame", "matrix")) ||
      ncol(coord) != 2) {
    stop(paste("Coordinates must be provided in a 'data.frame' or 'matrix'",
               "with the 'x' and 'y' columns."), call. = FALSE)
  }
  if (nrow(coord) != 1 && nrow(coord) != length(time.stamp)) {
    stop(paste("Provide either a single pair of coordinates or a pair of",
               "coordinates for each time stamp"), call. = FALSE)
  }

  # Reproject the coordinates to longlat WGS84 when they come in another CRS
  ref.proj <- sp::CRS("+proj=longlat +datum=WGS84")
  coord <- as.matrix(coord)
  if (proj@projargs != ref.proj@projargs) {
    points <- sp::SpatialPoints(coord, proj4string = proj)
    points <- sp::spTransform(points, CRSobj = ref.proj)
    coord <- sp::coordinates(points)
  }

  # Build the points from the full coordinate matrix. (The previous version
  # used matrix(coord, 1, 2), which for multi-row input collapsed everything
  # into one wrong point made of the first two values of the matrix.)
  coordinates <- sp::SpatialPoints(coord, proj4string = ref.proj)

  if (nrow(coord) > 1) {
    # One pair of coordinates per time stamp: sunrise/sunset are computed for
    # each (location, time) pair
    sunset <- maptools::sunriset(coordinates, as.POSIXct(time.stamp),
                                 POSIXct.out = TRUE, direction = 'sunset')$time
    sunrise <- maptools::sunriset(coordinates, as.POSIXct(time.stamp),
                                  POSIXct.out = TRUE,
                                  direction = 'sunrise')$time
  } else {
    # Single location: compute sunrise/sunset once per unique date and map
    # the results back to the individual time stamps
    dates <- as.Date(time.stamp)
    unique.dates <- unique(dates)
    sunset <- maptools::sunriset(coordinates, as.POSIXct(unique.dates),
                                 POSIXct.out = TRUE, direction = 'sunset')$time
    sunrise <- maptools::sunriset(coordinates, as.POSIXct(unique.dates),
                                  POSIXct.out = TRUE,
                                  direction = 'sunrise')$time
    sunset <- sunset[match(dates, unique.dates)]
    sunrise <- sunrise[match(dates, unique.dates)]
  }

  # 'D' from local sunrise (inclusive) up to sunset (exclusive), 'N' otherwise
  period <- ifelse(time.stamp >= sunrise & time.stamp < sunset, "D", "N")
  return(period)
}
|
767ea1420cd3b6ead92a7662fd3d9f0276805353 | c5585993ea0bea0ebea10b1142f12098984806f2 | /R/operators.R | fa40ef7c87520de6b4ef7ecfe216103d7ceacac3 | [
"MIT"
] | permissive | Sithara26/strafica | b0ff86e3339e868152c840c61379ef8571248dcb | 0e1084a96c17c6e69b419893d7844940f946bc4f | refs/heads/master | 2023-03-18T12:49:31.617195 | 2021-03-04T07:00:53 | 2021-03-04T07:00:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 376 | r | operators.R | # -*- coding: us-ascii-unix -*-
#' Test against inclusion, opposite of \code{\%in\%}.
#' @param x vector or \code{NULL}: the values to be matched
#' @param table vector or \code{NULL}: the values to be matched against
#' @return A logical vector.
#' @export
#' @name %nin%
#' @rdname nin
#' @usage x \%nin\% table
`%nin%` <- function(x, table) {
    # TRUE wherever an element of x has no match in table; this is exactly
    # the negation of %in% (which is defined via match()).
    is.na(match(x, table))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.