content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
withNames =
function(x, n) {temp = data.frame(x=x,n=n);
x = temp$x;
n = temp$n;
names(x) <- n;
x}
|
/CTDesignExperimenter/inst/helperCode/withNames.R
|
no_license
|
professorbeautiful/CTDesignExperimenter
|
R
| false
| false
| 162
|
r
|
withNames =
function(x, n) {temp = data.frame(x=x,n=n);
x = temp$x;
n = temp$n;
names(x) <- n;
x}
|
#Math E-156 Script 6B-BootstrapIntro.R
#Topic 1 - A bootstrap sampling distribution
#Start with the example from section 5.1 of the textbook
#For birth weights of NC babies, we do not know the population distribution.
NCB<-read.csv("NCBirths2004.csv"); head(NCB)
BabyWt<-NCB$Weight; hist(BabyWt, breaks = "FD")
#Now suppose that, instead of a sample of 1009, all we had was a sample of 6 (the book uses 3)
Sample6<-sample(BabyWt,6); Sample6
#Pretend that this was the entire population (like an unfair die)
#We can look at the means for all possible samples of size 6 (like rolling the die 6 times)
Baby6<-expand.grid(Sample6,Sample6,Sample6,Sample6,Sample6,Sample6); head(Baby6); nrow(Baby6)
#Work with this as if it were a population data frame with 6^6 rows
x<-rowMeans(Baby6)
hist(x,breaks="FD") #shape is similar to the original full distribution
#Alternatively, we can draw random samples as we did with dice and cards
N=10000; result<-numeric(N)
for (i in 1:N){
result[i] <-mean(sample(Sample6,6,replace = TRUE))
}
plot1<-hist(result, breaks= "FD")
#For comparison, we can draw random samples from the full sample of 2009 babies
N=10000; big6<-numeric(N)
for (i in 1:N){
big6[i] <-mean(sample(BabyWt,6,replace = TRUE))
}
plot2<-hist(big6, breaks= "FD")
#Google "R histogram transparent colors" for details of the following trick.
#On overlaying the histograms, observe that they have different means but similar shape
plot( plot1, col=rgb(0,0,1,1/4)) # first histogram, using all the data
plot( plot2, col=rgb(1,0,0,1/4), add=T) # second, made from the bootstrap
#Now use samples of size 1009 from the original data.
#We get a good approximation to the sampling distribution
N=10000; bigsample<-numeric(N)
for (i in 1:N){
bigsample[i] <-mean(sample(BabyWt,1009,replace = TRUE))
}
hist(bigsample, breaks= "FD", probability = TRUE)
#Of course, now the central limit theorem applies
curve(dnorm(x,mean(BabyWt), sd(BabyWt)/sqrt(1009)), col = "red", add = TRUE)
#Topic 2 -- trying a bootstrap where we know the population distribution
#Example 5.1, where the sample of 50 is from the normal distribution N(23, 7^2)
curve(dnorm(x, 23, 7), from = 6, to = 39) #figure 5.2a
abline(v = 23, col = "red", lty = 2)
my.sample <- rnorm(50, 23, 7)
hist(my.sample, breaks ="FD", freq = F) #will resemble figure 5.2b
mean(my.sample) #will be close to 23,
abline(v = mean(my.sample), col = "blue", lty = 2)
#Overlay the population density function
curve(dnorm(x, 23, 7), from = 6, to = 39, col = "red", add = TRUE)
abline(v = 23, col = "red", lty = 2)
#Now compare the bootstrap distribution with the sampling distribution
N=10^5; my.boot<-numeric(N)
for (i in 1:N) {
my.boot[i] = mean(sample(my.sample, 50, replace = TRUE))
}
hist(my.boot, breaks= "FD", probability = TRUE)
curve(dnorm(x,23,7/sqrt(50)), col = "red", add = TRUE)
abline(v = mean(my.sample), col = "blue", lty = 2)
abline(v = 23, col = "red", lty = 2)
#The shape of the bootstrap distribution is a great match (CLT)
#As usual, though, the mean is wrong
#The variance of the bootstrap distribution is determined by the variance of the sample
#We could use the bootstrap distribution to determine what to add and subtract
#if we want to generate endpoints of a confidence interval
#Calculate the amount X.add to add to the sample mean
X.add <- mean(my.boot)-quantile(my.boot,0.025); X.add #compare with 1.940265
#Calculate an amount X.sub to subtract from the sample mean
X.sub <- quantile(my.boot,0.975)-mean(my.boot); X.sub #compare with 1.940265
#Topic 3 -- the bootstrap reveals skewness in the population
#Example 5.2, where the sample is from the skewed distribution Gamma(1,1/2)
curve(dgamma(x, 1, 1/2), from = 0, to = 10) #exponential with lambda = 1/2
my.sample <- rgamma(16, 1, 1/2) #draw 16 samples from this population
mean(my.sample) #expected mean is 2 but will vary widely
hist(my.sample, breaks= "FD", probability = TRUE)
abline(v = mean(my.sample), col = "blue", lty = 2)
curve(dgamma(x, 1, 1/2), col = "red", add = T)
abline(v = 2, col = "red", lty = 2) #population mean is 2
#Now compare the bootstrap and sampling distributions
N=10^5; my.boot<-numeric(N)
for (i in 1:N) {
my.boot[i] = mean(sample(my.sample, 16, replace = TRUE))
}
hist(my.boot, breaks= "FD", probability = TRUE)
abline(v = mean(my.boot), col = "blue", lty = 2)
#Overlay the theoretical sampling distribution
curve(dgamma(x,16,8), col = "red", add = TRUE)
abline(v = 2, col = "red", lty = 2)
#The bootstrap data has the shape of the sampling distribution but the mean of the sample
#We can compare the third central moments to check skewness
mean((my.boot - mean(my.boot))^3) #from the bootstrap - should be positive
integrate(function(x) dgamma(x,16,8)*(x-2)^3, 0, Inf)
#Again we can use the bootstrap distribution to determine what to add and subtract
#if we want to generate endpoints of a confidence interval
#Calculate the amount X.add to add to the sample mean
X.add <- mean(my.boot)-quantile(my.boot,0.025); X.add #compare with 0.8153184
#Calculate an amount X.sub to subtract from the sample mean
X.sub <- quantile(my.boot,0.975)-mean(my.boot); X.sub #compare with 1.134036
#In spite of the single tiny sample, the bootstrap captures the fact
#that the confidence interval should not be centered on the sample mean.
#It makes a big difference whether a couple of our 16 samples came from the tail.
#Repeat topic 3 for several different samples of 16.
|
/Math_E156/class6/6B-BootstrapIntro.R
|
no_license
|
ddarl4/ModernKicks
|
R
| false
| false
| 5,569
|
r
|
#Math E-156 Script 6B-BootstrapIntro.R
#Topic 1 - A bootstrap sampling distribution
#Start with the example from section 5.1 of the textbook
#For birth weights of NC babies, we do not know the population distribution.
NCB<-read.csv("NCBirths2004.csv"); head(NCB)
BabyWt<-NCB$Weight; hist(BabyWt, breaks = "FD")
#Now suppose that, instead of a sample of 1009, all we had was a sample of 6 (the book uses 3)
Sample6<-sample(BabyWt,6); Sample6
#Pretend that this was the entire population (like an unfair die)
#We can look at the means for all possible samples of size 6 (like rolling the die 6 times)
Baby6<-expand.grid(Sample6,Sample6,Sample6,Sample6,Sample6,Sample6); head(Baby6); nrow(Baby6)
#Work with this as if it were a population data frame with 6^6 rows
x<-rowMeans(Baby6)
hist(x,breaks="FD") #shape is similar to the original full distribution
#Alternatively, we can draw random samples as we did with dice and cards
N=10000; result<-numeric(N)
for (i in 1:N){
result[i] <-mean(sample(Sample6,6,replace = TRUE))
}
plot1<-hist(result, breaks= "FD")
#For comparison, we can draw random samples from the full sample of 2009 babies
N=10000; big6<-numeric(N)
for (i in 1:N){
big6[i] <-mean(sample(BabyWt,6,replace = TRUE))
}
plot2<-hist(big6, breaks= "FD")
#Google "R histogram transparent colors" for details of the following trick.
#On overlaying the histograms, observe that they have different means but similar shape
plot( plot1, col=rgb(0,0,1,1/4)) # first histogram, using all the data
plot( plot2, col=rgb(1,0,0,1/4), add=T) # second, made from the bootstrap
#Now use samples of size 1009 from the original data.
#We get a good approximation to the sampling distribution
N=10000; bigsample<-numeric(N)
for (i in 1:N){
bigsample[i] <-mean(sample(BabyWt,1009,replace = TRUE))
}
hist(bigsample, breaks= "FD", probability = TRUE)
#Of course, now the central limit theorem applies
curve(dnorm(x,mean(BabyWt), sd(BabyWt)/sqrt(1009)), col = "red", add = TRUE)
#Topic 2 -- trying a bootstrap where we know the population distribution
#Example 5.1, where the sample of 50 is from the normal distribution N(23, 7^2)
curve(dnorm(x, 23, 7), from = 6, to = 39) #figure 5.2a
abline(v = 23, col = "red", lty = 2)
my.sample <- rnorm(50, 23, 7)
hist(my.sample, breaks ="FD", freq = F) #will resemble figure 5.2b
mean(my.sample) #will be close to 23,
abline(v = mean(my.sample), col = "blue", lty = 2)
#Overlay the population density function
curve(dnorm(x, 23, 7), from = 6, to = 39, col = "red", add = TRUE)
abline(v = 23, col = "red", lty = 2)
#Now compare the bootstrap distribution with the sampling distribution
N=10^5; my.boot<-numeric(N)
for (i in 1:N) {
my.boot[i] = mean(sample(my.sample, 50, replace = TRUE))
}
hist(my.boot, breaks= "FD", probability = TRUE)
curve(dnorm(x,23,7/sqrt(50)), col = "red", add = TRUE)
abline(v = mean(my.sample), col = "blue", lty = 2)
abline(v = 23, col = "red", lty = 2)
#The shape of the bootstrap distribution is a great match (CLT)
#As usual, though, the mean is wrong
#The variance of the bootstrap distribution is determined by the variance of the sample
#We could use the bootstrap distribution to determine what to add and subtract
#if we want to generate endpoints of a confidence interval
#Calculate the amount X.add to add to the sample mean
X.add <- mean(my.boot)-quantile(my.boot,0.025); X.add #compare with 1.940265
#Calculate an amount X.sub to subtract from the sample mean
X.sub <- quantile(my.boot,0.975)-mean(my.boot); X.sub #compare with 1.940265
#Topic 3 -- the bootstrap reveals skewness in the population
#Example 5.2, where the sample is from the skewed distribution Gamma(1,1/2)
curve(dgamma(x, 1, 1/2), from = 0, to = 10) #exponential with lambda = 1/2
my.sample <- rgamma(16, 1, 1/2) #draw 16 samples from this population
mean(my.sample) #expected mean is 2 but will vary widely
hist(my.sample, breaks= "FD", probability = TRUE)
abline(v = mean(my.sample), col = "blue", lty = 2)
curve(dgamma(x, 1, 1/2), col = "red", add = T)
abline(v = 2, col = "red", lty = 2) #population mean is 2
#Now compare the bootstrap and sampling distributions
N=10^5; my.boot<-numeric(N)
for (i in 1:N) {
my.boot[i] = mean(sample(my.sample, 16, replace = TRUE))
}
hist(my.boot, breaks= "FD", probability = TRUE)
abline(v = mean(my.boot), col = "blue", lty = 2)
#Overlay the theoretical sampling distribution
curve(dgamma(x,16,8), col = "red", add = TRUE)
abline(v = 2, col = "red", lty = 2)
#The bootstrap data has the shape of the sampling distribution but the mean of the sample
#We can compare the third central moments to check skewness
mean((my.boot - mean(my.boot))^3) #from the bootstrap - should be positive
integrate(function(x) dgamma(x,16,8)*(x-2)^3, 0, Inf)
#Again we can use the bootstrap distribution to determine what to add and subtract
#if we want to generate endpoints of a confidence interval
#Calculate the amount X.add to add to the sample mean
X.add <- mean(my.boot)-quantile(my.boot,0.025); X.add #compare with 0.8153184
#Calculate an amount X.sub to subtract from the sample mean
X.sub <- quantile(my.boot,0.975)-mean(my.boot); X.sub #compare with 1.134036
#In spite of the single tiny sample, the bootstrap captures the fact
#that the confidence interval should not be centered on the sample mean.
#It makes a big difference whether a couple of our 16 samples came from the tail.
#Repeat topic 3 for several different samples of 16.
|
setwd(dir = '/Users/skick/Desktop/NYC Data Science Academy/Class_R/')
library(dplyr)
library(ggplot2)
#Question 1
#1
Champions = read.csv('Champions.csv', stringsAsFactors = FALSE)
#View(Champions)
tbl_df = filter(Champions, HomeGoal > AwayGoal)
filter(Champions, HomeTeam == 'Barcelona' | HomeTeam == 'Real Madrid')
#2
Home = select(Champions, starts_with('Home'))
Smaller = select(Champions,
contains('Team'),
contains('Goal'),
contains('Corner'))
head(Home)
head(Smaller)
#3
arrange(Smaller, desc(HomeGoal))
#4
by_hometeam = group_by(Champions, HomeTeam)
summarise(by_hometeam,
Avg_goal = mean(HomeGoal),
Avg_poss = mean(HomePossession),
Avg_yellow = mean(HomeYellow))
#5
#optional
temp = mutate(CL, score = ifelse(HomeGoal > AwayGoal,
paste(HomeGoal, AwayGoal, sep = "-"),
paste(AwayGoal, HomeGoal, sep = "-")))
temp = group_by(temp, score)
temp = arrange(summarise(temp, n = sum(n)), desc(n))
temp[1:5, ]
## Another solution using apply
cl_sub2=select(CL,contains("Goal"))
# Nice solution by transpose the matrix.
all_score<-t(apply(cl_sub2,1,sort))
all<-data.frame(score=apply(all_score,1,paste,collapse=""))
score_frq<-all %>%
group_by(.,score)%>%
summarise(.,count=n()) %>%
arrange(.,desc(count))
score_frq[1:5,]
##### SE version of dplyr
##### https://cran.r-project.org/web/packages/dplyr/vignettes/nse.html
#Question 2
#1
data(cars)
p = ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point()
#2
p +
ggtitle('Speed Vs. Distance') +
labs(x = 'Speed (mpg)', y = 'Stopping Distance (ft)')
#3
ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point(pch = 17, col = 'red')
#Question 3
data(faithful)
#View(faithful)
#1
faithful$length = ifelse(faithful$eruptions < 3.2, 'short', 'long')
faithful$length = as.factor(faithful$length)
#2
ggplot(data = faithful, aes(x = length, y = waiting)) + geom_boxplot(aes(color = length))
#3
ggplot(data= faithful, aes(x = waiting)) + geom_density(aes(color = length))
#4
#From the density curves, it seems the waiting times for the long eruptions are around 80 minutes,
#and the times for the short eruptions is around 54 minutes.
#From the box plots, you can see the same thing within the common values.
#Question 4
knicks = load('Knicks.RDA') #saves the table under "data" for some reason ??????
knicks = data #reassign the data frame to "knicks"
#View(knicks)
#1
Winratio_byseason = knicks %>%
group_by(season) %>%
summarise(winning_ratio = sum(win == 'W')/n())
#could use spread to split the win into two columns then just count the columns that have it
ggplot(Winratio_byseason, aes(x = season, y = winning_ratio)) +
geom_bar(stat = 'identity', aes(fill = season)) #doesn't work unless use stat = 'identity'
#2
Winratio_byhome = knicks %>%
group_by(season, visiting) %>%
mutate(winning_ratio = sum(win == 'W')/n()) #can use summarise instead of mutate
ggplot(Winratio_byhome,
aes(x = season, y = winning_ratio)) +
geom_bar(aes(fill = visiting),
position = 'dodge',
stat = 'identity')
#3
ggplot(knicks, aes(x = points)) +
geom_histogram(binwidth = 5,
aes(fill = season)) +
facet_wrap(~season)
#4
#optional
knicks3 <- group_by(knicks, opponent) %>%
summarise(ratio=sum(win=="W")/n(), diff=mean(points-opp))
ggplot(knicks3,aes(x=diff, y=ratio)) +
geom_point(color='red4',size=4)+
geom_hline(yintercept=0.5,colour='grey20',size=0.5,linetype=2)+ #at 0.5 for winning/losing percentage
geom_vline(xintercept=0,colour='grey20',size=0.5,linetype=2)+ #at 0 for winning/losing point diff #could put at mean
geom_text(aes(label=substring(opponent,1,5)),
hjust=0.7, vjust=1.4,angle = -35)+
theme_bw()
|
/R_practice/Ggplot2_prac.R
|
no_license
|
skickham/brainteasers
|
R
| false
| false
| 3,885
|
r
|
setwd(dir = '/Users/skick/Desktop/NYC Data Science Academy/Class_R/')
library(dplyr)
library(ggplot2)
#Question 1
#1
Champions = read.csv('Champions.csv', stringsAsFactors = FALSE)
#View(Champions)
tbl_df = filter(Champions, HomeGoal > AwayGoal)
filter(Champions, HomeTeam == 'Barcelona' | HomeTeam == 'Real Madrid')
#2
Home = select(Champions, starts_with('Home'))
Smaller = select(Champions,
contains('Team'),
contains('Goal'),
contains('Corner'))
head(Home)
head(Smaller)
#3
arrange(Smaller, desc(HomeGoal))
#4
by_hometeam = group_by(Champions, HomeTeam)
summarise(by_hometeam,
Avg_goal = mean(HomeGoal),
Avg_poss = mean(HomePossession),
Avg_yellow = mean(HomeYellow))
#5
#optional
temp = mutate(CL, score = ifelse(HomeGoal > AwayGoal,
paste(HomeGoal, AwayGoal, sep = "-"),
paste(AwayGoal, HomeGoal, sep = "-")))
temp = group_by(temp, score)
temp = arrange(summarise(temp, n = sum(n)), desc(n))
temp[1:5, ]
## Another solution using apply
cl_sub2=select(CL,contains("Goal"))
# Nice solution by transpose the matrix.
all_score<-t(apply(cl_sub2,1,sort))
all<-data.frame(score=apply(all_score,1,paste,collapse=""))
score_frq<-all %>%
group_by(.,score)%>%
summarise(.,count=n()) %>%
arrange(.,desc(count))
score_frq[1:5,]
##### SE version of dplyr
##### https://cran.r-project.org/web/packages/dplyr/vignettes/nse.html
#Question 2
#1
data(cars)
p = ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point()
#2
p +
ggtitle('Speed Vs. Distance') +
labs(x = 'Speed (mpg)', y = 'Stopping Distance (ft)')
#3
ggplot(data = cars, aes(x = speed, y = dist)) +
geom_point(pch = 17, col = 'red')
#Question 3
data(faithful)
#View(faithful)
#1
faithful$length = ifelse(faithful$eruptions < 3.2, 'short', 'long')
faithful$length = as.factor(faithful$length)
#2
ggplot(data = faithful, aes(x = length, y = waiting)) + geom_boxplot(aes(color = length))
#3
ggplot(data= faithful, aes(x = waiting)) + geom_density(aes(color = length))
#4
#From the density curves, it seems the waiting times for the long eruptions are around 80 minutes,
#and the times for the short eruptions is around 54 minutes.
#From the box plots, you can see the same thing within the common values.
#Question 4
knicks = load('Knicks.RDA') #saves the table under "data" for some reason ??????
knicks = data #reassign the data frame to "knicks"
#View(knicks)
#1
Winratio_byseason = knicks %>%
group_by(season) %>%
summarise(winning_ratio = sum(win == 'W')/n())
#could use spread to split the win into two columns then just count the columns that have it
ggplot(Winratio_byseason, aes(x = season, y = winning_ratio)) +
geom_bar(stat = 'identity', aes(fill = season)) #doesn't work unless use stat = 'identity'
#2
Winratio_byhome = knicks %>%
group_by(season, visiting) %>%
mutate(winning_ratio = sum(win == 'W')/n()) #can use summarise instead of mutate
ggplot(Winratio_byhome,
aes(x = season, y = winning_ratio)) +
geom_bar(aes(fill = visiting),
position = 'dodge',
stat = 'identity')
#3
ggplot(knicks, aes(x = points)) +
geom_histogram(binwidth = 5,
aes(fill = season)) +
facet_wrap(~season)
#4
#optional
knicks3 <- group_by(knicks, opponent) %>%
summarise(ratio=sum(win=="W")/n(), diff=mean(points-opp))
ggplot(knicks3,aes(x=diff, y=ratio)) +
geom_point(color='red4',size=4)+
geom_hline(yintercept=0.5,colour='grey20',size=0.5,linetype=2)+ #at 0.5 for winning/losing percentage
geom_vline(xintercept=0,colour='grey20',size=0.5,linetype=2)+ #at 0 for winning/losing point diff #could put at mean
geom_text(aes(label=substring(opponent,1,5)),
hjust=0.7, vjust=1.4,angle = -35)+
theme_bw()
|
get_pas_by_gene_single = function(pas_by_gene){
pas_by_gene_single = pas_by_gene[sapply(pas_by_gene, nrow) == 1]
pas_by_gene_single = pas_by_gene_single[sapply(pas_by_gene_single, function(x) x$LOCATION) != "Intron"]
length(pas_by_gene_single)
names(pas_by_gene_single) = sapply(pas_by_gene_single, function(x) x$Gene.Symbol[1])
return(pas_by_gene_single)
}
trunc_matrix = function(Smat){
eSmat = eigen(Smat)
eSmat$values[eSmat$values < 0] = 0
Smat = eSmat$vectors %*% diag(eSmat$values) %*% t(eSmat$vectors)
return(Smat)
}
paired_test = function(X1, X2){
npas = nrow(X1)
n = ncol(X1)
N1 = colSums(X1)
N2 = colSums(X2)
NN1 = sum(N1)
NN2 = sum(N2)
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
fraclist = lapply(1:n, function(i){
cbind(mi1[,i], mi2[, i])
})
if(NN1 <= n | NN2 <= n){
return(list(pvalue = NA, fraclist = fraclist))
}else{
idx = which(N1 > 0 & N2 > 0)
if(length(idx) < n){
if(length(idx) == 1) return(list(pvalue = NA, fraclist = fraclist))
X1 = X1[, idx]
X2 = X2[, idx]
n = ncol(X1)
}
if(n - npas < 2){
a = rowSums(X1) + rowSums(X2)
idx = order(a, decreasing = T)[1:(n-2)]
X1 = X1[idx, ]
X2 = X2[idx, ]
npas = nrow(X1)
}
N1 = colSums(X1)
N2 = colSums(X2)
NN1 = sum(N1)
NN2 = sum(N2)
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
Mc1 = (NN1 - sum(N1^2) / NN1) / (n-1)
Mc2 = (NN2 - sum(N2^2) / NN2) / (n-1)
m1 = rowSums(X1) / NN1
m2 = rowSums(X2) / NN2
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
diff1 = mi1 - m1
diff2 = mi2 - m2
S1 = diff1 %*% diag(N1) %*% t(diff1) / (n-1)
S2 = diff2 %*% diag(N2) %*% t(diff2) / (n-1)
G1 = (diag(mi1 %*% N1) - mi1 %*% diag(N1) %*% t(mi1)) / (NN1-n)
G2 = (diag(mi2 %*% N2) - mi2 %*% diag(N2) %*% t(mi2)) / (NN2-n)
V = diff1 %*% diag(N1+N2) %*% t(diff2) /(Mc1+Mc2) / (n-1)
V = (V + t(V)) * sum(N1*N2) / (NN1*NN2)
Sig1 = S1 * sum(N1^2) / Mc1 / (NN1^2) + G1 * (Mc1 - sum(N1^2) / NN1) / NN1 / Mc1
Sig2 = S2 * sum(N2^2) / Mc2 / (NN2^2) + G2 * (Mc2 - sum(N2^2) / NN2) / NN2 / Mc2
Sig1 = trunc_matrix(Sig1)
Sig2 = trunc_matrix(Sig2)
Smat = Sig1 + Sig2 - V
Smat = trunc_matrix(Smat)
stat = t(m1-m2) %*% ginv(Smat) %*% (m1-m2) * (n-npas+1)/(n-1)/(npas-1)
pval = 1-pf(as.numeric(stat), npas-1, n-npas+1)
}
return(list(Smat = Smat, pvalue = pval, fraclist = fraclist))
}
|
/R/utils.R
|
no_license
|
vallurumk/MAAPER
|
R
| false
| false
| 2,479
|
r
|
get_pas_by_gene_single = function(pas_by_gene){
pas_by_gene_single = pas_by_gene[sapply(pas_by_gene, nrow) == 1]
pas_by_gene_single = pas_by_gene_single[sapply(pas_by_gene_single, function(x) x$LOCATION) != "Intron"]
length(pas_by_gene_single)
names(pas_by_gene_single) = sapply(pas_by_gene_single, function(x) x$Gene.Symbol[1])
return(pas_by_gene_single)
}
trunc_matrix = function(Smat){
eSmat = eigen(Smat)
eSmat$values[eSmat$values < 0] = 0
Smat = eSmat$vectors %*% diag(eSmat$values) %*% t(eSmat$vectors)
return(Smat)
}
paired_test = function(X1, X2){
npas = nrow(X1)
n = ncol(X1)
N1 = colSums(X1)
N2 = colSums(X2)
NN1 = sum(N1)
NN2 = sum(N2)
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
fraclist = lapply(1:n, function(i){
cbind(mi1[,i], mi2[, i])
})
if(NN1 <= n | NN2 <= n){
return(list(pvalue = NA, fraclist = fraclist))
}else{
idx = which(N1 > 0 & N2 > 0)
if(length(idx) < n){
if(length(idx) == 1) return(list(pvalue = NA, fraclist = fraclist))
X1 = X1[, idx]
X2 = X2[, idx]
n = ncol(X1)
}
if(n - npas < 2){
a = rowSums(X1) + rowSums(X2)
idx = order(a, decreasing = T)[1:(n-2)]
X1 = X1[idx, ]
X2 = X2[idx, ]
npas = nrow(X1)
}
N1 = colSums(X1)
N2 = colSums(X2)
NN1 = sum(N1)
NN2 = sum(N2)
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
Mc1 = (NN1 - sum(N1^2) / NN1) / (n-1)
Mc2 = (NN2 - sum(N2^2) / NN2) / (n-1)
m1 = rowSums(X1) / NN1
m2 = rowSums(X2) / NN2
mi1 = t(t(X1) / N1)
mi2 = t(t(X2) / N2)
diff1 = mi1 - m1
diff2 = mi2 - m2
S1 = diff1 %*% diag(N1) %*% t(diff1) / (n-1)
S2 = diff2 %*% diag(N2) %*% t(diff2) / (n-1)
G1 = (diag(mi1 %*% N1) - mi1 %*% diag(N1) %*% t(mi1)) / (NN1-n)
G2 = (diag(mi2 %*% N2) - mi2 %*% diag(N2) %*% t(mi2)) / (NN2-n)
V = diff1 %*% diag(N1+N2) %*% t(diff2) /(Mc1+Mc2) / (n-1)
V = (V + t(V)) * sum(N1*N2) / (NN1*NN2)
Sig1 = S1 * sum(N1^2) / Mc1 / (NN1^2) + G1 * (Mc1 - sum(N1^2) / NN1) / NN1 / Mc1
Sig2 = S2 * sum(N2^2) / Mc2 / (NN2^2) + G2 * (Mc2 - sum(N2^2) / NN2) / NN2 / Mc2
Sig1 = trunc_matrix(Sig1)
Sig2 = trunc_matrix(Sig2)
Smat = Sig1 + Sig2 - V
Smat = trunc_matrix(Smat)
stat = t(m1-m2) %*% ginv(Smat) %*% (m1-m2) * (n-npas+1)/(n-1)/(npas-1)
pval = 1-pf(as.numeric(stat), npas-1, n-npas+1)
}
return(list(Smat = Smat, pvalue = pval, fraclist = fraclist))
}
|
/man/obscure.sample.lt.Rd
|
no_license
|
DistanceDevelopment/WiSP
|
R
| false
| false
| 3,088
|
rd
| ||
#====================================================================================#
# PURPOSE Test of calling the main wrapper function of the timecounts package.
# thi
#
# Authors Stefanos Kechagias, James Livsey, Vladas Pipiras, Jiajie Kong
# Date Fall 2022
# Version 4.2.1
#====================================================================================#
# load libraries
library(countsFun)
library(tictoc)
library(optimx)
library(ltsa)
library(itsmr)
# library(lavaSearch2)
library(numDeriv)
# library(sandwich)
library(MASS)
# # FIX ME: Check Where is this used?
# symmetrize = lavaSearch2:::symmetrize
# load the data
mysales = read.csv("data/MySelectedSeries.csv")
# specify parameters
n = 104
epsilon = 0.5
MaxCdf = 1000
nHC = 30
ParticleNumber = 10
data = mysales$MOVE[1:n]
ARMAorder = c(3,0)
Regressor = cbind(rep(1,length(mysales$Buy[1:n] )),mysales$Buy[1:n] )
CountDist = "Negative Binomial"
# the Poisson will yield an infinite value.
# CountDist = "Poisson"
EstMethod = "PFR"
OptMethod = "bobyqa"
maxit = 0
# initialParam = c(2.597666, 1.15373, 1.197833, -0.3748205, 0.226, 0.227)
initialParam = NULL
#initialParam = c(2.13258844, 1.16177357, -0.39141331, 0.08239859, 0.05745040)
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam, EstMethod, maxit)
# stop if there was an error in model specification
# if(mod$error) stop(mod$errorMsg)
#
# # fix me: I need a function that computes initial parameters
# if (is.null(initialParam)){
# theta = InitialEstimates(mod)
# }else{
# theta = mod$initialParam
# }
# theta = c(2.264, 1.01,-0.341, 0.223, 0.291)
# ParticleFilter_Res_AR(theta, mod)
# call the wrapper
a = countC(data, Regressor=NULL, CountDist, EstMethod, ARMAorder,
nHC, MaxCdf, ParticleNumber, epsilon, initialParam ,
OptMethod, maxit)
#
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam, EstMethod)
#
#
# theta = c(2.264, 1.01, 1.21, -0.341, 0.223, 0.291)
# optim.output <- optimx(par = theta,
# fn = GaussianLogLik,
# data = data,
# Regressor = Regressor,
# mod = mod,
# lower = mod$LB,
# upper = mod$UB,
# method = OptMethod,
# hessian = TRUE)
#
#
# ParmEst = as.numeric(optim.output[1:mod$nparms])
# loglik = optim.output$value
#
#
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = data,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # control=list(all.methods=TRUE),
# # hessian = TRUE)
#
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = data,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # method = "Rvmmin",
# # hessian = TRUE)
#
#
# # save estimates, loglik value and diagonal hessian
# ParmEst = as.numeric(optim.output[5,1:mod$nparms])
# loglik = optim.output$value
# convcode = optim.output$convcode
# kkt1 = optim.output$kkt1
# kkt2 = optim.output$kkt2
#
#
# # compute sandwich standard errors
# se = sand(ParmEst, data, Regressor, mod)
#
#
#
# h <- gHgen(fn = GaussianLogLik,
# par = theta,
# data = data, # additional arg for GaussLogLik
# Regressor = Regressor, # additional arg for GaussLogLik
# mod = mod) # additional arg for GaussLogLik
# SE.hess <- sqrt(diag(solve(h$Hn)))
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# #sand(theta, data, Regressor, mod)
#
#
# countC(data, Regressor, CountDist, EstMethod, ARMAorder,nHC, MaxCdf,ParticleNumber, epsilon, initialParam, OptMethod, maxit)
#
#
#
#
#
#
#
#
#
# #Regressor = cbind(rep(1,length(Buy)),Buy)
# theta = c(2, 0.5)
# initialParam = theta
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam)
#
# sand(theta, data, Regressor, mod)
#
# #FitGaussianLogLik (theta, data, Regressor, mod, OptMethod)
#
#
#
#
#
#
#
#
#
#
#
# GaussianLogLik(theta, data, Regressor, mod)
# GaussLogLik = function(theta, data)
#
#
#
#
# optim.output
# # xt = data
# # nparms = length(theta)
# # n = length(xt)
# # # allocate memory to save parameter estimates, hessian values, and loglik values
# # ParmEst = matrix(0,nrow=1,ncol=nparms)
# # se = matrix(NA,nrow=1,ncol=nparms)
# # loglik = rep(0,1)
# # convcode = rep(0,1)
# # kkt1 = rep(0,1)
# # kkt2 = rep(0,1)
# #
# #
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = xt,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # method = OptMethod,
# # hessian = TRUE)
# #
# #
#
# out = FitGaussianLogLik(theta, data, Regressor, mod, "L-BFGS-B")
#
# s = sand(theta, data, Regressor, mod)
#
#
#
#
#
# #====================================== PF =============================================#
#
#
# # test PF without regressor + AR
# ARMAorder = c(2,0)
# Regressor = NULL
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2,0.5, 0.5, -0.3)
# ParticleFilter_Res(theta, data, Regressor, mod)
# LB = c(0.01, 0.01, -Inf, -Inf)
# UB = c(Inf, Inf, Inf, Inf)
# FitMultiplePF_Res(theta, data, Regressor, mod, OptMethod)
#
#
#
#
# # test PF with regressor + MA
# ARMAorder = c(0,1)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Negative Binomial"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2, 1, 0.5, -0.3)
# ParticleFilter_Res(theta, data, Regressor, mod)
#
#
#
# #====================================== GL =============================================#
# # test GL with regressor + AR
# ARMAorder = c(2,0)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC, ParticleNumber, epsilon )
# theta = c(2,0.5, 0.5,0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
# OptMethod = "bobyqa"
# LB = c(-100, -100, 0.001, -Inf, -Inf)
# UB = c(100, 100, Inf, Inf, Inf)
# GaussLogLikGP_Reg(theta, data, Regressor, ARMAorder, MaxCdf, nHC, CountDist)
# # FitGaussianLogLik(theta, data, Regressor, mod, OptMethod)
#
#
# # test GL without regressor + AR
# ARMAorder = c(2,0)
# Regressor = NULL
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC, ParticleNumber, epsilon )
# theta = c(2, 0.5,0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
# OptMethod = "bobyqa"
# GaussLogLikGP(theta, data, ARMAorder, MaxCdf, nHC)
#
#
#
# # test GL with regressor + MA
# ARMAorder = c(0,1)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Negative Binomial"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2, 1, 0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
#
#
#
# # test Gen Pois with regressor and WN
# ARMAorder = c(0,0)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2,1)
# GaussianLogLik(theta, data, Regressor, mod)
#
|
/tests/TestFinalWrapper.R
|
no_license
|
jlivsey/countsFun
|
R
| false
| false
| 8,409
|
r
|
#====================================================================================#
# PURPOSE Test of calling the main wrapper function of the timecounts package.
# This script fits a latent Gaussian count model (Negative Binomial
# marginals, AR(3) latent structure) to a retail sales series via
# the particle-filter estimation method (PFR) in countC().
#
# Authors Stefanos Kechagias, James Livsey, Vladas Pipiras, Jiajie Kong
# Date Fall 2022
# Version 4.2.1
#====================================================================================#
# load libraries
library(countsFun)
library(tictoc)
library(optimx)
library(ltsa)
library(itsmr)
# library(lavaSearch2)
library(numDeriv)
# library(sandwich)
library(MASS)
# # FIX ME: Check Where is this used?
# symmetrize = lavaSearch2:::symmetrize
# load the data
mysales = read.csv("data/MySelectedSeries.csv")
# specify parameters
n = 104                 # number of observations used from the series
epsilon = 0.5           # resampling threshold for the particle filter
MaxCdf = 1000           # truncation point for cdf evaluation
nHC = 30                # number of Hermite coefficients
ParticleNumber = 10     # particles used in the particle filter
data = mysales$MOVE[1:n]
ARMAorder = c(3,0)      # AR(3), no MA part
# design matrix: intercept plus the Buy promotion indicator
Regressor = cbind(rep(1,length(mysales$Buy[1:n] )),mysales$Buy[1:n] )
CountDist = "Negative Binomial"
# the Poisson will yield an infinite value.
# CountDist = "Poisson"
EstMethod = "PFR"
OptMethod = "bobyqa"
maxit = 0
# initialParam = c(2.597666, 1.15373, 1.197833, -0.3748205, 0.226, 0.227)
initialParam = NULL     # NULL => initial estimates computed internally
#initialParam = c(2.13258844, 1.16177357, -0.39141331, 0.08239859, 0.05745040)
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam, EstMethod, maxit)
# stop if there was an error in model specification
# if(mod$error) stop(mod$errorMsg)
#
# # fix me: I need a function that computes initial parameters
# if (is.null(initialParam)){
#     theta  = InitialEstimates(mod)
# }else{
#     theta = mod$initialParam
# }
# theta = c(2.264, 1.01,-0.341, 0.223, 0.291)
# ParticleFilter_Res_AR(theta, mod)
# call the wrapper
# NOTE(review): Regressor=NULL is passed here although a Regressor matrix was
# built above -- confirm whether the regression fit was intended.
a = countC(data, Regressor=NULL, CountDist, EstMethod, ARMAorder,
           nHC, MaxCdf, ParticleNumber, epsilon, initialParam ,
           OptMethod, maxit)
#
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam, EstMethod)
#
#
# theta = c(2.264, 1.01, 1.21, -0.341, 0.223, 0.291)
# optim.output <- optimx(par = theta,
# fn = GaussianLogLik,
# data = data,
# Regressor = Regressor,
# mod = mod,
# lower = mod$LB,
# upper = mod$UB,
# method = OptMethod,
# hessian = TRUE)
#
#
# ParmEst = as.numeric(optim.output[1:mod$nparms])
# loglik = optim.output$value
#
#
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = data,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # control=list(all.methods=TRUE),
# # hessian = TRUE)
#
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = data,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # method = "Rvmmin",
# # hessian = TRUE)
#
#
# # save estimates, loglik value and diagonal hessian
# ParmEst = as.numeric(optim.output[5,1:mod$nparms])
# loglik = optim.output$value
# convcode = optim.output$convcode
# kkt1 = optim.output$kkt1
# kkt2 = optim.output$kkt2
#
#
# # compute sandwich standard errors
# se = sand(ParmEst, data, Regressor, mod)
#
#
#
# h <- gHgen(fn = GaussianLogLik,
# par = theta,
# data = data, # additional arg for GaussLogLik
# Regressor = Regressor, # additional arg for GaussLogLik
# mod = mod) # additional arg for GaussLogLik
# SE.hess <- sqrt(diag(solve(h$Hn)))
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# #sand(theta, data, Regressor, mod)
#
#
# countC(data, Regressor, CountDist, EstMethod, ARMAorder,nHC, MaxCdf,ParticleNumber, epsilon, initialParam, OptMethod, maxit)
#
#
#
#
#
#
#
#
#
# #Regressor = cbind(rep(1,length(Buy)),Buy)
# theta = c(2, 0.5)
# initialParam = theta
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon, initialParam)
#
# sand(theta, data, Regressor, mod)
#
# #FitGaussianLogLik (theta, data, Regressor, mod, OptMethod)
#
#
#
#
#
#
#
#
#
#
#
# GaussianLogLik(theta, data, Regressor, mod)
# GaussLogLik = function(theta, data)
#
#
#
#
# optim.output
# # xt = data
# # nparms = length(theta)
# # n = length(xt)
# # # allocate memory to save parameter estimates, hessian values, and loglik values
# # ParmEst = matrix(0,nrow=1,ncol=nparms)
# # se = matrix(NA,nrow=1,ncol=nparms)
# # loglik = rep(0,1)
# # convcode = rep(0,1)
# # kkt1 = rep(0,1)
# # kkt2 = rep(0,1)
# #
# #
# # optim.output <- optimx(par = theta,
# # fn = GaussianLogLik,
# # data = xt,
# # Regressor = Regressor,
# # mod = mod,
# # lower = mod$LB,
# # upper = mod$UB,
# # method = OptMethod,
# # hessian = TRUE)
# #
# #
#
# out = FitGaussianLogLik(theta, data, Regressor, mod, "L-BFGS-B")
#
# s = sand(theta, data, Regressor, mod)
#
#
#
#
#
# #====================================== PF =============================================#
#
#
# # test PF without regressor + AR
# ARMAorder = c(2,0)
# Regressor = NULL
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2,0.5, 0.5, -0.3)
# ParticleFilter_Res(theta, data, Regressor, mod)
# LB = c(0.01, 0.01, -Inf, -Inf)
# UB = c(Inf, Inf, Inf, Inf)
# FitMultiplePF_Res(theta, data, Regressor, mod, OptMethod)
#
#
#
#
# # test PF with regressor + MA
# ARMAorder = c(0,1)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Negative Binomial"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2, 1, 0.5, -0.3)
# ParticleFilter_Res(theta, data, Regressor, mod)
#
#
#
# #====================================== GL =============================================#
# # test GL with regressor + AR
# ARMAorder = c(2,0)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC, ParticleNumber, epsilon )
# theta = c(2,0.5, 0.5,0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
# OptMethod = "bobyqa"
# LB = c(-100, -100, 0.001, -Inf, -Inf)
# UB = c(100, 100, Inf, Inf, Inf)
# GaussLogLikGP_Reg(theta, data, Regressor, ARMAorder, MaxCdf, nHC, CountDist)
# # FitGaussianLogLik(theta, data, Regressor, mod, OptMethod)
#
#
# # test GL without regressor + AR
# ARMAorder = c(2,0)
# Regressor = NULL
# CountDist = "Generalized Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC, ParticleNumber, epsilon )
# theta = c(2, 0.5,0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
# OptMethod = "bobyqa"
# GaussLogLikGP(theta, data, ARMAorder, MaxCdf, nHC)
#
#
#
# # test GL with regressor + MA
# ARMAorder = c(0,1)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Negative Binomial"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2, 1, 0.5, -0.3)
# GaussianLogLik(theta, data, Regressor, mod)
#
#
#
# # test Gen Pois with regressor and WN
# ARMAorder = c(0,0)
# Regressor = cbind(rep(1,length(Buy)),Buy)
# CountDist = "Poisson"
# mod = ModelScheme(data, Regressor, ARMAorder, CountDist, MaxCdf, nHC,ParticleNumber, epsilon )
# theta = c(2,1)
# GaussianLogLik(theta, data, Regressor, mod)
#
|
# Load the NEI emissions summary once (skip when already in the workspace)
if(!exists("NEI")){
    NEI <- readRDS("./data/summarySCC_PM25.rds")
}
# Load the Source Classification Code table once
if(!exists("SCC")){
    SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# merge the two data sets on the SCC key (cached across runs)
if(!exists("NEISCC")){
    NEISCC <- merge(NEI, SCC, by="SCC")
}
library(ggplot2)
# Compare emissions from motor vehicle sources in Baltimore City with emissions from motor
# vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
# 24510 is Baltimore, see plot2.R, 06037 is LA CA
# Searching for ON-ROAD type in NEI
# Don't actually know it this is the intention, but searching for 'motor' in SCC only gave a subset (non-cars)
subsetNEI  <- NEI[(NEI$fips=="24510"|NEI$fips=="06037") & NEI$type=="ON-ROAD",  ]
# Sum emissions per year within each county, then replace fips codes by city labels
aggregatedTotalByYearAndFips <- aggregate(Emissions ~ year + fips, subsetNEI, sum)
aggregatedTotalByYearAndFips$fips[aggregatedTotalByYearAndFips$fips=="24510"] <- "Baltimore, MD"
aggregatedTotalByYearAndFips$fips[aggregatedTotalByYearAndFips$fips=="06037"] <- "Los Angeles, CA"
# Side-by-side bar charts (one facet per city), written to a PNG file
png("Assign2Plot6.png", width=1040, height=480)
g <- ggplot(aggregatedTotalByYearAndFips, aes(factor(year), Emissions))
g <- g + facet_grid(. ~ fips)
g <- g + geom_bar(stat="identity") +
    xlab("year") +
    ylab(expression('Total PM'[2.5]*" Emissions")) +
    ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) in Baltimore City, MD (fips = "24510") vs Los Angeles, CA (fips = "06037") 1999-2008')
print(g)
dev.off()
|
/Question6.R
|
no_license
|
jschlich/Exploratory-Data-Analysis-Assignment2
|
R
| false
| false
| 1,553
|
r
|
# Load the raw data sets once; skip the expensive reads when the objects
# already exist in the workspace from a previous run.
if(!exists("NEI")) NEI <- readRDS("./data/summarySCC_PM25.rds")
if(!exists("SCC")) SCC <- readRDS("./data/Source_Classification_Code.rds")
if(!exists("NEISCC")) NEISCC <- merge(NEI, SCC, by = "SCC")

library(ggplot2)

# Question: which city shows the greater change in motor-vehicle emissions
# over time -- Baltimore City (fips 24510) or Los Angeles County (fips 06037)?
# Motor vehicles are taken to be the ON-ROAD source type in NEI.
isTarget <- (NEI$fips == "24510" | NEI$fips == "06037") & NEI$type == "ON-ROAD"
vehicleNEI <- NEI[isTarget, ]

# Total emissions per year within each county, then relabel fips with city names
yearlyTotals <- aggregate(Emissions ~ year + fips, vehicleNEI, sum)
yearlyTotals$fips[yearlyTotals$fips == "24510"] <- "Baltimore, MD"
yearlyTotals$fips[yearlyTotals$fips == "06037"] <- "Los Angeles, CA"

# Render side-by-side bar charts (one facet per city) to a PNG file
png("Assign2Plot6.png", width = 1040, height = 480)
plt <- ggplot(yearlyTotals, aes(factor(year), Emissions)) +
    facet_grid(. ~ fips) +
    geom_bar(stat = "identity") +
    xlab("year") +
    ylab(expression('Total PM'[2.5]*" Emissions")) +
    ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) in Baltimore City, MD (fips = "24510") vs Los Angeles, CA (fips = "06037") 1999-2008')
print(plt)
dev.off()
|
spatialAnalysis.plotStatMaps <- function(){
    ## Draw the spatial-analysis statistic map held in .cdtData$EnvData$don,
    ## styled by the options in .cdtData$EnvData$climMapOp.  For a "Trend"
    ## statistic, locations whose p-value is below 0.05 are overlaid with
    ## point/stipple marks.  Returns the plotting parameters produced by
    ## cdt.plotmap.fun (used by the caller for interactive coordinate lookup).
    don <- .cdtData$EnvData$don
    climMapOp <- .cdtData$EnvData$climMapOp

    ## Map title: built from the analysis parameters unless user-supplied
    if(!climMapOp$title$user){
        params <- .cdtData$EnvData$statpars$params
        titre1 <- stringr::str_to_title(params$time.series$out.series)
        titre2 <- tclvalue(.cdtData$EnvData$climStat)
        titre3 <- switch(params$analysis.method$mth.fun,
                        "percentile" = paste0("(", params$analysis.method$mth.perc, "th", ")"),
                        "frequency" = paste0("(", params$analysis.method$low.thres, " < X < ",
                                             params$analysis.method$up.thres, ")"),
                        ## BUGFIX: the original branch was a braced sequence of
                        ## three bare `if` statements, so only trend.unit == 3
                        ## ever produced a label (units 1 and 2 silently gave
                        ## NULL).  Select the label by numeric index instead.
                        "trend" = switch(params$analysis.method$trend.unit,
                                         "per year", "over", "/ average (in %)"),
                        NULL)
        titre4 <- tclvalue(.cdtData$EnvData$climDate)
        .titre <- paste(titre1, titre2, titre3, titre4)
    }else .titre <- climMapOp$title$title

    #################
    ## Assemble the arguments for the generic CDT map-plotting routine
    .data.type <- .cdtData$EnvData$plot.maps$.data.type
    .plot.type <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$plot.type))

    map.args <- cdt.plotmap.args(don, climMapOp, .cdtData$EnvData$shp)

    opar <- par(mar = map.args$mar)
    map.args.add <- list(titre = .titre,
                         SHPOp = .cdtData$EnvData$SHPOp,
                         MapOp = climMapOp,
                         data.type = .data.type,
                         plot.type = .plot.type)
    map.args <- map.args[!(names(map.args) %in% "mar")]
    map.args <- c(map.args, map.args.add)
    par.plot <- do.call(cdt.plotmap.fun, map.args)

    ### Trend: overlay marks where the trend is significant (p < 0.05)
    if(str_trim(tclvalue(.cdtData$EnvData$climStat)) == "Trend"){
        if(.cdtData$EnvData$statpars$params$data.type == "cdtstation"){
            ipvl <- !is.na(don$p.value) & don$p.value < 0.05
            if(any(ipvl)){
                points(don$x0[ipvl], don$y0[ipvl], pch = 19, cex = 0.5)
            }
        }else{
            ## Gridded data: merge significant pixels into buffered patches and
            ## stipple each patch with a regular grid of small squares.
            ipvl <- c(don$pval)
            ipvl <- !is.na(ipvl) & ipvl < 0.05
            if(any(ipvl)){
                grd <- don$x[2] - don$x[1]
                dd <- expand.grid(x = don$x, y = don$y)
                coordinates(dd) <- ~x+y
                dd <- dd[ipvl, ]
                buffer <- rgeos::gBuffer(dd, width = grd * 1.02)
                dd <- sp::disaggregate(buffer)
                centr <- coordinates(dd)
                bbx <- lapply(seq_along(dd), function(i) bbox(dd[i]))
                ## stipple spacing, capped at 0.25 degree
                esp <- if(grd > 0.25) 0.25 else grd * 5
                esp <- if(esp > 0.25) 0.25 else esp
                dd <- lapply(seq_along(bbx), function(i){
                    ## regular point lattice centred on the patch centroid,
                    ## clipped to the patch polygon
                    xpt <- c(rev(seq(centr[i, 1], bbx[[i]][1, 1], -esp)[-1]), seq(centr[i, 1], bbx[[i]][1, 2], esp))
                    ypt <- c(rev(seq(centr[i, 2], bbx[[i]][2, 1], -esp)[-1]), seq(centr[i, 2], bbx[[i]][2, 2], esp))
                    xy <- expand.grid(x = xpt, y = ypt)
                    coordinates(xy) <- ~x+y
                    ij <- as.logical(over(xy, dd[i]))
                    ij[is.na(ij)] <- FALSE
                    coordinates(xy[ij, ])
                })
                dd <- do.call(rbind, dd)
                points(dd[, 1], dd[, 2], pch = 15, cex = 0.3)
            }
        }
    }

    ## scale bar
    cdt.plotmap.scalebar(climMapOp$scalebar)

    par(opar)
    return(par.plot)
}
#######################################
spatialAnalysis.plotTSMaps <- function(){
    ## Plot one time step of either the aggregated series ("Data") or the
    ## anomaly series ("Anomaly") as a map, using the options in TSMapOp.
    ## Returns the plotting parameters produced by cdt.plotmap.fun.
    TSMapOp <- .cdtData$EnvData$TSMapOp
    ## Select the data set to draw from the "TSData" radio control
    if(tclvalue(.cdtData$EnvData$TSData) == "Data")
        don <- .cdtData$EnvData$tsdata
    if(tclvalue(.cdtData$EnvData$TSData) == "Anomaly")
        don <- .cdtData$EnvData$anomData

    ## Map title: built from the analysis parameters unless user-supplied
    if(!TSMapOp$title$user){
        if(str_trim(tclvalue(.cdtData$EnvData$TSData)) == "Data"){
            params <- .cdtData$EnvData$statpars$params
            titre1 <- stringr::str_to_title(params$time.series$out.series)
            titre2 <- switch(params$aggr.series$aggr.fun, "sum" = "total", "mean" = "average", "count" = "number")
            ## for "count" aggregation, show the counting condition in the title
            titre3 <- if(params$aggr.series$aggr.fun == "count")
                        paste("(", params$aggr.series$opr.fun, params$aggr.series$opr.thres, ")") else NULL
            titre4 <- tclvalue(.cdtData$EnvData$TSDate)
            .titre <- paste(titre1, titre2, titre3, titre4)
        }
        if(str_trim(tclvalue(.cdtData$EnvData$TSData)) == "Anomaly"){
            params <- don$params
            titre1 <- stringr::str_to_title(params$time.series$out.series)
            titre2 <- "anomaly"
            titre3 <- if(params$analysis.method$perc.anom) "% of mean" else NULL
            titre4 <- tclvalue(.cdtData$EnvData$TSDate)
            .titre <- paste(titre1, titre2, titre3, titre4)
        }
    }else .titre <- TSMapOp$title$title

    #################
    ## Assemble the arguments for the generic CDT map-plotting routine
    .data.type <- .cdtData$EnvData$plot.maps$.data.type
    .plot.type <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$plot.type))

    map.args <- cdt.plotmap.args(don, TSMapOp, .cdtData$EnvData$shp)

    opar <- par(mar = map.args$mar)
    map.args.add <- list(titre = .titre,
                         SHPOp = .cdtData$EnvData$SHPOp,
                         MapOp = TSMapOp,
                         data.type = .data.type,
                         plot.type = .plot.type)
    map.args <- map.args[!(names(map.args) %in% "mar")]
    map.args <- c(map.args, map.args.add)
    par.plot <- do.call(cdt.plotmap.fun, map.args)

    ## scale bar
    cdt.plotmap.scalebar(TSMapOp$scalebar)

    par(opar)
    return(par.plot)
}
#######################################
spatialAnalysis.plotTSGraph <- function(){
    ## Plot the time series of the aggregated statistic at the location the
    ## user selected on the map: a station (cdtstation data) or a pixel
    ## (gridded data).  The graph type comes from the "typeTSp" control:
    ## Line / Barplot / Anomaly / Probability, each with an ENSO variant.
    ## Returns the value of the graphs.plot.* routine that was dispatched,
    ## or NULL when the location could not be resolved.
    TSGraphOp <- .cdtData$EnvData$TSGraphOp

    if(.cdtData$EnvData$statpars$params$data.type == "cdtstation"){
        ## Station data: locate the series by the selected station id
        ixy <- which(.cdtData$EnvData$tsdata$id == str_trim(tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp)))
        if(length(ixy) == 0){
            Insert.Messages.Out("Station not found", format = TRUE)
            return(NULL)
        }
        don <- .cdtData$EnvData$tsdata$data[, ixy]
        dates <- .cdtData$EnvData$tsdata$date
        daty <- as.numeric(substr(dates, 1, 4))
        .cdtData$EnvData$location <- paste0("Station: ", .cdtData$EnvData$tsdata$id[ixy])
    }else{
        ## Gridded data: extract the series at the clicked longitude/latitude
        cdtdataset <- .cdtData$EnvData$cdtdataset
        xloc <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$plot.maps$lonLOC)))
        yloc <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$plot.maps$latLOC)))
        xyloc <- cdtdataset.extarct.TS(cdtdataset, cdtdataset$fileInfo, xloc, yloc)
        if(is.null(xyloc)) return(NULL)
        don <- as.numeric(xyloc$data)
        dates <- xyloc$date

        ######
        ## Collapse "YYYY-MM_YYYY-MM" season labels to "YYYYMM" (single month)
        ## or "YYYY" (full Jan-Dec year) where possible
        year1 <- substr(dates, 1, 4)
        mon1 <- substr(dates, 6, 7)
        year2 <- substr(dates, 9, 12)
        mon2 <- substr(dates, 14, 15)
        if(all(year1 == year2)){
            if(all(mon1 == mon2)) dateTS <- paste0(year1, mon1)
            else{
                dateTS <- if(mon1 == "01" & mon2 == "12") year1 else dates
            }
        }else dateTS <- dates

        ## Keep only the dates belonging to the selected statistic period
        ipos <- which(.cdtData$EnvData$statpars$stats == str_trim(tclvalue(.cdtData$EnvData$climDate)))
        idaty <- dateTS %in% .cdtData$EnvData$statpars$timeseries[[ipos]][[2]]
        dates <- dateTS[idaty]
        don <- don[idaty]
        daty <- as.numeric(substr(dates, 1, 4))
        .cdtData$EnvData$location <- paste0("Longitude: ", round(xloc, 5), ", Latitude: ", round(yloc, 5))
    }

    #########
    GRAPHTYPE <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$typeTSp))

    #### ENSO
    ## For the ENSO graph types, classify each time step with the Oceanic Nino
    ## Index: 3 = El Nino (ONI >= 0.5), 1 = La Nina (ONI <= -0.5), 2 = neutral
    if(GRAPHTYPE %in% c("ENSO-Line", "ENSO-Barplot", "ENSO-Proba")){
        ## Turn each date label into a start/end month pair for ONI matching
        if(nchar(dates[1]) == 4){
            start.mon <- paste0(dates, "0115")
            end.mon <- paste0(dates, "1215")
        }
        if(nchar(dates[1]) == 6){
            start.mon <- paste0(dates, "15")
            end.mon <- paste0(dates, "15")
        }
        if(nchar(dates[1]) == 15){
            dates <- lapply(strsplit(dates, '_'), function(x) format(as.Date(paste0(x, "-15")), "%Y%m%d"))
            start.mon <- sapply(dates, '[[', 1)
            end.mon <- sapply(dates, '[[', 2)
        }

        ijoni <- cdt.index.flexseason(start.mon, end.mon, .cdtData$EnvData$ONI$date, "monthly")
        oni <- sapply(ijoni$index, function(x) mean(.cdtData$EnvData$ONI$data[x], na.rm = TRUE))
        ## NOTE(review): `length(ijoni$nba) == 0` is a single scalar test, so
        ## this line either blanks oni[1] or does nothing; it looks like it was
        ## meant to flag seasons with no matching ONI months -- confirm intent.
        oni[length(ijoni$nba) == 0] <- NA
        oni[is.nan(oni)] <- NA
        oni <- ifelse(oni >= 0.5, 3, ifelse(oni <= -0.5, 1, 2))
    }

    ########
    xlab0 <- ""
    ylab0 <- ""

    #########
    ## Per-graph-type display options
    optsgph <- switch(GRAPHTYPE,
                    "Line" = TSGraphOp$line,
                    "Barplot" = TSGraphOp$bar,
                    "ENSO-Line" = TSGraphOp$line.enso,
                    "ENSO-Barplot" = TSGraphOp$bar.enso,
                    "Anomaly" = TSGraphOp$anomaly,
                    "Probability" = TSGraphOp$proba,
                    "ENSO-Proba" = TSGraphOp$proba.enso)

    ## xlim, ylim, xlab, ylab
    if(GRAPHTYPE %in% c("Probability", "ENSO-Proba")){
        ## Probability plots: x is the data value, y is exceedance probability
        xlim <- range(don, na.rm = TRUE)
        if(optsgph$xlim$is.min) xlim[1] <- as.numeric(optsgph$xlim$min)
        if(optsgph$xlim$is.max) xlim[2] <- as.numeric(optsgph$xlim$max)
        ylim <- c(0, 100)
        ylab0 <- "Probability of Exceeding"
    }else{
        ## Time plots: x is the year; restrict the series to the x-range
        xlim <- range(daty, na.rm = TRUE)
        if(optsgph$xlim$is.min) xlim[1] <- as.numeric(optsgph$xlim$min)
        if(optsgph$xlim$is.max) xlim[2] <- as.numeric(optsgph$xlim$max)
        idt <- daty >= xlim[1] & daty <= xlim[2]
        daty <- daty[idt]
        don <- don[idt]
        ylim <- range(pretty(don))
        if(GRAPHTYPE == "Anomaly")
            if(optsgph$anom$perc.anom) ylab0 <- "Anomaly (% of Mean)"
    }
    if(optsgph$ylim$is.min) ylim[1] <- optsgph$ylim$min
    if(optsgph$ylim$is.max) ylim[2] <- optsgph$ylim$max

    xlab <- if(optsgph$axislabs$is.xlab) optsgph$axislabs$xlab else xlab0
    ylab <- if(optsgph$axislabs$is.ylab) optsgph$axislabs$ylab else ylab0

    if(optsgph$title$is.title){
        titre <- optsgph$title$title
        titre.pos <- optsgph$title$position
    }else{
        titre <- ""
        titre.pos <- "top"
    }

    #########
    if(GRAPHTYPE == "Line"){
        ## Legend overlays (mean / linear trend / terciles): explicit legend
        ## options win; otherwise fall back to the panel checkbox states
        legends <- NULL
        if(optsgph$legend$is$mean){
            legends$add$mean <- optsgph$legend$add$mean
            legends$col$mean <- optsgph$legend$col$mean
            legends$text$mean <- optsgph$legend$text$mean
            legends$lwd$mean <- optsgph$legend$lwd$mean
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$averageTSp) == "1") legends$add$mean <- TRUE
        }
        if(optsgph$legend$is$linear){
            legends$add$linear <- optsgph$legend$add$linear
            legends$col$linear <- optsgph$legend$col$linear
            legends$text$linear <- optsgph$legend$text$linear
            legends$lwd$linear <- optsgph$legend$lwd$linear
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$trendTSp) == "1") legends$add$linear <- TRUE
        }
        if(optsgph$legend$is$tercile){
            legends$add$tercile <- optsgph$legend$add$tercile
            legends$col$tercile1 <- optsgph$legend$col$tercile1
            legends$text$tercile1 <- optsgph$legend$text$tercile1
            legends$col$tercile2 <- optsgph$legend$col$tercile2
            legends$text$tercile2 <- optsgph$legend$text$tercile2
            legends$lwd$tercile <- optsgph$legend$lwd$tercile
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$tercileTSp) == "1") legends$add$tercile <- TRUE
        }

        ret <- graphs.plot.line(daty, don, xlim = xlim, ylim = ylim,
                                xlab = xlab, ylab = ylab, ylab.sub = NULL,
                                title = titre, title.position = titre.pos, axis.font = 1,
                                plotl = optsgph$plot, legends = legends,
                                location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "Barplot"){
        ret <- graphs.plot.bar(daty, don, xlim = xlim, ylim = ylim,
                               xlab = xlab, ylab = ylab, ylab.sub = NULL,
                               title = titre, title.position = titre.pos, axis.font = 1,
                               barcol = optsgph$colors$col,
                               location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "ENSO-Line"){
        ## Restrict the ONI classification to the displayed x-range
        oni <- oni[idt]
        legends <- NULL
        if(optsgph$legend$is$mean){
            legends$add$mean <- optsgph$legend$add$mean
            legends$col$mean <- optsgph$legend$col$mean
            legends$text$mean <- optsgph$legend$text$mean
            legends$lwd$mean <- optsgph$legend$lwd$mean
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$averageTSp) == "1") legends$add$mean <- TRUE
        }
        if(optsgph$legend$is$linear){
            legends$add$linear <- optsgph$legend$add$linear
            legends$col$linear <- optsgph$legend$col$linear
            legends$text$linear <- optsgph$legend$text$linear
            legends$lwd$linear <- optsgph$legend$lwd$linear
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$trendTSp) == "1") legends$add$linear <- TRUE
        }
        if(optsgph$legend$is$tercile){
            legends$add$tercile <- optsgph$legend$add$tercile
            legends$col$tercile1 <- optsgph$legend$col$tercile1
            legends$text$tercile1 <- optsgph$legend$text$tercile1
            legends$col$tercile2 <- optsgph$legend$col$tercile2
            legends$text$tercile2 <- optsgph$legend$text$tercile2
            legends$lwd$tercile <- optsgph$legend$lwd$tercile
        }else{
            if(tclvalue(.cdtData$EnvData$plot.maps$tercileTSp) == "1") legends$add$tercile <- TRUE
        }

        ret <- graphs.plot.line.ENSO(daty, don, oni, xlim = xlim, ylim = ylim,
                                     xlab = xlab, ylab = ylab, ylab.sub = NULL,
                                     title = titre, title.position = titre.pos, axis.font = 1,
                                     plotl = optsgph$plot, legends = legends,
                                     location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "ENSO-Barplot"){
        oni <- oni[idt]
        ret <- graphs.plot.bar.ENSO(daty, don, oni, xlim = xlim, ylim = ylim,
                                    xlab = xlab, ylab = ylab, ylab.sub = NULL,
                                    title = titre, title.position = titre.pos, axis.font = 1,
                                    barcol = optsgph$colors$col, location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "Anomaly"){
        if(!optsgph$ylim$is.min & !optsgph$ylim$is.max) ylim <- NULL
        ## bar colours for negative / positive anomalies
        loko <- c(optsgph$colors$negative, optsgph$colors$positive)

        ## anomaly base period: user-specified years, else the full x-range
        period <- range(daty, na.rm = TRUE)
        if(optsgph$anom$basePeriod){
            startYr <- optsgph$anom$startYr.anom
            endYr <- optsgph$anom$endYr.anom
            period <- c(startYr, endYr)
        }

        ret <- graphs.plot.bar.Anomaly(daty, don, period = period, percent = optsgph$anom$perc.anom,
                                       xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, ylab.sub = NULL,
                                       title = titre, title.position = titre.pos, axis.font = 1,
                                       barcol = loko, location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "Probability"){
        ret <- graphs.plot.proba(don, xlim = xlim, ylim = ylim,
                                 xlab = xlab, xlab.sub = NULL, ylab = ylab,
                                 title = titre, title.position = titre.pos, axis.font = 1,
                                 proba = list(theoretical = optsgph$proba$theoretical),
                                 plotp = optsgph$proba, plotl = optsgph$plot,
                                 location = .cdtData$EnvData$location)
    }

    if(GRAPHTYPE == "ENSO-Proba"){
        ret <- graphs.plot.proba.ENSO(don, oni, xlim = xlim, ylim = ylim,
                                      xlab = xlab, xlab.sub = NULL, ylab = ylab,
                                      title = titre, title.position = titre.pos, axis.font = 1,
                                      plotl = optsgph$plot, location = .cdtData$EnvData$location)
    }

    return(ret)
}
##############################################################################
spatialAnalysis.DisplayStatMaps <- function(){
    ## Open (or refresh) the statistics-map notebook tab and wire a left-click
    ## handler that resolves the clicked station/pixel and opens its
    ## time-series graph tab.  Returns NULL when no analysis is loaded.
    if(is.null(.cdtData$EnvData) || is.null(.cdtData$EnvData$statpars)) return(NULL)

    ## expose station coordinates/ids to the coordinate-lookup helpers
    .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')] <- .cdtData$EnvData$don[c('x0', 'y0', 'id')]

    mapContainer <- CDT.Display.Map.inter(spatialAnalysis.plotStatMaps,
                                          .cdtData$EnvData$tab$climMap,
                                          'Clim-Analysis-Maps')
    .cdtData$EnvData$tab$climMap <- imageNotebookTab_unik(mapContainer, .cdtData$EnvData$tab$climMap)

    ###############
    ## On left-click: translate canvas coordinates into a station id or a
    ## lon/lat pair, then display that location's time series
    tkbind(mapContainer[[2]], "<Button-1>", function(W, x, y){
        dtype <- .cdtData$EnvData$plot.maps$data.type
        if(is.null(dtype)) return(NULL)

        if(dtype == "cdtstation"){
            xyid <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getStnIDLabel,
                                      stn.coords = .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')])
            if(xyid$plotTS)
                tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp) <- xyid$crd
        }else{
            xyid <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getPixelLatlon)
            if(xyid$plotTS){
                tclvalue(.cdtData$EnvData$plot.maps$lonLOC) <- xyid$crd$x
                tclvalue(.cdtData$EnvData$plot.maps$latLOC) <- xyid$crd$y
            }
        }

        if(xyid$plotTS){
            graphContainer <- CDT.Display.Graph(spatialAnalysis.plotTSGraph,
                                                .cdtData$EnvData$tab$TSplot, 'Time-Series-Plot')
            .cdtData$EnvData$tab$TSplot <- imageNotebookTab_unik(graphContainer, .cdtData$EnvData$tab$TSplot)
        }
    })
}
#######################################
spatialAnalysis.DisplayTSMaps <- function(){
    ## Open (or refresh) the aggregated-data map notebook tab and wire a
    ## left-click handler that resolves the clicked station/pixel and opens
    ## its time-series graph tab.  Returns NULL when no analysis is loaded.
    if(is.null(.cdtData$EnvData) || is.null(.cdtData$EnvData$statpars)) return(NULL)

    ## expose station coordinates/ids to the coordinate-lookup helpers
    .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')] <- .cdtData$EnvData$tsdata[c('x0', 'y0', 'id')]

    mapContainer <- CDT.Display.Map.inter(spatialAnalysis.plotTSMaps,
                                          .cdtData$EnvData$tab$TSMap,
                                          'Aggregated-Data')
    .cdtData$EnvData$tab$TSMap <- imageNotebookTab_unik(mapContainer, .cdtData$EnvData$tab$TSMap)

    ###############
    ## On left-click: translate canvas coordinates into a station id or a
    ## lon/lat pair, then display that location's time series
    tkbind(mapContainer[[2]], "<Button-1>", function(W, x, y){
        dtype <- .cdtData$EnvData$plot.maps$data.type
        if(is.null(dtype)) return(NULL)

        if(dtype == "cdtstation"){
            xyid <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getStnIDLabel,
                                      stn.coords = .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')])
            if(xyid$plotTS)
                tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp) <- xyid$crd
        }else{
            xyid <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getPixelLatlon)
            if(xyid$plotTS){
                tclvalue(.cdtData$EnvData$plot.maps$lonLOC) <- xyid$crd$x
                tclvalue(.cdtData$EnvData$plot.maps$latLOC) <- xyid$crd$y
            }
        }

        if(xyid$plotTS){
            graphContainer <- CDT.Display.Graph(spatialAnalysis.plotTSGraph,
                                                .cdtData$EnvData$tab$TSplot, 'Time-Series-Plot')
            .cdtData$EnvData$tab$TSplot <- imageNotebookTab_unik(graphContainer, .cdtData$EnvData$tab$TSplot)
        }
    })
}
|
/R/cdtSpatialAnalysis_Display.R
|
no_license
|
YabOusmane/CDT
|
R
| false
| false
| 19,647
|
r
|
spatialAnalysis.plotStatMaps <- function(){
    ## Draw the spatial-analysis statistic map held in .cdtData$EnvData$don,
    ## styled by the options in .cdtData$EnvData$climMapOp.  For a "Trend"
    ## statistic, locations whose p-value is below 0.05 are overlaid with
    ## point/stipple marks.  Returns the plotting parameters produced by
    ## cdt.plotmap.fun (used by the caller for interactive coordinate lookup).
    don <- .cdtData$EnvData$don
    climMapOp <- .cdtData$EnvData$climMapOp

    ## Map title: built from the analysis parameters unless user-supplied
    if(!climMapOp$title$user){
        params <- .cdtData$EnvData$statpars$params
        titre1 <- stringr::str_to_title(params$time.series$out.series)
        titre2 <- tclvalue(.cdtData$EnvData$climStat)
        titre3 <- switch(params$analysis.method$mth.fun,
                        "percentile" = paste0("(", params$analysis.method$mth.perc, "th", ")"),
                        "frequency" = paste0("(", params$analysis.method$low.thres, " < X < ",
                                             params$analysis.method$up.thres, ")"),
                        ## BUGFIX: the original branch was a braced sequence of
                        ## three bare `if` statements, so only trend.unit == 3
                        ## ever produced a label (units 1 and 2 silently gave
                        ## NULL).  Select the label by numeric index instead.
                        "trend" = switch(params$analysis.method$trend.unit,
                                         "per year", "over", "/ average (in %)"),
                        NULL)
        titre4 <- tclvalue(.cdtData$EnvData$climDate)
        .titre <- paste(titre1, titre2, titre3, titre4)
    }else .titre <- climMapOp$title$title

    #################
    ## Assemble the arguments for the generic CDT map-plotting routine
    .data.type <- .cdtData$EnvData$plot.maps$.data.type
    .plot.type <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$plot.type))

    map.args <- cdt.plotmap.args(don, climMapOp, .cdtData$EnvData$shp)

    opar <- par(mar = map.args$mar)
    map.args.add <- list(titre = .titre,
                         SHPOp = .cdtData$EnvData$SHPOp,
                         MapOp = climMapOp,
                         data.type = .data.type,
                         plot.type = .plot.type)
    map.args <- map.args[!(names(map.args) %in% "mar")]
    map.args <- c(map.args, map.args.add)
    par.plot <- do.call(cdt.plotmap.fun, map.args)

    ### Trend: overlay marks where the trend is significant (p < 0.05)
    if(str_trim(tclvalue(.cdtData$EnvData$climStat)) == "Trend"){
        if(.cdtData$EnvData$statpars$params$data.type == "cdtstation"){
            ipvl <- !is.na(don$p.value) & don$p.value < 0.05
            if(any(ipvl)){
                points(don$x0[ipvl], don$y0[ipvl], pch = 19, cex = 0.5)
            }
        }else{
            ## Gridded data: merge significant pixels into buffered patches and
            ## stipple each patch with a regular grid of small squares.
            ipvl <- c(don$pval)
            ipvl <- !is.na(ipvl) & ipvl < 0.05
            if(any(ipvl)){
                grd <- don$x[2] - don$x[1]
                dd <- expand.grid(x = don$x, y = don$y)
                coordinates(dd) <- ~x+y
                dd <- dd[ipvl, ]
                buffer <- rgeos::gBuffer(dd, width = grd * 1.02)
                dd <- sp::disaggregate(buffer)
                centr <- coordinates(dd)
                bbx <- lapply(seq_along(dd), function(i) bbox(dd[i]))
                ## stipple spacing, capped at 0.25 degree
                esp <- if(grd > 0.25) 0.25 else grd * 5
                esp <- if(esp > 0.25) 0.25 else esp
                dd <- lapply(seq_along(bbx), function(i){
                    ## regular point lattice centred on the patch centroid,
                    ## clipped to the patch polygon
                    xpt <- c(rev(seq(centr[i, 1], bbx[[i]][1, 1], -esp)[-1]), seq(centr[i, 1], bbx[[i]][1, 2], esp))
                    ypt <- c(rev(seq(centr[i, 2], bbx[[i]][2, 1], -esp)[-1]), seq(centr[i, 2], bbx[[i]][2, 2], esp))
                    xy <- expand.grid(x = xpt, y = ypt)
                    coordinates(xy) <- ~x+y
                    ij <- as.logical(over(xy, dd[i]))
                    ij[is.na(ij)] <- FALSE
                    coordinates(xy[ij, ])
                })
                dd <- do.call(rbind, dd)
                points(dd[, 1], dd[, 2], pch = 15, cex = 0.3)
            }
        }
    }

    ## scale bar
    cdt.plotmap.scalebar(climMapOp$scalebar)

    par(opar)
    return(par.plot)
}
#######################################
spatialAnalysis.plotTSMaps <- function(){
    ## Plot one time step of either the aggregated series ("Data") or the
    ## anomaly series ("Anomaly") as a map, using the options in TSMapOp.
    ## Returns the plotting parameters produced by cdt.plotmap.fun.
    TSMapOp <- .cdtData$EnvData$TSMapOp
    ## Select the data set to draw from the "TSData" radio control
    if(tclvalue(.cdtData$EnvData$TSData) == "Data")
        don <- .cdtData$EnvData$tsdata
    if(tclvalue(.cdtData$EnvData$TSData) == "Anomaly")
        don <- .cdtData$EnvData$anomData

    ## Map title: built from the analysis parameters unless user-supplied
    if(!TSMapOp$title$user){
        if(str_trim(tclvalue(.cdtData$EnvData$TSData)) == "Data"){
            params <- .cdtData$EnvData$statpars$params
            titre1 <- stringr::str_to_title(params$time.series$out.series)
            titre2 <- switch(params$aggr.series$aggr.fun, "sum" = "total", "mean" = "average", "count" = "number")
            ## for "count" aggregation, show the counting condition in the title
            titre3 <- if(params$aggr.series$aggr.fun == "count")
                        paste("(", params$aggr.series$opr.fun, params$aggr.series$opr.thres, ")") else NULL
            titre4 <- tclvalue(.cdtData$EnvData$TSDate)
            .titre <- paste(titre1, titre2, titre3, titre4)
        }
        if(str_trim(tclvalue(.cdtData$EnvData$TSData)) == "Anomaly"){
            params <- don$params
            titre1 <- stringr::str_to_title(params$time.series$out.series)
            titre2 <- "anomaly"
            titre3 <- if(params$analysis.method$perc.anom) "% of mean" else NULL
            titre4 <- tclvalue(.cdtData$EnvData$TSDate)
            .titre <- paste(titre1, titre2, titre3, titre4)
        }
    }else .titre <- TSMapOp$title$title

    #################
    ## Assemble the arguments for the generic CDT map-plotting routine
    .data.type <- .cdtData$EnvData$plot.maps$.data.type
    .plot.type <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$plot.type))

    map.args <- cdt.plotmap.args(don, TSMapOp, .cdtData$EnvData$shp)

    opar <- par(mar = map.args$mar)
    map.args.add <- list(titre = .titre,
                         SHPOp = .cdtData$EnvData$SHPOp,
                         MapOp = TSMapOp,
                         data.type = .data.type,
                         plot.type = .plot.type)
    map.args <- map.args[!(names(map.args) %in% "mar")]
    map.args <- c(map.args, map.args.add)
    par.plot <- do.call(cdt.plotmap.fun, map.args)

    ## scale bar
    cdt.plotmap.scalebar(TSMapOp$scalebar)

    par(opar)
    return(par.plot)
}
#######################################
spatialAnalysis.plotTSGraph <- function(){
TSGraphOp <- .cdtData$EnvData$TSGraphOp
if(.cdtData$EnvData$statpars$params$data.type == "cdtstation"){
ixy <- which(.cdtData$EnvData$tsdata$id == str_trim(tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp)))
if(length(ixy) == 0){
Insert.Messages.Out("Station not found", format = TRUE)
return(NULL)
}
don <- .cdtData$EnvData$tsdata$data[, ixy]
dates <- .cdtData$EnvData$tsdata$date
daty <- as.numeric(substr(dates, 1, 4))
.cdtData$EnvData$location <- paste0("Station: ", .cdtData$EnvData$tsdata$id[ixy])
}else{
cdtdataset <- .cdtData$EnvData$cdtdataset
xloc <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$plot.maps$lonLOC)))
yloc <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$plot.maps$latLOC)))
xyloc <- cdtdataset.extarct.TS(cdtdataset, cdtdataset$fileInfo, xloc, yloc)
if(is.null(xyloc)) return(NULL)
don <- as.numeric(xyloc$data)
dates <- xyloc$date
######
year1 <- substr(dates, 1, 4)
mon1 <- substr(dates, 6, 7)
year2 <- substr(dates, 9, 12)
mon2 <- substr(dates, 14, 15)
if(all(year1 == year2)){
if(all(mon1 == mon2)) dateTS <- paste0(year1, mon1)
else{
dateTS <- if(mon1 == "01" & mon2 == "12") year1 else dates
}
}else dateTS <- dates
ipos <- which(.cdtData$EnvData$statpars$stats == str_trim(tclvalue(.cdtData$EnvData$climDate)))
idaty <- dateTS %in% .cdtData$EnvData$statpars$timeseries[[ipos]][[2]]
dates <- dateTS[idaty]
don <- don[idaty]
daty <- as.numeric(substr(dates, 1, 4))
.cdtData$EnvData$location <- paste0("Longitude: ", round(xloc, 5), ", Latitude: ", round(yloc, 5))
}
#########
GRAPHTYPE <- str_trim(tclvalue(.cdtData$EnvData$plot.maps$typeTSp))
#### ENSO
if(GRAPHTYPE %in% c("ENSO-Line", "ENSO-Barplot", "ENSO-Proba")){
if(nchar(dates[1]) == 4){
start.mon <- paste0(dates, "0115")
end.mon <- paste0(dates, "1215")
}
if(nchar(dates[1]) == 6){
start.mon <- paste0(dates, "15")
end.mon <- paste0(dates, "15")
}
if(nchar(dates[1]) == 15){
dates <- lapply(strsplit(dates, '_'), function(x) format(as.Date(paste0(x, "-15")), "%Y%m%d"))
start.mon <- sapply(dates, '[[', 1)
end.mon <- sapply(dates, '[[', 2)
}
ijoni <- cdt.index.flexseason(start.mon, end.mon, .cdtData$EnvData$ONI$date, "monthly")
oni <- sapply(ijoni$index, function(x) mean(.cdtData$EnvData$ONI$data[x], na.rm = TRUE))
oni[length(ijoni$nba) == 0] <- NA
oni[is.nan(oni)] <- NA
oni <- ifelse(oni >= 0.5, 3, ifelse(oni <= -0.5, 1, 2))
}
########
xlab0 <- ""
ylab0 <- ""
#########
optsgph <- switch(GRAPHTYPE,
"Line" = TSGraphOp$line,
"Barplot" = TSGraphOp$bar,
"ENSO-Line" = TSGraphOp$line.enso,
"ENSO-Barplot" = TSGraphOp$bar.enso,
"Anomaly" = TSGraphOp$anomaly,
"Probability" = TSGraphOp$proba,
"ENSO-Proba" = TSGraphOp$proba.enso)
## xlim, ylim, xlab, ylab
if(GRAPHTYPE %in% c("Probability", "ENSO-Proba")){
xlim <- range(don, na.rm = TRUE)
if(optsgph$xlim$is.min) xlim[1] <- as.numeric(optsgph$xlim$min)
if(optsgph$xlim$is.max) xlim[2] <- as.numeric(optsgph$xlim$max)
ylim <- c(0, 100)
ylab0 <- "Probability of Exceeding"
}else{
xlim <- range(daty, na.rm = TRUE)
if(optsgph$xlim$is.min) xlim[1] <- as.numeric(optsgph$xlim$min)
if(optsgph$xlim$is.max) xlim[2] <- as.numeric(optsgph$xlim$max)
idt <- daty >= xlim[1] & daty <= xlim[2]
daty <- daty[idt]
don <- don[idt]
ylim <- range(pretty(don))
if(GRAPHTYPE == "Anomaly")
if(optsgph$anom$perc.anom) ylab0 <- "Anomaly (% of Mean)"
}
if(optsgph$ylim$is.min) ylim[1] <- optsgph$ylim$min
if(optsgph$ylim$is.max) ylim[2] <- optsgph$ylim$max
xlab <- if(optsgph$axislabs$is.xlab) optsgph$axislabs$xlab else xlab0
ylab <- if(optsgph$axislabs$is.ylab) optsgph$axislabs$ylab else ylab0
if(optsgph$title$is.title){
titre <- optsgph$title$title
titre.pos <- optsgph$title$position
}else{
titre <- ""
titre.pos <- "top"
}
#########
if(GRAPHTYPE == "Line"){
legends <- NULL
if(optsgph$legend$is$mean){
legends$add$mean <- optsgph$legend$add$mean
legends$col$mean <- optsgph$legend$col$mean
legends$text$mean <- optsgph$legend$text$mean
legends$lwd$mean <- optsgph$legend$lwd$mean
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$averageTSp) == "1") legends$add$mean <- TRUE
}
if(optsgph$legend$is$linear){
legends$add$linear <- optsgph$legend$add$linear
legends$col$linear <- optsgph$legend$col$linear
legends$text$linear <- optsgph$legend$text$linear
legends$lwd$linear <- optsgph$legend$lwd$linear
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$trendTSp) == "1") legends$add$linear <- TRUE
}
if(optsgph$legend$is$tercile){
legends$add$tercile <- optsgph$legend$add$tercile
legends$col$tercile1 <- optsgph$legend$col$tercile1
legends$text$tercile1 <- optsgph$legend$text$tercile1
legends$col$tercile2 <- optsgph$legend$col$tercile2
legends$text$tercile2 <- optsgph$legend$text$tercile2
legends$lwd$tercile <- optsgph$legend$lwd$tercile
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$tercileTSp) == "1") legends$add$tercile <- TRUE
}
ret <- graphs.plot.line(daty, don, xlim = xlim, ylim = ylim,
xlab = xlab, ylab = ylab, ylab.sub = NULL,
title = titre, title.position = titre.pos, axis.font = 1,
plotl = optsgph$plot, legends = legends,
location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "Barplot"){
ret <- graphs.plot.bar(daty, don, xlim = xlim, ylim = ylim,
xlab = xlab, ylab = ylab, ylab.sub = NULL,
title = titre, title.position = titre.pos, axis.font = 1,
barcol = optsgph$colors$col,
location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "ENSO-Line"){
oni <- oni[idt]
legends <- NULL
if(optsgph$legend$is$mean){
legends$add$mean <- optsgph$legend$add$mean
legends$col$mean <- optsgph$legend$col$mean
legends$text$mean <- optsgph$legend$text$mean
legends$lwd$mean <- optsgph$legend$lwd$mean
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$averageTSp) == "1") legends$add$mean <- TRUE
}
if(optsgph$legend$is$linear){
legends$add$linear <- optsgph$legend$add$linear
legends$col$linear <- optsgph$legend$col$linear
legends$text$linear <- optsgph$legend$text$linear
legends$lwd$linear <- optsgph$legend$lwd$linear
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$trendTSp) == "1") legends$add$linear <- TRUE
}
if(optsgph$legend$is$tercile){
legends$add$tercile <- optsgph$legend$add$tercile
legends$col$tercile1 <- optsgph$legend$col$tercile1
legends$text$tercile1 <- optsgph$legend$text$tercile1
legends$col$tercile2 <- optsgph$legend$col$tercile2
legends$text$tercile2 <- optsgph$legend$text$tercile2
legends$lwd$tercile <- optsgph$legend$lwd$tercile
}else{
if(tclvalue(.cdtData$EnvData$plot.maps$tercileTSp) == "1") legends$add$tercile <- TRUE
}
ret <- graphs.plot.line.ENSO(daty, don, oni, xlim = xlim, ylim = ylim,
xlab = xlab, ylab = ylab, ylab.sub = NULL,
title = titre, title.position = titre.pos, axis.font = 1,
plotl = optsgph$plot, legends = legends,
location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "ENSO-Barplot"){
oni <- oni[idt]
ret <- graphs.plot.bar.ENSO(daty, don, oni, xlim = xlim, ylim = ylim,
xlab = xlab, ylab = ylab, ylab.sub = NULL,
title = titre, title.position = titre.pos, axis.font = 1,
barcol = optsgph$colors$col, location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "Anomaly"){
if(!optsgph$ylim$is.min & !optsgph$ylim$is.max) ylim <- NULL
loko <- c(optsgph$colors$negative, optsgph$colors$positive)
period <- range(daty, na.rm = TRUE)
if(optsgph$anom$basePeriod){
startYr <- optsgph$anom$startYr.anom
endYr <- optsgph$anom$endYr.anom
period <- c(startYr, endYr)
}
ret <- graphs.plot.bar.Anomaly(daty, don, period = period, percent = optsgph$anom$perc.anom,
xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, ylab.sub = NULL,
title = titre, title.position = titre.pos, axis.font = 1,
barcol = loko, location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "Probability"){
ret <- graphs.plot.proba(don, xlim = xlim, ylim = ylim,
xlab = xlab, xlab.sub = NULL, ylab = ylab,
title = titre, title.position = titre.pos, axis.font = 1,
proba = list(theoretical = optsgph$proba$theoretical),
plotp = optsgph$proba, plotl = optsgph$plot,
location = .cdtData$EnvData$location)
}
if(GRAPHTYPE == "ENSO-Proba"){
ret <- graphs.plot.proba.ENSO(don, oni, xlim = xlim, ylim = ylim,
xlab = xlab, xlab.sub = NULL, ylab = ylab,
title = titre, title.position = titre.pos, axis.font = 1,
plotl = optsgph$plot, location = .cdtData$EnvData$location)
}
return(ret)
}
##############################################################################
#' Render the spatial-analysis statistics map and wire up click-to-plot.
#'
#' Draws the map via spatialAnalysis.plotStatMaps inside a (possibly reused)
#' notebook tab, then binds a left-click handler that resolves the clicked
#' point to a station id (cdtstation data) or a pixel lon/lat (gridded data)
#' and opens the corresponding time-series graph tab.
#' Does nothing (returns NULL) while the analysis environment is not ready.
spatialAnalysis.DisplayStatMaps <- function(){
    if(is.null(.cdtData$EnvData) || is.null(.cdtData$EnvData$statpars)) return(NULL)

    ## Expose the station/grid coordinates to the plotting routines.
    .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')] <- .cdtData$EnvData$don[c('x0', 'y0', 'id')]

    ## Draw (or redraw) the statistics map in its notebook tab.
    mapContainer <- CDT.Display.Map.inter(spatialAnalysis.plotStatMaps,
                                          .cdtData$EnvData$tab$climMap,
                                          'Clim-Analysis-Maps')
    .cdtData$EnvData$tab$climMap <- imageNotebookTab_unik(mapContainer, .cdtData$EnvData$tab$climMap)

    ## Left click on the map canvas -> open the time series at that location.
    ## The formal names W, x, y are significant: tkbind passes the Tk
    ## %W/%x/%y percent substitutions to same-named arguments, so they
    ## must not be renamed.
    tkbind(mapContainer[[2]], "<Button-1>", function(W, x, y){
        dtype <- .cdtData$EnvData$plot.maps$data.type
        if(is.null(dtype)) return(NULL)

        if(dtype == "cdtstation"){
            ## Station data: snap the click to the nearest station id.
            clicked <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getStnIDLabel,
                                         stn.coords = .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')])
            if(clicked$plotTS)
                tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp) <- clicked$crd
        }else{
            ## Gridded data: convert the click to pixel coordinates.
            clicked <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getPixelLatlon)
            if(clicked$plotTS){
                tclvalue(.cdtData$EnvData$plot.maps$lonLOC) <- clicked$crd$x
                tclvalue(.cdtData$EnvData$plot.maps$latLOC) <- clicked$crd$y
            }
        }

        if(clicked$plotTS){
            graphContainer <- CDT.Display.Graph(spatialAnalysis.plotTSGraph, .cdtData$EnvData$tab$TSplot, 'Time-Series-Plot')
            .cdtData$EnvData$tab$TSplot <- imageNotebookTab_unik(graphContainer, .cdtData$EnvData$tab$TSplot)
        }
    })
}
#######################################
#' Render the aggregated time-series map and wire up click-to-plot.
#'
#' Draws the map via spatialAnalysis.plotTSMaps inside a (possibly reused)
#' notebook tab, then binds a left-click handler that resolves the clicked
#' point to a station id (cdtstation data) or a pixel lon/lat (gridded data)
#' and opens the corresponding time-series graph tab.
#' Does nothing (returns NULL) while the analysis environment is not ready.
spatialAnalysis.DisplayTSMaps <- function(){
    if(is.null(.cdtData$EnvData) || is.null(.cdtData$EnvData$statpars)) return(NULL)

    ## Expose the station/grid coordinates to the plotting routines.
    .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')] <- .cdtData$EnvData$tsdata[c('x0', 'y0', 'id')]

    ## Draw (or redraw) the aggregated-data map in its notebook tab.
    mapContainer <- CDT.Display.Map.inter(spatialAnalysis.plotTSMaps,
                                          .cdtData$EnvData$tab$TSMap,
                                          'Aggregated-Data')
    .cdtData$EnvData$tab$TSMap <- imageNotebookTab_unik(mapContainer, .cdtData$EnvData$tab$TSMap)

    ## Left click on the map canvas -> open the time series at that location.
    ## The formal names W, x, y are significant: tkbind passes the Tk
    ## %W/%x/%y percent substitutions to same-named arguments, so they
    ## must not be renamed.
    tkbind(mapContainer[[2]], "<Button-1>", function(W, x, y){
        dtype <- .cdtData$EnvData$plot.maps$data.type
        if(is.null(dtype)) return(NULL)

        if(dtype == "cdtstation"){
            ## Station data: snap the click to the nearest station id.
            clicked <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getStnIDLabel,
                                         stn.coords = .cdtData$EnvData$plot.maps[c('lon', 'lat', 'id')])
            if(clicked$plotTS)
                tclvalue(.cdtData$EnvData$plot.maps$stnIDTSp) <- clicked$crd
        }else{
            ## Gridded data: convert the click to pixel coordinates.
            clicked <- getIDLatLonCoords(W, x, y, mapContainer[[3]], getPixelLatlon)
            if(clicked$plotTS){
                tclvalue(.cdtData$EnvData$plot.maps$lonLOC) <- clicked$crd$x
                tclvalue(.cdtData$EnvData$plot.maps$latLOC) <- clicked$crd$y
            }
        }

        if(clicked$plotTS){
            graphContainer <- CDT.Display.Graph(spatialAnalysis.plotTSGraph, .cdtData$EnvData$tab$TSplot, 'Time-Series-Plot')
            .cdtData$EnvData$tab$TSplot <- imageNotebookTab_unik(graphContainer, .cdtData$EnvData$tab$TSplot)
        }
    })
}
|
# Load dependencies and the two JPN simulation data sets --------------------
#   data_JPN_all:         all 100000 simulations
#   data_JPN_only_tipped: the 56740 simulations that tipped
library(glmnet)

csv_JPN_all <- 'Z:/Uni Nils/Energy Science Master/Masterarbeit/Python/Marc GranovetterModell/pygranovetter/Workprogress Scripts/Auswertung/Arrays_all_cluster_simulations/Relaxed Lasso Data/JPN_all_Datframe_100000Simulations.csv'
data_JPN_all <- read.csv(csv_JPN_all)

csv_JPN_only_tipped <- 'Z:/Uni Nils/Energy Science Master/Masterarbeit/Python/Marc GranovetterModell/pygranovetter/Workprogress Scripts/Auswertung/Arrays_all_cluster_simulations/Relaxed Lasso Data/JPN_Only_tipped_Datframe_56740Simulations.csv'
data_JPN_only_tipped <- read.csv(csv_JPN_only_tipped)

# Recode the -2 sentinel to Inf everywhere in the full data frame.
# NOTE(review): this touches every column, not only t_tip — confirm no other
# column can legitimately contain -2.
data_JPN_all[data_JPN_all == -2] <- Inf
# JPN variables for all simulations and the only tipped ones
#
# Feature engineering for the "all simulations" frame. The two responses are
# pulled out first; then every raw predictor is copied under a JPN_*-prefixed
# name together with a family of transforms — log(x), log(1 - x), 1/x,
# sqrt(x), logistic 1/(1 + exp(-x)), x^2 — and the raw column is deleted.
#
# NOTE(review): raw columns are deleted by POSITION (`data_JPN_all[2]<- NULL`),
# so every statement below depends on the exact column order of the input CSV
# and on the deletions done before it. Do not reorder these statements;
# confirm the CSV header before editing.
# NOTE(review): log()/1/x transforms yield -Inf/NaN when a predictor is 0
# (or >= 1 for log(1 - x)) — confirm the predictor ranges exclude that.
#
# Responses: tipped indicator and tipping time.
JPN_tipped_or_not <- data_JPN_all$is_tipped
data_JPN_all[2]<- NULL
JPN_t_tip_all <- data_JPN_all$t_tip
# 1/t_tip; never-tipped runs were recoded -2 -> Inf above, so this is 0 there.
JPN_tipping_transformed_all <- 1/data_JPN_all$t_tip
data_JPN_all[2]<- NULL
# Theta_a and its transforms.
data_JPN_all$JPN_theta_a_all <- data_JPN_all$Theta_a
data_JPN_all$JPN_theta_a_all_log <- log(data_JPN_all$Theta_a)
data_JPN_all$JPN_theta_a_all_log_minus_1 <- log(1 - data_JPN_all$Theta_a)
data_JPN_all$JPN_1_div_theta_a_all <- 1/(data_JPN_all$Theta_a)
data_JPN_all$JPN_root_theta_a_all <- sqrt(data_JPN_all$Theta_a)
data_JPN_all$JPN_logistic_theta_a_all <- 1/(1 + exp( - data_JPN_all$Theta_a))
data_JPN_all$JPN_quad_theta_a_all <- (data_JPN_all$Theta_a)**2
data_JPN_all[2]<- NULL
# Theta_d and its transforms.
data_JPN_all$JPN_theta_d_all <- data_JPN_all$Theta_d
data_JPN_all$JPN_theta_d_all_log <- log(data_JPN_all$Theta_d)
data_JPN_all$JPN_theta_d_all_log_minus_1 <- log(1 - data_JPN_all$Theta_d)
data_JPN_all$JPN_1_div_theta_d_all <- 1/(data_JPN_all$Theta_d)
data_JPN_all$JPN_root_theta_d_all <- sqrt(data_JPN_all$Theta_d)
data_JPN_all$JPN_logistic_theta_d_all <- 1/(1 + exp( - data_JPN_all$Theta_d))
data_JPN_all$JPN_quad_theta_d_all <- (data_JPN_all$Theta_d)**2
# NOTE(review): deletes position 3, not 2 — presumably skips an extra raw
# column sitting between Theta_d and p_a while Theta_d (position 2) is
# removed further below; confirm against the CSV header.
data_JPN_all[3]<- NULL
# p_a and its transforms.
data_JPN_all$JPN_p_a_all <- data_JPN_all$p_a
data_JPN_all$JPN_p_a_all_log <- log(data_JPN_all$p_a)
data_JPN_all$JPN_p_a_all_log_minus_1 <- log(1 - data_JPN_all$p_a)
data_JPN_all$JPN_1_div_p_a_all <- 1/(data_JPN_all$p_a)
data_JPN_all$JPN_root_p_a_all <- sqrt(data_JPN_all$p_a)
data_JPN_all$JPN_logistic_p_a_all <- 1/(1 + exp( - data_JPN_all$p_a))
data_JPN_all$JPN_quad_p_a_all <- (data_JPN_all$p_a)**2
data_JPN_all[2]<- NULL
# p_d and its transforms.
data_JPN_all$JPN_p_d_all <- data_JPN_all$p_d
data_JPN_all$JPN_p_d_all_log <- log(data_JPN_all$p_d)
data_JPN_all$JPN_p_d_all_log_minus_1 <- log(1 - data_JPN_all$p_d)
data_JPN_all$JPN_1_div_p_d_all <- 1/(data_JPN_all$p_d)
data_JPN_all$JPN_root_p_d_all <- sqrt(data_JPN_all$p_d)
data_JPN_all$JPN_logistic_p_d_all <- 1/(1 + exp( - data_JPN_all$p_d))
data_JPN_all$JPN_quad_p_d_all <- (data_JPN_all$p_d)**2
data_JPN_all[2]<- NULL
# p_e and its transforms.
data_JPN_all$JPN_p_e_all <- data_JPN_all$p_e
data_JPN_all$JPN_p_e_all_log <- log(data_JPN_all$p_e)
data_JPN_all$JPN_p_e_all_log_minus_1 <- log(1 - data_JPN_all$p_e)
data_JPN_all$JPN_1_div_p_e_all <- 1/(data_JPN_all$p_e)
data_JPN_all$JPN_root_p_e_all <- sqrt(data_JPN_all$p_e)
data_JPN_all$JPN_logistic_p_e_all <- 1/(1 + exp( - data_JPN_all$p_e))
data_JPN_all$JPN_quad_p_e_all <- (data_JPN_all$p_e)**2
data_JPN_all[2]<- NULL
# p_en and its transforms.
data_JPN_all$JPN_p_en_all <- data_JPN_all$p_en
data_JPN_all$JPN_p_en_all_log <- log(data_JPN_all$p_en)
data_JPN_all$JPN_p_en_all_log_minus_1 <- log(1 - data_JPN_all$p_en)
data_JPN_all$JPN_1_div_p_en_all <- 1/(data_JPN_all$p_en)
data_JPN_all$JPN_root_p_en_all <- sqrt(data_JPN_all$p_en)
data_JPN_all$JPN_logistic_p_en_all <- 1/(1 + exp( - data_JPN_all$p_en))
data_JPN_all$JPN_quad_p_en_all <- (data_JPN_all$p_en)**2
data_JPN_all[2]<- NULL
# p_ra and its transforms.
data_JPN_all$JPN_p_ra_all <- data_JPN_all$p_ra
data_JPN_all$JPN_p_ra_all_log <- log(data_JPN_all$p_ra)
data_JPN_all$JPN_p_ra_all_log_minus_1 <- log(1 - data_JPN_all$p_ra)
data_JPN_all$JPN_1_div_p_ra_all <- 1/(data_JPN_all$p_ra)
data_JPN_all$JPN_root_p_ra_all <- sqrt(data_JPN_all$p_ra)
data_JPN_all$JPN_logistic_p_ra_all <- 1/(1 + exp( - data_JPN_all$p_ra))
data_JPN_all$JPN_quad_p_ra_all <- (data_JPN_all$p_ra)**2
data_JPN_all[2]<- NULL
# p_rd and its transforms.
data_JPN_all$JPN_p_rd_all <- data_JPN_all$p_rd
data_JPN_all$JPN_p_rd_all_log <- log(data_JPN_all$p_rd)
data_JPN_all$JPN_p_rd_all_log_minus_1 <- log(1 - data_JPN_all$p_rd)
data_JPN_all$JPN_1_div_p_rd_all <- 1/(data_JPN_all$p_rd)
data_JPN_all$JPN_root_p_rd_all <- sqrt(data_JPN_all$p_rd)
data_JPN_all$JPN_logistic_p_rd_all <- 1/(1 + exp( - data_JPN_all$p_rd))
data_JPN_all$JPN_quad_p_rd_all <- (data_JPN_all$p_rd)**2
data_JPN_all[2]<- NULL
# Count/size predictors are copied untransformed.
data_JPN_all$JPN_n_c_all <- data_JPN_all$n_c
data_JPN_all[2]<- NULL
data_JPN_all$JPN_n_p_all <- data_JPN_all$n_p
data_JPN_all[2]<- NULL
data_JPN_all$JPN_n_e_all <- data_JPN_all$n_e
data_JPN_all[2]<- NULL
data_JPN_all$JPN_N_all <- data_JPN_all$N
data_JPN_all[2]<- NULL
data_JPN_all$JPN_APL_all <- data_JPN_all$APL
data_JPN_all[2]<- NULL
# Hand-picked interaction terms.
data_JPN_all$JPN_all_extra_1 <- data_JPN_all$JPN_p_a_all * data_JPN_all$JPN_n_p_all
data_JPN_all$JPN_all_extra_2 <- data_JPN_all$JPN_p_e_all * data_JPN_all$JPN_n_e_all
data_JPN_all$JPN_all_extra_3 <- data_JPN_all$JPN_p_e_all * data_JPN_all$JPN_n_e_all * data_JPN_all$JPN_n_p_all
data_JPN_all$JPN_all_extra_4 <- data_JPN_all$JPN_p_d_all * data_JPN_all$JPN_n_c_all
# "Certainly" terms; log() of a non-positive argument here (e.g. n_c > N/2)
# produces NaN/-Inf with a warning.
data_JPN_all$JPN_all_certainly_1 <- log((data_JPN_all$JPN_N_all/2) - data_JPN_all$JPN_n_c_all)
data_JPN_all$JPN_all_certainly_2 <- log(data_JPN_all$JPN_n_c_all + ((data_JPN_all$JPN_n_p_all - data_JPN_all$JPN_n_c_all)/2))
# Drop the remaining first column (presumably a row-index column from the
# CSV export — confirm).
data_JPN_all[1]<- NULL
# JPN Only Tipped
#
# Same feature-engineering pattern as the "all simulations" section above,
# applied to the tipped-only frame (which has no is_tipped column): extract
# the t_tip response, then copy each raw predictor under a JPN_*_only_tipped
# name with its transform family and delete the raw column.
#
# NOTE(review): deletions are by POSITION (`data_JPN_only_tipped[2]<- NULL`),
# so statement order and the input CSV column order are load-bearing —
# confirm the header before editing.
JPN_t_tip_only_tipped <- data_JPN_only_tipped$t_tip
data_JPN_only_tipped[2]<- NULL
# Theta_a and its transforms.
data_JPN_only_tipped$JPN_theta_a_only_tipped <- data_JPN_only_tipped$Theta_a
data_JPN_only_tipped$JPN_theta_a_only_tipped_log <- log(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_theta_a_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_1_div_theta_a_only_tipped <- 1/(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_root_theta_a_only_tipped <- sqrt(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_logistic_theta_a_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$Theta_a))
data_JPN_only_tipped$JPN_quad_theta_a_only_tipped <- (data_JPN_only_tipped$Theta_a)**2
data_JPN_only_tipped[2]<- NULL
# Theta_d and its transforms.
data_JPN_only_tipped$JPN_theta_d_only_tipped <- data_JPN_only_tipped$Theta_d
data_JPN_only_tipped$JPN_theta_d_only_tipped_log <- log(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_theta_d_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_1_div_theta_d_only_tipped <- 1/(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_root_theta_d_only_tipped <- sqrt(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_logistic_theta_d_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$Theta_d))
data_JPN_only_tipped$JPN_quad_theta_d_only_tipped <- (data_JPN_only_tipped$Theta_d)**2
# NOTE(review): deletes position 3, not 2 — mirrors the all-simulations
# section; presumably skips an extra raw column between Theta_d and p_a.
# Confirm against the CSV header.
data_JPN_only_tipped[3]<- NULL
# p_a and its transforms.
data_JPN_only_tipped$JPN_p_a_only_tipped <- data_JPN_only_tipped$p_a
data_JPN_only_tipped$JPN_p_a_only_tipped_log <- log(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_p_a_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_1_div_p_a_only_tipped <- 1/(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_root_p_a_only_tipped <- sqrt(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_logistic_p_a_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_a))
data_JPN_only_tipped$JPN_quad_p_a_only_tipped <- (data_JPN_only_tipped$p_a)**2
data_JPN_only_tipped[2]<- NULL
# p_d and its transforms.
data_JPN_only_tipped$JPN_p_d_only_tipped <- data_JPN_only_tipped$p_d
data_JPN_only_tipped$JPN_p_d_only_tipped_log <- log(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_p_d_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_1_div_p_d_only_tipped <- 1/(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_root_p_d_only_tipped <- sqrt(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_logistic_p_d_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_d))
data_JPN_only_tipped$JPN_quad_p_d_only_tipped <- (data_JPN_only_tipped$p_d)**2
data_JPN_only_tipped[2]<- NULL
# p_e and its transforms.
data_JPN_only_tipped$JPN_p_e_only_tipped <- data_JPN_only_tipped$p_e
data_JPN_only_tipped$JPN_p_e_only_tipped_log <- log(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_p_e_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_1_div_p_e_only_tipped <- 1/(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_root_p_e_only_tipped <- sqrt(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_logistic_p_e_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_e))
data_JPN_only_tipped$JPN_quad_p_e_only_tipped <- (data_JPN_only_tipped$p_e)**2
data_JPN_only_tipped[2]<- NULL
# p_en and its transforms.
data_JPN_only_tipped$JPN_p_en_only_tipped <- data_JPN_only_tipped$p_en
data_JPN_only_tipped$JPN_p_en_only_tipped_log <- log(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_p_en_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_1_div_p_en_only_tipped <- 1/(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_root_p_en_only_tipped <- sqrt(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_logistic_p_en_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_en))
data_JPN_only_tipped$JPN_quad_p_en_only_tipped <- (data_JPN_only_tipped$p_en)**2
data_JPN_only_tipped[2]<- NULL
# p_ra and its transforms.
data_JPN_only_tipped$JPN_p_ra_only_tipped <- data_JPN_only_tipped$p_ra
data_JPN_only_tipped$JPN_p_ra_only_tipped_log <- log(data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_p_ra_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_1_div_p_ra_only_tipped <- 1/(data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_root_p_ra_only_tipped <- sqrt(data_JPN_only_tipped$p_ra)
# NOTE(review): unlike every other logistic transform in this script, this
# one is scaled by 0.0001 instead of 1/(1 + exp(-x)) — confirm intentional.
data_JPN_only_tipped$JPN_logistic_p_ra_only_tipped <- 0.0001/(1 + exp( - data_JPN_only_tipped$p_ra))
data_JPN_only_tipped$JPN_quad_p_ra_only_tipped <- (data_JPN_only_tipped$p_ra)**2
data_JPN_only_tipped[2]<- NULL
# p_rd and its transforms.
data_JPN_only_tipped$JPN_p_rd_only_tipped <- data_JPN_only_tipped$p_rd
data_JPN_only_tipped$JPN_p_rd_only_tipped_log <- log(data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_p_rd_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_1_div_p_rd_only_tipped <- 1/(data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_root_p_rd_only_tipped <- sqrt(data_JPN_only_tipped$p_rd)
# NOTE(review): same 0.0001 scaling as p_ra above — confirm intentional.
data_JPN_only_tipped$JPN_logistic_p_rd_only_tipped <- 0.0001/(1 + exp( - data_JPN_only_tipped$p_rd))
data_JPN_only_tipped$JPN_quad_p_rd_only_tipped <- (data_JPN_only_tipped$p_rd)**2
data_JPN_only_tipped[2]<- NULL
# Count/size predictors are copied untransformed.
data_JPN_only_tipped$JPN_n_c_only_tipped <- data_JPN_only_tipped$n_c
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_n_p_only_tipped <- data_JPN_only_tipped$n_p
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_n_e_only_tipped <- data_JPN_only_tipped$n_e
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_N_only_tipped <- data_JPN_only_tipped$N
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_APL_only_tipped <- data_JPN_only_tipped$APL
data_JPN_only_tipped[2]<- NULL
# Hand-picked interaction terms (mirroring the all-simulations section).
data_JPN_only_tipped$JPN_only_tipped_extra_1 <- data_JPN_only_tipped$JPN_p_a_only_tipped * data_JPN_only_tipped$JPN_n_p_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_2 <- data_JPN_only_tipped$JPN_p_e_only_tipped * data_JPN_only_tipped$JPN_n_e_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_3 <- data_JPN_only_tipped$JPN_p_e_only_tipped * data_JPN_only_tipped$JPN_n_e_only_tipped * data_JPN_only_tipped$JPN_n_p_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_4 <- data_JPN_only_tipped$JPN_p_d_only_tipped * data_JPN_only_tipped$JPN_n_c_only_tipped
# "Certainly" terms; log() of a non-positive argument here produces
# NaN/-Inf with a warning.
data_JPN_only_tipped$JPN_only_tipped_certainly_1 <- log((data_JPN_only_tipped$JPN_N_only_tipped/2) - data_JPN_only_tipped$JPN_n_c_only_tipped)
data_JPN_only_tipped$JPN_only_tipped_certainly_2 <- log(data_JPN_only_tipped$JPN_n_c_only_tipped + ((data_JPN_only_tipped$JPN_n_p_only_tipped - data_JPN_only_tipped$JPN_n_c_only_tipped)/2))
# Drop the remaining first column (presumably a row-index column from the
# CSV export — confirm).
data_JPN_only_tipped[1]<- NULL
# -----------------------------------------------------------------------
# Do the regressions
#
# Three relaxed-lasso fits (cv.glmnet with relax = TRUE, alpha = 1), each
# followed by an un-penalized refit (lm/glm) restricted to the predictors
# the lasso kept at the lambda.1se / gamma.1se rule:
#   1) gaussian:  log(t_tip) on the tipped-only data
#   2) gaussian:  1/t_tip on all simulations (0 for never-tipped runs,
#                 since their t_tip was recoded to Inf at load time)
#   3) binomial:  tipped-or-not on all simulations
set.seed(42) # to avoid randomized results
JPN_parameter_log_ttip <- model.matrix(log(JPN_t_tip_only_tipped) ~. -1, data =data_JPN_only_tipped)
JPN_model_log_ttip <- cv.glmnet(JPN_parameter_log_ttip, log(JPN_t_tip_only_tipped), relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='gaussian')
JPN_model_log_ttip_coeff_values <- coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se")
# Names of the coefficients kept by the lasso; the first entry is "(Intercept)".
JPN_model_log_ttip_coeff <- c(rownames(coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
# Drop the intercept entry. [-1] (rather than 2:length(x)) stays correct in
# the edge case where the lasso keeps no predictor at all: it yields
# character(0) instead of the bogus c(x[2], x[1]) that 2:1 would produce.
JPN_model_log_ttip_coeff_keep <- JPN_model_log_ttip_coeff[-1]
data_JPN_only_tipped_ols_coef_log_ttip <- data_JPN_only_tipped[JPN_model_log_ttip_coeff_keep]
# Un-penalized refit on the selected predictors only.
JPN_OLS_model_log_ttip <- lm(log(JPN_t_tip_only_tipped) ~. -1, data=data_JPN_only_tipped_ols_coef_log_ttip)
JPN_OLS_model_log_ttip_coeff_values <- summary(JPN_OLS_model_log_ttip)
set.seed(42)
JPN_parameter_ttip_trans <- model.matrix(JPN_tipping_transformed_all ~. , data=data_JPN_all)[,-1]
JPN_model_ttip_trans <- cv.glmnet(JPN_parameter_ttip_trans, JPN_tipping_transformed_all, relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='gaussian')
JPN_model_ttip_trans_coeff_values <- coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se")
JPN_model_ttip_trans_coeff <- c(rownames(coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
# Drop the intercept entry (see note above).
JPN_model_ttip_trans_coeff_keep <- JPN_model_ttip_trans_coeff[-1]
data_JPN_all_OLS_coef_ttip_trans <- data_JPN_all[JPN_model_ttip_trans_coeff_keep]
JPN_OLS_model_ttip_trans <- lm(JPN_tipping_transformed_all ~. -1, data=data_JPN_all_OLS_coef_ttip_trans)
JPN_OLS_model_ttip_trans_coeff_values <- summary(JPN_OLS_model_ttip_trans)
# Reset the seed before the binomial fit as well, so its CV fold assignment
# is reproducible — consistent with the two gaussian fits above (this
# set.seed was missing before, making the logistic results run-dependent).
set.seed(42)
JPN_parameter_logistic <- model.matrix(JPN_tipped_or_not ~. , data=data_JPN_all)[,-1]
JPN_model_logistic <- cv.glmnet(JPN_parameter_logistic, JPN_tipped_or_not, relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='binomial')
JPN_model_logistic_coeff_values <- coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se")
JPN_model_logistic_coeff <- c(rownames(coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
# Drop the intercept entry (see note above).
JPN_model_logistic_coeff_keep <- JPN_model_logistic_coeff[-1]
data_JPN_all_OLS_coef_logistic <- data_JPN_all[JPN_model_logistic_coeff_keep]
JPN_OLS_model_logistic <- glm(JPN_tipped_or_not ~. -1, data=data_JPN_all_OLS_coef_logistic, family='binomial')
JPN_OLS_model_logistic_coeff_values <- summary(JPN_OLS_model_logistic)
|
/Relaxed Lasso Regressions/RProject_JPN/RelaxedLasso_FinalAnalysis_JPN.R
|
no_license
|
NilsDunker/Master-thesis-Dunker
|
R
| false
| false
| 15,086
|
r
|
# read and load data
library(glmnet)
csv_JPN_all <- 'Z:/Uni Nils/Energy Science Master/Masterarbeit/Python/Marc GranovetterModell/pygranovetter/Workprogress Scripts/Auswertung/Arrays_all_cluster_simulations/Relaxed Lasso Data/JPN_all_Datframe_100000Simulations.csv'
csv_JPN_only_tipped <- 'Z:/Uni Nils/Energy Science Master/Masterarbeit/Python/Marc GranovetterModell/pygranovetter/Workprogress Scripts/Auswertung/Arrays_all_cluster_simulations/Relaxed Lasso Data/JPN_Only_tipped_Datframe_56740Simulations.csv'
data_JPN_all <- read.csv(file = csv_JPN_all)
data_JPN_only_tipped <- read.csv(file = csv_JPN_only_tipped)
data_JPN_all[data_JPN_all==-2] <- Inf
# JPN variables for all simulations and the only tipped ones
JPN_tipped_or_not <- data_JPN_all$is_tipped
data_JPN_all[2]<- NULL
JPN_t_tip_all <- data_JPN_all$t_tip
JPN_tipping_transformed_all <- 1/data_JPN_all$t_tip
data_JPN_all[2]<- NULL
data_JPN_all$JPN_theta_a_all <- data_JPN_all$Theta_a
data_JPN_all$JPN_theta_a_all_log <- log(data_JPN_all$Theta_a)
data_JPN_all$JPN_theta_a_all_log_minus_1 <- log(1 - data_JPN_all$Theta_a)
data_JPN_all$JPN_1_div_theta_a_all <- 1/(data_JPN_all$Theta_a)
data_JPN_all$JPN_root_theta_a_all <- sqrt(data_JPN_all$Theta_a)
data_JPN_all$JPN_logistic_theta_a_all <- 1/(1 + exp( - data_JPN_all$Theta_a))
data_JPN_all$JPN_quad_theta_a_all <- (data_JPN_all$Theta_a)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_theta_d_all <- data_JPN_all$Theta_d
data_JPN_all$JPN_theta_d_all_log <- log(data_JPN_all$Theta_d)
data_JPN_all$JPN_theta_d_all_log_minus_1 <- log(1 - data_JPN_all$Theta_d)
data_JPN_all$JPN_1_div_theta_d_all <- 1/(data_JPN_all$Theta_d)
data_JPN_all$JPN_root_theta_d_all <- sqrt(data_JPN_all$Theta_d)
data_JPN_all$JPN_logistic_theta_d_all <- 1/(1 + exp( - data_JPN_all$Theta_d))
data_JPN_all$JPN_quad_theta_d_all <- (data_JPN_all$Theta_d)**2
data_JPN_all[3]<- NULL
data_JPN_all$JPN_p_a_all <- data_JPN_all$p_a
data_JPN_all$JPN_p_a_all_log <- log(data_JPN_all$p_a)
data_JPN_all$JPN_p_a_all_log_minus_1 <- log(1 - data_JPN_all$p_a)
data_JPN_all$JPN_1_div_p_a_all <- 1/(data_JPN_all$p_a)
data_JPN_all$JPN_root_p_a_all <- sqrt(data_JPN_all$p_a)
data_JPN_all$JPN_logistic_p_a_all <- 1/(1 + exp( - data_JPN_all$p_a))
data_JPN_all$JPN_quad_p_a_all <- (data_JPN_all$p_a)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_p_d_all <- data_JPN_all$p_d
data_JPN_all$JPN_p_d_all_log <- log(data_JPN_all$p_d)
data_JPN_all$JPN_p_d_all_log_minus_1 <- log(1 - data_JPN_all$p_d)
data_JPN_all$JPN_1_div_p_d_all <- 1/(data_JPN_all$p_d)
data_JPN_all$JPN_root_p_d_all <- sqrt(data_JPN_all$p_d)
data_JPN_all$JPN_logistic_p_d_all <- 1/(1 + exp( - data_JPN_all$p_d))
data_JPN_all$JPN_quad_p_d_all <- (data_JPN_all$p_d)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_p_e_all <- data_JPN_all$p_e
data_JPN_all$JPN_p_e_all_log <- log(data_JPN_all$p_e)
data_JPN_all$JPN_p_e_all_log_minus_1 <- log(1 - data_JPN_all$p_e)
data_JPN_all$JPN_1_div_p_e_all <- 1/(data_JPN_all$p_e)
data_JPN_all$JPN_root_p_e_all <- sqrt(data_JPN_all$p_e)
data_JPN_all$JPN_logistic_p_e_all <- 1/(1 + exp( - data_JPN_all$p_e))
data_JPN_all$JPN_quad_p_e_all <- (data_JPN_all$p_e)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_p_en_all <- data_JPN_all$p_en
data_JPN_all$JPN_p_en_all_log <- log(data_JPN_all$p_en)
data_JPN_all$JPN_p_en_all_log_minus_1 <- log(1 - data_JPN_all$p_en)
data_JPN_all$JPN_1_div_p_en_all <- 1/(data_JPN_all$p_en)
data_JPN_all$JPN_root_p_en_all <- sqrt(data_JPN_all$p_en)
data_JPN_all$JPN_logistic_p_en_all <- 1/(1 + exp( - data_JPN_all$p_en))
data_JPN_all$JPN_quad_p_en_all <- (data_JPN_all$p_en)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_p_ra_all <- data_JPN_all$p_ra
data_JPN_all$JPN_p_ra_all_log <- log(data_JPN_all$p_ra)
data_JPN_all$JPN_p_ra_all_log_minus_1 <- log(1 - data_JPN_all$p_ra)
data_JPN_all$JPN_1_div_p_ra_all <- 1/(data_JPN_all$p_ra)
data_JPN_all$JPN_root_p_ra_all <- sqrt(data_JPN_all$p_ra)
data_JPN_all$JPN_logistic_p_ra_all <- 1/(1 + exp( - data_JPN_all$p_ra))
data_JPN_all$JPN_quad_p_ra_all <- (data_JPN_all$p_ra)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_p_rd_all <- data_JPN_all$p_rd
data_JPN_all$JPN_p_rd_all_log <- log(data_JPN_all$p_rd)
data_JPN_all$JPN_p_rd_all_log_minus_1 <- log(1 - data_JPN_all$p_rd)
data_JPN_all$JPN_1_div_p_rd_all <- 1/(data_JPN_all$p_rd)
data_JPN_all$JPN_root_p_rd_all <- sqrt(data_JPN_all$p_rd)
data_JPN_all$JPN_logistic_p_rd_all <- 1/(1 + exp( - data_JPN_all$p_rd))
data_JPN_all$JPN_quad_p_rd_all <- (data_JPN_all$p_rd)**2
data_JPN_all[2]<- NULL
data_JPN_all$JPN_n_c_all <- data_JPN_all$n_c
data_JPN_all[2]<- NULL
data_JPN_all$JPN_n_p_all <- data_JPN_all$n_p
data_JPN_all[2]<- NULL
data_JPN_all$JPN_n_e_all <- data_JPN_all$n_e
data_JPN_all[2]<- NULL
data_JPN_all$JPN_N_all <- data_JPN_all$N
data_JPN_all[2]<- NULL
data_JPN_all$JPN_APL_all <- data_JPN_all$APL
data_JPN_all[2]<- NULL
data_JPN_all$JPN_all_extra_1 <- data_JPN_all$JPN_p_a_all * data_JPN_all$JPN_n_p_all
data_JPN_all$JPN_all_extra_2 <- data_JPN_all$JPN_p_e_all * data_JPN_all$JPN_n_e_all
data_JPN_all$JPN_all_extra_3 <- data_JPN_all$JPN_p_e_all * data_JPN_all$JPN_n_e_all * data_JPN_all$JPN_n_p_all
data_JPN_all$JPN_all_extra_4 <- data_JPN_all$JPN_p_d_all * data_JPN_all$JPN_n_c_all
data_JPN_all$JPN_all_certainly_1 <- log((data_JPN_all$JPN_N_all/2) - data_JPN_all$JPN_n_c_all)
data_JPN_all$JPN_all_certainly_2 <- log(data_JPN_all$JPN_n_c_all + ((data_JPN_all$JPN_n_p_all - data_JPN_all$JPN_n_c_all)/2))
data_JPN_all[1]<- NULL
# JPN Only Tipped
JPN_t_tip_only_tipped <- data_JPN_only_tipped$t_tip
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_theta_a_only_tipped <- data_JPN_only_tipped$Theta_a
data_JPN_only_tipped$JPN_theta_a_only_tipped_log <- log(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_theta_a_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_1_div_theta_a_only_tipped <- 1/(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_root_theta_a_only_tipped <- sqrt(data_JPN_only_tipped$Theta_a)
data_JPN_only_tipped$JPN_logistic_theta_a_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$Theta_a))
data_JPN_only_tipped$JPN_quad_theta_a_only_tipped <- (data_JPN_only_tipped$Theta_a)**2
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_theta_d_only_tipped <- data_JPN_only_tipped$Theta_d
data_JPN_only_tipped$JPN_theta_d_only_tipped_log <- log(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_theta_d_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_1_div_theta_d_only_tipped <- 1/(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_root_theta_d_only_tipped <- sqrt(data_JPN_only_tipped$Theta_d)
data_JPN_only_tipped$JPN_logistic_theta_d_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$Theta_d))
data_JPN_only_tipped$JPN_quad_theta_d_only_tipped <- (data_JPN_only_tipped$Theta_d)**2
data_JPN_only_tipped[3]<- NULL
data_JPN_only_tipped$JPN_p_a_only_tipped <- data_JPN_only_tipped$p_a
data_JPN_only_tipped$JPN_p_a_only_tipped_log <- log(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_p_a_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_1_div_p_a_only_tipped <- 1/(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_root_p_a_only_tipped <- sqrt(data_JPN_only_tipped$p_a)
data_JPN_only_tipped$JPN_logistic_p_a_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_a))
# ---------------------------------------------------------------------------
# Feature engineering on data_JPN_only_tipped (continues the pattern started
# above this chunk). For each raw probability column p_* a family of derived
# predictors is added under a "JPN_*_only_tipped" name — identity, log(p),
# log(1 - p), 1/p, sqrt(p), logistic 1/(1 + exp(-p)) and p^2 — after which
# the raw column is removed.
# NOTE(review): columns are removed positionally via `[2] <- NULL` /
# `[1] <- NULL`, so this block silently depends on the exact column order of
# data_JPN_only_tipped as built upstream; reordering upstream columns would
# delete the wrong data.
data_JPN_only_tipped$JPN_quad_p_a_only_tipped <- (data_JPN_only_tipped$p_a)**2
data_JPN_only_tipped[2]<- NULL
# Transforms of p_d. log(p), log(1 - p) and 1/p yield NaN or +-Inf for values
# outside (0, 1) — presumably the probabilities are strictly inside (0, 1);
# TODO confirm.
data_JPN_only_tipped$JPN_p_d_only_tipped <- data_JPN_only_tipped$p_d
data_JPN_only_tipped$JPN_p_d_only_tipped_log <- log(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_p_d_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_1_div_p_d_only_tipped <- 1/(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_root_p_d_only_tipped <- sqrt(data_JPN_only_tipped$p_d)
data_JPN_only_tipped$JPN_logistic_p_d_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_d))
data_JPN_only_tipped$JPN_quad_p_d_only_tipped <- (data_JPN_only_tipped$p_d)**2
data_JPN_only_tipped[2]<- NULL
# Transforms of p_e.
data_JPN_only_tipped$JPN_p_e_only_tipped <- data_JPN_only_tipped$p_e
data_JPN_only_tipped$JPN_p_e_only_tipped_log <- log(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_p_e_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_1_div_p_e_only_tipped <- 1/(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_root_p_e_only_tipped <- sqrt(data_JPN_only_tipped$p_e)
data_JPN_only_tipped$JPN_logistic_p_e_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_e))
data_JPN_only_tipped$JPN_quad_p_e_only_tipped <- (data_JPN_only_tipped$p_e)**2
data_JPN_only_tipped[2]<- NULL
# Transforms of p_en.
data_JPN_only_tipped$JPN_p_en_only_tipped <- data_JPN_only_tipped$p_en
data_JPN_only_tipped$JPN_p_en_only_tipped_log <- log(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_p_en_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_1_div_p_en_only_tipped <- 1/(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_root_p_en_only_tipped <- sqrt(data_JPN_only_tipped$p_en)
data_JPN_only_tipped$JPN_logistic_p_en_only_tipped <- 1/(1 + exp( - data_JPN_only_tipped$p_en))
data_JPN_only_tipped$JPN_quad_p_en_only_tipped <- (data_JPN_only_tipped$p_en)**2
data_JPN_only_tipped[2]<- NULL
# Transforms of p_ra.
# NOTE(review): unlike the other columns, the logistic transforms of p_ra and
# p_rd below are scaled by 0.0001 (0.0001/(1 + exp(-p)) instead of
# 1/(1 + exp(-p))). Confirm this asymmetry is intentional and not a typo.
data_JPN_only_tipped$JPN_p_ra_only_tipped <- data_JPN_only_tipped$p_ra
data_JPN_only_tipped$JPN_p_ra_only_tipped_log <- log(data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_p_ra_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_1_div_p_ra_only_tipped <- 1/(data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_root_p_ra_only_tipped <- sqrt(data_JPN_only_tipped$p_ra)
data_JPN_only_tipped$JPN_logistic_p_ra_only_tipped <- 0.0001/(1 + exp( - data_JPN_only_tipped$p_ra))
data_JPN_only_tipped$JPN_quad_p_ra_only_tipped <- (data_JPN_only_tipped$p_ra)**2
data_JPN_only_tipped[2]<- NULL
# Transforms of p_rd (same 0.0001 logistic scaling as p_ra — see note above).
data_JPN_only_tipped$JPN_p_rd_only_tipped <- data_JPN_only_tipped$p_rd
data_JPN_only_tipped$JPN_p_rd_only_tipped_log <- log(data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_p_rd_only_tipped_log_minus_1 <- log(1 - data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_1_div_p_rd_only_tipped <- 1/(data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_root_p_rd_only_tipped <- sqrt(data_JPN_only_tipped$p_rd)
data_JPN_only_tipped$JPN_logistic_p_rd_only_tipped <- 0.0001/(1 + exp( - data_JPN_only_tipped$p_rd))
data_JPN_only_tipped$JPN_quad_p_rd_only_tipped <- (data_JPN_only_tipped$p_rd)**2
data_JPN_only_tipped[2]<- NULL
# Count / size columns are copied verbatim under prefixed names; originals
# are dropped positionally as above.
data_JPN_only_tipped$JPN_n_c_only_tipped <- data_JPN_only_tipped$n_c
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_n_p_only_tipped <- data_JPN_only_tipped$n_p
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_n_e_only_tipped <- data_JPN_only_tipped$n_e
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_N_only_tipped <- data_JPN_only_tipped$N
data_JPN_only_tipped[2]<- NULL
data_JPN_only_tipped$JPN_APL_only_tipped <- data_JPN_only_tipped$APL
data_JPN_only_tipped[2]<- NULL
# Interaction features mixing probabilities with the count columns.
data_JPN_only_tipped$JPN_only_tipped_extra_1 <- data_JPN_only_tipped$JPN_p_a_only_tipped * data_JPN_only_tipped$JPN_n_p_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_2 <- data_JPN_only_tipped$JPN_p_e_only_tipped * data_JPN_only_tipped$JPN_n_e_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_3 <- data_JPN_only_tipped$JPN_p_e_only_tipped * data_JPN_only_tipped$JPN_n_e_only_tipped * data_JPN_only_tipped$JPN_n_p_only_tipped
data_JPN_only_tipped$JPN_only_tipped_extra_4 <- data_JPN_only_tipped$JPN_p_d_only_tipped * data_JPN_only_tipped$JPN_n_c_only_tipped
# Log-composite features; log() of a non-positive argument yields NaN with a
# warning — presumably n_c < N/2 and n_c <= n_p always hold here; TODO confirm.
data_JPN_only_tipped$JPN_only_tipped_certainly_1 <- log((data_JPN_only_tipped$JPN_N_only_tipped/2) - data_JPN_only_tipped$JPN_n_c_only_tipped)
data_JPN_only_tipped$JPN_only_tipped_certainly_2 <- log(data_JPN_only_tipped$JPN_n_c_only_tipped + ((data_JPN_only_tipped$JPN_n_p_only_tipped - data_JPN_only_tipped$JPN_n_c_only_tipped)/2))
# Drop the remaining raw column by position (see order-dependence note above).
data_JPN_only_tipped[1]<- NULL
# -----------------------------------------------------------------------
# Do the regressions
# Workflow (repeated three times below): fit a relaxed LASSO with cross-
# validation (cv.glmnet over a gamma grid 0..1), keep the predictors whose
# coefficient at (lambda.1se, gamma.1se) is nonzero, then refit an
# un-penalized model (lm/glm without intercept) on just those predictors.
# The response objects (JPN_t_tip_only_tipped, JPN_tipping_transformed_all,
# JPN_tipped_or_not) and data_JPN_all are defined earlier in the file.
set.seed(42) # to avoid randomized results
JPN_parameter_log_ttip <- model.matrix(log(JPN_t_tip_only_tipped) ~. -1, data =data_JPN_only_tipped)
JPN_model_log_ttip <- cv.glmnet(JPN_parameter_log_ttip, log(JPN_t_tip_only_tipped), relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='gaussian')
JPN_model_log_ttip_coeff_values <- coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se")
# Names of the nonzero coefficients; element 1 is the intercept row, which is
# sliced off below before subsetting the data frame.
JPN_model_log_ttip_coeff <- c(rownames(coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_log_ttip, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
JPN_model_log_ttip_coeff_keep <- JPN_model_log_ttip_coeff[2:length(JPN_model_log_ttip_coeff)]
data_JPN_only_tipped_ols_coef_log_ttip <- data_JPN_only_tipped[JPN_model_log_ttip_coeff_keep]
JPN_OLS_model_log_ttip <- lm(log(JPN_t_tip_only_tipped) ~. -1, data=data_JPN_only_tipped_ols_coef_log_ttip)
JPN_OLS_model_log_ttip_coeff_values <- summary(JPN_OLS_model_log_ttip)
# Same pipeline for the transformed tipping-time response on the full data.
set.seed(42)
JPN_parameter_ttip_trans <- model.matrix(JPN_tipping_transformed_all ~. , data=data_JPN_all)[,-1]
JPN_model_ttip_trans <- cv.glmnet(JPN_parameter_ttip_trans, JPN_tipping_transformed_all, relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='gaussian')
JPN_model_ttip_trans_coeff_values <- coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se")
JPN_model_ttip_trans_coeff <- c(rownames(coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_ttip_trans, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
JPN_model_ttip_trans_coeff_keep <- JPN_model_ttip_trans_coeff[2:length(JPN_model_ttip_trans_coeff)]
data_JPN_all_OLS_coef_ttip_trans <- data_JPN_all[JPN_model_ttip_trans_coeff_keep]
JPN_OLS_model_ttip_trans <- lm(JPN_tipping_transformed_all ~. -1, data=data_JPN_all_OLS_coef_ttip_trans)
JPN_OLS_model_ttip_trans_coeff_values <- summary(JPN_OLS_model_ttip_trans)
# Logistic (tipped vs not) version of the same pipeline.
# NOTE(review): no set.seed() before this cv.glmnet, so the CV folds (and
# hence which predictors get selected) are not reproducible — presumably an
# oversight, given the two seeded calls above.
JPN_parameter_logistic <- model.matrix(JPN_tipped_or_not ~. , data=data_JPN_all)[,-1]
JPN_model_logistic <- cv.glmnet(JPN_parameter_logistic, JPN_tipped_or_not, relax = TRUE, trace.it = TRUE, gamma=seq(from=0, to=1, by=0.1), alpha = 1, family='binomial')
JPN_model_logistic_coeff_values <- coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se")
JPN_model_logistic_coeff <- c(rownames(coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se"))[coef(JPN_model_logistic, s="lambda.1se", gamma="gamma.1se")[,1]!= 0])
JPN_model_logistic_coeff_keep <- JPN_model_logistic_coeff[2:length(JPN_model_logistic_coeff)]
data_JPN_all_OLS_coef_logistic <- data_JPN_all[JPN_model_logistic_coeff_keep]
JPN_OLS_model_logistic <- glm(JPN_tipped_or_not ~. -1, data=data_JPN_all_OLS_coef_logistic, family='binomial')
JPN_OLS_model_logistic_coeff_values <- summary(JPN_OLS_model_logistic)
|
\name{macc-package}
\alias{macc-package}
\docType{package}
\title{
Causal Mediation Analysis under Correlated Errors
}
\description{
macc performs causal mediation analysis under confounding or correlated errors. This package includes a single level mediation model, a two-level mediation model and a three-level mediation model for data with hierarchical structure. Under the two/three-level mediation model, the correlation parameter is identifiable and estimated based on a hierarchical-likelihood or a two-stage method.
}
\details{
\tabular{ll}{
Package: \tab macc \cr
Type: \tab Package \cr
Version: \tab 1.0.1 \cr
Date: \tab 2017-08-20 \cr
License: \tab GPL (>=2) \cr
}
}
\author{
Yi Zhao <yi_zhao@alumni.brown.edu> and Xi Luo <xi.rossi.luo@gmail.com> \cr
Maintainer: Yi Zhao <yi_zhao@alumni.brown.edu>
}
\references{Zhao, Y., & Luo, X. (2014). \emph{Estimating Mediation Effects under Correlated Errors with an Application to fMRI.} arXiv preprint arXiv:1410.7217.
}
\keyword{ package }
|
/man/macc-package.Rd
|
no_license
|
cran/macc
|
R
| false
| false
| 996
|
rd
|
\name{macc-package}
\alias{macc-package}
\docType{package}
\title{
Causal Mediation Analysis under Correlated Errors
}
\description{
macc performs causal mediation analysis under confounding or correlated errors. This package includes a single level mediation model, a two-level mediation model and a three-level mediation model for data with hierarchical structure. Under the two/three-level mediation model, the correlation parameter is identifiable and estimated based on a hierarchical-likelihood or a two-stage method.
}
\details{
\tabular{ll}{
Package: \tab macc \cr
Type: \tab Package \cr
Version: \tab 1.0.1 \cr
Date: \tab 2017-08-20 \cr
License: \tab GPL (>=2) \cr
}
}
\author{
Yi Zhao <yi_zhao@alumni.brown.edu> and Xi Luo <xi.rossi.luo@gmail.com> \cr
Maintainer: Yi Zhao <yi_zhao@alumni.brown.edu>
}
\references{Zhao, Y., & Luo, X. (2014). \emph{Estimating Mediation Effects under Correlated Errors with an Application to fMRI.} arXiv preprint arXiv:1410.7217.
}
\keyword{ package }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/steps-blogdown.R
\name{step_build_blogdown}
\alias{step_build_blogdown}
\title{Step: Build a Blogdown Site}
\usage{
step_build_blogdown(...)
}
\arguments{
\item{...}{
Arguments passed on to \code{\link[blogdown:build_site]{blogdown::build_site}}
\describe{
\item{\code{local}}{Whether to build the website locally. This argument is passed to
\code{\link[blogdown]{hugo_build}()}, and \code{local = TRUE} is mainly for serving
the site locally via \code{\link[blogdown]{serve_site}()}.}
\item{\code{run_hugo}}{Whether to run \code{hugo_build()} after R Markdown files are
compiled.}
\item{\code{build_rmd}}{Whether to (re)build R Markdown files. By default, they are
not built. See \sQuote{Details} for how \code{build_rmd = TRUE} works.
Alternatively, it can take a vector of file paths, which means these files
are to be (re)built. Or you can provide a function that takes a vector of
paths of all R Markdown files under the \file{content/} directory, and
returns a vector of paths of files to be built, e.g., \code{build_rmd =
blogdown::filter_timestamp}. A few aliases are currently provided for such
functions: \code{build_rmd = 'newfile'} is equivalent to \code{build_rmd =
blogdown::filter_newfile}, \code{build_rmd = 'timestamp'} is equivalent to
\code{build_rmd = blogdown::filter_timestamp}, and \code{build_rmd =
'md5sum'} is equivalent to \code{build_rmd = blogdown::filter_md5sum}.}
}}
}
\description{
Build a Blogdown site using \code{\link[blogdown:build_site]{blogdown::build_site()}}.
}
\examples{
dsl_init()
get_stage("script") \%>\%
add_step(step_build_blogdown("."))
dsl_get()
}
|
/man/step_build_blogdown.Rd
|
no_license
|
ropensci/tic
|
R
| false
| true
| 1,698
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/steps-blogdown.R
\name{step_build_blogdown}
\alias{step_build_blogdown}
\title{Step: Build a Blogdown Site}
\usage{
step_build_blogdown(...)
}
\arguments{
\item{...}{
Arguments passed on to \code{\link[blogdown:build_site]{blogdown::build_site}}
\describe{
\item{\code{local}}{Whether to build the website locally. This argument is passed to
\code{\link[blogdown]{hugo_build}()}, and \code{local = TRUE} is mainly for serving
the site locally via \code{\link[blogdown]{serve_site}()}.}
\item{\code{run_hugo}}{Whether to run \code{hugo_build()} after R Markdown files are
compiled.}
\item{\code{build_rmd}}{Whether to (re)build R Markdown files. By default, they are
not built. See \sQuote{Details} for how \code{build_rmd = TRUE} works.
Alternatively, it can take a vector of file paths, which means these files
are to be (re)built. Or you can provide a function that takes a vector of
paths of all R Markdown files under the \file{content/} directory, and
returns a vector of paths of files to be built, e.g., \code{build_rmd =
blogdown::filter_timestamp}. A few aliases are currently provided for such
functions: \code{build_rmd = 'newfile'} is equivalent to \code{build_rmd =
blogdown::filter_newfile}, \code{build_rmd = 'timestamp'} is equivalent to
\code{build_rmd = blogdown::filter_timestamp}, and \code{build_rmd =
'md5sum'} is equivalent to \code{build_rmd = blogdown::filter_md5sum}.}
}}
}
\description{
Build a Blogdown site using \code{\link[blogdown:build_site]{blogdown::build_site()}}.
}
\examples{
dsl_init()
get_stage("script") \%>\%
add_step(step_build_blogdown("."))
dsl_get()
}
|
# Load a phylogenetic tree from a Newick file, remove its root, and write the
# unrooted tree back out. Paths are relative to the working directory.
# NOTE(review): presumably prepares input for codeml, which expects unrooted
# trees — confirm against the pipeline that consumes the output file.
library(ape)
testtree <- read.tree("262_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="262_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/262_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 133
|
r
|
# Load a phylogenetic tree from a Newick file, remove its root, and write the
# unrooted tree back out. Paths are relative to the working directory.
# NOTE(review): presumably prepares input for codeml, which expects unrooted
# trees — confirm against the pipeline that consumes the output file.
library(ape)
testtree <- read.tree("262_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="262_0_unrooted.txt")
|
#:# libraries
# Model-audit script: fits a penalized logistic regression (mlr learner
# "classif.plr") on the OpenML dataset "chscase_census5" and evaluates it
# with 5-fold cross-validation on six classification measures.
# NOTE(review): getOMLDataSet() downloads the data over the network, so the
# script needs internet access (and OpenML configuration) to run.
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the CV split (and hence the audit measures) is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "chscase_census5")
head(dataset$data)
#:# preprocessing
# No actual preprocessing is performed; the data is only inspected again.
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.plr", par.vals = list(), predict.type = "prob")
#:# hash
#:# 021fb729ee89ca332f9feacc25c36c14
# Digest of (task, learner); the hash comment above records the expected
# value so this model configuration can be identified later.
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# Aggregated fold results; despite the name, ACC holds the aggregate of all
# six requested measures, not just accuracy.
ACC <- r$aggr
ACC
#:# session info
# Record the session (package versions etc.) to a file for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_chscase_census5/classification_binaryClass/021fb729ee89ca332f9feacc25c36c14/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 692
|
r
|
#:# libraries
# Model-audit script: fits a penalized logistic regression (mlr learner
# "classif.plr") on the OpenML dataset "chscase_census5" and evaluates it
# with 5-fold cross-validation on six classification measures.
# NOTE(review): getOMLDataSet() downloads the data over the network, so the
# script needs internet access (and OpenML configuration) to run.
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the CV split (and hence the audit measures) is reproducible.
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "chscase_census5")
head(dataset$data)
#:# preprocessing
# No actual preprocessing is performed; the data is only inspected again.
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.plr", par.vals = list(), predict.type = "prob")
#:# hash
#:# 021fb729ee89ca332f9feacc25c36c14
# Digest of (task, learner); the hash comment above records the expected
# value so this model configuration can be identified later.
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# Aggregated fold results; despite the name, ACC holds the aggregate of all
# six requested measures, not just accuracy.
ACC <- r$aggr
ACC
#:# session info
# Record the session (package versions etc.) to a file for reproducibility.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.node.R
\name{as.node}
\alias{as.node}
\alias{as.node.character}
\alias{as.node.tree}
\title{Conversion to a node}
\usage{
as.node(x, ...)
\method{as.node}{character}(x, ...)
\method{as.node}{tree}(x, ...)
}
\arguments{
\item{x}{An object to be converted.}
\item{...}{Additional parameters.}
}
\value{
A node.
}
\description{
These methods convert an object to a node.
A node is defined as an \code{rtree} object with no subtrees.
}
\examples{
## Rooted tree
(tr0 = c_("Bob", "Carl", "Daniel"))
(tr1 = c_("Bill", "Caroline", "Dimitri", "Enoc"))
(tr2 = r_("Alice", s = list(tr0, tr1)))
as.node(tr2) # the root of 'tr2'
## Unrooted tree
(tr3 = r_(s = list(tr2, c_("Grand-Mother", "Father", "Son"))))
\dontrun{
as.node(tr3) # generates an error since 'tr3' is unrooted
}
}
|
/man/as.node.Rd
|
no_license
|
paulponcet/oak
|
R
| false
| true
| 856
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.node.R
\name{as.node}
\alias{as.node}
\alias{as.node.character}
\alias{as.node.tree}
\title{Conversion to a node}
\usage{
as.node(x, ...)
\method{as.node}{character}(x, ...)
\method{as.node}{tree}(x, ...)
}
\arguments{
\item{x}{An object to be converted.}
\item{...}{Additional parameters.}
}
\value{
A node.
}
\description{
These methods convert an object to a node.
A node is defined as an \code{rtree} object with no subtrees.
}
\examples{
## Rooted tree
(tr0 = c_("Bob", "Carl", "Daniel"))
(tr1 = c_("Bill", "Caroline", "Dimitri", "Enoc"))
(tr2 = r_("Alice", s = list(tr0, tr1)))
as.node(tr2) # the root of 'tr2'
## Unrooted tree
(tr3 = r_(s = list(tr2, c_("Grand-Mother", "Father", "Son"))))
\dontrun{
as.node(tr3) # generates an error since 'tr3' is unrooted
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forest_plot_1-to-many.R
\name{format_1_to_many}
\alias{format_1_to_many}
\title{Format MR results for a 1-to-many forest plot}
\usage{
format_1_to_many(
mr_res,
b = "b",
se = "se",
exponentiate = FALSE,
ao_slc = FALSE,
by = NULL,
TraitM = "outcome",
addcols = NULL,
weight = NULL
)
}
\arguments{
\item{mr_res}{Data frame of results supplied by the user.}
\item{b}{Name of the column specifying the effect of the exposure on the outcome. Default = \code{"b"}.}
\item{se}{Name of the column specifying the standard error for b. Default = \code{"se"}.}
\item{exponentiate}{Convert log odds ratios to odds ratios? Default=\code{FALSE}.}
\item{ao_slc}{Logical; retrieve trait subcategory information using \code{\link[=available_outcomes]{available_outcomes()}}. Default=\code{FALSE}.}
\item{by}{Name of the column indicating a grouping variable to stratify results on. Default=\code{NULL}.}
\item{TraitM}{The column specifying the names of the traits. Corresponds to 'many' in the 1-to-many forest plot. Default=\code{"outcome"}.}
\item{addcols}{Name of any additional columns to add to the plot. Character vector. The default is \code{NULL}.}
\item{weight}{The default is \code{NULL}.}
}
\value{
data frame.
}
\description{
This function formats user-supplied results for the \code{\link[=forest_plot_1_to_many]{forest_plot_1_to_many()}} function.
The user supplies their results in the form of a data frame.
The data frame is assumed to contain at least three columns of data:
\enumerate{
\item effect estimates, from an analysis of the effect of an exposure on an outcome;
\item standard errors for the effect estimates; and
\item a column of trait names, corresponding to the 'many' in a 1-to-many forest plot.
}
}
|
/man/format_1_to_many.Rd
|
permissive
|
MRCIEU/TwoSampleMR
|
R
| false
| true
| 1,819
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forest_plot_1-to-many.R
\name{format_1_to_many}
\alias{format_1_to_many}
\title{Format MR results for a 1-to-many forest plot}
\usage{
format_1_to_many(
mr_res,
b = "b",
se = "se",
exponentiate = FALSE,
ao_slc = FALSE,
by = NULL,
TraitM = "outcome",
addcols = NULL,
weight = NULL
)
}
\arguments{
\item{mr_res}{Data frame of results supplied by the user.}
\item{b}{Name of the column specifying the effect of the exposure on the outcome. Default = \code{"b"}.}
\item{se}{Name of the column specifying the standard error for b. Default = \code{"se"}.}
\item{exponentiate}{Convert log odds ratios to odds ratios? Default=\code{FALSE}.}
\item{ao_slc}{Logical; retrieve trait subcategory information using \code{\link[=available_outcomes]{available_outcomes()}}. Default=\code{FALSE}.}
\item{by}{Name of the column indicating a grouping variable to stratify results on. Default=\code{NULL}.}
\item{TraitM}{The column specifying the names of the traits. Corresponds to 'many' in the 1-to-many forest plot. Default=\code{"outcome"}.}
\item{addcols}{Name of any additional columns to add to the plot. Character vector. The default is \code{NULL}.}
\item{weight}{The default is \code{NULL}.}
}
\value{
data frame.
}
\description{
This function formats user-supplied results for the \code{\link[=forest_plot_1_to_many]{forest_plot_1_to_many()}} function.
The user supplies their results in the form of a data frame.
The data frame is assumed to contain at least three columns of data:
\enumerate{
\item effect estimates, from an analysis of the effect of an exposure on an outcome;
\item standard errors for the effect estimates; and
\item a column of trait names, corresponding to the 'many' in a 1-to-many forest plot.
}
}
|
## makeCacheMatrix is a function that generates a matrix
## and caches its inverse using the solve function in R.
## makeCacheMatrix builds a special "matrix" object: a list of four closures
## (set/get/setinv/getinv) that share an environment storing the matrix `x`
## and its cached inverse `m`.
##
## Args:
##   x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns: list(set = , get = , setinv = , getinv = ).
##
## Fix: the cache-setter's parameter was named `solve`, shadowing
## base::solve(); it is renamed to `inv`. All callers in this file invoke
## setinv positionally, so behavior is unchanged.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not yet computed"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache whenever the matrix is replaced
  }
  get <- function() x
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve is a function that will solve for the inverse of the matrix
## that is generated by makeCacheMatrix. cacheSolve will pull the inverse
## from the cache if the inverse has already been solved and display
## "getting cached data" otherwise, it will solve for the inverse.
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. A previously computed inverse is returned from the cache
## (announced with "getting cached data"); otherwise the inverse is computed
## with solve(), stored via x$setinv, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
ec953/ProgrammingAssignment2
|
R
| false
| false
| 909
|
r
|
## makeCacheMatrix is a function that generates a matrix
## and caches its inverse using the solve function in R.
## makeCacheMatrix builds a special "matrix" object: a list of four closures
## (set/get/setinv/getinv) that share an environment storing the matrix `x`
## and its cached inverse `m`.
##
## Args:
##   x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns: list(set = , get = , setinv = , getinv = ).
##
## Fix: the cache-setter's parameter was named `solve`, shadowing
## base::solve(); it is renamed to `inv`. All callers in this file invoke
## setinv positionally, so behavior is unchanged.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not yet computed"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache whenever the matrix is replaced
  }
  get <- function() x
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve is a function that will solve for the inverse of the matrix
## that is generated by makeCacheMatrix. cacheSolve will pull the inverse
## from the cache if the inverse has already been solved and display
## "getting cached data" otherwise, it will solve for the inverse.
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. A previously computed inverse is returned from the cache
## (announced with "getting cached data"); otherwise the inverse is computed
## with solve(), stored via x$setinv, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
##' MScPack
##' Description: Random generation of DFM with s = 2 and h = 3
##' Author: Rafael Barcellos
##' Last updated 21st June 2014
##' R 3.1.0
# Simulates a dynamic factor model: k = 2 latent factors following a VAR(3),
# observed through q = 18 series via loadings on the current and s lagged
# factor vectors, plus idiosyncratic Gaussian noise.
# defining parms ----------------------------------------------------------
TT <- 500 # time span
psi <- c(0.02, 0.19, 0.36, 0.02, 0.02,
         0.19, 0.19, 0.36, 0.36,
         0.02, 0.19, 0.36, 0.02, 0.02,
         0.19, 0.19, 0.36, 0.36) # idiosyncratic variances
q <- length(psi) # number of variables
k <- 2 # number of factors
h <- 3 # VAR order
r <- h*k # number of state parms in FFBS
time.id <- (1-h):TT
Phi1 <- matrix(c(0, -0.1, 0, -0.7), 2, 2) + diag(rep(1, k)) # VAR parms
Phi2 <- matrix(c(0.02, -0.08, 0.4, -0.2), 2, 2)
Phi3 <- matrix(c(-0.06, 0.07, -0.6, 0.35), 2, 2)
PhiBar <- cbind(Phi1, Phi2, Phi3)
# Number of loading lags s is read from stdin (the title says s = 2 is the
# intended value). NOTE(review): scan() blocks/fails when the script is run
# non-interactively — consider hardcoding s <- 2.
s <- scan(n = 1)
set.seed(4091) # generating loadings
LambdaBar <- matrix(runif(k*q*(s+1), -0.1, 0.1), q, k*(s+1))
# generating factors ------------------------------------------------------
set.seed(7623)
factors <- array(rnorm((TT+h)*k), c(TT+h, k))
phi = rep(0, r)
# Apply the VAR(3) recursively: each new factor value is PhiBar %*% (stack of
# the h most recent factor vectors) plus the pre-drawn N(0, 1) innovation.
for (i in time.id+h){
  factors[i, ] <- PhiBar %*% phi + factors[i, ]
  phi <- c(factors[i, ], phi[1:(r-k)])
}
par(mfrow = c(1, 1), mar = c(4.1, 4.2, 2.1, 1.0))
plot(factors, pch = 20, col = rgb(0, 0, 0.5, 0.5),
     xlab = "factor 1", ylab = "factor 2")
par(mfrow = c(1, 2), mar = c(2.1, 2.1, 2.1, 0.2))
apply(factors, 2, plot, x = time.id, type = "l",
      col = rgb(0,0,0.5,0.5), bty = "l")
# generating data ---------------------------------------------------------
set.seed(3708)
# Start y as pure idiosyncratic noise (per-series sd sqrt(psi)), then add the
# common component LambdaBar %*% (current and s lagged factor vectors).
y <- t(array(rnorm(q*TT, 0, sqrt(psi)), c(q, TT)))
f.star <- as.vector(t(factors[h:(h-s),])) # auxiliar vector to handle lagged factors
for (i in 1:TT){
  f.star <- c(factors[i+h, ], f.star[-((k*s+1):(k*s+k))])
  y[i, ] <- LambdaBar %*% f.star + y[i, ]
}
par(mfrow = c(3, 3), mar = c(2.1, 2.1, 0.5, 0.5))
apply(y, 2, plot, x = time.id[-(1:h)], type = "l",
      col = rgb(0, 0.5, 0, 0.5), bty = "l")
|
/tests/RandGenDfm2Var3.R
|
no_license
|
rbarcellos/MScPack
|
R
| false
| false
| 1,937
|
r
|
##' MScPack
##' Description: Random generation of DFM with s = 2 and h = 3
##' Author: Rafael Barcellos
##' Last updated 21st June 2014
##' R 3.1.0
# Simulates a dynamic factor model: k = 2 latent factors following a VAR(3),
# observed through q = 18 series via loadings on the current and s lagged
# factor vectors, plus idiosyncratic Gaussian noise.
# defining parms ----------------------------------------------------------
TT <- 500 # time span
psi <- c(0.02, 0.19, 0.36, 0.02, 0.02,
         0.19, 0.19, 0.36, 0.36,
         0.02, 0.19, 0.36, 0.02, 0.02,
         0.19, 0.19, 0.36, 0.36) # idiosyncratic variances
q <- length(psi) # number of variables
k <- 2 # number of factors
h <- 3 # VAR order
r <- h*k # number of state parms in FFBS
time.id <- (1-h):TT
Phi1 <- matrix(c(0, -0.1, 0, -0.7), 2, 2) + diag(rep(1, k)) # VAR parms
Phi2 <- matrix(c(0.02, -0.08, 0.4, -0.2), 2, 2)
Phi3 <- matrix(c(-0.06, 0.07, -0.6, 0.35), 2, 2)
PhiBar <- cbind(Phi1, Phi2, Phi3)
# Number of loading lags s is read from stdin (the title says s = 2 is the
# intended value). NOTE(review): scan() blocks/fails when the script is run
# non-interactively — consider hardcoding s <- 2.
s <- scan(n = 1)
set.seed(4091) # generating loadings
LambdaBar <- matrix(runif(k*q*(s+1), -0.1, 0.1), q, k*(s+1))
# generating factors ------------------------------------------------------
set.seed(7623)
factors <- array(rnorm((TT+h)*k), c(TT+h, k))
phi = rep(0, r)
# Apply the VAR(3) recursively: each new factor value is PhiBar %*% (stack of
# the h most recent factor vectors) plus the pre-drawn N(0, 1) innovation.
for (i in time.id+h){
  factors[i, ] <- PhiBar %*% phi + factors[i, ]
  phi <- c(factors[i, ], phi[1:(r-k)])
}
par(mfrow = c(1, 1), mar = c(4.1, 4.2, 2.1, 1.0))
plot(factors, pch = 20, col = rgb(0, 0, 0.5, 0.5),
     xlab = "factor 1", ylab = "factor 2")
par(mfrow = c(1, 2), mar = c(2.1, 2.1, 2.1, 0.2))
apply(factors, 2, plot, x = time.id, type = "l",
      col = rgb(0,0,0.5,0.5), bty = "l")
# generating data ---------------------------------------------------------
set.seed(3708)
# Start y as pure idiosyncratic noise (per-series sd sqrt(psi)), then add the
# common component LambdaBar %*% (current and s lagged factor vectors).
y <- t(array(rnorm(q*TT, 0, sqrt(psi)), c(q, TT)))
f.star <- as.vector(t(factors[h:(h-s),])) # auxiliar vector to handle lagged factors
for (i in 1:TT){
  f.star <- c(factors[i+h, ], f.star[-((k*s+1):(k*s+k))])
  y[i, ] <- LambdaBar %*% f.star + y[i, ]
}
par(mfrow = c(3, 3), mar = c(2.1, 2.1, 0.5, 0.5))
apply(y, 2, plot, x = time.id[-(1:h)], type = "l",
      col = rgb(0, 0.5, 0, 0.5), bty = "l")
|
# Exploratory cleaning of the Melbourne housing dataset: drop unused columns,
# coerce categorical/date columns, handle NAs, and run two screening
# regressions to judge whether YearBuilt and BuildingArea are worth keeping.
library(tidyverse)
melbourne <- read.csv("melb_data_raw.csv", header=TRUE)
glimpse(melbourne)
# We don't need method, seller, or address.
melbourne <- subset(melbourne, select = -c(Address, SellerG, Method))
# telling R which predictors are categorical
melbourne$Type <- factor(melbourne$Type)
melbourne$Regionname <- factor(melbourne$Regionname)
melbourne$Postcode <- factor(melbourne$Postcode)
melbourne$CouncilArea <- factor(melbourne$CouncilArea)
melbourne$Suburb <- factor(melbourne$Suburb)
# telling R which predictor is a date column
# NOTE(review): as.Date() without a `format` argument will misparse or NA-out
# non-ISO date strings; the comment below acknowledges the formats are mixed.
melbourne$Date <- as.Date(melbourne$Date) # - the dates are in different formats so we'll have to standardize the formatting
# Checking for null columns
colSums(is.na(melbourne))
# We can drop the 62 instances where the cars predictor is NA
melbourne <- melbourne[!is.na(melbourne$Car),]
# The following multi-line string is a free-form note (evaluated and
# discarded); it explains the YearBuilt/BuildingArea strategy below.
"Only building area and year built have NA values.
We can two extra tables with year built, or building area
dropped. Then filter the two tables to pull all rows where
building area and year built contain values. We can then use the
non-null datasets to test each predictors significance separately.
I suspect buildingArea will not be important because it's likely
correlated to number of rooms, but we may have to do something about year built."
yearBuiltAnalysis = na.omit(subset(melbourne, select = -c(BuildingArea)))
buildingAreaAnalysis = na.omit(subset(melbourne, select = -c(YearBuilt)))
# Screening regression: is YearBuilt significant given the other predictors?
yearBuiltResult <- lm(Price~Rooms+Type+Distance+Bedroom2+Bathroom+Car+Landsize+YearBuilt+CouncilArea+Regionname+Propertycount, data=yearBuiltAnalysis)
summary(yearBuiltResult)
# in the presence of the other predictors, year built is significant in estimating the price of a house, so we should keep it, < 2e-16 ***
# Screening regression: is BuildingArea significant given the other predictors?
buildingAreaResult <- lm(Price~Rooms+Type+Distance+Bedroom2+Bathroom+Car+Landsize+BuildingArea+CouncilArea+Regionname+Propertycount, data=buildingAreaAnalysis)
summary(buildingAreaResult)
# building area is less significant, but still significant, 0.012366 *
### NEXT STEPS: refine dataset via multicollinearity tests, outlier detection, possible transforms, etc.
|
/melbourne-analysis.R
|
no_license
|
mds9b/melbourne-housing
|
R
| false
| false
| 2,125
|
r
|
# Exploratory cleaning of the Melbourne housing dataset: drop unused columns,
# coerce categorical/date columns, handle NAs, and run two screening
# regressions to judge whether YearBuilt and BuildingArea are worth keeping.
library(tidyverse)
melbourne <- read.csv("melb_data_raw.csv", header=TRUE)
glimpse(melbourne)
# We don't need method, seller, or address.
melbourne <- subset(melbourne, select = -c(Address, SellerG, Method))
# telling R which predictors are categorical
melbourne$Type <- factor(melbourne$Type)
melbourne$Regionname <- factor(melbourne$Regionname)
melbourne$Postcode <- factor(melbourne$Postcode)
melbourne$CouncilArea <- factor(melbourne$CouncilArea)
melbourne$Suburb <- factor(melbourne$Suburb)
# telling R which predictor is a date column
# NOTE(review): as.Date() without a `format` argument will misparse or NA-out
# non-ISO date strings; the comment below acknowledges the formats are mixed.
melbourne$Date <- as.Date(melbourne$Date) # - the dates are in different formats so we'll have to standardize the formatting
# Checking for null columns
colSums(is.na(melbourne))
# We can drop the 62 instances where the cars predictor is NA
melbourne <- melbourne[!is.na(melbourne$Car),]
# The following multi-line string is a free-form note (evaluated and
# discarded); it explains the YearBuilt/BuildingArea strategy below.
"Only building area and year built have NA values.
We can two extra tables with year built, or building area
dropped. Then filter the two tables to pull all rows where
building area and year built contain values. We can then use the
non-null datasets to test each predictors significance separately.
I suspect buildingArea will not be important because it's likely
correlated to number of rooms, but we may have to do something about year built."
yearBuiltAnalysis = na.omit(subset(melbourne, select = -c(BuildingArea)))
buildingAreaAnalysis = na.omit(subset(melbourne, select = -c(YearBuilt)))
# Screening regression: is YearBuilt significant given the other predictors?
yearBuiltResult <- lm(Price~Rooms+Type+Distance+Bedroom2+Bathroom+Car+Landsize+YearBuilt+CouncilArea+Regionname+Propertycount, data=yearBuiltAnalysis)
summary(yearBuiltResult)
# in the presence of the other predictors, year built is significant in estimating the price of a house, so we should keep it, < 2e-16 ***
# Screening regression: is BuildingArea significant given the other predictors?
buildingAreaResult <- lm(Price~Rooms+Type+Distance+Bedroom2+Bathroom+Car+Landsize+BuildingArea+CouncilArea+Regionname+Propertycount, data=buildingAreaAnalysis)
summary(buildingAreaResult)
# building area is less significant, but still significant, 0.012366 *
### NEXT STEPS: refine dataset via multicollinearity tests, outlier detection, possible transforms, etc.
|
library(EMCluster)
# Fit a two-component Gaussian mixture via the EM algorithm to a freshly
# simulated sample: n = 200 draws that are N(0, 1) with probability 0.4 and
# N(0, 25) otherwise.
#
# Args:
#   init_p:  initial mixing weight of component 1 (in (0, 1))
#   init_u1: initial mean of component 1
#   init_u2: initial mean of component 2
#   init_s1: initial standard deviation of component 1
#   init_s2: initial standard deviation of component 2
#   TOL:     convergence tolerance on the Euclidean distance between
#            successive parameter vectors
#
# Returns: c(p, u1, s1, u2, s2) at convergence.
#
# Bug fixed: the original body ignored every argument and re-hardcoded both
# the starting values and TOL; the arguments are now actually used.
b <- function(init_p, init_u1, init_u2, init_s1, init_s2, TOL) {
  n <- 200
  x <- rnorm(n, 0, 1)
  y <- rnorm(n, 0, 5)
  u <- runif(n, 0, 1)
  # Each observation comes from N(0, 1) with probability 0.4, else N(0, 25).
  mix <- ifelse(u < 0.4, x, y)

  # One EM iteration on the parameter vector par = c(p, u1, s1, u2, s2):
  # E-step responsibilities m, then M-step closed-form updates. Returns NULL
  # when all responsibilities collapse to zero (degenerate fit), mirroring
  # the original loop's break condition.
  em_step <- function(par) {
    numer <- par[1] * dnorm(mix, par[2], par[3])
    denom <- numer + (1 - par[1]) * dnorm(mix, par[4], par[5])
    m <- numer / denom
    if (sum(m) == 0) {
      return(NULL)
    }
    p_new <- sum(m) / n
    u1_new <- sum(m * mix) / sum(m)
    u2_new <- sum((1 - m) * mix) / sum(1 - m)
    s1_new <- sqrt(sum(m * (mix - u1_new)^2) / sum(m))
    s2_new <- sqrt(sum((1 - m) * (mix - u2_new)^2) / sum(1 - m))
    c(p_new, u1_new, s1_new, u2_new, s2_new)
  }

  oldParam <- c(init_p, init_u1, init_s1, init_u2, init_s2)
  newParam <- em_step(oldParam)
  if (is.null(newParam)) {
    return(oldParam)
  }
  # Iterate until the parameter vector moves less than TOL between steps.
  while (sqrt(sum((newParam - oldParam)^2)) >= TOL) {
    oldParam <- newParam
    stepped <- em_step(newParam)
    if (is.null(stepped)) {
      break
    }
    newParam <- stepped
  }
  newParam
}
# Fit the mixture with starting values (p, u1, u2, s1, s2) and tolerance
# 1e-6, then print the estimated parameter vector.
param <- b(0.1, 0.3, 2.1, 0.1, 0.3, 10^(-6))
print(param)
|
/r.bandaMA471lab3/b.r
|
no_license
|
Santhoshrbanda/safd
|
R
| false
| false
| 1,672
|
r
|
library(EMCluster)
# Fit a two-component Gaussian mixture via the EM algorithm to a freshly
# simulated sample: n = 200 draws that are N(0, 1) with probability 0.4 and
# N(0, 25) otherwise.
#
# Args:
#   init_p:  initial mixing weight of component 1 (in (0, 1))
#   init_u1: initial mean of component 1
#   init_u2: initial mean of component 2
#   init_s1: initial standard deviation of component 1
#   init_s2: initial standard deviation of component 2
#   TOL:     convergence tolerance on the Euclidean distance between
#            successive parameter vectors
#
# Returns: c(p, u1, s1, u2, s2) at convergence.
#
# Bug fixed: the original body ignored every argument and re-hardcoded both
# the starting values and TOL; the arguments are now actually used.
b <- function(init_p, init_u1, init_u2, init_s1, init_s2, TOL) {
  n <- 200
  x <- rnorm(n, 0, 1)
  y <- rnorm(n, 0, 5)
  u <- runif(n, 0, 1)
  # Each observation comes from N(0, 1) with probability 0.4, else N(0, 25).
  mix <- ifelse(u < 0.4, x, y)

  # One EM iteration on the parameter vector par = c(p, u1, s1, u2, s2):
  # E-step responsibilities m, then M-step closed-form updates. Returns NULL
  # when all responsibilities collapse to zero (degenerate fit), mirroring
  # the original loop's break condition.
  em_step <- function(par) {
    numer <- par[1] * dnorm(mix, par[2], par[3])
    denom <- numer + (1 - par[1]) * dnorm(mix, par[4], par[5])
    m <- numer / denom
    if (sum(m) == 0) {
      return(NULL)
    }
    p_new <- sum(m) / n
    u1_new <- sum(m * mix) / sum(m)
    u2_new <- sum((1 - m) * mix) / sum(1 - m)
    s1_new <- sqrt(sum(m * (mix - u1_new)^2) / sum(m))
    s2_new <- sqrt(sum((1 - m) * (mix - u2_new)^2) / sum(1 - m))
    c(p_new, u1_new, s1_new, u2_new, s2_new)
  }

  oldParam <- c(init_p, init_u1, init_s1, init_u2, init_s2)
  newParam <- em_step(oldParam)
  if (is.null(newParam)) {
    return(oldParam)
  }
  # Iterate until the parameter vector moves less than TOL between steps.
  while (sqrt(sum((newParam - oldParam)^2)) >= TOL) {
    oldParam <- newParam
    stepped <- em_step(newParam)
    if (is.null(stepped)) {
      break
    }
    newParam <- stepped
  }
  newParam
}
# Fit the mixture with starting values (p, u1, u2, s1, s2) and tolerance
# 1e-6, then print the estimated parameter vector.
param <- b(0.1, 0.3, 2.1, 0.1, 0.3, 10^(-6))
print(param)
|
#ui.R for pot randomisation based on randomised block design
#Load shiny package
library(shiny)
# UI definition: an "About" tab plus one tab per app (pot / plate).
# Fixes: four "seperated" typos in user-facing labels, the plate tab's
# download button mislabeled "Download the potdesign" (copy-paste from the
# pot tab).
# NOTE(review): the input ID "nRep" is used in BOTH tabPanels below; Shiny
# requires input IDs to be unique per page, so one of them cannot work as
# intended. Renaming it here would break the matching server code, so it is
# only flagged.
shinyUI(navbarPage("RandomisationApps",
                   #Tab with explanation
                   tabPanel("About",
                            "These apps are developed to ease the randomized design of an experiment. Currently it contains two apps:
                            A pot randomisation app and a plate randomisation app. The pot randomisation is meant for randomizing different plants over trays.
                            For each treatment the same randomisation is used (which is necessary for treatments only possible to apply to a whole tray, such as salt stress.
                            The app directly shows the design in the table and you can download both the design and the setup (with a row for each
                            plant). The plate randomisation app is designed for experiments with two different genotypes on 1 plate."),
                   # Numeric input of variables used in pot design
                   tabPanel("Pot randomisation",
                            sidebarLayout(
                              sidebarPanel(
                                textInput("Treatments", "List your treatments separated with comma:", "0 mM, 75 mM, 125 mM"),
                                textInput("Genotypes", "List your genotypes separated with comma:", "Col-0, 5A, 5B, 5D"),
                                numericInput("nRep", "Number of replicates per genotype:", 20),
                                textInput("Dimension", "Dimensions of tray (rows,columns)", "8,5"),
                                textInput("ExpName", "Name of your experiment:", "Exp1"),
                                submitButton(text = "Apply changes"),
                                downloadButton("downloadDesign", "Download the potdesign"),
                                downloadButton("downloadSetup", "Download the setup")
                              ),
                              # Show the pot design
                              mainPanel(
                                tableOutput("potdesign")
                              )
                            )),
                   tabPanel("Plate randomisation",
                            sidebarLayout(
                              sidebarPanel(
                                textInput("TM", "List your treatments separated with comma:", "0 mM, 75 mM, 125 mM"),
                                textInput("GT", "List your genotypes separated with comma:", "Col-0, 5A, 5B, 5D"),
                                numericInput("SpP", "Number of seedlings per plate", 4),
                                numericInput("nRep", "Number of replicates per genotype:", 20),
                                textInput("Exp", "Name of your experiment:", "Exp1"),
                                submitButton(text = "Apply changes"),
                                downloadButton("downloadD", "Download the plate design"),
                                downloadButton("downloadS", "Download the setup")
                              ),
                              # Show the plate design
                              mainPanel(tableOutput("platedesign"))
                            )
                   )
))
|
/ui.R
|
no_license
|
IkoKoevoets/RandomizationApp
|
R
| false
| false
| 2,517
|
r
|
# ui.R for pot randomisation based on a randomised block design.
#
# Two tools are exposed behind a navbar:
#   * "Pot randomisation"   - randomise plants over trays; the same layout is
#                             reused for every whole-tray treatment.
#   * "Plate randomisation" - randomise two genotypes over plates.
# Both tabs offer downloads of the design and of the per-plant setup.

# Load shiny package
library(shiny)

# Define UI for dataset viewer application
shinyUI(navbarPage("RandomisationApps",
  # Tab with explanation of both tools
  tabPanel("About",
    "These apps are developed to ease the randomized design of an experiment. Currently it contains two apps:
A pot randomisation app and a plate randomisation app. The pot randomisation is meant for randomizing different plants over trays.
For each treatment the same randomisation is used (which is necessary for treatments only possible to apply to a whole tray, such as salt stress).
The app directly shows the design in the table and you can download both the design and the setup (with a row for each
plant). The plate randomisation app is designed for experiments with two different genotypes on 1 plate."),
  # Numeric input of variables used in pot design
  tabPanel("Pot randomisation",
    sidebarLayout(
      sidebarPanel(
        textInput("Treatments", "List your treatments separated with comma:", "0 mM, 75 mM, 125 mM"),
        textInput("Genotypes", "List your genotypes separated with comma:", "Col-0, 5A, 5B, 5D"),
        numericInput("nRep", "Number of replicates per genotype:", 20),
        textInput("Dimension", "Dimensions of tray (rows,columns)", "8,5"),
        textInput("ExpName", "Name of your experiment:", "Exp1"),
        submitButton(text = "Apply changes"),
        downloadButton("downloadDesign", "Download the potdesign"),
        downloadButton("downloadSetup", "Download the setup")
      ),
      # Show the pot design
      mainPanel(
        tableOutput("potdesign")
      )
    )),
  # Inputs used by the plate design
  tabPanel("Plate randomisation",
    sidebarLayout(
      sidebarPanel(
        textInput("TM", "List your treatments separated with comma:", "0 mM, 75 mM, 125 mM"),
        textInput("GT", "List your genotypes separated with comma:", "Col-0, 5A, 5B, 5D"),
        numericInput("SpP", "Number of seedlings per plate", 4),
        # NOTE(review): "nRep" duplicates the inputId used in the pot tab;
        # Shiny input IDs must be unique per page.  Not renamed here because
        # server.R (not visible) references input$nRep -- confirm and rename
        # in both files together.
        numericInput("nRep", "Number of replicates per genotype:", 20),
        textInput("Exp", "Name of your experiment:", "Exp1"),
        submitButton(text = "Apply changes"),
        # NOTE(review): label says "potdesign" but this is the plate tab --
        # confirm whether "Download the plate design" was intended.
        downloadButton("downloadD", "Download the potdesign"),
        downloadButton("downloadS", "Download the setup")
      ),
      # Show the plate design
      mainPanel(tableOutput("platedesign"))
    )
  )
))
|
RDA2
A
2
196866
131840
1026
1
262153
5
value
787
134
787
41
16
1
262153
11
effect_test
10
1
0
10
1
1
14
1
3
14
1
0.2
14
1
0.5
14
1
1
10
1
0
10
1
0
10
1
0
16
1
262153
10
simstats0c
10
1
NA
14
1
0
16
1
262153
0
10
1
0
14
1
1
10
1
1
10
1
0
14
1
0.2
14
1
1
14
1
0
14
1
12345
14
1
0.05
14
1
0.05
14
1
0.2
14
1
0.3
14
1
0.3
14
1
0.05
14
1
0.05
14
1
40
14
1
2
14
1
20
14
1
5
14
1
5
14
1
0
10
1
1
10
1
1
10
1
0
10
1
0
10
1
0
10
1
0
1026
1
262153
5
names
16
41
262153
8
projname
262153
11
useStdInits
262153
9
checktime
262153
2
n3
262153
6
firstg
262153
7
reduceg
262153
6
maxrat
262153
7
maxlike
262153
7
simOnly
262153
7
localML
262153
8
FRANname
262153
12
cconditional
262153
9
condvarno
262153
8
condname
262153
14
FinDiff.method
262153
4
nsub
262153
5
dolby
262153
5
diagg
262153
11
diagonalize
262153
9
modelType
262153
9
MaxDegree
262153
10
randomSeed
262153
5
pridg
262153
5
prcdg
262153
5
prper
262153
5
pripr
262153
5
prdpr
262153
6
prirms
262153
6
prdrms
262153
24
maximumPermutationLength
262153
24
minimumPermutationLength
262153
24
initialPermutationLength
262153
4
mult
262153
10
truncation
262153
15
doubleAveraging
262153
25
standardizeWithTruncation
262153
14
standardizeVar
262153
13
noAggregation
262153
7
browse1
262153
7
browse2
262153
7
browse3
1026
1
262153
5
class
16
1
262153
14
sienaAlgorithm
254
14
1
1
14
1
1
16
1
262153
0
16
1
262153
7
1.1-289
10
1
0
14
1
275
10
1
1
10
1
1
10
1
0
10
1
0
10
1
0
10
1
0
14
1
0
14
1
4
14
1
0.1
10
1
0
10
1
0
16
1
262153
3
eff
14
2
0.3771715178878389
-0.2111470834071039
10
2
0
0
10
2
0
0
13
1
2
10
2
0
0
10
2
0
0
10
1
1
14
1
1
16
1
262153
3
atr
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
10
1
0
13
2
1
2
10
1
0
14
1
1
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
16
2
9
-1
9
-1
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1
262153
9
row.names
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
16
2
9
-1
9
-1
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1279
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
525
4
1
1
1
2
1026
1
262153
3
dim
13
2
2
2
254
14
2
11.66666666666667
121.0711111111112
526
4
-0.6666666666666643
70.50888888888895
12.33333333333334
50.56222222222223
1026
1535
13
2
2
2
254
10
1
1
531
13
22
254
254
531
1
787
33
16
0
16
0
16
0
16
0
16
0
16
0
16
0
10
0
10
0
10
0
10
0
10
0
16
0
14
0
13
0
16
0
13
0
16
0
14
0
13
0
13
0
13
0
16
0
10
0
19
0
19
0
16
0
16
0
13
0
13
0
16
0
10
0
10
0
1026
767
16
33
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
262153
9
effectPtr
1026
1279
13
0
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
1026
767
16
1
262153
3
atr
254
531
1
787
33
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
13
2
NA
NA
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
13
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
10
2
NA
NA
1026
767
16
33
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
262153
9
effectPtr
1026
1279
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
1026
767
16
1
262153
3
atr
254
531
1
10
2
0
0
1026
767
16
1
262153
3
atr
254
531
1
10
2
1
1
1026
767
16
1
262153
3
atr
254
22
254
254
10
1
1
14
1
3
16
1
262153
3
atr
16
1
262153
5
Data1
14
1
1
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
19
0
1026
767
16
13
262153
5
pData
262153
18
interactionEffects
262153
12
basicEffects
262153
19
interactionEffectsl
262153
13
basicEffectsl
262153
6
pModel
262153
11
simpleRates
262153
12
observations
262153
8
depNames
262153
10
groupNames
262153
6
nGroup
262153
5
types
262153
9
myeffects
1026
1
262153
8
netnames
16
1
262153
3
atr
1026
1
262153
9
symmetric
522
1
NA
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
allUpOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
11
allDownOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
allHigher
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
11
allDisjoint
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
13
allAtLeastOne
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
9
anyUpOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
11
anyDownOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
anyHigher
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
11
anyDisjoint
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
13
anyAtLeastOne
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
5
types
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
1026
1
262153
12
observations
14
1
2
1026
1
262153
17
compositionChange
10
1
0
1026
1
262153
10
exooptions
16
0
1026
1
262153
12
groupPeriods
13
1
3
1026
1
262153
9
periodNos
14
2
1
2
1026
1
262153
23
numberNonMissingNetwork
14
2
0
0
1026
1
262153
20
numberMissingNetwork
14
2
0
0
1026
1
262153
24
numberNonMissingBehavior
14
2
50
50
1026
1
262153
21
numberMissingBehavior
14
2
0
0
1026
1
262153
6
change
525
2
27
33
1026
1535
13
2
2
1
1026
1
262153
8
dimnames
19
2
254
16
1
262153
5
Data1
254
1026
1
262153
11
condEffects
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
19
rate\040atr\040(period\0401)
262153
19
rate\040atr\040(period\0402)
16
2
262153
46
Amount\040of\040behavioral\040change\040in\040period\0401\040on\040atr
262153
46
Amount\040of\040behavioral\040change\040in\040period\0402\040on\040atr
16
2
262153
4
Rate
262153
4
Rate
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
rate
262153
4
rate
10
2
1
1
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.7057142857142857
0.8493877551020408
13
2
0
0
16
2
262153
4
rate
262153
4
rate
16
2
262153
1
1
262153
1
2
16
2
9
-1
9
-1
14
2
0
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
0
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
1
2
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1279
13
2
1
2
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
254
14
2
1
2
10
1
0
10
1
0
14
1
3
10
1
0
10
1
0
10
1
0
10
1
0
13
1
1
10
2
0
0
10
1
0
526
4
0.03954241163838698
0
0
1
1026
1535
13
2
2
2
254
14
2
0.1
0.1
14
2
0.1
0.1
14
2
0.3223690206761288
0
10
1
0
14
1
50
10
1
0
10
1
0
14
1
3
14
1
9
14
1
22
14
1
222
14
2
0
222
14
1
150
10
1
0
10
1
0
14
1
50
526
6
-4
-8
0
-1.093333333333305
-0.1866666666666674
-4
1026
1535
13
2
3
2
254
526
12
-4.666666666666664
-2.666666666666664
3.333333333333336
12.33333333333334
6.333333333333336
8.333333333333336
61.4155555555556
54.96222222222224
59.60222222222226
58.56222222222227
65.92222222222226
57.46888888888893
1026
1535
13
3
3
2
2
254
526
12
-6.971352874752451
-1.264243869931946
6.462659717255978
1.501611282197868
-4.997809017791867
-3.678697552001522
-4.16711618926293
4.930339678279372
-2.525858379296903
7.95445617335811
5.906896061477797
4.877132117474978
1026
1535
13
3
3
2
2
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
14
2
0
0
526
6
1.1809416600467
1.265074390734391
0.9045470974824197
1.724516019250948
1.205150063422383
1.863470002952116
1026
1535
13
2
3
2
254
19
1
254
14
1
500
10
1
1
14
1
3
526
1
2.359
1026
767
16
1
262153
7
elapsed
254
14
2
0.7208852687511493
0.3468060519809525
14
2
0.902852354526557
0.7906832413165021
526
1
0.0005102040816326513
1026
767
16
1
262153
7
elapsed
254
14
2
-4
-1.759999999999991
10
2
0
0
526
4
25.28930225968363
0
0
1
1026
1535
13
2
2
2
1026
8191
19
2
16
2
262153
6
linear
262153
4
quad
254
254
10
1
0
526
4
0.0149712574944926
0.0009376944724058593
0.000567847429618878
0.0062682310894739
1026
1535
13
2
2
2
254
14
1
50
526
100
4
-8
4
0
-8
-8
-12
4
4
-2
12
-12
2
0
-8
-2
6
6
-20
-8
-8
0
10
0
6
4
-28
2
-22
-6
4
-10
-12
6
-14
4
-2
-12
-4
0
-6
-4
-12
10
12
4
-8
2
-6
0
17.09333333333333
49.81333333333328
29.09333333333333
32
49.81333333333328
41.81333333333333
40.72
27.09333333333331
27.09333333333331
18.45333333333332
35.27999999999997
50.71999999999997
27.54666666666668
28
45.8133333333333
44.45333333333332
36.63999999999999
48.63999999999999
30.5333333333333
43.81333333333333
25.81333333333336
32
29.73333333333335
0
44.63999999999996
21.09333333333336
30.34666666666666
11.54666666666668
34.98666666666668
25.36000000000001
27.09333333333333
28.26666666666671
12.72000000000003
44.63999999999996
25.17333333333332
21.09333333333333
26.45333333333332
60.71999999999997
58.90666666666667
26
29.36000000000001
34.90666666666667
20.72000000000003
55.73333333333329
27.28000000000003
33.09333333333331
59.81333333333333
27.54666666666665
43.35999999999996
28
1026
1535
13
2
50
2
254
526
200
3.332443480861405
5.116279367599536
1.315321875073689
-0.6700024628225519
-1.794954440181859
4.317767818757651
-6.625871051772602
-0.5738984395520698
2.266801946774507
6.696180859495592
9.926621689481211
-3.573898439552068
-5.625367681653906
1.404088067292294
-2.308711027872322
0.04279663234206721
0.2167719081192474
5.410922528225484
-5.245012067350691
3.737866326861591
0.3226597061255687
1.012837513277174
-3.237066441441431
-0.1513539877641682
-1.836639907547857
2.679059253707879
-16.93029798713437
10.94618923895288
-4.087655027242537
-1.677340293874429
8.541269763071398
-1.414598825878646
-7.478297786400276
-1.259687729454448
-3.25234989840257
6.374128948227403
2.332443480861407
-2.057695908177641
-5.837143277666545
2.796673400015307
-5.576847753354718
8.325105649809529
-1.031293897772783
6.268744520339773
8.329997537177444
2.266298576655814
-1.893001037017611
1.936908834335745
-0.8763828013485845
-11.73614736702816
3.138397285493861
-10.42632517417976
-2.291484997345917
4.20504555981814
-11.82480913450808
-7.682232181242355
-7.903288181872136
3.359956656242338
2.150627003913654
-2.173870851038491
3.300646212969938
-7.040973247769928
8.31542629981238
-3.257241785770486
1.45260799559148
-1.824809134508063
4.49673940664144
6.99816185117342
-8.793011866616586
-5.584185584406596
-11.65033048861221
1.470232971497888
11.53148598833555
-3.654719005861415
14.49918535032538
1.988378076437582
-9.118948925015326
-7.922855731343811
-13.22044820577241
-1.267025560506328
-2.121898238817968
-2.782724721762061
-13.5742973849321
9.504077237693314
-12.08999654618781
1.086928043392025
-1.638100770192394
-14.07287494040009
4.143289172861778
-1.797400383865815
3.408476584541524
-8.767944634919623
-7.342954239447766
3.089373987075982
8.303092156653893
1.696285284234278
-7.136573900921725
3.582955230437395
-4.011621923562418
11.91489534118011
7.581708268030594
17.5770000577776
-2.260366266061788
-0.5350762534571807
4.83067103504258
-4.425675353491206
20.65910649572268
-17.48505129648316
-20.56557243965013
2.070309047338129
16.42472465843747
4.489088494024074
4.460429965502115
-14.98939997210784
-1.403164767069929
-8.062793993018451
8.344895363120029
7.314876807533301
10.31899292757717
11.78464697741598
2.898432011222531
14.86920358952684
-11.29898627071078
-21.38090458748985
5.739468605376285
-6.728139091931654
-5.335149310137348
-2.790060256433461
-4.135926955194064
6.618391959682189
-5.578357288192887
4.800949515819758
-5.639074518369808
9.689480545881191
-16.10848552221032
-5.838509284012535
-4.558311757739574
-0.5078043577565672
21.09755408697321
7.174317572122225
4.452753648627834
2.024172191074269
-1.975372375354904
22.65061986643483
-0.7446630119851334
7.659395117163717
-6.622683763323916
1.239151478156725
12.60232972162075
-4.107351802925397
-21.72277265296531
-15.68838129146199
5.8096643258221
-6.108797993644288
9.985309126454448
6.943320356413093
-7.220581671987771
-1.361293729590398
0.2729905534418415
-12.74178826659419
-1.00317682983412
8.593733966471332
-21.33234624227181
8.349520747763297
13.16865884024448
14.53337329891869
13.84343263804799
16.60008197339105
-3.321581371693467
2.024699275703481
-5.945698073397811
-17.00900017286487
1.191083813655198
-20.90534156763717
2.176665967510496
-3.630867690321064
-23.25067313066832
3.851738919537694
7.90858679150859
-13.75141134032033
-9.412217059623169
-14.21676133627234
-3.625291994716717
8.290219884999242
10.43912012486914
-3.275873859412268
2.149869750229251
20.59857500437631
9.420954171391681
-14.42124489170302
0.6026853695651728
-9.995732915788242
-13.3163945738848
0.4795131562779358
-9.253294409333453
-7.264150377704164
19.23637725818887
0.4735714955666461
3.110494664219529
-4.418710788094543
1026
1535
13
3
50
2
2
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
14
2
8.586128347514961
12.79900511689178
14
2
7.738809716183902
12.41920941284984
526
4
0.01346844935881434
0.001030534531904058
0.001030534531904058
0.006061222714152954
1026
1535
13
2
2
2
254
14
1
0.5
14
1
22
14
1
222
14
1
1
10
222
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
14
2
0
0
14
1
2.179
14
2
84.10924848898807
-47.08579959978416
14
1
223
14
2
2067.737973531256
9995.741135975784
14
2
-410.2754784998486
1422.041052791091
14
1
0.1422646938777763
14
1
-0.1984175382721175
14
2
-0.1984175382721175
0.1422646938777763
14
1
3
14
1
3
526
1
0.001000000000000038
1026
767
16
1
262153
7
elapsed
254
14
2
9.816786834385422
117.3486686674613
10
2
0
0
526
4
16
-7.626666666666665
-7.626666666666665
3.968711111111129
1026
1535
13
2
2
2
254
14
2
-0.5
-0.3118616735352036
526
4
67.17581230378116
-12.56143618859383
-7.606933240966379
160.4450073786322
1026
1535
13
2
2
2
254
526
4
25.28930225968363
0
0
1
1026
1535
13
2
2
2
254
526
4
16
-7.626666666666665
-7.626666666666665
3.968711111111129
1026
1535
13
2
2
2
254
526
2
-0.1581696465535479
-1.759999999999991
1026
1535
13
2
2
1
254
14
2
-1
-0.8834621020683713
526
1
6.429100507328444
1026
1535
13
2
1
1
254
14
2
-1
-0.8834621020683713
14
1
1
526
4
0.7441333333332947
1.429999999999919
1.429999999999919
2.999999999999831
1026
1535
13
2
2
2
254
526
4
0.02501763709087428
-0.301576792762098
-0.301576792762098
3.968711111111129
1026
1535
13
2
2
2
254
16
1
262153
0
14
2
1.116854382754504
1.597712028541816
14
2
0.1886143330395212
0.3469952805737122
14
2
0.1581696465535479
1.992162420866112
16
1
262153
2
OK
1026
767
16
134
262153
1
x
262153
3
int
262153
4
int2
262153
8
revision
262153
7
version
262153
14
FinDiff.method
262153
1
n
262153
2
OK
262153
5
error
262153
9
restarted
262153
17
DerivativeProblem
262153
17
ForceFinDifPhase1
262153
15
Phase3Interrupt
262153
17
repeatsForEpsilon
262153
17
maxrepeatsubphase
262153
4
gain
262153
8
haveDfra
262153
7
maxlike
262153
11
effectsName
262153
5
theta
262153
5
fixed
262153
4
test
262153
2
pp
262153
4
posj
262153
17
BasicRateFunction
262153
12
cconditional
262153
9
condvarno
262153
8
condname
262153
8
condtype
262153
9
symmetric
262153
7
condvar
262153
23
FinDiffBecauseSymmetric
262153
9
modelType
262153
7
effects
262153
16
requestedEffects
262153
8
callGrid
262153
7
targets
262153
8
targets2
262153
11
simpleRates
262153
1
f
262153
9
periodNos
262153
10
returnDeps
262153
16
returnDepsStored
262153
12
observations
262153
12
returnChains
262153
7
byGroup
262153
6
byWave
262153
15
returnDataFrame
262153
19
nDependentVariables
262153
8
newFixed
262153
11
AllNowFixed
262153
4
dinv
262153
5
scale
262153
7
epsilon
262153
6
theta0
262153
7
anyposj
262153
2
n1
262153
12
AllUserFixed
262153
14
epsilonProblem
262153
5
Phase
262153
6
n2min0
262153
9
n2minimum
262153
9
n2maximum
262153
9
n2partsum
262153
5
n1pos
262153
7
restart
262153
9
SomeFixed
262153
9
phase1Its
262153
2
sf
262153
3
sf2
262153
3
ssc
262153
7
accepts
262153
7
rejects
262153
6
aborts
262153
4
npos
262153
4
ntim
262153
4
sims
262153
9
writefreq
262153
5
Deriv
262153
3
nit
262153
5
ctime
262153
8
regrCoef
262153
7
regrCor
262153
10
timePhase1
262153
5
mnfra
262153
13
jacobianwarn1
262153
4
dfra
262153
11
cdSomeFixed
262153
5
dinvv
262153
9
nitPhase1
262153
10
phase1devs
262153
12
phase1scores
262153
13
phase1accepts
262153
13
phase1rejects
262153
12
phase1aborts
262153
2
sd
262153
15
standardization
262153
9
sf.invcov
262153
7
reduceg
262153
5
n2min
262153
5
n2max
262153
14
repeatsubphase
262153
9
truncated
262153
11
positivized
262153
5
time1
262153
4
thav
262153
5
thavn
262153
5
prod0
262153
5
prod1
262153
7
maxacor
262153
7
minacor
262153
2
ac
262153
2
n3
262153
10
Phase3nits
262153
10
timePhase3
262153
8
estMeans
262153
5
diver
262153
3
msf
262153
3
sfl
262153
5
dfra1
262153
5
dfrac
262153
4
msfc
262153
7
fchange
262153
5
tstat
262153
9
tconv.max
262153
5
tconv
262153
4
tmax
262153
6
msfinv
262153
8
covtheta
262153
16
errorMessage.cov
262153
4
rate
262153
5
vrate
262153
2
se
262153
11
termination
1026
1023
16
1
262153
8
sienaFit
254
254
|
/R/lib/RSienaTest/unitTests/behaviorObjective.Rd
|
no_license
|
BRICOMATA/Bricomata_
|
R
| false
| false
| 24,382
|
rd
|
RDA2
A
2
196866
131840
1026
1
262153
5
value
787
134
787
41
16
1
262153
11
effect_test
10
1
0
10
1
1
14
1
3
14
1
0.2
14
1
0.5
14
1
1
10
1
0
10
1
0
10
1
0
16
1
262153
10
simstats0c
10
1
NA
14
1
0
16
1
262153
0
10
1
0
14
1
1
10
1
1
10
1
0
14
1
0.2
14
1
1
14
1
0
14
1
12345
14
1
0.05
14
1
0.05
14
1
0.2
14
1
0.3
14
1
0.3
14
1
0.05
14
1
0.05
14
1
40
14
1
2
14
1
20
14
1
5
14
1
5
14
1
0
10
1
1
10
1
1
10
1
0
10
1
0
10
1
0
10
1
0
1026
1
262153
5
names
16
41
262153
8
projname
262153
11
useStdInits
262153
9
checktime
262153
2
n3
262153
6
firstg
262153
7
reduceg
262153
6
maxrat
262153
7
maxlike
262153
7
simOnly
262153
7
localML
262153
8
FRANname
262153
12
cconditional
262153
9
condvarno
262153
8
condname
262153
14
FinDiff.method
262153
4
nsub
262153
5
dolby
262153
5
diagg
262153
11
diagonalize
262153
9
modelType
262153
9
MaxDegree
262153
10
randomSeed
262153
5
pridg
262153
5
prcdg
262153
5
prper
262153
5
pripr
262153
5
prdpr
262153
6
prirms
262153
6
prdrms
262153
24
maximumPermutationLength
262153
24
minimumPermutationLength
262153
24
initialPermutationLength
262153
4
mult
262153
10
truncation
262153
15
doubleAveraging
262153
25
standardizeWithTruncation
262153
14
standardizeVar
262153
13
noAggregation
262153
7
browse1
262153
7
browse2
262153
7
browse3
1026
1
262153
5
class
16
1
262153
14
sienaAlgorithm
254
14
1
1
14
1
1
16
1
262153
0
16
1
262153
7
1.1-289
10
1
0
14
1
275
10
1
1
10
1
1
10
1
0
10
1
0
10
1
0
10
1
0
14
1
0
14
1
4
14
1
0.1
10
1
0
10
1
0
16
1
262153
3
eff
14
2
0.3771715178878389
-0.2111470834071039
10
2
0
0
10
2
0
0
13
1
2
10
2
0
0
10
2
0
0
10
1
1
14
1
1
16
1
262153
3
atr
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
10
1
0
13
2
1
2
10
1
0
14
1
1
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
16
2
9
-1
9
-1
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1
262153
9
row.names
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
16
2
9
-1
9
-1
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1279
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
525
4
1
1
1
2
1026
1
262153
3
dim
13
2
2
2
254
14
2
11.66666666666667
121.0711111111112
526
4
-0.6666666666666643
70.50888888888895
12.33333333333334
50.56222222222223
1026
1535
13
2
2
2
254
10
1
1
531
13
22
254
254
531
1
787
33
16
0
16
0
16
0
16
0
16
0
16
0
16
0
10
0
10
0
10
0
10
0
10
0
16
0
14
0
13
0
16
0
13
0
16
0
14
0
13
0
13
0
13
0
16
0
10
0
19
0
19
0
16
0
16
0
13
0
13
0
16
0
10
0
10
0
1026
767
16
33
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
262153
9
effectPtr
1026
1279
13
0
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
1026
767
16
1
262153
3
atr
254
531
1
787
33
16
2
262153
3
atr
262153
3
atr
16
2
262153
16
atr\040linear\040shape
262153
19
atr\040quadratic\040shape
16
2
262153
16
atr\040centered\040sum
262153
24
atr\040sum\040of\040cent.\040squares
16
2
262153
6
linear
262153
4
quad
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
eval
262153
4
eval
10
2
0
0
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.3223690206761288
0
13
2
0
0
16
2
262153
9
objective
262153
9
objective
13
2
NA
NA
16
2
9
-1
9
-1
14
2
0.3223690206761288
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
2
OK
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
13
2
1
1
13
2
4
7
16
2
262153
0
262153
0
10
2
1
1
10
2
NA
NA
1026
767
16
33
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
262153
9
effectPtr
1026
1279
13
2
3
4
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
1026
767
16
1
262153
3
atr
254
531
1
10
2
0
0
1026
767
16
1
262153
3
atr
254
531
1
10
2
1
1
1026
767
16
1
262153
3
atr
254
22
254
254
10
1
1
14
1
3
16
1
262153
3
atr
16
1
262153
5
Data1
14
1
1
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
19
0
1026
767
16
13
262153
5
pData
262153
18
interactionEffects
262153
12
basicEffects
262153
19
interactionEffectsl
262153
13
basicEffectsl
262153
6
pModel
262153
11
simpleRates
262153
12
observations
262153
8
depNames
262153
10
groupNames
262153
6
nGroup
262153
5
types
262153
9
myeffects
1026
1
262153
8
netnames
16
1
262153
3
atr
1026
1
262153
9
symmetric
522
1
NA
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
allUpOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
11
allDownOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
allHigher
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
11
allDisjoint
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
13
allAtLeastOne
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
9
anyUpOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
11
anyDownOnly
522
1
0
1026
767
16
1
262153
3
atr
254
1026
1
262153
9
anyHigher
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
11
anyDisjoint
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
13
anyAtLeastOne
522
1
0
1026
767
16
1
262153
7
atr,atr
254
1026
1
262153
5
types
528
1
262153
8
behavior
1026
767
16
1
262153
3
atr
254
1026
1
262153
12
observations
14
1
2
1026
1
262153
17
compositionChange
10
1
0
1026
1
262153
10
exooptions
16
0
1026
1
262153
12
groupPeriods
13
1
3
1026
1
262153
9
periodNos
14
2
1
2
1026
1
262153
23
numberNonMissingNetwork
14
2
0
0
1026
1
262153
20
numberMissingNetwork
14
2
0
0
1026
1
262153
24
numberNonMissingBehavior
14
2
50
50
1026
1
262153
21
numberMissingBehavior
14
2
0
0
1026
1
262153
6
change
525
2
27
33
1026
1535
13
2
2
1
1026
1
262153
8
dimnames
19
2
254
16
1
262153
5
Data1
254
1026
1
262153
11
condEffects
787
32
16
2
262153
3
atr
262153
3
atr
16
2
262153
19
rate\040atr\040(period\0401)
262153
19
rate\040atr\040(period\0402)
16
2
262153
46
Amount\040of\040behavioral\040change\040in\040period\0401\040on\040atr
262153
46
Amount\040of\040behavioral\040change\040in\040period\0402\040on\040atr
16
2
262153
4
Rate
262153
4
Rate
16
2
262153
0
262153
0
16
2
262153
0
262153
0
16
2
262153
4
rate
262153
4
rate
10
2
1
1
10
2
1
1
10
2
0
0
10
2
0
0
10
2
0
0
16
2
262153
1
,
262153
1
,
14
2
0.7057142857142857
0.8493877551020408
13
2
0
0
16
2
262153
4
rate
262153
4
rate
16
2
262153
1
1
262153
1
2
16
2
9
-1
9
-1
14
2
0
0
13
2
0
0
13
2
0
0
13
2
0
0
16
2
262153
0
262153
0
10
2
0
0
19
2
254
254
19
2
254
254
16
2
262153
8
behavior
262153
8
behavior
16
2
262153
6
Group1
262153
6
Group1
14
2
1
1
13
2
1
2
16
2
262153
0
262153
0
10
2
1
1
1026
767
16
32
262153
4
name
262153
10
effectName
262153
12
functionName
262153
9
shortName
262153
12
interaction1
262153
12
interaction2
262153
4
type
262153
9
basicRate
262153
7
include
262153
13
randomEffects
262153
3
fix
262153
4
test
262153
9
timeDummy
262153
12
initialValue
262153
4
parm
262153
12
functionType
262153
6
period
262153
8
rateType
262153
14
untrimmedValue
262153
7
effect1
262153
7
effect2
262153
7
effect3
262153
15
interactionType
262153
5
local
262153
8
effectFn
262153
11
statisticFn
262153
7
netType
262153
9
groupName
262153
5
group
262153
12
effectNumber
262153
7
setting
262153
9
requested
1026
1279
13
2
1
2
1026
1023
16
2
262153
12
sienaEffects
262153
10
data.frame
254
254
14
2
1
2
10
1
0
10
1
0
14
1
3
10
1
0
10
1
0
10
1
0
10
1
0
13
1
1
10
2
0
0
10
1
0
526
4
0.03954241163838698
0
0
1
1026
1535
13
2
2
2
254
14
2
0.1
0.1
14
2
0.1
0.1
14
2
0.3223690206761288
0
10
1
0
14
1
50
10
1
0
10
1
0
14
1
3
14
1
9
14
1
22
14
1
222
14
2
0
222
14
1
150
10
1
0
10
1
0
14
1
50
526
6
-4
-8
0
-1.093333333333305
-0.1866666666666674
-4
1026
1535
13
2
3
2
254
526
12
-4.666666666666664
-2.666666666666664
3.333333333333336
12.33333333333334
6.333333333333336
8.333333333333336
61.4155555555556
54.96222222222224
59.60222222222226
58.56222222222227
65.92222222222226
57.46888888888893
1026
1535
13
3
3
2
2
254
526
12
-6.971352874752451
-1.264243869931946
6.462659717255978
1.501611282197868
-4.997809017791867
-3.678697552001522
-4.16711618926293
4.930339678279372
-2.525858379296903
7.95445617335811
5.906896061477797
4.877132117474978
1026
1535
13
3
3
2
2
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
526
27
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
3
1
9
254
14
2
0
0
526
6
1.1809416600467
1.265074390734391
0.9045470974824197
1.724516019250948
1.205150063422383
1.863470002952116
1026
1535
13
2
3
2
254
19
1
254
14
1
500
10
1
1
14
1
3
526
1
2.359
1026
767
16
1
262153
7
elapsed
254
14
2
0.7208852687511493
0.3468060519809525
14
2
0.902852354526557
0.7906832413165021
526
1
0.0005102040816326513
1026
767
16
1
262153
7
elapsed
254
14
2
-4
-1.759999999999991
10
2
0
0
526
4
25.28930225968363
0
0
1
1026
1535
13
2
2
2
1026
8191
19
2
16
2
262153
6
linear
262153
4
quad
254
254
10
1
0
526
4
0.0149712574944926
0.0009376944724058593
0.000567847429618878
0.0062682310894739
1026
1535
13
2
2
2
254
14
1
50
526
100
4
-8
4
0
-8
-8
-12
4
4
-2
12
-12
2
0
-8
-2
6
6
-20
-8
-8
0
10
0
6
4
-28
2
-22
-6
4
-10
-12
6
-14
4
-2
-12
-4
0
-6
-4
-12
10
12
4
-8
2
-6
0
17.09333333333333
49.81333333333328
29.09333333333333
32
49.81333333333328
41.81333333333333
40.72
27.09333333333331
27.09333333333331
18.45333333333332
35.27999999999997
50.71999999999997
27.54666666666668
28
45.8133333333333
44.45333333333332
36.63999999999999
48.63999999999999
30.5333333333333
43.81333333333333
25.81333333333336
32
29.73333333333335
0
44.63999999999996
21.09333333333336
30.34666666666666
11.54666666666668
34.98666666666668
25.36000000000001
27.09333333333333
28.26666666666671
12.72000000000003
44.63999999999996
25.17333333333332
21.09333333333333
26.45333333333332
60.71999999999997
58.90666666666667
26
29.36000000000001
34.90666666666667
20.72000000000003
55.73333333333329
27.28000000000003
33.09333333333331
59.81333333333333
27.54666666666665
43.35999999999996
28
1026
1535
13
2
50
2
254
526
200
3.332443480861405
5.116279367599536
1.315321875073689
-0.6700024628225519
-1.794954440181859
4.317767818757651
-6.625871051772602
-0.5738984395520698
2.266801946774507
6.696180859495592
9.926621689481211
-3.573898439552068
-5.625367681653906
1.404088067292294
-2.308711027872322
0.04279663234206721
0.2167719081192474
5.410922528225484
-5.245012067350691
3.737866326861591
0.3226597061255687
1.012837513277174
-3.237066441441431
-0.1513539877641682
-1.836639907547857
2.679059253707879
-16.93029798713437
10.94618923895288
-4.087655027242537
-1.677340293874429
8.541269763071398
-1.414598825878646
-7.478297786400276
-1.259687729454448
-3.25234989840257
6.374128948227403
2.332443480861407
-2.057695908177641
-5.837143277666545
2.796673400015307
-5.576847753354718
8.325105649809529
-1.031293897772783
6.268744520339773
8.329997537177444
2.266298576655814
-1.893001037017611
1.936908834335745
-0.8763828013485845
-11.73614736702816
3.138397285493861
-10.42632517417976
-2.291484997345917
4.20504555981814
-11.82480913450808
-7.682232181242355
-7.903288181872136
3.359956656242338
2.150627003913654
-2.173870851038491
3.300646212969938
-7.040973247769928
8.31542629981238
-3.257241785770486
1.45260799559148
-1.824809134508063
4.49673940664144
6.99816185117342
-8.793011866616586
-5.584185584406596
-11.65033048861221
1.470232971497888
11.53148598833555
-3.654719005861415
14.49918535032538
1.988378076437582
-9.118948925015326
-7.922855731343811
-13.22044820577241
-1.267025560506328
-2.121898238817968
-2.782724721762061
-13.5742973849321
9.504077237693314
-12.08999654618781
1.086928043392025
-1.638100770192394
-14.07287494040009
4.143289172861778
-1.797400383865815
3.408476584541524
-8.767944634919623
-7.342954239447766
3.089373987075982
8.303092156653893
1.696285284234278
-7.136573900921725
3.582955230437395
-4.011621923562418
11.91489534118011
7.581708268030594
17.5770000577776
-2.260366266061788
-0.5350762534571807
4.83067103504258
-4.425675353491206
20.65910649572268
-17.48505129648316
-20.56557243965013
2.070309047338129
16.42472465843747
4.489088494024074
4.460429965502115
-14.98939997210784
-1.403164767069929
-8.062793993018451
8.344895363120029
7.314876807533301
10.31899292757717
11.78464697741598
2.898432011222531
14.86920358952684
-11.29898627071078
-21.38090458748985
5.739468605376285
-6.728139091931654
-5.335149310137348
-2.790060256433461
-4.135926955194064
6.618391959682189
-5.578357288192887
4.800949515819758
-5.639074518369808
9.689480545881191
-16.10848552221032
-5.838509284012535
-4.558311757739574
-0.5078043577565672
21.09755408697321
7.174317572122225
4.452753648627834
2.024172191074269
-1.975372375354904
22.65061986643483
-0.7446630119851334
7.659395117163717
-6.622683763323916
1.239151478156725
12.60232972162075
-4.107351802925397
-21.72277265296531
-15.68838129146199
5.8096643258221
-6.108797993644288
9.985309126454448
6.943320356413093
-7.220581671987771
-1.361293729590398
0.2729905534418415
-12.74178826659419
-1.00317682983412
8.593733966471332
-21.33234624227181
8.349520747763297
13.16865884024448
14.53337329891869
13.84343263804799
16.60008197339105
-3.321581371693467
2.024699275703481
-5.945698073397811
-17.00900017286487
1.191083813655198
-20.90534156763717
2.176665967510496
-3.630867690321064
-23.25067313066832
3.851738919537694
7.90858679150859
-13.75141134032033
-9.412217059623169
-14.21676133627234
-3.625291994716717
8.290219884999242
10.43912012486914
-3.275873859412268
2.149869750229251
20.59857500437631
9.420954171391681
-14.42124489170302
0.6026853695651728
-9.995732915788242
-13.3163945738848
0.4795131562779358
-9.253294409333453
-7.264150377704164
19.23637725818887
0.4735714955666461
3.110494664219529
-4.418710788094543
1026
1535
13
3
50
2
2
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
526
450
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1026
1535
13
3
50
1
9
254
14
2
8.586128347514961
12.79900511689178
14
2
7.738809716183902
12.41920941284984
526
4
0.01346844935881434
0.001030534531904058
0.001030534531904058
0.006061222714152954
1026
1535
13
2
2
2
254
14
1
0.5
14
1
22
14
1
222
14
1
1
10
222
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
14
2
0
0
14
1
2.179
14
2
84.10924848898807
-47.08579959978416
14
1
223
14
2
2067.737973531256
9995.741135975784
14
2
-410.2754784998486
1422.041052791091
14
1
0.1422646938777763
14
1
-0.1984175382721175
14
2
-0.1984175382721175
0.1422646938777763
14
1
3
14
1
3
526
1
0.001000000000000038
1026
767
16
1
262153
7
elapsed
254
14
2
9.816786834385422
117.3486686674613
10
2
0
0
526
4
16
-7.626666666666665
-7.626666666666665
3.968711111111129
1026
1535
13
2
2
2
254
14
2
-0.5
-0.3118616735352036
526
4
67.17581230378116
-12.56143618859383
-7.606933240966379
160.4450073786322
1026
1535
13
2
2
2
254
526
4
25.28930225968363
0
0
1
1026
1535
13
2
2
2
254
526
4
16
-7.626666666666665
-7.626666666666665
3.968711111111129
1026
1535
13
2
2
2
254
526
2
-0.1581696465535479
-1.759999999999991
1026
1535
13
2
2
1
254
14
2
-1
-0.8834621020683713
526
1
6.429100507328444
1026
1535
13
2
1
1
254
14
2
-1
-0.8834621020683713
14
1
1
526
4
0.7441333333332947
1.429999999999919
1.429999999999919
2.999999999999831
1026
1535
13
2
2
2
254
526
4
0.02501763709087428
-0.301576792762098
-0.301576792762098
3.968711111111129
1026
1535
13
2
2
2
254
16
1
262153
0
14
2
1.116854382754504
1.597712028541816
14
2
0.1886143330395212
0.3469952805737122
14
2
0.1581696465535479
1.992162420866112
16
1
262153
2
OK
1026
767
16
134
262153
1
x
262153
3
int
262153
4
int2
262153
8
revision
262153
7
version
262153
14
FinDiff.method
262153
1
n
262153
2
OK
262153
5
error
262153
9
restarted
262153
17
DerivativeProblem
262153
17
ForceFinDifPhase1
262153
15
Phase3Interrupt
262153
17
repeatsForEpsilon
262153
17
maxrepeatsubphase
262153
4
gain
262153
8
haveDfra
262153
7
maxlike
262153
11
effectsName
262153
5
theta
262153
5
fixed
262153
4
test
262153
2
pp
262153
4
posj
262153
17
BasicRateFunction
262153
12
cconditional
262153
9
condvarno
262153
8
condname
262153
8
condtype
262153
9
symmetric
262153
7
condvar
262153
23
FinDiffBecauseSymmetric
262153
9
modelType
262153
7
effects
262153
16
requestedEffects
262153
8
callGrid
262153
7
targets
262153
8
targets2
262153
11
simpleRates
262153
1
f
262153
9
periodNos
262153
10
returnDeps
262153
16
returnDepsStored
262153
12
observations
262153
12
returnChains
262153
7
byGroup
262153
6
byWave
262153
15
returnDataFrame
262153
19
nDependentVariables
262153
8
newFixed
262153
11
AllNowFixed
262153
4
dinv
262153
5
scale
262153
7
epsilon
262153
6
theta0
262153
7
anyposj
262153
2
n1
262153
12
AllUserFixed
262153
14
epsilonProblem
262153
5
Phase
262153
6
n2min0
262153
9
n2minimum
262153
9
n2maximum
262153
9
n2partsum
262153
5
n1pos
262153
7
restart
262153
9
SomeFixed
262153
9
phase1Its
262153
2
sf
262153
3
sf2
262153
3
ssc
262153
7
accepts
262153
7
rejects
262153
6
aborts
262153
4
npos
262153
4
ntim
262153
4
sims
262153
9
writefreq
262153
5
Deriv
262153
3
nit
262153
5
ctime
262153
8
regrCoef
262153
7
regrCor
262153
10
timePhase1
262153
5
mnfra
262153
13
jacobianwarn1
262153
4
dfra
262153
11
cdSomeFixed
262153
5
dinvv
262153
9
nitPhase1
262153
10
phase1devs
262153
12
phase1scores
262153
13
phase1accepts
262153
13
phase1rejects
262153
12
phase1aborts
262153
2
sd
262153
15
standardization
262153
9
sf.invcov
262153
7
reduceg
262153
5
n2min
262153
5
n2max
262153
14
repeatsubphase
262153
9
truncated
262153
11
positivized
262153
5
time1
262153
4
thav
262153
5
thavn
262153
5
prod0
262153
5
prod1
262153
7
maxacor
262153
7
minacor
262153
2
ac
262153
2
n3
262153
10
Phase3nits
262153
10
timePhase3
262153
8
estMeans
262153
5
diver
262153
3
msf
262153
3
sfl
262153
5
dfra1
262153
5
dfrac
262153
4
msfc
262153
7
fchange
262153
5
tstat
262153
9
tconv.max
262153
5
tconv
262153
4
tmax
262153
6
msfinv
262153
8
covtheta
262153
16
errorMessage.cov
262153
4
rate
262153
5
vrate
262153
2
se
262153
11
termination
1026
1023
16
1
262153
8
sienaFit
254
254
|
#【讀入資料】
install.packages("xlsx")
library(xlsx)
brain <-read.xlsx(file ="data/brain.xlsx",sheetIndex = 1)
babies =read.csv(file="data/babies.txt",sep = " ")
cancer <- read.csv("data/cancer.csv", header=T, sep = ",")
#【相關常態判別】
#【plot】
plot(~perimeter_worst+area_worst,data = cancer)
plot(~perimeter_worst+area_worst+smoothness_worst,data = cancer)
plot(perimeter_mean~area_worst+smoothness_worst,data = cancer)
#p.10【pairs】
#【transform】----------------------------------------------------------
#【function】
std <- function(x) {x+1}
std(c(11,12,13,15))
----------------------------------------------------------------------
#【sapply】
x <- list(a = 1, b = 1:3, c = 10:100)
sapply(x, FUN = length)
sapply(x, FUN = sum)
#【lapply】
x <- list(a = 1, b = 1:3, c = 10:100)
lapply(x, FUN = length)
lapply(x, FUN = sum)
#【apply】
M <- matrix(1:16, 4, 4)
apply(M, 1, min)
apply(M, 2, max)
M <- array( 1:32, dim = c(4,4,2))
apply(M, 1, sum)
apply(M,2,sum)
apply(M, c(1,2), sum)
-----------------------------------------------------------------------
#【log10】
ncol(cancer)
fun=function(x){log10(x+1)}
lapply(cancer[,3:ncol(cancer)],fun)
tra <- as.data.frame(lapply(cancer[,3:ncol(cancer)],fun))
cbind <-cbind(cancer[,1:2],tra)
qqnorm(cbind[,3],ylab = " ",main = " ")
qqline(cbind[,3])
hist(cbind[,3],breaks =5)
#【z score】
zscore<-function(x) {(x-mean(x))/sd(x)}
z1<-as.data.frame(lapply(cancer[,3:ncol(cancer)],zscore))
#exercise_1【range standardization】
cc = function(x){(x-min(x))/(max(x)-min(x))}
e1= as.data.frame(lapply(cancer[,3:ncol(cancer)],cc))
#【NA】
na <-c(1,2,NA)
is.na(na)
na1<-5
ifelse(na1>10, 1, 2)
#p.25【impute】
na_impute <- read.csv("data/na_cancer.csv",header = T)
na_f = function(x){ifelse(is.na(x),mean(x,na.rm = T),x)}
na_impute <-as.data.frame(lapply(na_impute[,c(1:ncol(na_impute))],na_f))
write.csv(na_impute, "data/na_cancer.csv",row.names = T)
#【dummy】
install.packages("dummies")
library("dummies")
require("dummies")
Bdummy <-dummy.data.frame(cancer)
#【split】
spli<-split(cancer$dimension_worst,cancer$diagnosis,drop = F)
#p.30 exercise_2【split】
anova <- read.csv("data/anova.csv",header = T)
sp <-split(anova$Expression,anova$Gender)
length(sp$f)
length(sp$m)
#指標來切割資料
A = matrix(1:20,nrow =4, ncol =5);A
A1 = A[,c(2,4,5)];A1
A2 = A[1:3, c(2,4,5)];A2
#使用名稱指標作切割
colnames(A) = c("C1","C2","C3","C4","C5")
rownames(A) = c("R1","R2","R3","R4")
A3 = A[c("R1","R3"),c("C2","C5")];A3
#subset切割資料
head(iris)
factor(iris$Species)
subset(iris, Sepal.Length > 5)
subset(iris, Sepal.Length > 5,select = Sepal.Length)
subset(iris, Sepal.Length > 5,select = -Sepal.Length)
#【training/test sample】
set.seed(323)
train_indx=sample(1:nrow(cancer),size=398)
train=cancer[train_indx,]
test=cancer[-train_indx,]
aggdata = aggregate(Bdummy,by=list(Bdummy$diagnosisB),FUN=mean,na.rm=TRUE)
#p.38【aggregate】
#exercise_3【na_iris】
|
/data_cleaning.R
|
no_license
|
lucy851023/BigDataTeam_exercise
|
R
| false
| false
| 2,999
|
r
|
#【讀入資料】
install.packages("xlsx")
library(xlsx)
brain <-read.xlsx(file ="data/brain.xlsx",sheetIndex = 1)
babies =read.csv(file="data/babies.txt",sep = " ")
cancer <- read.csv("data/cancer.csv", header=T, sep = ",")
#【相關常態判別】
#【plot】
plot(~perimeter_worst+area_worst,data = cancer)
plot(~perimeter_worst+area_worst+smoothness_worst,data = cancer)
plot(perimeter_mean~area_worst+smoothness_worst,data = cancer)
#p.10【pairs】
#【transform】----------------------------------------------------------
#【function】
std <- function(x) {x+1}
std(c(11,12,13,15))
----------------------------------------------------------------------
#【sapply】
x <- list(a = 1, b = 1:3, c = 10:100)
sapply(x, FUN = length)
sapply(x, FUN = sum)
#【lapply】
x <- list(a = 1, b = 1:3, c = 10:100)
lapply(x, FUN = length)
lapply(x, FUN = sum)
# [apply] -- margin-wise summaries of matrices and arrays
M <- matrix(1:16, 4, 4)
apply(M, 1, min)        # row minima
apply(M, 2, max)        # column maxima
M <- array( 1:32, dim = c(4,4,2))
apply(M, 1, sum)        # sum over everything except the row margin
apply(M,2,sum)
apply(M, c(1,2), sum)   # keep rows and columns, collapse the 3rd margin
# -----------------------------------------------------------------------
# BUG FIX above: the bare run of dashes was parsed as a chain of unary
# minus operators applied to the next expression; it is now a comment.
# [log10] log-transform (log10(x + 1)) every numeric column of 'cancer'
ncol(cancer)
fun=function(x){log10(x+1)}
lapply(cancer[,3:ncol(cancer)],fun)
tra <- as.data.frame(lapply(cancer[,3:ncol(cancer)],fun))
# NOTE(review): this variable name shadows base::cbind(); rename it if
# later parts of the script ever need the cbind() function.
cbind <-cbind(cancer[,1:2],tra)
qqnorm(cbind[,3],ylab = " ",main = " ")
qqline(cbind[,3])
hist(cbind[,3],breaks =5)
# [z score] standardize columns to mean 0 / sd 1
zscore<-function(x) {(x-mean(x))/sd(x)}
z1<-as.data.frame(lapply(cancer[,3:ncol(cancer)],zscore))
# exercise_1 [range standardization] rescale columns to [0, 1]
cc = function(x){(x-min(x))/(max(x)-min(x))}
e1= as.data.frame(lapply(cancer[,3:ncol(cancer)],cc))
# [NA] missing-value basics
na <-c(1,2,NA)
is.na(na)
na1<-5
ifelse(na1>10, 1, 2)
# p.25 [impute] replace NAs with the column mean
na_impute <- read.csv("data/na_cancer.csv",header = T)
na_f = function(x){ifelse(is.na(x),mean(x,na.rm = T),x)}
na_impute <-as.data.frame(lapply(na_impute[,c(1:ncol(na_impute))],na_f))
# NOTE(review): this overwrites the input file with the imputed data.
write.csv(na_impute, "data/na_cancer.csv",row.names = T)
# [dummy] one-hot encode factor columns
install.packages("dummies")
library("dummies")
require("dummies")
Bdummy <-dummy.data.frame(cancer)
# [split] split a vector into groups by a factor
spli<-split(cancer$dimension_worst,cancer$diagnosis,drop = F)
# p.30 exercise_2 [split]
anova <- read.csv("data/anova.csv",header = T)
sp <-split(anova$Expression,anova$Gender)
length(sp$f)
length(sp$m)
# Subset data by numeric indices
A = matrix(1:20,nrow =4, ncol =5);A
A1 = A[,c(2,4,5)];A1
A2 = A[1:3, c(2,4,5)];A2
# Subset using dimension names
colnames(A) = c("C1","C2","C3","C4","C5")
rownames(A) = c("R1","R2","R3","R4")
A3 = A[c("R1","R3"),c("C2","C5")];A3
# subset() on data frames
head(iris)
factor(iris$Species)
subset(iris, Sepal.Length > 5)
subset(iris, Sepal.Length > 5,select = Sepal.Length)
subset(iris, Sepal.Length > 5,select = -Sepal.Length)
# [training/test sample] random train/test split
set.seed(323)
train_indx=sample(1:nrow(cancer),size=398)
train=cancer[train_indx,]
test=cancer[-train_indx,]
aggdata = aggregate(Bdummy,by=list(Bdummy$diagnosisB),FUN=mean,na.rm=TRUE)
# p.38 [aggregate]
# exercise_3 [na_iris]
|
## Figure S5: combine the univariable and multivariable CFR forest plots
## side by side into a single panel figure.
library(magick)
library(cowplot)
# Read the two pre-rendered panels.
cfr1_plot <- image_read("~/Desktop/Covid disparities/health disparity/Output/CFR Total Univariable.png")
cfr2_plot <- image_read("~/Desktop/Covid disparities/health disparity/Output/CFR Total Multivariable.png")
# Trim the left margin of the multivariable panel: keep a 725x1200 window
# offset 550 px from the left so the two panels butt together cleanly.
cfr2_crop <- image_crop(cfr2_plot, "725x1200+550")
img <- c(cfr1_plot, cfr2_crop)
combined_img <- image_append(img)
main <- ggdraw() + draw_image(combined_img)
png(file="~/Desktop/Covid disparities/health disparity/Output/Figure S5.png",width=2500,height=1500)
# BUG FIX: print the plot explicitly; relying on top-level auto-printing
# draws nothing when this script is run via source().
print(main)
dev.off()
|
/Scripts/FigureGen_FigureS5.R
|
no_license
|
lin-lab/COVID-Health-Disparities
|
R
| false
| false
| 539
|
r
|
## Figure S5: combine the univariable and multivariable CFR forest plots
## side by side into a single panel figure.
library(magick)
library(cowplot)
# Read the two pre-rendered panels.
cfr1_plot <- image_read("~/Desktop/Covid disparities/health disparity/Output/CFR Total Univariable.png")
cfr2_plot <- image_read("~/Desktop/Covid disparities/health disparity/Output/CFR Total Multivariable.png")
# Trim the left margin of the multivariable panel: keep a 725x1200 window
# offset 550 px from the left so the two panels butt together cleanly.
cfr2_crop <- image_crop(cfr2_plot, "725x1200+550")
img <- c(cfr1_plot, cfr2_crop)
combined_img <- image_append(img)
main <- ggdraw() + draw_image(combined_img)
png(file="~/Desktop/Covid disparities/health disparity/Output/Figure S5.png",width=2500,height=1500)
# BUG FIX: print the plot explicitly; relying on top-level auto-printing
# draws nothing when this script is run via source().
print(main)
dev.off()
|
## ----global_options, include = FALSE------------------------------------------------
# Load project-wide session options; kable_styling_fc() is presumably
# defined there (TODO confirm).  Failure is non-fatal via try().
try(source("../../.Rprofile"))
## -----------------------------------------------------------------------------------
# Demonstrate exact recovery of cubic polynomial coefficients from four
# evenly spaced observations (t = 0..3) using finite differences.
# polynomial coefficients
set.seed(123)
ar_coef_poly <- rnorm(4)
# time right hand side matrix
ar_t <- 0:3
ar_power <- 0:3
# One row per power of t: stack t^0, t^1, t^2, t^3.
mt_t_data <- do.call(rbind, lapply(ar_power, function(power) {
  ar_t^power
}))
# Final matrix, each row is an observation, or time.
mt_t_data <- t(mt_t_data)
# General model prediction: y = T %*% alpha (no noise).
ar_y <- mt_t_data %*% matrix(ar_coef_poly, ncol = 1, nrow = 4)
# Prediction and Input time matrix
mt_all_data <- cbind(ar_y, mt_t_data)
st_cap <- paste0(
  "C1=Y, each row is time, t=0, incremental by 1, ",
  "each column a polynomial term from 0th to higher."
)
kable(mt_all_data, caption = st_cap) %>% kable_styling_fc()
## -----------------------------------------------------------------------------------
# Back out each coefficient in turn by differencing out higher orders.
# The constant term
alpha_0 <- ar_y[1]
# The cubic term: third finite difference of y divided by 3! = 6.
alpha_3 <- as.numeric((t(ar_y) %*% c(-1, +3, -3, +1))/(3*2))
# The quadratic term, difference cubic out, alpha_2_1t3 = alpha_2_2t4
ar_y_hat <- ar_y - alpha_3*ar_t^3
alpha_2_1t3 <- as.numeric((t(ar_y_hat[1:3]) %*% c(1, -2, +1))/(2))
alpha_2_2t4 <- as.numeric((t(ar_y_hat[2:4]) %*% c(1, -2, +1))/(2))
alpha_2 <- alpha_2_1t3
# The linear term, difference cubic out and quadratic
ar_y_hat <- ar_y - alpha_3*ar_t^3 - alpha_2*ar_t^2
alpha_1_1t2 <- as.numeric((t(ar_y_hat[1:2]) %*% c(-1, +1))/(1))
alpha_1_2t3 <- as.numeric((t(ar_y_hat[2:3]) %*% c(-1, +1))/(1))
alpha_1_3t4 <- as.numeric((t(ar_y_hat[3:4]) %*% c(-1, +1))/(1))
alpha_1 <- alpha_1_1t2
# Collect results: solved coefficients should match the DGP coefficients.
ar_names <- c("Constant", "Linear", "Quadratic", "Cubic")
ar_alpha_solved <- c(alpha_0, alpha_1, alpha_2, alpha_3)
mt_alpha <- cbind(ar_names, ar_alpha_solved, ar_coef_poly)
# Display
ar_st_varnames <- c('Coefficient Counter', 'Polynomial Terms', 'Solved Coefficient Given Y', 'Actual DGP Coefficient')
tb_alpha <- as_tibble(mt_alpha) %>%
  rowid_to_column(var = "polynomial_term_coef") %>%
  rename_all(~c(ar_st_varnames))
# Display
st_cap = paste0('Solving for polynomial coefficients.')
kable(tb_alpha, caption = st_cap) %>% kable_styling_fc()
|
/linreg/polynomial/htmlpdfr/fs_poly_fit.R
|
permissive
|
FanWangEcon/R4Econ
|
R
| false
| false
| 2,192
|
r
|
## ----global_options, include = FALSE------------------------------------------------
# Load project-wide session options; kable_styling_fc() is presumably
# defined there (TODO confirm).  Failure is non-fatal via try().
try(source("../../.Rprofile"))
## -----------------------------------------------------------------------------------
# Demonstrate exact recovery of cubic polynomial coefficients from four
# evenly spaced observations (t = 0..3) using finite differences.
# polynomial coefficients
set.seed(123)
ar_coef_poly <- rnorm(4)
# time right hand side matrix
ar_t <- 0:3
ar_power <- 0:3
# One row per power of t: stack t^0, t^1, t^2, t^3.
mt_t_data <- do.call(rbind, lapply(ar_power, function(power) {
  ar_t^power
}))
# Final matrix, each row is an observation, or time.
mt_t_data <- t(mt_t_data)
# General model prediction: y = T %*% alpha (no noise).
ar_y <- mt_t_data %*% matrix(ar_coef_poly, ncol = 1, nrow = 4)
# Prediction and Input time matrix
mt_all_data <- cbind(ar_y, mt_t_data)
st_cap <- paste0(
  "C1=Y, each row is time, t=0, incremental by 1, ",
  "each column a polynomial term from 0th to higher."
)
kable(mt_all_data, caption = st_cap) %>% kable_styling_fc()
## -----------------------------------------------------------------------------------
# Back out each coefficient in turn by differencing out higher orders.
# The constant term
alpha_0 <- ar_y[1]
# The cubic term: third finite difference of y divided by 3! = 6.
alpha_3 <- as.numeric((t(ar_y) %*% c(-1, +3, -3, +1))/(3*2))
# The quadratic term, difference cubic out, alpha_2_1t3 = alpha_2_2t4
ar_y_hat <- ar_y - alpha_3*ar_t^3
alpha_2_1t3 <- as.numeric((t(ar_y_hat[1:3]) %*% c(1, -2, +1))/(2))
alpha_2_2t4 <- as.numeric((t(ar_y_hat[2:4]) %*% c(1, -2, +1))/(2))
alpha_2 <- alpha_2_1t3
# The linear term, difference cubic out and quadratic
ar_y_hat <- ar_y - alpha_3*ar_t^3 - alpha_2*ar_t^2
alpha_1_1t2 <- as.numeric((t(ar_y_hat[1:2]) %*% c(-1, +1))/(1))
alpha_1_2t3 <- as.numeric((t(ar_y_hat[2:3]) %*% c(-1, +1))/(1))
alpha_1_3t4 <- as.numeric((t(ar_y_hat[3:4]) %*% c(-1, +1))/(1))
alpha_1 <- alpha_1_1t2
# Collect results: solved coefficients should match the DGP coefficients.
ar_names <- c("Constant", "Linear", "Quadratic", "Cubic")
ar_alpha_solved <- c(alpha_0, alpha_1, alpha_2, alpha_3)
mt_alpha <- cbind(ar_names, ar_alpha_solved, ar_coef_poly)
# Display
ar_st_varnames <- c('Coefficient Counter', 'Polynomial Terms', 'Solved Coefficient Given Y', 'Actual DGP Coefficient')
tb_alpha <- as_tibble(mt_alpha) %>%
  rowid_to_column(var = "polynomial_term_coef") %>%
  rename_all(~c(ar_st_varnames))
# Display
st_cap = paste0('Solving for polynomial coefficients.')
kable(tb_alpha, caption = st_cap) %>% kable_styling_fc()
|
## Dependencies: pathview supplies the cpd/ko data objects and
## kegg.species.code(); KEGGREST supplies keggConv().
require(pathview)
require(KEGGREST)
# Root path for project data files, read from a config file.
# NOTE(review): readLines() returns one element per line; this assumes
# data/publicPath.txt holds exactly one path -- confirm.
publicPathlines = readLines("data/publicPath.txt")
# Simulate molecular data for pathway visualization, mirroring
# pathview::sim.mol.data() but loading the 19-organism gene-ID-type
# lookup tables (bods, gid.types) from the project's public path.
#
# Args:
#   mol.type:  "gene", "gene.ko" (KEGG ortholog) or "cpd" (compound).
#   id.type:   ID namespace to simulate; default depends on mol.type.
#   species:   KEGG species code/name, or "ko" for orthologs.
#   discrete:  if TRUE, return only the sampled IDs (no values).
#   nmol:      number of molecules to sample (capped at available IDs).
#   nexp:      number of simulated experiment columns.
#   rand.seed: RNG seed for reproducible sampling.
# Returns: a character vector of IDs (discrete=TRUE), or a matrix (or
#   named vector when nexp == 1) of N(0,1) draws with IDs as row names.
sim.mol.data2=function(mol.type=c("gene","gene.ko","cpd")[1], id.type=NULL, species="hsa", discrete=FALSE, nmol=1000, nexp=1, rand.seed=100)
{
  msg.fmt="\"%s\" is not a good \"%s\" \"%s\" ID type for simulation!"
  msg.fmt2="\"%s\" has only %i unique IDs!"
  set.seed(rand.seed)
  if(species!="ko"){
    species.data=kegg.species.code(species, na.rm=T, code.only=FALSE)
    species=species.data["kegg.code"]
  } else if(mol.type=="gene") mol.type="gene.ko"
  if(mol.type=="gene"){
    if(is.null(id.type)) id.type="KEGG"
    id.type=toupper(id.type)
    ##data(bods, package="gage")
    # BUG FIX: paste() with its default sep=" " inserted a space into the
    # file path; paste0() builds the intended path.
    load(paste0(publicPathlines,"/scripts/org19.gid.types.RData"))
    org19=bods[,"kegg code"]
    if(!species %in% c(org19, "ko")){
      if(!id.type %in% c("ENTREZ","KEGG")){
        msg=sprintf(msg.fmt, id.type, species, mol.type)
        stop(msg)
      }
      if(is.na(species.data["ncbi.geneid"])){
        if(!is.na(species.data["kegg.geneid"])){
          msg.fmt3="Only native KEGG gene ID is supported for species \"%s\"!"
          msg=sprintf(msg.fmt3, species)
          message("Note: ", msg)
        } else{
          msg.fmt3="Simulation is not supported for species \"%s\"!"
          msg=sprintf(msg.fmt3, species)
          stop(msg)
        }
      }
      gid.map=keggConv("ncbi-geneid",species)
      if(id.type=="KEGG") {
        all.mn=gsub(paste(species, ":", sep=""), "", names(gid.map))
      } else all.mn=gsub("ncbi-geneid:", "", gid.map)
    } else if(species %in% org19){
      if(id.type=="ENTREZ") id.type="ENTREZID"
      if(id.type=="KEGG") {
        gid.map=keggConv("ncbi-geneid",species)
        all.mn=gsub(paste(species, ":", sep=""), "", names(gid.map))
      } else if(id.type %in% gid.types[[species]]){
        # Pull the ID universe from the organism's annotation package.
        idx=which(bods[,3]==species)
        annot.db=bods[idx,1]
        requireNamespace(annot.db)
        db.obj <- eval(parse(text=paste0(annot.db, "::", annot.db)))
        all.mn <-keys(db.obj, keytype=id.type)
      } else stop("Wrong gene ID type!")
    }
  } else if(mol.type=="cpd"){
    data(cpd.accs)
    data(cpd.simtypes)
    data(rn.list)
    accn=cpd.accs$ACCESSION_NUMBER
    if(is.null(id.type)) id.type="KEGG COMPOUND accession"
    if(!id.type %in% cpd.simtypes){
      # BUG FIX: msg.fmt contains three %s placeholders but the original
      # call passed only two arguments, so sprintf() itself errored
      # before the intended stop() message could be produced.
      msg=sprintf(msg.fmt, id.type, species, mol.type)
      stop(msg)
    }
    all.mn=unique(as.character(accn[rn.list[[id.type]]]))
  } else if(mol.type=="gene.ko"){
    data(ko.ids)
    all.mn=ko.ids
  } else stop("Invalid mol.type!")
  nuids=length(all.mn)
  if(nmol>nuids){
    # Cap the request at the number of available unique IDs.
    msg=sprintf(msg.fmt2, id.type, nuids)
    message("Note: ", msg)
    nmol=nuids
  }
  sel.mn=sample(all.mn, nmol)
  if(discrete) return(sel.mn)
  # Simulated expression values: i.i.d. standard normal draws.
  sel.mn.data=matrix(rnorm(nmol*nexp), ncol=nexp)
  rownames(sel.mn.data)=sel.mn
  colnames(sel.mn.data)=paste("exp", 1:nexp, sep="")
  # Drops to a named vector when nexp == 1 (matches sim.mol.data).
  return(sel.mn.data[, 1:nexp])
}
|
/public/scripts/sim.mol.data2.R
|
no_license
|
gauravp99/pathviewdev
|
R
| false
| false
| 3,305
|
r
|
## Dependencies: pathview supplies the cpd/ko data objects and
## kegg.species.code(); KEGGREST supplies keggConv().
require(pathview)
require(KEGGREST)
# Root path for project data files, read from a config file.
# NOTE(review): readLines() returns one element per line; this assumes
# data/publicPath.txt holds exactly one path -- confirm.
publicPathlines = readLines("data/publicPath.txt")
# Simulate molecular data for pathway visualization, mirroring
# pathview::sim.mol.data() but loading the 19-organism gene-ID-type
# lookup tables (bods, gid.types) from the project's public path.
#
# Args:
#   mol.type:  "gene", "gene.ko" (KEGG ortholog) or "cpd" (compound).
#   id.type:   ID namespace to simulate; default depends on mol.type.
#   species:   KEGG species code/name, or "ko" for orthologs.
#   discrete:  if TRUE, return only the sampled IDs (no values).
#   nmol:      number of molecules to sample (capped at available IDs).
#   nexp:      number of simulated experiment columns.
#   rand.seed: RNG seed for reproducible sampling.
# Returns: a character vector of IDs (discrete=TRUE), or a matrix (or
#   named vector when nexp == 1) of N(0,1) draws with IDs as row names.
sim.mol.data2=function(mol.type=c("gene","gene.ko","cpd")[1], id.type=NULL, species="hsa", discrete=FALSE, nmol=1000, nexp=1, rand.seed=100)
{
  msg.fmt="\"%s\" is not a good \"%s\" \"%s\" ID type for simulation!"
  msg.fmt2="\"%s\" has only %i unique IDs!"
  set.seed(rand.seed)
  if(species!="ko"){
    species.data=kegg.species.code(species, na.rm=T, code.only=FALSE)
    species=species.data["kegg.code"]
  } else if(mol.type=="gene") mol.type="gene.ko"
  if(mol.type=="gene"){
    if(is.null(id.type)) id.type="KEGG"
    id.type=toupper(id.type)
    ##data(bods, package="gage")
    # BUG FIX: paste() with its default sep=" " inserted a space into the
    # file path; paste0() builds the intended path.
    load(paste0(publicPathlines,"/scripts/org19.gid.types.RData"))
    org19=bods[,"kegg code"]
    if(!species %in% c(org19, "ko")){
      if(!id.type %in% c("ENTREZ","KEGG")){
        msg=sprintf(msg.fmt, id.type, species, mol.type)
        stop(msg)
      }
      if(is.na(species.data["ncbi.geneid"])){
        if(!is.na(species.data["kegg.geneid"])){
          msg.fmt3="Only native KEGG gene ID is supported for species \"%s\"!"
          msg=sprintf(msg.fmt3, species)
          message("Note: ", msg)
        } else{
          msg.fmt3="Simulation is not supported for species \"%s\"!"
          msg=sprintf(msg.fmt3, species)
          stop(msg)
        }
      }
      gid.map=keggConv("ncbi-geneid",species)
      if(id.type=="KEGG") {
        all.mn=gsub(paste(species, ":", sep=""), "", names(gid.map))
      } else all.mn=gsub("ncbi-geneid:", "", gid.map)
    } else if(species %in% org19){
      if(id.type=="ENTREZ") id.type="ENTREZID"
      if(id.type=="KEGG") {
        gid.map=keggConv("ncbi-geneid",species)
        all.mn=gsub(paste(species, ":", sep=""), "", names(gid.map))
      } else if(id.type %in% gid.types[[species]]){
        # Pull the ID universe from the organism's annotation package.
        idx=which(bods[,3]==species)
        annot.db=bods[idx,1]
        requireNamespace(annot.db)
        db.obj <- eval(parse(text=paste0(annot.db, "::", annot.db)))
        all.mn <-keys(db.obj, keytype=id.type)
      } else stop("Wrong gene ID type!")
    }
  } else if(mol.type=="cpd"){
    data(cpd.accs)
    data(cpd.simtypes)
    data(rn.list)
    accn=cpd.accs$ACCESSION_NUMBER
    if(is.null(id.type)) id.type="KEGG COMPOUND accession"
    if(!id.type %in% cpd.simtypes){
      # BUG FIX: msg.fmt contains three %s placeholders but the original
      # call passed only two arguments, so sprintf() itself errored
      # before the intended stop() message could be produced.
      msg=sprintf(msg.fmt, id.type, species, mol.type)
      stop(msg)
    }
    all.mn=unique(as.character(accn[rn.list[[id.type]]]))
  } else if(mol.type=="gene.ko"){
    data(ko.ids)
    all.mn=ko.ids
  } else stop("Invalid mol.type!")
  nuids=length(all.mn)
  if(nmol>nuids){
    # Cap the request at the number of available unique IDs.
    msg=sprintf(msg.fmt2, id.type, nuids)
    message("Note: ", msg)
    nmol=nuids
  }
  sel.mn=sample(all.mn, nmol)
  if(discrete) return(sel.mn)
  # Simulated expression values: i.i.d. standard normal draws.
  sel.mn.data=matrix(rnorm(nmol*nexp), ncol=nexp)
  rownames(sel.mn.data)=sel.mn
  colnames(sel.mn.data)=paste("exp", 1:nexp, sep="")
  # Drops to a named vector when nexp == 1 (matches sim.mol.data).
  return(sel.mn.data[, 1:nexp])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ftrl.Dataset.R
\name{dim.ftrl.Dataset}
\alias{dim.ftrl.Dataset}
\title{Dimensions of ftrl.Dataset}
\usage{
\method{dim}{ftrl.Dataset}(x)
}
\arguments{
\item{x}{Object of class \code{ftrl.Dataset}}
}
\description{
Returns a vector of numbers of rows and of columns in an \code{ftrl.Dataset}.
}
\details{
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
be directly used with an \code{ftrl.Dataset} object.
}
|
/man/dim.ftrl.Dataset.Rd
|
no_license
|
yanyachen/rFTRLProximal
|
R
| false
| true
| 517
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ftrl.Dataset.R
\name{dim.ftrl.Dataset}
\alias{dim.ftrl.Dataset}
\title{Dimensions of ftrl.Dataset}
\usage{
\method{dim}{ftrl.Dataset}(x)
}
\arguments{
\item{x}{Object of class \code{ftrl.Dataset}}
}
\description{
Returns a vector of numbers of rows and of columns in an \code{ftrl.Dataset}.
}
\details{
Note: since \code{nrow} and \code{ncol} internally use \code{dim}, they can also
be directly used with an \code{ftrl.Dataset} object.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/st_sample_geotools.R
\name{st_sample_geotools}
\alias{st_sample_geotools}
\title{st_sample_geotools}
\usage{
st_sample_geotools(
geodata,
n,
fraction = NULL,
weight_var,
type = "random",
iter = 9,
...
)
}
\arguments{
\item{geodata}{\code{sf} object}
\item{n}{number of features to sample}
\item{fraction}{fraction of features to sample}
\item{weight_var}{(optional) variable in \code{geodata} to weight by}
\item{type}{passed to \code{spsample}; defaults to "random"}
\item{iter}{passed to \code{spsample}}
\item{...}{reserved for future use}
}
\description{
st_sample_geotools
}
\examples{
county_geodata <- TIGER2015::TIGER2015_SFBA_counties \%>\% with_county_populations()
county_sample <- county_geodata \%>\% st_sample(n = 1000, weight_var = "county_pop_total")
tract_geodata <- TIGER2015::TIGER2015_SFBA_tracts \%>\% filter(str_detect(GEOID, "^06095")) \%>\% with_tract_populations()
tract_sample <- tract_geodata \%>\% st_sample(n = 1000, weight_var = "tract_pop_total")
ALA_block_geodata <- TIGER2015::TIGER2015_SFBA_blocks \%>\% filter(str_detect(GEOID10, "^06001")) \%>\% with_block_populations()
ALA_block_sample <- ALA_block_geodata \%>\% dplyr::select(block_id, block_pop_total) \%>\% st_sample(frac = 0.1, weight_var = "block_pop_total")
mapview::mapview(ALA_block_sample, cex = 1, color = NULL)
}
|
/man/st_sample_geotools.Rd
|
no_license
|
BAAQMD/geotools
|
R
| false
| true
| 1,414
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/st_sample_geotools.R
\name{st_sample_geotools}
\alias{st_sample_geotools}
\title{st_sample_geotools}
\usage{
st_sample_geotools(
geodata,
n,
fraction = NULL,
weight_var,
type = "random",
iter = 9,
...
)
}
\arguments{
\item{geodata}{\code{sf} object}
\item{n}{number of features to sample}
\item{fraction}{fraction of features to sample}
\item{weight_var}{(optional) variable in \code{geodata} to weight by}
\item{type}{passed to \code{spsample}; defaults to "random"}
\item{iter}{passed to \code{spsample}}
\item{...}{reserved for future use}
}
\description{
st_sample_geotools
}
\examples{
county_geodata <- TIGER2015::TIGER2015_SFBA_counties \%>\% with_county_populations()
county_sample <- county_geodata \%>\% st_sample(n = 1000, weight_var = "county_pop_total")
tract_geodata <- TIGER2015::TIGER2015_SFBA_tracts \%>\% filter(str_detect(GEOID, "^06095")) \%>\% with_tract_populations()
tract_sample <- tract_geodata \%>\% st_sample(n = 1000, weight_var = "tract_pop_total")
ALA_block_geodata <- TIGER2015::TIGER2015_SFBA_blocks \%>\% filter(str_detect(GEOID10, "^06001")) \%>\% with_block_populations()
ALA_block_sample <- ALA_block_geodata \%>\% dplyr::select(block_id, block_pop_total) \%>\% st_sample(frac = 0.1, weight_var = "block_pop_total")
mapview::mapview(ALA_block_sample, cex = 1, color = NULL)
}
|
\name{dagR-package}
\Rdversion{1.1}
\alias{dagR-package}
\alias{dagR}
\docType{package}
\title{
R functions for directed acyclic graphs
}
\description{
The package dagR (pronounce "dagger") contains a couple of functions to draw, manipulate and evaluate directed acyclic graphs (DAG), with a focus on epidemiologic applications, namely the assessment of adjustment sets and potentially biasing paths.
The functions for finding and evaluating paths essentially implement the graphical algorithms outlined in Greenland (1999).\cr\cr
When using this package for your work, please cite Breitling (2010).\cr\cr
\emph{Note: As spelled out in the license, this suite of functions comes without any warranty, and cautious use is strongly advised.
Although testing was carried out as meticulously as possible, it must be expected that bugs or errors remain, in particular in the early versions of the package.
Please report any problems, concerns, but also suggestions for improvements or extensions to the author.}\cr\cr
Important additions in future versions could be e.g. improved drawing routines with better formatting of alternative node symbols in the DAG (taking into account the string length) and algorithms with intelligent/efficient search for minimal adjustment sets.
}
\details{
\tabular{ll}{
Package: \tab dagR\cr
Type: \tab Package\cr
Version: \tab 1.1.3\cr
Date: \tab 2014-01-08\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
\code{\link{dag.init}} is used for setting up DAGs. See the code of the functions \code{demo.dag0} to \code{demo.dag6} for example code.
To adjust and/or evaluate DAGs for biasing paths, use \code{\link{dag.adjust}}, \code{\link{dag.draw}} for drawing a DAG.
\code{\link{dag.search}} uses \code{\link{brute.search}} to evaluate all possible adjustment sets, allowing the identification of minimal sufficient adjustment sets using \code{\link{msas}}.
\code{\link{dag.sim}} simulates data (normally distributed or binary) according to
the causal structure given by a DAG object (Breitling (submitted)).
At present, \code{summary_dagRdag} can summarize a DAG object. This should later become an S3 method.\cr Several helper functions currently are not hidden and should later be made internal.
\cr \cr \emph{Please see the NEWS file for version changes and known open issues.}
}
\author{
Lutz P Breitling <lutz.breitling@gmail.com>
}
\references{
Breitling LP (2010). dagR: a suite of R functions for directed acyclic graphs. Epidemiology 21(4):586-587.\cr
Breitling LP (submitted). Understanding confounding and data analytical strategies
through DAG-based data simulations.\cr
Greenland S, Pearl J, Robins JM (1999). Causal diagrams for epidemiologic research. Epidemiology 10(1):37-48.
}
\keyword{ package }
\examples{
dag1<-demo.dag1();
dag.draw(dag1);
dag1a<-dag.adjust(dag1, 3);
dag.draw(dag1a);
dag1s<-dag.search(dag1);
summary_dagRdag(dag1);
summary_dagRdag(dag1a);
summary_dagRdag(dag1s);
}
|
/man/dagR-package.Rd
|
no_license
|
mjaquiery/dagR
|
R
| false
| false
| 2,946
|
rd
|
\name{dagR-package}
\Rdversion{1.1}
\alias{dagR-package}
\alias{dagR}
\docType{package}
\title{
R functions for directed acyclic graphs
}
\description{
The package dagR (pronounce "dagger") contains a couple of functions to draw, manipulate and evaluate directed acyclic graphs (DAG), with a focus on epidemiologic applications, namely the assessment of adjustment sets and potentially biasing paths.
The functions for finding and evaluating paths essentially implement the graphical algorithms outlined in Greenland (1999).\cr\cr
When using this package for your work, please cite Breitling (2010).\cr\cr
\emph{Note: As spelled out in the license, this suite of functions comes without any warranty, and cautious use is strongly advised.
Although testing was carried out as meticulously as possible, it must be expected that bugs or errors remain, in particular in the early versions of the package.
Please report any problems, concerns, but also suggestions for improvements or extensions to the author.}\cr\cr
Important additions in future versions could be e.g. improved drawing routines with better formatting of alternative node symbols in the DAG (taking into account the string length) and algorithms with intelligent/efficient search for minimal adjustment sets.
}
\details{
\tabular{ll}{
Package: \tab dagR\cr
Type: \tab Package\cr
Version: \tab 1.1.3\cr
Date: \tab 2014-01-08\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
\code{\link{dag.init}} is used for setting up DAGs. See the code of the functions \code{demo.dag0} to \code{demo.dag6} for example code.
To adjust and/or evaluate DAGs for biasing paths, use \code{\link{dag.adjust}}, \code{\link{dag.draw}} for drawing a DAG.
\code{\link{dag.search}} uses \code{\link{brute.search}} to evaluate all possible adjustment sets, allowing the identification of minimal sufficient adjustment sets using \code{\link{msas}}.
\code{\link{dag.sim}} simulates data (normally distributed or binary) according to
the causal structure given by a DAG object (Breitling (submitted)).
At present, \code{summary_dagRdag} can summarize a DAG object. This should later become an S3 method.\cr Several helper functions currently are not hidden and should later be made internal.
\cr \cr \emph{Please see the NEWS file for version changes and known open issues.}
}
\author{
Lutz P Breitling <lutz.breitling@gmail.com>
}
\references{
Breitling LP (2010). dagR: a suite of R functions for directed acyclic graphs. Epidemiology 21(4):586-587.\cr
Breitling LP (submitted). Understanding confounding and data analytical strategies
through DAG-based data simulations.\cr
Greenland S, Pearl J, Robins JM (1999). Causal diagrams for epidemiologic research. Epidemiology 10(1):37-48.
}
\keyword{ package }
\examples{
dag1<-demo.dag1();
dag.draw(dag1);
dag1a<-dag.adjust(dag1, 3);
dag.draw(dag1a);
dag1s<-dag.search(dag1);
summary_dagRdag(dag1);
summary_dagRdag(dag1a);
summary_dagRdag(dag1s);
}
|
####**********************************************************************
####**********************************************************************
####
#### RANDOM SURVIVAL FOREST 3.6.4
####
#### Copyright 2013, Cleveland Clinic Foundation
####
#### This program is free software; you can redistribute it and/or
#### modify it under the terms of the GNU General Public License
#### as published by the Free Software Foundation; either version 2
#### of the License, or (at your option) any later version.
####
#### This program is distributed in the hope that it will be useful,
#### but WITHOUT ANY WARRANTY; without even the implied warranty of
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#### GNU General Public License for more details.
####
#### You should have received a copy of the GNU General Public
#### License along with this program; if not, write to the Free
#### Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
#### Boston, MA 02110-1301, USA.
####
#### Written by:
#### Hemant Ishwaran, Ph.D.
#### Director of Statistical Methodology
#### Professor, Division of Biostatistics
#### Clinical Research Building, Room 1058
#### 1120 NW 14th Street
#### University of Miami, Miami FL 33136
####
#### email: hemant.ishwaran@gmail.com
#### URL: http://web.ccs.miami.edu/~hishwaran
#### --------------------------------------------------------------
#### Udaya B. Kogalur, Ph.D.
#### Adjunct Staff
#### Dept of Quantitative Health Sciences
#### Cleveland Clinic Foundation
####
#### Kogalur & Company, Inc.
#### 5425 Nestleway Drive, Suite L1
#### Clemmons, NC 27012
####
#### email: commerce@kogalur.com
#### URL: http://www.kogalur.com
#### --------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
# Compute maximal-subtree (minimal depth) statistics for a random
# survival forest.  For each predictor, walks every tree's nativeArray
# (pre-order flattened forest) via rsfParseTree() and accumulates:
#   mean    - average first-split depth per predictor (stumps excluded)
#   order   - first max.order depth order statistics (max.order > 0) or
#             per-tree minimal depth matrix (max.order == 0)
#   count   - normalized count of maximal subtrees per predictor
#   terminal     - average terminal-node depth per tree
#   nodesAtDepth - nodes observed at each depth, per tree
#   subOrder     - pairwise relative minimal depths (sub.order = TRUE)
#   threshold    - null-distribution mean depth from ExactThreshold()
max.subtree.rsf <- function(object, max.order=2, sub.order=FALSE, ...) {
  if (is.null(object)) stop("Object is empty!")
  # Accept either a grow object (which carries a forest) or a bare forest.
  if (sum(inherits(object, c("rsf", "grow"), TRUE) == c(1, 2)) != 2 &
      sum(inherits(object, c("rsf", "forest"), TRUE) == c(1, 2)) != 2)
    stop("This function only works for objects of class `(rsf, grow)' or '(rsf, forest)'")
  if (sum(inherits(object, c("rsf", "grow"), TRUE) == c(1, 2)) == 2) {
    if (is.null(object$forest))
      stop("Forest is empty! Re-run grow call with forest set to 'TRUE'.")
    object <- object$forest
  }
  nativeArray <- object$nativeArray
  if (is.null(nativeArray)) {
    stop("RSF nativeArray content is NULL. Please ensure the object is valid.")
  }
  predictorNames <- object$predictorNames
  if (is.null(predictorNames)) {
    stop("RSF predictorNames content is NULL. Please ensure the object is valid.")
  }
  if (is.null(object$predictors)) {
    stop("RSF predictors content is NULL. Please ensure the object is valid.")
  }
  # max.order == 0 switches to per-tree minimal-depth reporting.
  max.order <- floor(max.order)
  if (max.order < 0) {
    stop("RSF 'max.order' requested for distance order statistic must be an integer greater than zero (0).")
  }
  MAX.DEPTH <- 100  # maximum tracked tree depth for nodesAtDepth
  numTree <- length(as.vector(unique(nativeArray$treeID)))
  numParm <- length(predictorNames)
  # Per-tree scratch structure, reset and reused for each tree.
  subtree <- vector("list", 8)
  names(subtree) <- c("count",
                      "order",
                      "meanSum",
                      "depth",
                      "terminalDepthSum",
                      "subOrder",
                      "subOrderDiag",
                      "nodesAtDepth")
  forestMeanSum <- rep(0, numParm)
  if (max.order > 0) {
    orderSum <- matrix(0, nrow=numParm, ncol=max.order)
  }
  else {
    order.tree <- matrix(NA, nrow=numParm, ncol=numTree)
  }
  # 'offset' is the cursor into nativeArray, threaded through recursion.
  recursiveObject <- list(offset = 1,
                          subtree = subtree,
                          diagnostic = 0,
                          diagnostic2 = 0)
  subtreeCountSum <- rep(0, numParm)
  subOrderSum <- matrix(0, nrow=numParm, ncol=numParm)
  terminalDepth <- rep(0, numTree)
  nodesAtDepthMatrix <- matrix(NA, nrow = MAX.DEPTH, ncol = numTree)
  offsetMark <- 1
  stumpCnt <- 0  # trees with no split at the root are excluded
  for (b in 1:numTree) {
    # Reset the per-tree accumulators.
    recursiveObject$subtree$nodesAtDepth <- rep(NA, MAX.DEPTH)
    recursiveObject$subtree$meanSum <- rep(NA, numParm)
    if (max.order > 0) {
      recursiveObject$subtree$order <- matrix(NA, nrow=numParm, ncol=max.order)
    }
    else {
      recursiveObject$subtree$order <- rep(NA, numParm)
    }
    if (sub.order ==TRUE) {
      recursiveObject$subtree$subOrder <- matrix(1.0, nrow=numParm, ncol=numParm)
      recursiveObject$subtree$subOrderDiag <- rep(NA, numParm)
    }
    recursiveObject$subtree$depth <- 0
    recursiveObject$subtree$terminalDepthSum <- 0
    recursiveObject$subtree$count <- rep(0, numParm)
    rootParmID <- nativeArray$parmID[recursiveObject$offset]
    offsetMark <- recursiveObject$offset
    # Recursive pre-order walk of tree b.
    recursiveObject <- rsfParseTree(
      recursiveObject,
      max.order,
      sub.order,
      nativeArray,
      b,
      distance=0,
      subtreeFlag=rep(FALSE, numParm))
    if (rootParmID != 0) {
      # Predictors never split on are imputed at the full tree depth.
      index <- which(recursiveObject$subtree$count == 0)
      recursiveObject$subtree$meanSum[index] <- recursiveObject$subtree$depth
      forestMeanSum <- forestMeanSum + recursiveObject$subtree$meanSum
      if (max.order > 0) {
        index <- which(is.na(recursiveObject$subtree$order))
        recursiveObject$subtree$order[index] <- recursiveObject$subtree$depth
        orderSum <- orderSum + recursiveObject$subtree$order
      }
      else {
        index <- which(is.na(recursiveObject$subtree$order))
        recursiveObject$subtree$order[index] <- recursiveObject$subtree$depth
        order.tree[ , b] <- recursiveObject$subtree$order
      }
      # Normalize by tree size: (records in tree)/4 = (#internal nodes)/2.
      subtreeCountSum <- subtreeCountSum + (recursiveObject$subtree$count / ((recursiveObject$offset - offsetMark + 1) / 4))
      # Average terminal depth: records/2 = number of terminal nodes.
      terminalDepth[b] <- recursiveObject$subtree$terminalDepthSum / ((recursiveObject$offset - offsetMark + 1) / 2)
      if (sub.order == TRUE) {
        # Diagonal holds each predictor's own normalized minimal depth.
        index <- which(recursiveObject$subtree$count > 0)
        diag(recursiveObject$subtree$subOrder)[index] <- recursiveObject$subtree$subOrderDiag[index]
        index <- which(recursiveObject$subtree$count == 0)
        diag(recursiveObject$subtree$subOrder)[index] <- recursiveObject$subtree$depth
        diag(recursiveObject$subtree$subOrder) <- diag(recursiveObject$subtree$subOrder) / recursiveObject$subtree$depth
        subOrderSum <- subOrderSum + recursiveObject$subtree$subOrder
      }
      nodesAtDepthMatrix[, b] <- recursiveObject$subtree$nodesAtDepth
    }
    else {
      # Root was a terminal node: a stump contributes no statistics.
      stumpCnt <- stumpCnt + 1
      nodesAtDepthMatrix[, b] <- NA
    }
  }
  nameVector <- c("mean",
                  "order",
                  "count",
                  "terminal",
                  "nodesAtDepth",
                  "subOrder")
  result <- vector("list", length(nameVector))
  names(result) <- nameVector
  # Average over non-stump trees only.
  if(numTree != stumpCnt) {
    result$terminal <- terminalDepth
    result$mean <- forestMeanSum / (numTree - stumpCnt)
    names(result$mean) <- predictorNames
    if (max.order > 0) {
      result$order <- orderSum / (numTree - stumpCnt)
      rownames(result$order) <- predictorNames
    }
    else {
      result$order <- order.tree
      rownames(result$order) <- predictorNames
    }
    result$count <- subtreeCountSum / (numTree - stumpCnt)
    names(result$count) <- predictorNames
    result$nodesAtDepth <- nodesAtDepthMatrix
    if (sub.order == TRUE) {
      result$subOrder <- subOrderSum / (numTree - stumpCnt)
      rownames(result$subOrder) <- predictorNames
      colnames(result$subOrder) <- predictorNames
    }
  }
  # Null-distribution mean depth used as the variable-selection threshold.
  threshold <- ExactThreshold(result)
  result <- c(result, threshold=threshold)
  return (result)
}
# Recursive pre-order walk of one tree's records in nativeArray.
# For each NEW split variable encountered on a root-to-node path
# (subtreeFlag marks variables already seen on the path), records its
# maximal-subtree statistics in recursiveObject$subtree:
#   count, meanSum, order (depth order statistics), depth,
#   terminalDepthSum, nodesAtDepth, and (sub.order) subOrder/subOrderDiag.
# 'distance' is the current node depth; 'offset' is the nativeArray cursor.
# Returns the updated recursiveObject (state is threaded, not mutated).
rsfParseTree <- function(recursiveObject,
                         max.order,
                         sub.order,
                         nativeArray,
                         b,
                         distance,
                         subtreeFlag) {
  recursiveObject$diagnostic <- recursiveObject$diagnostic + 1
  if(b != nativeArray$treeID[recursiveObject$offset]) {
    stop("Invalid nativeArray input record (treeID) at ", recursiveObject$offset, ". Please contact Technical Support.")
  }
  # Tally this node in the per-depth node census (root depth 0 excluded).
  if (distance > 0) {
    if (distance <= length(recursiveObject$subtree$nodesAtDepth)) {
      if (is.na(recursiveObject$subtree$nodesAtDepth[distance])) {
        recursiveObject$subtree$nodesAtDepth[distance] <- 1
      }
      else {
        recursiveObject$subtree$nodesAtDepth[distance] <- recursiveObject$subtree$nodesAtDepth[distance] + 1
      }
    }
  }
  # parmID == 0 marks a terminal node.
  splitParameter <- nativeArray$parmID[recursiveObject$offset]
  terminalFlag <- (splitParameter == 0)
  if (!terminalFlag) {
    if (subtreeFlag[splitParameter] == FALSE) {
      # First occurrence of this variable on the current path: this node
      # is the root of one of its maximal subtrees.
      recursiveObject$subtree$count[splitParameter] <- recursiveObject$subtree$count[splitParameter] + 1
      if (is.na(recursiveObject$subtree$meanSum[splitParameter])) {
        recursiveObject$subtree$meanSum[splitParameter] <- distance
      }
      else {
        recursiveObject$subtree$meanSum[splitParameter] <- recursiveObject$subtree$meanSum[splitParameter] + distance
      }
      if (max.order > 0) {
        # Maintain the max.order smallest encounter depths, sorted and
        # NA-padded.  BUG FIX: the original appended 'distance' and also
        # wrote it over the sentinel NA, so when the row was already full
        # the new depth entered the order statistics twice.
        knownDepths <- recursiveObject$subtree$order[splitParameter, ]
        knownDepths <- sort(c(knownDepths[!is.na(knownDepths)], distance))
        if (length(knownDepths) < max.order) {
          knownDepths <- c(knownDepths, rep(NA, max.order - length(knownDepths)))
        }
        recursiveObject$subtree$order[splitParameter, ] <- knownDepths[1:max.order]
      }
      else {
        # max.order == 0: track only the minimal depth per variable.
        if (is.na(recursiveObject$subtree$order[splitParameter])) {
          recursiveObject$subtree$order[splitParameter] <- distance
        }
        else {
          # BUG FIX: was min(recursiveObject$order[splitParameter], distance);
          # that field does not exist (NULL), so min() degenerated to
          # 'distance' and overwrote the previously recorded minimum.
          recursiveObject$subtree$order[splitParameter] <- min(recursiveObject$subtree$order[splitParameter], distance)
        }
      }
      subtreeFlag[splitParameter] <- TRUE
      if (sub.order == TRUE) {
        if (is.na(recursiveObject$subtree$subOrderDiag[splitParameter])) {
          recursiveObject$subtree$subOrderDiag[splitParameter] <- distance
        }
        else {
          recursiveObject$subtree$subOrderDiag[splitParameter] <- min(recursiveObject$subtree$subOrderDiag[splitParameter], distance)
        }
        # Secondary walk of this maximal subtree to get the relative
        # minimal depth of every other variable within it.
        recursive2Object <- list(offset = recursiveObject$offset,
                                 depth = 0,
                                 minimumVector = rep(NA, dim(recursiveObject$subtree$subOrder)[2]),
                                 diagnostic = recursiveObject$diagnostic2)
        subtree2Flag <- rep(FALSE, dim(recursiveObject$subtree$subOrder)[2])
        subtree2Flag[splitParameter] <- TRUE
        recursive2Object <- rsfParse2Tree(recursive2Object,
                                          nativeArray,
                                          b,
                                          distance=0,
                                          subtreeFlag=subtree2Flag)
        recursiveObject$diagnostic2 <- recursiveObject$diagnostic2 + recursive2Object$diagnostic
        # Variables never seen in the subtree get the subtree's depth.
        recursive2Object$minimumVector[splitParameter] <- recursive2Object$depth
        recursive2Object$minimumVector[which(is.na(recursive2Object$minimumVector))] <- recursive2Object$depth
        recursive2Object$minimumVector <- recursive2Object$minimumVector / recursive2Object$depth
        recursiveObject$subtree$subOrder[splitParameter, ] <- pmin(recursiveObject$subtree$subOrder[splitParameter, ], recursive2Object$minimumVector)
      }
    }
  }
  recursiveObject$subtree$depth <- max(recursiveObject$subtree$depth, distance)
  recursiveObject$offset <- recursiveObject$offset + 1
  if (terminalFlag == FALSE) {
    # Internal node: recurse into the left then right child records.
    distance <- distance + 1
    recursiveObject <- rsfParseTree(recursiveObject, max.order, sub.order, nativeArray, b, distance, subtreeFlag)
    recursiveObject <- rsfParseTree(recursiveObject, max.order, sub.order, nativeArray, b, distance, subtreeFlag)
  }
  else {
    recursiveObject$subtree$terminalDepthSum <- recursiveObject$subtree$terminalDepthSum + distance
  }
  return (recursiveObject)
}
# Secondary pre-order walk of a (sub)tree in nativeArray, recording for
# each new split variable the minimum depth at which it first appears,
# relative to the node where the walk started.  Also tracks the subtree's
# overall depth and advances the record cursor past the subtree.
rsfParse2Tree <- function(recursiveObject,
                          nativeArray,
                          b,
                          distance,
                          subtreeFlag) {
  recursiveObject$diagnostic <- recursiveObject$diagnostic + 1
  # Sanity check: the cursor must still be inside tree b.
  if (b != nativeArray$treeID[recursiveObject$offset]) {
    stop("Invalid nativeArray input record (treeID) at ", recursiveObject$offset, ". Please contact Technical Support.")
  }
  split.parm <- nativeArray$parmID[recursiveObject$offset]
  is.terminal <- (split.parm == 0)  # parmID == 0 marks a terminal node
  # Record the first (minimum) encounter depth of a not-yet-seen variable.
  if (!is.terminal && subtreeFlag[split.parm] == FALSE) {
    cur.min <- recursiveObject$minimumVector[split.parm]
    recursiveObject$minimumVector[split.parm] <-
      if (is.na(cur.min)) distance else min(cur.min, distance)
    subtreeFlag[split.parm] <- TRUE
  }
  recursiveObject$depth <- max(recursiveObject$depth, distance)
  distance <- distance + 1
  recursiveObject$offset <- recursiveObject$offset + 1
  # Internal node: descend into the left then right child records.
  if (!is.terminal) {
    recursiveObject <- rsfParse2Tree(recursiveObject, nativeArray, b, distance, subtreeFlag)
    recursiveObject <- rsfParse2Tree(recursiveObject, nativeArray, b, distance, subtreeFlag)
  }
  recursiveObject
}
# Probability distribution of the minimal depth of a variable in a single
# tree: prob[d+1] = P(variable first splits at depth d), assuming one of p
# candidate variables is chosen uniformly at each node.
#
# p: number of candidate variables.
# D: maximum depth considered; prob has length D+1 and the last entry
#    absorbs the remaining mass (variable effectively unused up to depth D).
# l: optional vector of node counts by depth (l[d+1] = nodes at depth d);
#    when NULL a balanced binary tree is assumed (2^d nodes at depth d).
maxDepthProb <- function(p, D, l) {
  if (!is.null(l)) Ld <- 0
  prob <- rep(0, D+1)
  for (d in 0:(D-1)) {
    if (is.null(l)) {
      # Balanced tree: 2^d - 1 nodes strictly above depth d, 2^d at depth d.
      Ld <- 2^d-1
      ld <- 2^d
    }
    else{
      ld <- l[d+1]
      if (d > 0) Ld <- Ld + l[d]
    }
    # log(1 - 1/p) = log P(variable not selected at one node): the variable
    # must miss all Ld shallower nodes, then hit at least one of ld nodes.
    prob.d.1 <- Ld*log(1-1/p)
    prob.d.2 <- ld*(log(1-1/p))
    prob[d+1] <- exp(prob.d.1)*(1-exp(prob.d.2))
  }
  prob[D+1] = 1 - sum(prob[1:D])
  # Guard against a negative remainder caused by floating-point rounding.
  if (prob[D+1] < 0) {
    prob[D+1] <- 0
    prob <- prob/sum(prob)
  }
  prob
}
# Mean and standard deviation of the minimal-depth distribution from
# maxDepthProb(), for each candidate dimension p in pseq.
#
# pseq: vector of dimensions (number of candidate variables) to evaluate.
# D:    maximum depth; required unless l is given.
# l:    optional nodes-at-depth vector; when supplied, D = length(l).
# Returns list(mean=..., std=...), each parallel to pseq.
maxDepthStat <- function(pseq, D=NULL, l=NULL) {
  mn <- std <- rep(0, length(pseq))
  if (is.null(D) & is.null(l)) stop("set D or l")
  if (!is.null(l)) {
    D <- length(l)
  }
  # Support of the depth distribution: 0, 1, ..., D.
  D.support <- (0:D)
  for (j in 1:length(pseq)) {
    prob <- maxDepthProb(pseq[j], D=D, l=l)
    mn[j] <- sum(D.support*prob)
    # Var = E[X^2] - (E[X])^2.
    std[j] <- sqrt(sum((D.support^2)*prob) - mn[j]^2)
  }
  return(list(mean=mn, std=std))
}
# Derive a minimal-depth selection threshold from a max.subtree result `v`:
# the expected minimal depth of a variable under the null (uninformative)
# model, evaluated on the forest-averaged tree shape.
ExactThreshold <- function(v) {
  # No mean component means every tree was a stump; no threshold exists.
  if (is.null(v$mean)) return(NULL)
  # Average node count at each depth across trees, root (1 node) prepended.
  n.at.d <- round(c(1, c(na.omit(apply(v$nodesAtDepth, 1, mean, na.rm=TRUE)))))
  # Average tree depth = mean number of populated depth slots per tree.
  avg.depth <- round(mean(apply(v$nodesAtDepth, 2, function(x){sum(!is.na(x))}), na.rm=TRUE))
  # Truncate the nodes-at-depth profile to the average depth (at least 1).
  l <- n.at.d[1:max(avg.depth - 1, 1)]
  if (length(v$mean) == 1) {
    # A single predictor always splits at the root: threshold 0.
    return(0)
  }
  else {
    # p = number of predictors; threshold = expected minimal depth.
    return(maxDepthStat(length(v$mean), l=l)$mean)
  }
}
# Exported entry point: alias for the rsf-specific implementation.
max.subtree <- max.subtree.rsf
|
/R/max.subtree.R
|
no_license
|
cran/randomSurvivalForest
|
R
| false
| false
| 15,018
|
r
|
####**********************************************************************
####**********************************************************************
####
#### RANDOM SURVIVAL FOREST 3.6.4
####
#### Copyright 2013, Cleveland Clinic Foundation
####
#### This program is free software; you can redistribute it and/or
#### modify it under the terms of the GNU General Public License
#### as published by the Free Software Foundation; either version 2
#### of the License, or (at your option) any later version.
####
#### This program is distributed in the hope that it will be useful,
#### but WITHOUT ANY WARRANTY; without even the implied warranty of
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#### GNU General Public License for more details.
####
#### You should have received a copy of the GNU General Public
#### License along with this program; if not, write to the Free
#### Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
#### Boston, MA 02110-1301, USA.
####
#### Written by:
#### Hemant Ishwaran, Ph.D.
#### Director of Statistical Methodology
#### Professor, Division of Biostatistics
#### Clinical Research Building, Room 1058
#### 1120 NW 14th Street
#### University of Miami, Miami FL 33136
####
#### email: hemant.ishwaran@gmail.com
#### URL: http://web.ccs.miami.edu/~hishwaran
#### --------------------------------------------------------------
#### Udaya B. Kogalur, Ph.D.
#### Adjunct Staff
#### Dept of Quantitative Health Sciences
#### Cleveland Clinic Foundation
####
#### Kogalur & Company, Inc.
#### 5425 Nestleway Drive, Suite L1
#### Clemmons, NC 27012
####
#### email: commerce@kogalur.com
#### URL: http://www.kogalur.com
#### --------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
# Compute maximal-subtree / minimal-depth statistics for every predictor
# across a grown random survival forest.  A maximal subtree for variable v
# is the largest subtree whose root splits on v; the depth of that root
# (minimal depth) measures the variable's importance.
#
# object:    an `(rsf, grow)` object grown with forest=TRUE, or an
#            `(rsf, forest)` object.
# max.order: number of minimal-depth order statistics to average per
#            predictor; max.order == 0 instead returns the per-tree
#            first-order depths (one column per tree).
# sub.order: if TRUE, additionally compute the predictor-by-predictor
#            matrix of normalized sub-order depths (costly: one extra
#            recursion per maximal-subtree root).
# ...:       unused; retained for S3-method compatibility.
#
# Returns a list with components mean, order, count, terminal,
# nodesAtDepth, subOrder, plus `threshold` (expected minimal depth of an
# uninformative variable, from ExactThreshold()).
max.subtree.rsf <- function(object, max.order=2, sub.order=FALSE, ...) {
  if (is.null(object)) stop("Object is empty!")
  # Accept only (rsf, grow) or (rsf, forest) objects.
  if (sum(inherits(object, c("rsf", "grow"), TRUE) == c(1, 2)) != 2 &
      sum(inherits(object, c("rsf", "forest"), TRUE) == c(1, 2)) != 2)
    stop("This function only works for objects of class `(rsf, grow)' or '(rsf, forest)'")
  if (sum(inherits(object, c("rsf", "grow"), TRUE) == c(1, 2)) == 2) {
    if (is.null(object$forest))
      stop("Forest is empty! Re-run grow call with forest set to 'TRUE'.")
    # From here on, work with the embedded forest object only.
    object <- object$forest
  }
  # Flattened pre-order representation of every tree in the forest.
  nativeArray <- object$nativeArray
  if (is.null(nativeArray)) {
    stop("RSF nativeArray content is NULL. Please ensure the object is valid.")
  }
  predictorNames <- object$predictorNames
  if (is.null(predictorNames)) {
    stop("RSF predictorNames content is NULL. Please ensure the object is valid.")
  }
  if (is.null(object$predictors)) {
    stop("RSF predictors content is NULL. Please ensure the object is valid.")
  }
  max.order <- floor(max.order)
  # NOTE(review): the message says "greater than zero" but the check
  # permits max.order == 0, which is a supported mode (per-tree depths).
  if (max.order < 0) {
    stop("RSF 'max.order' requested for distance order statistic must be an integer greater than zero (0).")
  }
  # Hard cap on tracked tree depth for the nodes-at-depth bookkeeping.
  MAX.DEPTH <- 100
  numTree <- length(as.vector(unique(nativeArray$treeID)))
  numParm <- length(predictorNames)
  # Per-tree accumulator reset at the top of each iteration of the b loop.
  subtree <- vector("list", 8)
  names(subtree) <- c("count",
                      "order",
                      "meanSum",
                      "depth",
                      "terminalDepthSum",
                      "subOrder",
                      "subOrderDiag",
                      "nodesAtDepth")
  forestMeanSum <- rep(0, numParm)
  if (max.order > 0) {
    orderSum <- matrix(0, nrow=numParm, ncol=max.order)
  }
  else {
    order.tree <- matrix(NA, nrow=numParm, ncol=numTree)
  }
  # $offset walks nativeArray sequentially across all trees.
  recursiveObject <- list(offset = 1,
                          subtree = subtree,
                          diagnostic = 0,
                          diagnostic2 = 0)
  subtreeCountSum <- rep(0, numParm)
  subOrderSum <- matrix(0, nrow=numParm, ncol=numParm)
  terminalDepth <- rep(0, numTree)
  nodesAtDepthMatrix <- matrix(NA, nrow = MAX.DEPTH, ncol = numTree)
  offsetMark <- 1
  stumpCnt <- 0
  for (b in 1:numTree) {
    # Reset the per-tree accumulators.
    recursiveObject$subtree$nodesAtDepth <- rep(NA, MAX.DEPTH)
    recursiveObject$subtree$meanSum <- rep(NA, numParm)
    if (max.order > 0) {
      recursiveObject$subtree$order <- matrix(NA, nrow=numParm, ncol=max.order)
    }
    else {
      recursiveObject$subtree$order <- rep(NA, numParm)
    }
    if (sub.order ==TRUE) {
      recursiveObject$subtree$subOrder <- matrix(1.0, nrow=numParm, ncol=numParm)
      recursiveObject$subtree$subOrderDiag <- rep(NA, numParm)
    }
    recursiveObject$subtree$depth <- 0
    recursiveObject$subtree$terminalDepthSum <- 0
    recursiveObject$subtree$count <- rep(0, numParm)
    # rootParmID == 0 identifies a stump (single terminal node).
    rootParmID <- nativeArray$parmID[recursiveObject$offset]
    offsetMark <- recursiveObject$offset
    recursiveObject <- rsfParseTree(
      recursiveObject,
      max.order,
      sub.order,
      nativeArray,
      b,
      distance=0,
      subtreeFlag=rep(FALSE, numParm))
    if (rootParmID != 0) {
      # Predictors never split on are assigned the full tree depth.
      index <- which(recursiveObject$subtree$count == 0)
      recursiveObject$subtree$meanSum[index] <- recursiveObject$subtree$depth
      forestMeanSum <- forestMeanSum + recursiveObject$subtree$meanSum
      if (max.order > 0) {
        # Unfilled order-statistic slots default to the tree depth.
        index <- which(is.na(recursiveObject$subtree$order))
        recursiveObject$subtree$order[index] <- recursiveObject$subtree$depth
        orderSum <- orderSum + recursiveObject$subtree$order
      }
      else {
        index <- which(is.na(recursiveObject$subtree$order))
        recursiveObject$subtree$order[index] <- recursiveObject$subtree$depth
        order.tree[ , b] <- recursiveObject$subtree$order
      }
      # (offset - offsetMark + 1) is the tree's record count; /4 and /2
      # appear to approximate internal and terminal node counts
      # respectively -- TODO confirm these normalization heuristics.
      subtreeCountSum <- subtreeCountSum + (recursiveObject$subtree$count / ((recursiveObject$offset - offsetMark + 1) / 4))
      terminalDepth[b] <- recursiveObject$subtree$terminalDepthSum / ((recursiveObject$offset - offsetMark + 1) / 2)
      if (sub.order == TRUE) {
        # Diagonal holds each predictor's own normalized minimal depth.
        index <- which(recursiveObject$subtree$count > 0)
        diag(recursiveObject$subtree$subOrder)[index] <- recursiveObject$subtree$subOrderDiag[index]
        index <- which(recursiveObject$subtree$count == 0)
        diag(recursiveObject$subtree$subOrder)[index] <- recursiveObject$subtree$depth
        diag(recursiveObject$subtree$subOrder) <- diag(recursiveObject$subtree$subOrder) / recursiveObject$subtree$depth
        subOrderSum <- subOrderSum + recursiveObject$subtree$subOrder
      }
      nodesAtDepthMatrix[, b] <- recursiveObject$subtree$nodesAtDepth
    }
    else {
      # Stumps carry no subtree information; exclude from all averages.
      stumpCnt <- stumpCnt + 1
      nodesAtDepthMatrix[, b] <- NA
    }
  }
  nameVector <- c("mean",
                  "order",
                  "count",
                  "terminal",
                  "nodesAtDepth",
                  "subOrder")
  result <- vector("list", length(nameVector))
  names(result) <- nameVector
  # Average the accumulated sums over the non-stump trees.
  if(numTree != stumpCnt) {
    result$terminal <- terminalDepth
    result$mean <- forestMeanSum / (numTree - stumpCnt)
    names(result$mean) <- predictorNames
    if (max.order > 0) {
      result$order <- orderSum / (numTree - stumpCnt)
      rownames(result$order) <- predictorNames
    }
    else {
      result$order <- order.tree
      rownames(result$order) <- predictorNames
    }
    result$count <- subtreeCountSum / (numTree - stumpCnt)
    names(result$count) <- predictorNames
    result$nodesAtDepth <- nodesAtDepthMatrix
    if (sub.order == TRUE) {
      result$subOrder <- subOrderSum / (numTree - stumpCnt)
      rownames(result$subOrder) <- predictorNames
      colnames(result$subOrder) <- predictorNames
    }
  }
  threshold <- ExactThreshold(result)
  result <- c(result, threshold=threshold)
  return (result)
}
# Recursively parse one tree (tree `b`) of the forest from its flattened
# pre-order representation in `nativeArray`, accumulating maximal-subtree
# statistics into `recursiveObject$subtree`.
#
# recursiveObject: list threaded through the recursion, carrying
#   $offset (next nativeArray row to read), $subtree (accumulators: count,
#   order, meanSum, depth, terminalDepthSum, nodesAtDepth, and -- when
#   sub.order=TRUE -- subOrder/subOrderDiag), and the visit counters
#   $diagnostic / $diagnostic2.
# max.order:   number of order statistics tracked per predictor; when 0,
#              only the per-tree minimum depth is kept.
# sub.order:   if TRUE, launch a secondary recursion (rsfParse2Tree) at
#              each maximal-subtree root to collect relative depths.
# nativeArray: data frame with columns treeID and parmID in pre-order;
#              parmID == 0 marks a terminal node.
# b:           tree identifier, sanity-checked against treeID.
# distance:    depth of the current node (root = 0).
# subtreeFlag: per-predictor logical; TRUE once the predictor has been seen
#              on the current root-to-node path, so each maximal subtree is
#              counted exactly once.  Passed by value, so sibling subtrees
#              receive independent copies.
# Returns the updated recursiveObject.
rsfParseTree <- function(recursiveObject,
                         max.order,
                         sub.order,
                         nativeArray,
                         b,
                         distance,
                         subtreeFlag) {
  recursiveObject$diagnostic <- recursiveObject$diagnostic + 1
  if(b != nativeArray$treeID[recursiveObject$offset]) {
    stop("Invalid nativeArray input record (treeID) at ", recursiveObject$offset, ". Please contact Technical Support.")
  }
  # Count the nodes encountered at each depth (root depth 0 excluded;
  # depths beyond the nodesAtDepth buffer are silently dropped).
  if (distance > 0) {
    if (distance <= length(recursiveObject$subtree$nodesAtDepth)) {
      if (is.na(recursiveObject$subtree$nodesAtDepth[distance])) {
        recursiveObject$subtree$nodesAtDepth[distance] <- 1
      }
      else {
        recursiveObject$subtree$nodesAtDepth[distance] <- recursiveObject$subtree$nodesAtDepth[distance] + 1
      }
    }
  }
  splitParameter <- nativeArray$parmID[recursiveObject$offset]
  # parmID == 0 marks a terminal (leaf) node.
  terminalFlag <- (splitParameter == 0)
  if (!terminalFlag) {
    # First time this predictor appears on the current path: this node is
    # the root of one of its maximal subtrees.
    if (subtreeFlag[splitParameter] == FALSE) {
      recursiveObject$subtree$count[splitParameter] <- recursiveObject$subtree$count[splitParameter] + 1
      if (is.na(recursiveObject$subtree$meanSum[splitParameter])) {
        recursiveObject$subtree$meanSum[splitParameter] <- distance
      }
      else {
        recursiveObject$subtree$meanSum[splitParameter] <- recursiveObject$subtree$meanSum[splitParameter] + distance
      }
      if (max.order > 0) {
        # Maintain the max.order smallest subtree depths for the predictor:
        # append `distance` into the first NA slot, sort, and truncate.
        orderVector <- c(recursiveObject$subtree$order[splitParameter, ], distance, NA)
        index <- which.max(is.na(orderVector))
        orderVector[index] <- distance
        sortedVector <- sort(orderVector[1:index])
        if (index <= max.order) {
          orderVector <- c(sortedVector, rep(NA, max.order-index))
        }
        else {
          orderVector <- sortedVector[1:max.order]
        }
        recursiveObject$subtree$order[splitParameter, ] <- orderVector
      }
      else {
        # max.order == 0: track only the minimum subtree depth per tree.
        if (is.na(recursiveObject$subtree$order[splitParameter])) {
          recursiveObject$subtree$order[splitParameter] <- distance
        }
        else {
          # BUG FIX: the original read `recursiveObject$order[...]`, a
          # field that does not exist ($order lives under $subtree), so
          # min(NULL, distance) degenerated to `distance` and later,
          # deeper occurrences overwrote the stored minimum.
          recursiveObject$subtree$order[splitParameter] <- min(recursiveObject$subtree$order[splitParameter], distance)
        }
      }
      subtreeFlag[splitParameter] <- TRUE
      if (sub.order == TRUE) {
        # Record the predictor's own minimal depth on the diagonal...
        if (is.na(recursiveObject$subtree$subOrderDiag[splitParameter])) {
          recursiveObject$subtree$subOrderDiag[splitParameter] <- distance
        }
        else {
          recursiveObject$subtree$subOrderDiag[splitParameter] <- min(recursiveObject$subtree$subOrderDiag[splitParameter], distance)
        }
        # ...then re-walk this maximal subtree to find every other
        # predictor's minimum relative depth within it.
        recursive2Object <- list(offset = recursiveObject$offset,
                                 depth = 0,
                                 minimumVector = rep(NA, dim(recursiveObject$subtree$subOrder)[2]),
                                 diagnostic = recursiveObject$diagnostic2)
        subtree2Flag <- rep(FALSE, dim(recursiveObject$subtree$subOrder)[2])
        subtree2Flag[splitParameter] <- TRUE
        recursive2Object <- rsfParse2Tree(recursive2Object,
                                          nativeArray,
                                          b,
                                          distance=0,
                                          subtreeFlag=subtree2Flag)
        recursiveObject$diagnostic2 <- recursiveObject$diagnostic2 + recursive2Object$diagnostic
        # Normalize unseen predictors (and the root predictor) to the
        # subtree depth, then scale everything to [0, 1].
        recursive2Object$minimumVector[splitParameter] <- recursive2Object$depth
        recursive2Object$minimumVector[which(is.na(recursive2Object$minimumVector))] <- recursive2Object$depth
        recursive2Object$minimumVector <- recursive2Object$minimumVector / recursive2Object$depth
        recursiveObject$subtree$subOrder[splitParameter, ] <- pmin(recursiveObject$subtree$subOrder[splitParameter, ], recursive2Object$minimumVector)
      }
    }
  }
  recursiveObject$subtree$depth <- max(recursiveObject$subtree$depth, distance)
  recursiveObject$offset <- recursiveObject$offset + 1
  if (terminalFlag == FALSE) {
    # Pre-order layout: the two calls consume the left then the right
    # child subtree, each advancing $offset past its own records.
    distance <- distance + 1
    recursiveObject <- rsfParseTree(recursiveObject, max.order, sub.order, nativeArray, b, distance, subtreeFlag)
    recursiveObject <- rsfParseTree(recursiveObject, max.order, sub.order, nativeArray, b, distance, subtreeFlag)
  }
  else {
    recursiveObject$subtree$terminalDepthSum <- recursiveObject$subtree$terminalDepthSum + distance
  }
  return (recursiveObject)
}
# Secondary recursion used when sub.order = TRUE: walks the subtree rooted
# at the current nativeArray offset (pre-order layout) and records, per
# predictor, the minimum relative depth at which that predictor splits
# inside the subtree.
#
# recursiveObject carries $offset (next pre-order record), $depth (maximum
# relative depth seen), $minimumVector (per-predictor minimum relative
# split depth, NA if unseen) and $diagnostic (node counter).  subtreeFlag
# is passed by value, so sibling subtrees get independent copies.
# Returns the updated recursiveObject.
rsfParse2Tree <- function(recursiveObject,
                          nativeArray,
                          b,
                          distance,
                          subtreeFlag) {
  node <- recursiveObject$offset
  recursiveObject$diagnostic <- recursiveObject$diagnostic + 1
  if (nativeArray$treeID[node] != b) {
    stop("Invalid nativeArray input record (treeID) at ", node, ". Please contact Technical Support.")
  }
  splitParameter <- nativeArray$parmID[node]
  # parmID == 0 marks a terminal (leaf) node.
  terminalFlag <- (splitParameter == 0)
  if (!terminalFlag && !subtreeFlag[splitParameter]) {
    # First occurrence of this predictor on the current path: keep the
    # shallower of the stored and current relative depths.
    current <- recursiveObject$minimumVector[splitParameter]
    recursiveObject$minimumVector[splitParameter] <-
      if (is.na(current)) distance else min(current, distance)
    subtreeFlag[splitParameter] <- TRUE
  }
  recursiveObject$depth <- max(recursiveObject$depth, distance)
  recursiveObject$offset <- node + 1
  if (!terminalFlag) {
    # Pre-order layout: consume the left child's records, then the
    # right child's; each call advances $offset past its own subtree.
    for (child in 1:2) {
      recursiveObject <- rsfParse2Tree(recursiveObject, nativeArray, b,
                                       distance + 1, subtreeFlag)
    }
  }
  return (recursiveObject)
}
# Distribution of the minimal (first-order) split depth of a variable in a
# random tree: prob[d+1] is the probability that a variable, chosen
# uniformly among p candidates at each node, first splits at depth d.
# With l NULL a balanced binary tree is assumed (2^d nodes at depth d);
# otherwise l[d+1] gives the node count at depth d.  The final element
# absorbs the remaining mass ("unused through depth D"), clamped at zero
# and renormalized if floating-point rounding drives it negative.
maxDepthProb <- function(p, D, l) {
  log.q <- log(1 - 1/p)        # log P(variable not chosen at one node)
  balanced <- is.null(l)
  prob <- rep(0, D + 1)
  nodes.above <- 0             # cumulative node count at depths < d
  for (d in 0:(D - 1)) {
    if (balanced) {
      nodes.above <- 2^d - 1
      nodes.here <- 2^d
    }
    else {
      nodes.here <- l[d + 1]
      if (d > 0) {
        nodes.above <- nodes.above + l[d]
      }
    }
    # P(missed every shallower node) * P(hit at least once at depth d).
    prob[d + 1] <- exp(nodes.above * log.q) * (1 - exp(nodes.here * log.q))
  }
  prob[D + 1] <- 1 - sum(prob[1:D])
  if (prob[D + 1] < 0) {
    prob[D + 1] <- 0
    prob <- prob / sum(prob)
  }
  prob
}
# Mean and standard deviation of the minimal-depth distribution
# (see maxDepthProb) for each candidate dimension p in pseq.
#
# pseq: vector of dimensions (number of candidate variables) to evaluate.
# D:    maximum depth of the tree; ignored when `l` is supplied.
# l:    optional nodes-at-depth vector; when given, D = length(l).
# Returns list(mean=..., std=...), each aligned with pseq.
maxDepthStat <- function(pseq, D=NULL, l=NULL) {
  mn <- std <- rep(0, length(pseq))
  # `&&` (scalar short-circuit) rather than `&`: both operands are
  # length-one logicals here.
  if (is.null(D) && is.null(l)) stop("set D or l")
  if (!is.null(l)) {
    D <- length(l)
  }
  # Support of the depth distribution: 0, 1, ..., D.
  D.support <- (0:D)
  # seq_along() instead of 1:length(pseq): an empty pseq now yields an
  # empty result rather than indexing with c(1, 0).
  for (j in seq_along(pseq)) {
    prob <- maxDepthProb(pseq[j], D=D, l=l)
    mn[j] <- sum(D.support*prob)
    # Var = E[X^2] - (E[X])^2.
    std[j] <- sqrt(sum((D.support^2)*prob) - mn[j]^2)
  }
  return(list(mean=mn, std=std))
}
# Derive a minimal-depth selection threshold from a max.subtree result `v`:
# the expected minimal depth of a variable under the null (uninformative)
# model, evaluated on the forest-averaged tree shape.
ExactThreshold <- function(v) {
  # No mean component means every tree was a stump; no threshold exists.
  if (is.null(v$mean)) return(NULL)
  # Average node count at each depth across trees, root (1 node) prepended.
  n.at.d <- round(c(1, c(na.omit(apply(v$nodesAtDepth, 1, mean, na.rm=TRUE)))))
  # Average tree depth = mean number of populated depth slots per tree.
  avg.depth <- round(mean(apply(v$nodesAtDepth, 2, function(x){sum(!is.na(x))}), na.rm=TRUE))
  # Truncate the nodes-at-depth profile to the average depth (at least 1).
  l <- n.at.d[1:max(avg.depth - 1, 1)]
  if (length(v$mean) == 1) {
    # A single predictor always splits at the root: threshold 0.
    return(0)
  }
  else {
    # p = number of predictors; threshold = expected minimal depth.
    return(maxDepthStat(length(v$mean), l=l)$mean)
  }
}
# Exported entry point: alias for the rsf-specific implementation.
max.subtree <- max.subtree.rsf
|
# <<- operator can be used to assign a value to an object in an environment
# that is different from the current environment
## The first function, makeVector creates a special "vector"
## which is really a list containing a function to
## set the value of the vector
## get the value of the vector
## set the value of the mean
## get the value of the mean
## Constructor for a "caching vector": returns a list of four closures that
## all share the enclosing environment, so the data `x` and the cached
## mean `m` persist between calls (see cachemean below).
makeVector <- function(x = numeric()) { # exposes 4 functions: set/get/setmean/getmean
  m <- NULL
  set <- function(y) { # replace the data; invalidates the cached mean
    x <<- y # `<<-` assigns into the enclosing makeVector environment
    m <<- NULL
  }
  get <- function() x # return the current data vector
  setmean <- function(mean) m <<- mean # store a computed mean in the cache
  getmean <- function() m # return the cached mean (NULL if not yet computed)
  list(set = set, get = get,
       setmean = setmean,
       getmean = getmean)
}
## The following function calculates the mean of the special "vector"
## created with the above function.
## However, it first checks to see if the mean has already been calculated
## If so, it gets the mean from the cache and skips the computation
## Otherwise, it calculates the mean of the data
## and sets the value of the mean in the cache via the setmean function.
## Compute the mean of a caching vector built by makeVector(), reusing the
## cached value when one exists.  Extra arguments (e.g. na.rm) are
## forwarded to mean().
cachemean <- function(x, ...) {
  m <- x$getmean() # Calls the getmean() function on the input object.
  if(!is.null(m)) {
    # Cache hit: announce and return without recomputing.
    message("getting cached data")
    return(m)
  }
  data <- x$get() # get the original vector from makeVector()
  m <- mean(data, ...)
  x$setmean(m) # store m in the cache for subsequent calls
  m
}
##### How it works
aVector <- makeVector(1:10)
aVector$get() # retrieve the value of x
aVector$getmean() # retrieve the value of m, which should be NULL
aVector$set(30:50) # reset value with a new vector
cachemean(aVector) # notice mean calculated is mean of 30:50, not 1:10
aVector$getmean() # retrieve it directly, now that it has been cached
|
/Cache Vector Mean.R
|
no_license
|
difu1994/Week-3
|
R
| false
| false
| 2,107
|
r
|
# <<- operator can be used to assign a value to an object in an environment
# that is different from the current environment
## The first function, makeVector creates a special "vector"
## which is really a list containing a function to
## set the value of the vector
## get the value of the vector
## set the value of the mean
## get the value of the mean
## Build a "caching vector": a list of four closures sharing one enclosing
## environment, so the data `x` and its cached mean `m` persist between
## calls.  Exposes set/get/setmean/getmean.
makeVector <- function(x = numeric()) {
  m <- NULL
  set <- function(y) {
    ## Replacing the data invalidates any previously cached mean.
    x <<- y
    m <<- NULL
  }
  get <- function() {
    x
  }
  setmean <- function(mean) {
    m <<- mean
  }
  getmean <- function() {
    m
  }
  list(set = set,
       get = get,
       setmean = setmean,
       getmean = getmean)
}
## The following function calculates the mean of the special "vector"
## created with the above function.
## However, it first checks to see if the mean has already been calculated
## If so, it gets the mean from the cache and skips the computation
## Otherwise, it calculates the mean of the data
## and sets the value of the mean in the cache via the setmean function.
## Mean of a caching vector produced by makeVector().  If a cached value
## exists it is returned directly (with a message); otherwise the mean is
## computed, stored via setmean(), and returned.  Extra arguments are
## forwarded to mean().
cachemean <- function(x, ...) {
  cached <- x$getmean()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- mean(x$get(), ...)
  x$setmean(result)
  result
}
##### How it works
aVector <- makeVector(1:10)
aVector$get() # retrieve the value of x
aVector$getmean() # retrieve the value of m, which should be NULL
aVector$set(30:50) # reset value with a new vector
cachemean(aVector) # notice mean calculated is mean of 30:50, not 1:10
aVector$getmean() # retrieve it directly, now that it has been cached
|
# Shiny server-style function (apparently extracted from a server.R):
# combines a state average-temperature ranking with a well-being ranking
# into a single score table.
# NOTE(review): nothing is ever assigned to `output`, so no reactive
# outputs are defined, and the final assignment is returned invisibly --
# this looks like incomplete app code; confirm against the full app.
function(input, output) {
  library(dplyr)
  # State average temperatures, sorted warmest-first.
  temperatures_by_state <- read.csv("Data/temperature.txt", header = TRUE, stringsAsFactors = FALSE)
  temperatures_by_state <- temperatures_by_state[order(-temperatures_by_state$Avg..F),]
  # create a vector of 50 scores in descending order
  # NOTE(review): seq(1:50) is just 1:50; and order(-a1) is applied to a
  # one-column data frame -- order() expects atomic vectors, so this is
  # fragile; confirm it runs on the target R version.
  a1 <- seq(1:50)
  a1 <- as.data.frame(a1)
  a1 <- a1[order(-a1),]
  # bind the rank level score (warmest state gets the highest score)
  temperature_rank <- cbind(temperatures_by_state, a1)
  colnames(temperature_rank)[3] <- "Temp_Score"
  rm(temperatures_by_state)
  # Add State codes (two-letter abbreviations) for mapping.
  states_codes <- read.csv("Data/states.txt", stringsAsFactors = FALSE, header = TRUE)
  temperature_rank <- left_join(temperature_rank, states_codes,"State" )
  rm(states_codes)
  # healthcare
  # well being ranking for older americans
  # http://www.bankrate.com/finance/retirement/best-places-retire-how-state-ranks.aspx
  well_being <- read.csv("Data/well_being_rank.txt", header = TRUE, stringsAsFactors = FALSE)
  colnames(well_being)[1] <- "Well_Being_Rank"
  well_being <- well_being[with (well_being, order(-Well_Being_Rank)),]
  # a1 is now a plain vector; re-sort ascending so the best-ranked state
  # receives the highest score when bound below.
  a1 <- a1[order(a1)]
  wb_rank_score <- cbind(well_being,a1)
  colnames(wb_rank_score)[3] <- "wb_r_score"
  rm(well_being)
  # join tables together and form the combined score.
  tbl_join <- inner_join(temperature_rank, wb_rank_score, 'State')
  tbl_join <- select(tbl_join, State, code, Temp_Score, wb_r_score)
  tbl_join$total_score <- tbl_join$Temp_Score + tbl_join$wb_r_score
  rm(temperature_rank)
  rm(wb_rank_score)
  # prepare data for the map (hover text used by plotly-style choropleths).
  colnames(tbl_join)[3] <- "Temperature_Ranking"
  colnames(tbl_join)[4] <- "Well_Being_Ranking"
  rm(a1)
  tbl_join$hover <- with(tbl_join, paste(State, '<br>', "Score", total_score))
  make_table <- select(tbl_join, State, Temperature_Ranking, Well_Being_Ranking, total_score)
}
|
/State_Scores/helpers.R
|
permissive
|
bthomas-ds/developing-data-products
|
R
| false
| false
| 1,704
|
r
|
function(input, output) {
library(dplyr)
temperatures_by_state <- read.csv("Data/temperature.txt", header = TRUE, stringsAsFactors = FALSE)
temperatures_by_state <- temperatures_by_state[order(-temperatures_by_state$Avg..F),]
# create a vector of 50 in descending order
a1 <- seq(1:50)
a1 <- as.data.frame(a1)
a1 <- a1[order(-a1),]
# bind the rank level score
temperature_rank <- cbind(temperatures_by_state, a1)
colnames(temperature_rank)[3] <- "Temp_Score"
rm(temperatures_by_state)
# Add State codes
states_codes <- read.csv("Data/states.txt", stringsAsFactors = FALSE, header = TRUE)
temperature_rank <- left_join(temperature_rank, states_codes,"State" )
rm(states_codes)
# healthcare
# well being ranking for older americans
# http://www.bankrate.com/finance/retirement/best-places-retire-how-state-ranks.aspx
well_being <- read.csv("Data/well_being_rank.txt", header = TRUE, stringsAsFactors = FALSE)
colnames(well_being)[1] <- "Well_Being_Rank"
well_being <- well_being[with (well_being, order(-Well_Being_Rank)),]
a1 <- a1[order(a1)]
wb_rank_score <- cbind(well_being,a1)
colnames(wb_rank_score)[3] <- "wb_r_score"
rm(well_being)
# join tables together
tbl_join <- inner_join(temperature_rank, wb_rank_score, 'State')
tbl_join <- select(tbl_join, State, code, Temp_Score, wb_r_score)
tbl_join$total_score <- tbl_join$Temp_Score + tbl_join$wb_r_score
rm(temperature_rank)
rm(wb_rank_score)
# prepare data for the map
colnames(tbl_join)[3] <- "Temperature_Ranking"
colnames(tbl_join)[4] <- "Well_Being_Ranking"
rm(a1)
tbl_join$hover <- with(tbl_join, paste(State, '<br>', "Score", total_score))
make_table <- select(tbl_join, State, Temperature_Ranking, Well_Being_Ranking, total_score)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_pfts.R
\name{query_pfts}
\alias{query_pfts}
\title{Retrieve PFT ID, name, and type from BETY}
\usage{
query_pfts(dbcon, pft_names, modeltype = NULL, strict = FALSE)
}
\arguments{
\item{dbcon}{Database connection object}
\item{pft_names}{character vector of PFT names}
\item{modeltype}{character.
If specified, only returns PFTs matching this modeltype.
If NULL, considers all modeltypes.}

\item{strict}{logical. Appears in the usage signature but was previously
undocumented; controls strictness of PFT name matching (confirm exact
semantics against \code{R/query_pfts.R}).}
}
\value{
`data.frame` containing PFT ID (`id`), type (`pft_type`),
and name (`name`).
}
\description{
Retrieve PFT ID, name, and type from BETY
}
\author{
Alexey Shiklomanov, Chris Black
}
|
/base/db/man/query_pfts.Rd
|
permissive
|
ashiklom/pecan
|
R
| false
| true
| 667
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_pfts.R
\name{query_pfts}
\alias{query_pfts}
\title{Retrieve PFT ID, name, and type from BETY}
\usage{
query_pfts(dbcon, pft_names, modeltype = NULL, strict = FALSE)
}
\arguments{
\item{dbcon}{Database connection object}
\item{pft_names}{character vector of PFT names}
\item{modeltype}{character.
If specified, only returns PFTs matching this modeltype.
If NULL, considers all modeltypes.}
}
\value{
`data.frame` containing PFT ID (`id`), type (`pft_type`),
and name (`name`).
}
\description{
Retrieve PFT ID, name, and type from BETY
}
\author{
Alexey Shiklomanov, Chris Black
}
|
# Match LEMO work-order records to CHP collision records by route, date,
# and odometer range (within a +/- 0.25 mile tolerance).
# NOTE(review): setwd() to a network share makes this script
# machine-specific; prefer project-relative paths.
setwd("//ahmct-065/teams/PMRF/Amir")
library(data.table)
library(lubridate)
library(anytime)
library(purrr)
# Work orders with left/right odometer bounds; attach a sequential row ID.
LEMO_WorkOrder.df=fread(file="./bin/Final Datasets/LEMO_WorkOrder+odom.csv", sep=",", header=TRUE)
LEMO_WorkOrder.df=cbind(LEMO_WorkOrder.df, ID=seq.int(nrow(LEMO_WorkOrder.df)))
tempLEMO.df=LEMO_WorkOrder.df[,c("Dist", "rID", "Workdate", "from.odom.R", "to.odom.R", "from.odom.L", "to.odom.L", "ID")]
tempLEMO.df[tempLEMO.df==""]=NA
# CHP collisions; drop years 2011-2012 (outside the study window --
# TODO confirm the intended date range).
CHP.df=fread(file="./bin/Final Datasets/CHP.csv", sep=",", header=TRUE)
CHP.df=CHP.df[-c(which(CHP.df$ACCIDENT_YEAR==2011 | CHP.df$ACCIDENT_YEAR==2012)),]
# Backfill missing STATE_ROUTE from the first digit run in the primary
# road name...
routes=regmatches(CHP.df$PRIMARY_RD[which(is.na(CHP.df$STATE_ROUTE))],
                  gregexpr("[[:digit:]]+",
                           CHP.df$PRIMARY_RD[which(is.na(CHP.df$STATE_ROUTE))]
                           )
                  )
routes=map(routes, 1)
routes[sapply(routes, is.null)]=NA
routes=unlist(routes)
CHP.df$STATE_ROUTE[which(is.na(CHP.df$STATE_ROUTE))]=routes
# ...then, for records still missing, from the secondary road name.
routes=regmatches(CHP.df$SECONDARY_RD[which(is.na(CHP.df$STATE_ROUTE))],
                  gregexpr("[[:digit:]]+",
                           CHP.df$SECONDARY_RD[which(is.na(CHP.df$STATE_ROUTE))]
                           )
                  )
routes=map(routes, 1)
routes[sapply(routes, is.null)]=NA
routes=unlist(routes)
CHP.df$STATE_ROUTE[which(is.na(CHP.df$STATE_ROUTE))]=routes
# Keep only collisions with a usable odometer and route.
tempCHP=CHP.df[,c("CASE_ID", "STATE_ROUTE", "COLLISION_DATE", "Odometer", "SIDE_OF_HWY")]
tempCHP[tempCHP==""]=NA
tempCHP=tempCHP[which(!is.na(CHP.df$Odometer) & !is.na(CHP.df$STATE_ROUTE)),]
tempCHP$COLLISION_DATE=anydate(tempCHP$COLLISION_DATE)
# Empty accumulator with the same columns plus the work-order ID.
collision_match_ID.df=tempCHP[FALSE,]
collision_match_ID.df=cbind.data.frame("ID"=integer(), collision_match_ID.df)
pm.tol=0.25  # postmile tolerance (miles) around the work-order extent
# NOTE(review): the loop starts at 1500001 -- presumably resuming a prior
# partial run; change to 1 for a full run.  Also, growing the accumulator
# with rbind() inside the loop is O(n^2); consider collecting matches in a
# list and rbindlist()-ing once at the end.
for (i in 1500001:dim(tempLEMO.df)[1]){
  #filter.chp=tempCHP[which(tempCHP$CALTRANS_DISTRICT==tempLEMO.df$Dist[i]),]
  # Same route, same date, and odometer within the work extent (with
  # tolerance) on the matching side of the highway (NA side matches both).
  filter.chp=tempCHP[which(tempCHP$STATE_ROUTE==tempLEMO.df$rID[i]),]
  filter.chp=filter.chp[which(filter.chp$COLLISION_DATE==tempLEMO.df$Workdate[i]),]
  filter.chp=filter.chp[which((filter.chp$Odometer>=tempLEMO.df$from.odom.R[i]-pm.tol &
                               filter.chp$Odometer<=tempLEMO.df$to.odom.R[i]+pm.tol &
                               (filter.chp$SIDE_OF_HWY=="R" | is.na(filter.chp$SIDE_OF_HWY))) |
                              (filter.chp$Odometer>=tempLEMO.df$from.odom.L[i]-pm.tol &
                               filter.chp$Odometer<=tempLEMO.df$to.odom.L[i]+pm.tol &
                               (filter.chp$SIDE_OF_HWY=="L" | is.na(filter.chp$SIDE_OF_HWY)))),]
  if (dim(filter.chp)[1]!=0){
    collision_match_ID.df=rbind(collision_match_ID.df, cbind.data.frame("ID"=tempLEMO.df$ID[i], filter.chp), use.names=TRUE)
  }
  # Periodic checkpoint: overwrite the output file every 100k rows.
  if (i%%100000==0){
    print(i)
    fwrite(collision_match_ID.df, file="./bin/LEMO_ID.matches.CHP.csv", sep=",", append=FALSE)
  }
}
fwrite(collision_match_ID.df, file="./bin/LEMO_ID.matches.CHP.csv", sep=",", append=FALSE)
|
/MATCH(SWITRS, WorkOrderNo_Activity_Workdate).R
|
no_license
|
AmirAli-N/PMRF-DataAnalysis
|
R
| false
| false
| 2,984
|
r
|
setwd("//ahmct-065/teams/PMRF/Amir")
library(data.table)
library(lubridate)
library(anytime)
library(purrr)
LEMO_WorkOrder.df=fread(file="./bin/Final Datasets/LEMO_WorkOrder+odom.csv", sep=",", header=TRUE)
LEMO_WorkOrder.df=cbind(LEMO_WorkOrder.df, ID=seq.int(nrow(LEMO_WorkOrder.df)))
tempLEMO.df=LEMO_WorkOrder.df[,c("Dist", "rID", "Workdate", "from.odom.R", "to.odom.R", "from.odom.L", "to.odom.L", "ID")]
tempLEMO.df[tempLEMO.df==""]=NA
CHP.df=fread(file="./bin/Final Datasets/CHP.csv", sep=",", header=TRUE)
CHP.df=CHP.df[-c(which(CHP.df$ACCIDENT_YEAR==2011 | CHP.df$ACCIDENT_YEAR==2012)),]
routes=regmatches(CHP.df$PRIMARY_RD[which(is.na(CHP.df$STATE_ROUTE))],
gregexpr("[[:digit:]]+",
CHP.df$PRIMARY_RD[which(is.na(CHP.df$STATE_ROUTE))]
)
)
routes=map(routes, 1)
routes[sapply(routes, is.null)]=NA
routes=unlist(routes)
CHP.df$STATE_ROUTE[which(is.na(CHP.df$STATE_ROUTE))]=routes
routes=regmatches(CHP.df$SECONDARY_RD[which(is.na(CHP.df$STATE_ROUTE))],
gregexpr("[[:digit:]]+",
CHP.df$SECONDARY_RD[which(is.na(CHP.df$STATE_ROUTE))]
)
)
routes=map(routes, 1)
routes[sapply(routes, is.null)]=NA
routes=unlist(routes)
CHP.df$STATE_ROUTE[which(is.na(CHP.df$STATE_ROUTE))]=routes
tempCHP=CHP.df[,c("CASE_ID", "STATE_ROUTE", "COLLISION_DATE", "Odometer", "SIDE_OF_HWY")]
tempCHP[tempCHP==""]=NA
tempCHP=tempCHP[which(!is.na(CHP.df$Odometer) & !is.na(CHP.df$STATE_ROUTE)),]
tempCHP$COLLISION_DATE=anydate(tempCHP$COLLISION_DATE)
collision_match_ID.df=tempCHP[FALSE,]
collision_match_ID.df=cbind.data.frame("ID"=integer(), collision_match_ID.df)
pm.tol=0.25
for (i in 1500001:dim(tempLEMO.df)[1]){
#filter.chp=tempCHP[which(tempCHP$CALTRANS_DISTRICT==tempLEMO.df$Dist[i]),]
filter.chp=tempCHP[which(tempCHP$STATE_ROUTE==tempLEMO.df$rID[i]),]
filter.chp=filter.chp[which(filter.chp$COLLISION_DATE==tempLEMO.df$Workdate[i]),]
filter.chp=filter.chp[which((filter.chp$Odometer>=tempLEMO.df$from.odom.R[i]-pm.tol &
filter.chp$Odometer<=tempLEMO.df$to.odom.R[i]+pm.tol &
(filter.chp$SIDE_OF_HWY=="R" | is.na(filter.chp$SIDE_OF_HWY))) |
(filter.chp$Odometer>=tempLEMO.df$from.odom.L[i]-pm.tol &
filter.chp$Odometer<=tempLEMO.df$to.odom.L[i]+pm.tol &
(filter.chp$SIDE_OF_HWY=="L" | is.na(filter.chp$SIDE_OF_HWY)))),]
if (dim(filter.chp)[1]!=0){
collision_match_ID.df=rbind(collision_match_ID.df, cbind.data.frame("ID"=tempLEMO.df$ID[i], filter.chp), use.names=TRUE)
}
if (i%%100000==0){
print(i)
fwrite(collision_match_ID.df, file="./bin/LEMO_ID.matches.CHP.csv", sep=",", append=FALSE)
}
}
fwrite(collision_match_ID.df, file="./bin/LEMO_ID.matches.CHP.csv", sep=",", append=FALSE)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Old Faithful Geyser Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("ano",
"Number of bins:",
min = min(influx$ano),
max = max(influx$ano),
value = c(2005,2018))
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
)
# Define server logic required to draw a histogram
# Define server logic for the influx-by-year bar chart.
server <- function(input, output) {
  output$distPlot <- renderPlot({
    # BUG FIX: the UI slider is a range slider (value = c(2005, 2018)), so
    # input$ano is a length-2 vector.  The original `filter(ano ==
    # input$ano)` compared elementwise with recycling and silently dropped
    # rows; filter on the selected range instead.
    influx <- influx %>%
      filter(ano >= input$ano[1], ano <= input$ano[2])
    ggplot(influx, aes(y = influx$influx_reais, x = ano)) +
      geom_col(position = "dodge", colour = "black") +
      scale_fill_brewer(palette = "Pastel1")+
      guides(fill = FALSE)+
      theme(axis.text.y = element_text(angle = 00, hjust = 1, vjust = 1, size = 7))+
      coord_flip()
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
andreferraribr/sankey
|
R
| false
| false
| 1,424
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Old Faithful Geyser Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("ano",
"Number of bins:",
min = min(influx$ano),
max = max(influx$ano),
value = c(2005,2018))
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
)
# Define server logic required to draw a histogram
# Define server logic for the influx-by-year bar chart.
server <- function(input, output) {
  output$distPlot <- renderPlot({
    # BUG FIX: the UI slider is a range slider (value = c(2005, 2018)), so
    # input$ano is a length-2 vector.  The original `filter(ano ==
    # input$ano)` compared elementwise with recycling and silently dropped
    # rows; filter on the selected range instead.
    influx <- influx %>%
      filter(ano >= input$ano[1], ano <= input$ano[2])
    ggplot(influx, aes(y = influx$influx_reais, x = ano)) +
      geom_col(position = "dodge", colour = "black") +
      scale_fill_brewer(palette = "Pastel1")+
      guides(fill = FALSE)+
      theme(axis.text.y = element_text(angle = 00, hjust = 1, vjust = 1, size = 7))+
      coord_flip()
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Build a combined 15-year vs 30-year dataset and plot Principal by Month,
# grouped/colored by loan term.
# FIXED: the original was a console paste -- two lines began with the `+`
# continuation prompt (invalid syntax in a script), and `blah3` was used
# before it was created.  Data preparation now precedes the plot.
blah$group <- "15 year"
blah2$group <- "30 year"
blah3 <- rbind(blah, blah2)
ggplot(blah3, aes(x = Month, y = Principal, group = group, col = group, fill = group)) +
  geom_point() +
  geom_smooth(size = 1)
|
/graphing notes.R
|
no_license
|
Noah-Hughes/DataProducts
|
R
| false
| false
| 207
|
r
|
ggplot(blah3, aes(x=Month, y=Principal, group=group, col=group, fill=group)) +
+ geom_point() +
+ geom_smooth(size=1)
blah$group <- "15 year"
blah2$group <- "30 year"
blah3 <- rbind(blah,blah2)
|
# Prepare Sheffield bird-record data and EOL image metadata for the Bird
# Bingo shiny app.
# NOTE(review): rm(list=ls()) at the top of a script is an anti-pattern
# (wipes the user's session); prefer a fresh R session instead.
rm(list=ls())
library(shiny); library(Reol); library(xml2); library(rnbn); library(stringr)
# Quick fix for Reol bug when matching EOL entries for instances where search
# returns more than one result for the same EOL ID
insertSource("./NBN_hack_series/BirdBingo/MatchTaxatoEOLID.R", package = "Reol", functions = "MatchTaxatoEOLID")
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a config file excluded from version control.
nbnLogin(username = "drcrc", password = "Turdusmerula")
load("./NBN_hack_series/BirdBingo/shiny_app/shef_data.rdata")
# Restrict the Sheffield records to bird taxa.
birdTVKs <- getGroupSpeciesTVKs("bird")
shef_data <- shef_data[shef_data$pTaxonVersionKey %in% birdTVKs,]
save(shef_data, file = "./NBN_hack_series/BirdBingo/ShefBirdData.Rdata")
load("./NBN_hack_series/BirdBingo/ShefBirdData.Rdata")
# Record counts per species, most-recorded first.
spp <- table(shef_data$pTaxonName)
spp <- spp[order(spp, decreasing = T)]
spp.names <- names(spp)[1:40] # Pick number of species to include
# FIXME: drop species if name contains < 2 words, as some entries are genus only
sppdat <- MatchTaxatoEOLID(spp.names, exact = T)
sppdat <- sppdat[!is.na(sppdat$eolPageNumbers),]
sppdat$N_records <- spp[match(sppdat$ListOfTaxa, names(spp))] # Attach number of records (i.e. commonness)
#spp.list <- as.numeric(sample(sppdat$eolPageNumbers, size = length(sppdat$eolPageNumbers), prob = spp, replace=F))
#chosen.names <- sppdat$ListOfTaxa[match(spp.list, sppdat$eolPageNumbers)]
# Fetch the raw EOL page XML for each species.
myEOL <- DownloadEOLpages(sppdat$eolPageNumbers, to.file = FALSE)
#DataObjectOverview(myEOL)
#PageProcessing(data1[1])
# For each species page, extract JPEG data objects with their media URLs
# and license/attribution metadata.
url.list <- as.list(rep(NA, length(myEOL)))
for (i in 1:length(myEOL)) {
  myxml <- read_xml(myEOL[[i]])
  nsData <- xml_ns(myxml)
  # Find data objects with appropriate mime type (image/jpeg)
  dataObjs <- xml_find_all(myxml, ".//d1:dataObject", ns=nsData)
  mimeTypes <- xml_text(xml_find_all(dataObjs, ".//d1:mimeType", ns=nsData))
  imageObjs <- dataObjs[mimeTypes=="image/jpeg"]
  # Get media URL and license info
  licenseUrl <- sourceUrl <- rightsHolder <- mediaUrl <- list()
  for (j in seq_along(imageObjs)) {
    ## Convert to R list object for convenience
    imgObj <- as_list(imageObjs[[j]])
    licenseUrl[[j]] <- unlist(imgObj$license)
    sourceUrl[[j]] <- unlist(imgObj$source)
    rightsHolder[[j]] <- ifelse(is.null(imgObj$rightsHolder), NA, unlist(imgObj$rightsHolder))
    # There are two mediaURL entries (original image and EOL copy), this only gets the first:
    #mediaUrl[[j]] <- unlist(imgObj$mediaURL)
    # Use this to get both URLs:
    mediaUrl[[j]] <- xml_text(xml_find_all(imageObjs[[j]], ".//d1:mediaURL", ns=nsData))
  }
  url.list[[i]] <- list(mediaUrl = unlist(mediaUrl),
                        licenseUrl = unlist(licenseUrl),
                        sourceUrl = unlist(sourceUrl),
                        rightsHolder = unlist(rightsHolder))
}
save(sppdat, spp, url.list, file = "./NBN_hack_series/BirdBingo/AppData.Rdata")
load(file = "./NBN_hack_series/BirdBingo/AppData.Rdata")
# # # UI # # #
# Layout constants: image size, bingo grid dimension, bootstrap column width.
img.height <- "150px"
img.width <- "150px"
grid.size <- 3
col.width <- 3
# Build a grid.size x grid.size grid of bingo squares. Each square carries a
# species title, a clickable image (click events arrive as input
# image_click<i>.<j>), an overlay slot shown once clicked, and a photo credit.
ui <- fluidPage(
  includeCSS("./NBN_hack_series/BirdBingo/shiny_app/styles.css"),
  # Application title
  titlePanel("Bird Bingo!"),
  hr(),
  # Banner row: "BINGO!" message rendered by the server once a line is complete.
  fluidRow(
    column(3*col.width,
           uiOutput("grid_check_info")
    )
  ),
  lapply(1:grid.size, function(i) {
    fluidRow(
      lapply(1:grid.size, function(j) {
        column(col.width,
               div(
                 uiOutput(paste0("image_title", i, ".", j),
                          class = "bb-image-title"),
                 div(
                   imageOutput(paste0("image", i, ".", j),
                               height=img.height,
                               click = paste0("image_click", i, ".", j)),
                   uiOutput(paste0("image_overlay", i, ".", j),
                            class = "bb-image-overlay"),
                   class="bb-image"
                 ),
                 uiOutput(paste0("image_info", i, ".", j),
                          class = "bb-photo-credit"),
                 class = "bb-square"
               )
        )
      })
    )
  })
)
# Return TRUE when any bingo line is complete: a full row, a full column, or
# either diagonal of clicked squares. A square counts as clicked once its
# image_click<i>.<j> input has received a click event (non-NULL $x).
checkGrid <- function(input) {
  clicked <- matrix(FALSE, nrow = grid.size, ncol = grid.size)
  for (row in seq_len(grid.size)) {
    for (col in seq_len(grid.size)) {
      click_id <- paste0("image_click", row, ".", col)
      clicked[row, col] <- !is.null(input[[click_id]]$x)
    }
  }
  row_win <- any(apply(clicked, MARGIN = 1, all))
  col_win <- any(apply(clicked, MARGIN = 2, all))
  diag_win <- all(diag(clicked)) || all(diag(clicked[, grid.size:1]))
  row_win || col_win || diag_win
}
# Server: picks grid.size^2 random species, renders one titled image per bingo
# square with photo attribution, toggles an overlay on click, and announces
# "BINGO!" when checkGrid() detects a completed line.
server <- function(input, output, session) {
  addResourcePath("images", "./NBN_hack_series/BirdBingo/shiny_app/images")

  output$grid_check_info <- renderUI({
    if (checkGrid(input)) {
      div("BINGO!", class="bb-bingo")
    }
  })

  # Image output: draw a distinct random species for every square.
  n.urls <- grid.size^2
  spp.index <- sample(c(1:length(sppdat$eolPageNumbers)),
                      size = n.urls, ##prob = spp[sppdat$ListOfTaxa],
                      replace=F)

  lapply(1:grid.size, function(i) {
    lapply(1:grid.size, function(j) {
      # Linear index of square (i, j) into spp.index.
      index <- (i-1)*grid.size+j
      imageInfo <- url.list[[spp.index[index]]]

      titleId <- paste0('image_title', i, ".", j)
      output[[titleId]] <- renderUI ({
        h2(sppdat$ListOfTaxa[spp.index[index]])
      })

      imgId <- paste0('image', i, ".", j)
      output[[imgId]] <- renderImage({
        # A temp file to save the output
        outfile <- tempfile(fileext='.jpg')
        # FIXME catch problem downloading image
        download.file(imageInfo$mediaUrl[2], outfile, method = "libcurl")
        # FIXME specify width/height according to image orientation
        list(
          src = outfile,
          width = "100%",
          contentType = "image/jpeg",
          # BUG FIX: alt text previously used sppdat$ListOfTaxa[index] — the
          # square's position rather than its sampled species.
          alt = sppdat$ListOfTaxa[spp.index[index]]
        )
      }, deleteFile = FALSE)

      infoId <- paste0('image_info', i, ".", j)
      output[[infoId]] <- renderUI ({
        # Attribution: Title, Source, Author, License e.g. Photo by XXXX / CC BY
        # NOTE(review): url_parse() is not provided by any package attached in
        # this file (shiny/Reol/xml2/rnbn/stringr) — confirm where it is
        # defined; it appears to return XML::parseURI-style $server / $path.
        # Title (extract file name from source URL)
        src <- imageInfo$sourceUrl[1]
        src_path <- unlist(strsplit(url_parse(src)$path, split="[/]"))
        src_path <- src_path[length(src_path)]
        src_path <- str_replace(src_path, "^File:", "")
        src_path <- str_replace_all(src_path, "_", "-")
        # Author (use rights holder)
        rh <- imageInfo$rightsHolder[1]
        # License e.g.
        # http://creativecommons.org/licenses/by-sa/2.5/
        lic_text <- lic_url <- imageInfo$licenseUrl[1]
        lic <- url_parse(lic_url)
        # Parse known license types.
        # BUG FIX: the original `if (match("creativecommons.org", lic$server))`
        # evaluates `if (NA)` and errors whenever the host is not exactly
        # creativecommons.org; isTRUE(==) is FALSE for non-matches, NA and NULL.
        if (isTRUE(lic$server == "creativecommons.org")) {
          path <- unlist(strsplit(lic$path, split="[/]"))
          if ("licenses" %in% path) {
            lic_text <- paste("CC", str_to_upper(path[3]), sep = "-")
          } else {
            lic_text <- "Public domain"
          }
          lic_text <- str_c("(", lic_text, ")")
        }
        list(
          tags$a(href=src, src_path),
          tags$span(ifelse(!is.na(rh), paste("by", rh), "")),
          tags$a(href=lic_url, lic_text)
        )
      })

      # interaction: click in image shows the "seen it" overlay
      observeEvent(input[[paste0('image_click', i, ".", j)]], {
        output[[paste0('image_overlay', i, ".", j)]] <- renderUI({
          list(
            tags$img(
              src = "images/overlay.png",
              width = "100%",
              class = "bb-image-overlay"
            )
          )
        })
      })
    })
  })
}
shinyApp(ui, server)
|
/BirdBingo/shiny_app/ui.R
|
no_license
|
christophercooney/NBN_hack_series
|
R
| false
| false
| 7,539
|
r
|
rm(list=ls())
library(shiny); library(Reol); library(xml2); library(rnbn); library(stringr)
# Quick fix for Reol bug when matching EOL entries for instances where search
# returns more than one result for the same EOL ID
insertSource("./NBN_hack_series/BirdBingo/MatchTaxatoEOLID.R", package = "Reol", functions = "MatchTaxatoEOLID")
nbnLogin(username = "drcrc", password = "Turdusmerula")
load("./NBN_hack_series/BirdBingo/shiny_app/shef_data.rdata")
birdTVKs <- getGroupSpeciesTVKs("bird")
shef_data <- shef_data[shef_data$pTaxonVersionKey %in% birdTVKs,]
save(shef_data, file = "./NBN_hack_series/BirdBingo/ShefBirdData.Rdata")
load("./NBN_hack_series/BirdBingo/ShefBirdData.Rdata")
spp <- table(shef_data$pTaxonName)
spp <- spp[order(spp, decreasing = T)]
spp.names <- names(spp)[1:40] # Pick number of species to include
# FIXME: drop species if name contains < 2 words, as some entries are genus only
sppdat <- MatchTaxatoEOLID(spp.names, exact = T)
sppdat <- sppdat[!is.na(sppdat$eolPageNumbers),]
sppdat$N_records <- spp[match(sppdat$ListOfTaxa, names(spp))] # Attach number of records (i.e. commonness)
#spp.list <- as.numeric(sample(sppdat$eolPageNumbers, size = length(sppdat$eolPageNumbers), prob = spp, replace=F))
#chosen.names <- sppdat$ListOfTaxa[match(spp.list, sppdat$eolPageNumbers)]
myEOL <- DownloadEOLpages(sppdat$eolPageNumbers, to.file = FALSE)
#DataObjectOverview(myEOL)
#PageProcessing(data1[1])
url.list <- as.list(rep(NA, length(myEOL)))
for (i in 1:length(myEOL)) {
myxml <- read_xml(myEOL[[i]])
nsData <- xml_ns(myxml)
# Find data objects with appropriate mime type (image/jpeg)
dataObjs <- xml_find_all(myxml, ".//d1:dataObject", ns=nsData)
mimeTypes <- xml_text(xml_find_all(dataObjs, ".//d1:mimeType", ns=nsData))
imageObjs <- dataObjs[mimeTypes=="image/jpeg"]
# Get media URL and license info
licenseUrl <- sourceUrl <- rightsHolder <- mediaUrl <- list()
for (j in seq_along(imageObjs)) {
## Convert to R list object for convenience
imgObj <- as_list(imageObjs[[j]])
licenseUrl[[j]] <- unlist(imgObj$license)
sourceUrl[[j]] <- unlist(imgObj$source)
rightsHolder[[j]] <- ifelse(is.null(imgObj$rightsHolder), NA, unlist(imgObj$rightsHolder))
# There are two mediaURL entries (original image and EOL copy), this only gets the first:
#mediaUrl[[j]] <- unlist(imgObj$mediaURL)
# Use this to get both URLs:
mediaUrl[[j]] <- xml_text(xml_find_all(imageObjs[[j]], ".//d1:mediaURL", ns=nsData))
}
url.list[[i]] <- list(mediaUrl = unlist(mediaUrl),
licenseUrl = unlist(licenseUrl),
sourceUrl = unlist(sourceUrl),
rightsHolder = unlist(rightsHolder))
}
save(sppdat, spp, url.list, file = "./NBN_hack_series/BirdBingo/AppData.Rdata")
load(file = "./NBN_hack_series/BirdBingo/AppData.Rdata")
# # # UI # # #
img.height <- "150px"
img.width <- "150px"
grid.size <- 3
col.width <- 3
ui <- fluidPage(
includeCSS("./NBN_hack_series/BirdBingo/shiny_app/styles.css"),
# Application title
titlePanel("Bird Bingo!"),
hr(),
fluidRow(
column(3*col.width,
uiOutput("grid_check_info")
)
),
lapply(1:grid.size, function(i) {
fluidRow(
lapply(1:grid.size, function(j) {
column(col.width,
div(
uiOutput(paste0("image_title", i, ".", j),
class = "bb-image-title"),
div(
imageOutput(paste0("image", i, ".", j),
height=img.height,
click = paste0("image_click", i, ".", j)),
uiOutput(paste0("image_overlay", i, ".", j),
class = "bb-image-overlay"),
class="bb-image"
),
uiOutput(paste0("image_info", i, ".", j),
class = "bb-photo-credit"),
class = "bb-square"
)
)
})
)
})
)
checkGrid <- function(input) {
grid <- array(dim = rep(grid.size,2))
for (i in 1:grid.size) {
for (j in 1:grid.size) {
grid[i,j] <- !is.null(input[[paste0('image_click', i, ".", j)]]$x)
}
}
res <- any(apply(grid, MARGIN=1, all)) |
any(apply(grid, MARGIN=2, all)) |
all(diag(grid)) | all(diag(grid[,grid.size:1]))
return(res)
}
server <- function(input, output, session) {
addResourcePath("images", "./NBN_hack_series/BirdBingo/shiny_app/images")
output$grid_check_info <- renderUI({
if (checkGrid(input)) {
div("BINGO!", class="bb-bingo")
}
})
# Image output:
n.urls <- grid.size^2
spp.index <- sample(c(1:length(sppdat$eolPageNumbers)),
size = n.urls, ##prob = spp[sppdat$ListOfTaxa],
replace=F)
lapply(1:grid.size, function(i) {
lapply(1:grid.size, function(j) {
index <- (i-1)*grid.size+j
imageInfo <- url.list[[spp.index[index]]]
titleId <- paste0('image_title', i, ".", j)
output[[titleId]] <- renderUI ({
h2(sppdat$ListOfTaxa[spp.index[index]])
})
imgId <- paste0('image', i, ".", j)
output[[imgId]] <- renderImage({
# A temp file to save the output
outfile <- tempfile(fileext='.jpg')
# FIXME catch problem downloading image
download.file(imageInfo$mediaUrl[2], outfile, method = "libcurl")
# FIXME specify width/height according to image orientation
list(
src = outfile,
width = "100%",
contentType = "image/jpeg",
alt = sppdat$ListOfTaxa[index]
)
}, deleteFile = FALSE)
infoId <- paste0('image_info', i, ".", j)
output[[infoId]] <- renderUI ({
# Attribution: Title, Source, Author, License e.g. Photo by XXXX / CC BY
# Source: dc:source
# Author: dcterms:rightsholder
# Title (extract file name from source URL)
src <- imageInfo$sourceUrl[1]
src_path <- unlist(strsplit(url_parse(src)$path, split="[/]"))
src_path <- src_path[length(src_path)]
src_path <- str_replace(src_path, "^File:", "")
src_path <- str_replace_all(src_path, "_", "-")
# Author (use rights holder)
rh <- imageInfo$rightsHolder[1]
# License e.g.
# http://creativecommons.org/licenses/by-sa/2.5/
lic_text <- lic_url <- imageInfo$licenseUrl[1]
lic <- url_parse(lic_url)
# Parse known license types
if (match("creativecommons.org", lic$server)) {
path <- unlist(strsplit(lic$path, split="[/]"))
if ("licenses" %in% path) {
lic_text <- paste("CC", str_to_upper(path[3]), sep = "-")
} else {
lic_text <- "Public domain"
}
lic_text <- str_c("(", lic_text, ")")
}
list(
tags$a(href=src, src_path),
tags$span(ifelse(!is.na(rh), paste("by", rh), "")),
tags$a(href=lic_url, lic_text)
)
})
# interaction click in image
observeEvent(input[[paste0('image_click', i, ".", j)]], {
output[[paste0('image_overlay', i, ".", j)]] <- renderUI({
list(
tags$img(
src = "images/overlay.png",
width = "100%",
class = "bb-image-overlay"
)
)
})
})
})
})
}
shinyApp(ui, server)
|
# Reading data set
data<-read.table("household_power_consumption.txt",sep=";",header=T)
data$Date
tomatch <- c(grep("\\b1/2/2007\\b", data$Date),grep("\\b2/2/2007\\b", data$Date))
df <- data[tomatch,]
# Convert to date and time
df$Date <- as.Date(df$Date, "%d/%m/%Y")
df$Time <- strptime(df$Time, "%H:%M:%S")
df$Time <- sub(".*\\s+", "", df$Time)
# Plot 2
# 1. Open png file
png("plot2.png", width = 480, height = 480)
# 2. Create the plot
plot(as.numeric(df$Global_active_power),
type = "l",
xlab = "",
ylab = "Global Active Power (kilowatts)",
xaxt = "n")
axis(side = 1, at=c(1, 1441, 2880), labels=c("Thu","Fri","Sat"))
# 3. Close the file
dev.off()
|
/Plot2.R
|
no_license
|
jvrojas/ExData_Plotting1
|
R
| false
| false
| 685
|
r
|
# Plot 2: line chart of Global Active Power over 1-2 Feb 2007, saved to
# plot2.png (Coursera Exploratory Data Analysis assignment).

# Reading data set
data<-read.table("household_power_consumption.txt",sep=";",header=T)
# (removed: a bare `data$Date` debug line that auto-printed ~2M values)
# Keep only the two target dates; raw file uses d/m/yyyy strings.
tomatch <- c(grep("\\b1/2/2007\\b", data$Date),grep("\\b2/2/2007\\b", data$Date))
df <- data[tomatch,]
# Convert to date and time
df$Date <- as.Date(df$Date, "%d/%m/%Y")
df$Time <- strptime(df$Time, "%H:%M:%S")
df$Time <- sub(".*\\s+", "", df$Time)
# Plot 2
# 1. Open png file
png("plot2.png", width = 480, height = 480)
# 2. Create the plot
# BUG FIX: coerce via as.character() first — under pre-4.0 R (or
# stringsAsFactors = TRUE) the column is a factor, and as.numeric(factor)
# returns level codes instead of the measured values. The dataset also marks
# missing values as "?", which now becomes NA (with a warning) instead of a
# bogus level index.
plot(as.numeric(as.character(df$Global_active_power)),
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)",
     xaxt = "n")
axis(side = 1, at=c(1, 1441, 2880), labels=c("Thu","Fri","Sat"))
# 3. Close the file
dev.off()
|
library(dplyr)
library(readr)
library(metafor)
### glmm_fe: for each of the 792 simulated scenarios, fit a fixed-effect
### logit GLMM per run, back-transform the pooled proportion, and accumulate
### bias / (R)MSE / CI-coverage metrics across runs.
ptm<-proc.time()
n_scenarios <- 792
# Preallocate result vectors — the original grew them with `c()`-style
# assignment inside the loop, which copies on every iteration.
bias <- numeric(n_scenarios)
bias.prop <- numeric(n_scenarios)
RMSE <- numeric(n_scenarios)
RMSE.prop <- numeric(n_scenarios)
coverage <- numeric(n_scenarios)
## dobby1: coerce a factor/character value to numeric via character.
dobby1<-function(vec)as.numeric(as.character(vec))
for(i in seq_len(n_scenarios)){
  data.path<-paste0("sim_data/scenario_", i, ".csv")
  dat<-suppressWarnings(read_csv(data.path))
  ##use dplyr to obtain an rma object for each run
  ##back-transform with predict()
  ##store estimate and CI
  ##deselect the rma object column
  res<-dat %>%
    group_by(run) %>%
    summarise(obj=list(unlist(predict(rma.glmm(measure="PLO", method="FE",
                                               xi=num.of.events, ni=sample.sizes),
                                      transf=transf.ilogit))[c(1,3,4)]),
              est=dobby1(unlist(obj)[1]),
              ci.lb=dobby1(unlist(obj)[2]),
              ci.ub=dobby1(unlist(obj)[3])) %>%
    select(-obj)
  ##scenario information
  scenario_info<-dat%>%
    filter(!duplicated(dat$run)) %>%
    select(-X1, -sample.sizes, -num.of.events)
  ##scenario information joined with estimates of each run by "run"
  res<-left_join(scenario_info, res, by="run")
  # NOTE(review): `file.path` shadows base::file.path within this script.
  file.path<-paste0("sim_res/glmm_fe/scenario_", res$scenario[1], "_glmm_fe_res.csv")
  write.csv(res, file=file.path)
  true.pii<-res$true.pi[1]
  bias[i]<-mean(true.pii-res$est)
  bias.prop[i]<-mean((true.pii-res$est)/true.pii)
  # NOTE(review): despite the name, this is mean squared error (no sqrt).
  RMSE[i]<-mean((res$est-true.pii)^2)
  RMSE.prop[i]<-mean((res$est-true.pii)^2/true.pii)
  # NOTE(review): hard-codes 1000 runs per scenario; use nrow(res) if the
  # number of runs can vary.
  coverage[i]<-sum(res$ci.lb<=true.pii & res$ci.ub>=true.pii)/1000
}#ends for(i in 792)
metrics<-data.frame(bias=bias,
                    bias.prop=bias.prop,
                    RMSE=RMSE,
                    RMSE.prop=RMSE.prop,
                    coverage=coverage)
# NOTE(review): `scenarios` is not defined anywhere in this script — this
# cbind() fails unless it is loaded by an outer driver; confirm the intended
# object.
metrics<-cbind(scenarios, metrics)
write.csv(metrics, file="sim_res/glmm_fe/metrics_glmm_fe.csv")
proc.time()-ptm
# user   system  elapsed
# 7736.858  160.744 7975.229
|
/sim13/analysis_glmm_fe.R
|
no_license
|
ShawnFries/meta_code
|
R
| false
| false
| 1,911
|
r
|
library(dplyr)
library(readr)
library(metafor)
###glmm_fe
ptm<-proc.time()
bias=c()
bias.prop=c()
RMSE=c()
RMSE.prop=c()
coverage=c()
##dobby1
dobby1<-function(vec)as.numeric(as.character(vec))
for(i in 1:792){
data.path<-paste0("sim_data/scenario_", i, ".csv")
dat<-suppressWarnings(read_csv(data.path))
##use dplyr to obtain an rma object for each run
##back-transform with predict()
##store estimate and CI
##deselect the rma object column
res<-dat %>%
group_by(run) %>%
summarise(obj=list(unlist(predict(rma.glmm(measure="PLO", method="FE",
xi=num.of.events, ni=sample.sizes),
transf=transf.ilogit))[c(1,3,4)]),
est=dobby1(unlist(obj)[1]),
ci.lb=dobby1(unlist(obj)[2]),
ci.ub=dobby1(unlist(obj)[3])) %>%
select(-obj)
##scenario information
scenario_info<-dat%>%
filter(!duplicated(dat$run)) %>%
select(-X1, -sample.sizes, -num.of.events)
##scenario information joined with estimates of each run by "run"
res<-left_join(scenario_info, res, by="run")
file.path<-paste0("sim_res/glmm_fe/scenario_", res$scenario[1], "_glmm_fe_res.csv")
write.csv(res, file=file.path)
true.pii<-res$true.pi[1]
bias[i]<-mean(true.pii-res$est)
bias.prop[i]<-mean((true.pii-res$est)/true.pii)
RMSE[i]<-mean((res$est-true.pii)^2)
RMSE.prop[i]<-mean((res$est-true.pii)^2/true.pii)
coverage[i]<-sum(res$ci.lb<=true.pii & res$ci.ub>=true.pii)/1000
}#ends for(i in 792)
metrics<-data.frame(bias=bias,
bias.prop=bias.prop,
RMSE=RMSE,
RMSE.prop=RMSE.prop,
coverage=coverage)
metrics<-cbind(scenarios, metrics)
write.csv(metrics, file="sim_res/glmm_fe/metrics_glmm_fe.csv")
proc.time()-ptm
# user system elapsed
# 7736.858 160.744 7975.229
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ppiIDN2012}
\alias{ppiIDN2012}
\title{Poverty Probability Index (PPI) lookup table for Indonesia using legacy
poverty definitions}
\format{
A data frame with 4 columns and 101 rows:
\describe{
\item{\code{score}}{PPI score}
\item{\code{nl100}}{National poverty line (100\%)}
\item{\code{ppp125}}{Below $1.25 per day purchasing power parity (2005)}
\item{\code{ppp250}}{Below $2.50 per day purchasing power parity (2005)}
}
}
\source{
\url{https://www.povertyindex.org}
}
\usage{
ppiIDN2012
}
\description{
Poverty Probability Index (PPI) lookup table for Indonesia using legacy
poverty definitions
}
\examples{
# Access Indonesia PPI table
ppiIDN2012
# Given a specific PPI score (from 0 - 100), get the row of poverty
# probabilities from PPI table it corresponds to
ppiScore <- 50
ppiIDN2012[ppiIDN2012$score == ppiScore, ]
# Use subset() function to get the row of poverty probabilities corresponding
# to specific PPI score
ppiScore <- 50
subset(ppiIDN2012, score == ppiScore)
# Given a specific PPI score (from 0 - 100), get a poverty probability
# based on a specific poverty definition. In this example, the national
# poverty line definition
ppiScore <- 50
ppiIDN2012[ppiIDN2012$score == ppiScore, "nl100"]
}
\keyword{datasets}
|
/man/ppiIDN2012.Rd
|
permissive
|
katilingban/ppitables
|
R
| false
| true
| 1,376
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ppiIDN2012}
\alias{ppiIDN2012}
\title{Poverty Probability Index (PPI) lookup table for Indonesia using legacy
poverty definitions}
\format{
A data frame with 4 columns and 101 rows:
\describe{
\item{\code{score}}{PPI score}
\item{\code{nl100}}{National poverty line (100\%)}
\item{\code{ppp125}}{Below $1.25 per day purchasing power parity (2005)}
\item{\code{ppp250}}{Below $2.50 per day purchasing power parity (2005)}
}
}
\source{
\url{https://www.povertyindex.org}
}
\usage{
ppiIDN2012
}
\description{
Poverty Probability Index (PPI) lookup table for Indonesia using legacy
poverty definitions
}
\examples{
# Access Indonesia PPI table
ppiIDN2012
# Given a specific PPI score (from 0 - 100), get the row of poverty
# probabilities from PPI table it corresponds to
ppiScore <- 50
ppiIDN2012[ppiIDN2012$score == ppiScore, ]
# Use subset() function to get the row of poverty probabilities corresponding
# to specific PPI score
ppiScore <- 50
subset(ppiIDN2012, score == ppiScore)
# Given a specific PPI score (from 0 - 100), get a poverty probability
# based on a specific poverty definition. In this example, the national
# poverty line definition
ppiScore <- 50
ppiIDN2012[ppiIDN2012$score == ppiScore, "nl100"]
}
\keyword{datasets}
|
# ISLR chapter 5, Q5: estimate the test error of logistic regression on the
# Default data set with the validation-set approach.
library(ISLR)
# Reference fit on the full data (printed for inspection).
glm.model = glm(default~income+balance,data=Default,family=binomial)
glm.model
set.seed(1)
# 8000 training observations out of 10000; the rest form the validation set.
train = sample(nrow(Default),8000)
glm.model = glm(default~income+balance, data=Default, family=binomial, subset=train)
glm.prob = predict(glm.model,newdata=Default[-train,], type="response")
# Classify as "Yes" (default) when predicted probability exceeds 0.5.
glm.pred = rep("No",length(glm.prob))
glm.pred[glm.prob>0.5]="Yes"
#computing error rate
sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100
# Repeat with three different validation splits to gauge the variability of
# the error estimate.
for(i in 2:4){
  set.seed(i)
  train = sample(nrow(Default),8000)
  glm.model = glm(default~income+balance,data=Default,family=binomial, subset=train)
  glm.prob = predict(glm.model,newdata=Default[-train,],type="response")
  glm.pred = rep("No",length(glm.prob))
  glm.pred[glm.prob>0.5]="Yes"
  print(sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100)
}
# Average of the three error rates printed above (values hand-copied).
(2.15+2.55+2.85)/3
# Add the `student` dummy variable and re-estimate the validation error.
set.seed(1)
train = sample(nrow(Default),8000)
glm.model = glm(default~income+balance+student,data=Default,family=binomial,subset=train)
glm.prob = predict(glm.model,newdata=Default[-train,],type="response")
glm.pred = rep("No",length(glm.prob))
glm.pred[glm.prob>0.5]="Yes"
#computing the error rate
sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100
|
/Chapter 5 Resampling methods/Q5/q5.r
|
no_license
|
bhrzali/ISLR_assignments_applied
|
R
| false
| false
| 1,219
|
r
|
library(ISLR)
glm.model = glm(default~income+balance,data=Default,family=binomial)
glm.model
set.seed(1)
train = sample(nrow(Default),8000)
glm.model = glm(default~income+balance, data=Default, family=binomial, subset=train)
glm.prob = predict(glm.model,newdata=Default[-train,], type="response")
glm.pred = rep("No",length(glm.prob))
glm.pred[glm.prob>0.5]="Yes"
#computing error rate
sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100
for(i in 2:4){
set.seed(i)
train = sample(nrow(Default),8000)
glm.model = glm(default~income+balance,data=Default,family=binomial, subset=train)
glm.prob = predict(glm.model,newdata=Default[-train,],type="response")
glm.pred = rep("No",length(glm.prob))
glm.pred[glm.prob>0.5]="Yes"
print(sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100)
}
(2.15+2.55+2.85)/3
set.seed(1)
train = sample(nrow(Default),8000)
glm.model = glm(default~income+balance+student,data=Default,family=binomial,subset=train)
glm.prob = predict(glm.model,newdata=Default[-train,],type="response")
glm.pred = rep("No",length(glm.prob))
glm.pred[glm.prob>0.5]="Yes"
#computing the error rate
sum(glm.pred!=Default[-train,]$default)/length(glm.pred)*100
|
# Least-squares rating of teams from a tournament results matrix.
# `tournament` is an n x n matrix A where A[i, j] counts wins of team i over
# team j. Solves (M_bar - I) r = -(s - 0.5) subject to sum(r) == 0, where
# M_bar is the match matrix row-normalised by games played and s is each
# team's win rate; returns the n rating values.
ranking <- function(tournament) {
  n_teams <- nrow(tournament)
  games <- tournament + t(tournament)          # games played between each pair
  games_per_team <- rowSums(games)
  win_rate <- rowSums(tournament, na.rm = TRUE) / games_per_team
  coef_mat <- games / games_per_team - diag(n_teams)
  rhs <- win_rate - 0.5
  # Impose the identifying constraint sum(ratings) == 0: append a row of ones
  # with a 0 on the right-hand side, and a column of ones to keep the system
  # square (that column multiplies the constraint's slack and drops out).
  coef_mat <- cbind(rbind(coef_mat, rep(1, n_teams)), rep(1, n_teams + 1))
  rhs <- c(rhs, 0)
  solve(coef_mat, -rhs)[seq_len(n_teams)]
}
# Kendall rank correlation (tau) between two rankings given as label vectors.
# cor(..., method = 'kendall') does not accept factors; the inputs here are
# orderings of labels, so tau is computed by hand: for each pair of labels,
# compare the sign of their relative positions in x versus y, then normalise
# by the number of pairs.
# NOTE(review): assumes x and y contain exactly the same label set; a label
# missing from one input makes match() return NA and poisons the sum.
kendall <- function(x, y){
  x <- as.factor(x)
  y <- as.factor(y)
  n <- length(x)
  # M[i, j] holds +1 for a concordant label pair, -1 for discordant, 0 for ties.
  M <- matrix(integer(length(x)^2), nrow = n)
  for(i in 1:(n - 1)){
    for(j in (i + 1):n){
      # Relative position of labels i and j within each ranking.
      pos.x <- match(levels(x)[i], x) - match(levels(x)[j], x)
      pos.y <- match(levels(y)[i], y) - match(levels(y)[j], y)
      M[i,j] <- sign(pos.x * pos.y)
    }
  }
  # Sum over pairs divided by choose(n, 2).
  sum(M)/(n*(n-1)/2)
}
# Simulate k rounds of a full round-robin among length(true_ranks) teams.
# Each pairing is decided once per round by a logistic (Bradley-Terry style)
# win probability on the difference of true ratings. Returns the estimated
# ranking order (a factor of team indices, best first) from ranking().
round_robin <- function(true_ranks, k){# simulate k full round-robin rounds
  n <- length(true_ranks)
  A <- matrix(numeric(n*n), nrow = n)
  for(r in 1:k){
    for(i in 1:(n-1)){
      for(j in (i+1):n){
        # P(team i beats team j) under the logistic model.
        tmp_prob <- e1071::sigmoid(true_ranks[i] - true_ranks[j])
        result <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
        # Accumulate wins on both sides of the tournament matrix.
        A[i, j] <- A[i, j] + result
        A[j, i] <- A[j, i] + 1 - result
      }
    }
  }
  factor(order(ranking(A), decreasing = TRUE))
}
# Play one "ring" of fixtures (team i vs i+1 for all i, plus 1 vs k) so every
# team has at least two games, then add n extra randomly drawn pairings;
# returns the estimated ranking order. Win probabilities follow the same
# logistic model as round_robin().
circle <- function(true_ranks, n){
  k <- length(true_ranks)
  A1 <- matrix(numeric(k*k), nrow = k)
  # Closing fixture of the ring: team 1 vs team k.
  tmp_prob <- e1071::sigmoid(true_ranks[1] - true_ranks[k])
  A1[1, k] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
  A1[k, 1] <- 1 - A1[1, k]
  # BUG FIX: the original `for(i in 1:k-1)` parses as `(1:k) - 1`, i.e.
  # 0:(k-1); the i == 0 iteration indexes true_ranks[0] (empty) and crashes
  # sample() with an empty probability vector. seq_len(k - 1) gives the
  # intended 1..(k-1).
  for(i in seq_len(k - 1)){
    tmp_prob <- e1071::sigmoid(true_ranks[i] - true_ranks[i+1])
    A1[i, i+1] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
    A1[i+1, i] <- 1 - A1[i, i+1]
  }
  # n additional matches between uniformly drawn distinct pairs.
  # BUG FIX: seq_len(n) — `1:n` iterates c(1, 0) when n == 0.
  # NOTE(review): a repeated pairing overwrites (not accumulates) the earlier
  # result here — confirm that is intended.
  for(i in seq_len(n)){
    teams <- sample(1:k, 2, replace = FALSE)
    tmp_prob <- e1071::sigmoid(true_ranks[teams[1]] - true_ranks[teams[2]])
    A1[teams[1], teams[2]] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
    A1[teams[2], teams[1]] <- 1 - A1[teams[1], teams[2]]
  }
  tournament_ranks <- ranking(A1)
  factor(order(tournament_ranks, decreasing = TRUE))
}
# 8 teams, round robin: Kendall tau of estimated vs true order, 50 replicates
# for each of 1, 11, ..., 101 rounds.
true8 <- runif(8, -1.2, 1.2)
order8 <- factor(order(true8, decreasing = TRUE))
simulation8 <- sapply(seq(1, 101, 10), function(x) replicate(50, kendall(order8, round_robin(true8, x))))
# BUG FIX: the original converted the undefined object `simulation`; the
# matrix produced above is `simulation8`.
simulation8 <- as.data.frame(simulation8)
long_simulation8 <- tidyr::gather(simulation8, rounds, kendall)

# 28 teams, round robin: same design with a larger field.
true28 <- runif(28, -1.2, 1.2)
order28 <- factor(order(true28, decreasing = TRUE))
simulationdf <- sapply(seq(1, 101, 10), function(x) replicate(50, kendall(order28, round_robin(true28, x))))
simulationdf <- as.data.frame(simulationdf)
long_simulation <- tidyr::gather(simulationdf, rounds, kendall)
ggplot2::qplot(x = rounds, y = kendall, data = long_simulation,
               ylim = c(0, 1), main = 'Turnieje każdy z każdym', xlab = 'Liczba rund', geom = 'point')
# 28 teams, ring plus random extra matches, run in parallel.
# many_sim(n): 500 replicate tournaments with n extra random matches each,
# returning the Kendall tau of every estimated ranking with the true order.
many_sim <- function(n){
  out <- numeric(500)
  # BUG FIX: the original called `cirlce(...)` (typo) — the function is
  # `circle`.
  # NOTE(review): `true_order` / `true_ranks` are never defined in this
  # script (only `order28` / `true28` are) — confirm which objects were meant.
  for(i in seq_len(500)) out[i] <- kendall(true_order, circle(true_ranks, n))
  out
}
library(parallel)
clust <- makeCluster(3)
# NOTE(review): "simulation" is exported here but never defined in this file,
# so clusterExport() will error on the missing object; the workers also need
# `circle` (and package e1071) available to evaluate many_sim().
clusterExport(clust, c("true_order", "true_ranks", "simulation", "ranking", "kendall"))
big_simulation <- as.data.frame(parSapply(clust, seq(0, 1000, 20), many_sim))
# Mean tau with a one-standard-deviation band per extra-match count.
big_summary <- data.frame(up = sapply(big_simulation, mean) + sapply(big_simulation, sd),
                          avg = sapply(big_simulation, mean),
                          bott = sapply(big_simulation, mean) - sapply(big_simulation, sd))
plot(big_summary$avg, type = 'l', ylim = c(0, 0.85))
lines(big_summary$up, col = 'red')
lines(big_summary$bott, col = 'red')
|
/simulations.R
|
no_license
|
jacek789/ranking
|
R
| false
| false
| 4,523
|
r
|
ranking <- function(tournament){ # tournament odpowiada macierzy A z artyułu
n <- dim(tournament)[1]
matches_matrix <- tournament + t(tournament) # macierz M
matches_sums <- rowSums(matches_matrix) # wektor m
scores <- rowSums(tournament, na.rm = TRUE)/matches_sums #wektor s
matches_centered <- matches_matrix/matches_sums - diag(n) #Macierz (M z kreską) - I
scores_moved <- scores - 0.5 #wektor (s z daszkiem)
#Dodajemy poniżej warunek, że mają się sumować do zera w następujący sposób:
# dodajemy wiersz jedynek na dole (będzie sumował wszystie wyrazy) i 0 na końcu wyników (żeby sumowłąo się do 0). Żeby macierz była kwadratowa dorzucamy wektor jedynek po prawej (będą mnożone przez to zero więc to może byc cokolwiek różnego od zera)
matches_centered <- rbind(matches_centered, rep(1, n))
matches_centered <- cbind(matches_centered, rep(1, n+1))
scores_moved <- c(scores_moved, 0)
solve(matches_centered, -scores_moved)[1:n]
}
kendall <- function(x, y){ #cor(..., method = 'kendall') nie działa na factorach, etyiety chyba tak właśnie należy interpretować, więc napisałem funkcję sam
x <- as.factor(x)
y <- as.factor(y)
n <- length(x)
M <- matrix(integer(length(x)^2), nrow = n)
for(i in 1:(n - 1)){
for(j in (i + 1):n){
pos.x <- match(levels(x)[i], x) - match(levels(x)[j], x)
pos.y <- match(levels(y)[i], y) - match(levels(y)[j], y)
M[i,j] <- sign(pos.x * pos.y)
}
}
sum(M)/(n*(n-1)/2)
}
round_robin <- function(true_ranks, k){#symulcja k rund kazdy z każdym
n <- length(true_ranks)
A <- matrix(numeric(n*n), nrow = n)
for(r in 1:k){
for(i in 1:(n-1)){
for(j in (i+1):n){
tmp_prob <- e1071::sigmoid(true_ranks[i] - true_ranks[j])
result <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
A[i, j] <- A[i, j] + result
A[j, i] <- A[j, i] + 1 - result
}
}
}
factor(order(ranking(A), decreasing = TRUE))
}
# Play one "ring" of fixtures (team i vs i+1 for all i, plus 1 vs k) so every
# team has at least two games, then add n extra randomly drawn pairings;
# returns the estimated ranking order. Win probabilities follow the same
# logistic model as round_robin().
circle <- function(true_ranks, n){
  k <- length(true_ranks)
  A1 <- matrix(numeric(k*k), nrow = k)
  # Closing fixture of the ring: team 1 vs team k.
  tmp_prob <- e1071::sigmoid(true_ranks[1] - true_ranks[k])
  A1[1, k] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
  A1[k, 1] <- 1 - A1[1, k]
  # BUG FIX: the original `for(i in 1:k-1)` parses as `(1:k) - 1`, i.e.
  # 0:(k-1); the i == 0 iteration indexes true_ranks[0] (empty) and crashes
  # sample() with an empty probability vector. seq_len(k - 1) gives the
  # intended 1..(k-1).
  for(i in seq_len(k - 1)){
    tmp_prob <- e1071::sigmoid(true_ranks[i] - true_ranks[i+1])
    A1[i, i+1] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
    A1[i+1, i] <- 1 - A1[i, i+1]
  }
  # n additional matches between uniformly drawn distinct pairs.
  # BUG FIX: seq_len(n) — `1:n` iterates c(1, 0) when n == 0.
  # NOTE(review): a repeated pairing overwrites (not accumulates) the earlier
  # result here — confirm that is intended.
  for(i in seq_len(n)){
    teams <- sample(1:k, 2, replace = FALSE)
    tmp_prob <- e1071::sigmoid(true_ranks[teams[1]] - true_ranks[teams[2]])
    A1[teams[1], teams[2]] <- sample(c(1, 0), size = 1, prob = c(tmp_prob, 1 - tmp_prob))
    A1[teams[2], teams[1]] <- 1 - A1[teams[1], teams[2]]
  }
  tournament_ranks <- ranking(A1)
  factor(order(tournament_ranks, decreasing = TRUE))
}
# 8 teams, round robin: Kendall tau of estimated vs true order, 50 replicates
# for each of 1, 11, ..., 101 rounds.
true8 <- runif(8, -1.2, 1.2)
order8 <- factor(order(true8, decreasing = TRUE))
simulation8 <- sapply(seq(1, 101, 10), function(x) replicate(50, kendall(order8, round_robin(true8, x))))
# BUG FIX: the original converted the undefined object `simulation`; the
# matrix produced above is `simulation8`.
simulation8 <- as.data.frame(simulation8)
long_simulation8 <- tidyr::gather(simulation8, rounds, kendall)

# 28 teams, round robin: same design with a larger field.
true28 <- runif(28, -1.2, 1.2)
order28 <- factor(order(true28, decreasing = TRUE))
simulationdf <- sapply(seq(1, 101, 10), function(x) replicate(50, kendall(order28, round_robin(true28, x))))
simulationdf <- as.data.frame(simulationdf)
long_simulation <- tidyr::gather(simulationdf, rounds, kendall)
ggplot2::qplot(x = rounds, y = kendall, data = long_simulation,
               ylim = c(0, 1), main = 'Turnieje każdy z każdym', xlab = 'Liczba rund', geom = 'point')
# 28 teams, ring plus random extra matches, run in parallel.
# many_sim(n): 500 replicate tournaments with n extra random matches each,
# returning the Kendall tau of every estimated ranking with the true order.
many_sim <- function(n){
  out <- numeric(500)
  # BUG FIX: the original called `cirlce(...)` (typo) — the function is
  # `circle`.
  # NOTE(review): `true_order` / `true_ranks` are never defined in this
  # script (only `order28` / `true28` are) — confirm which objects were meant.
  for(i in seq_len(500)) out[i] <- kendall(true_order, circle(true_ranks, n))
  out
}
library(parallel)
clust <- makeCluster(3)
# NOTE(review): "simulation" is exported here but never defined in this file,
# so clusterExport() will error on the missing object; the workers also need
# `circle` (and package e1071) available to evaluate many_sim().
clusterExport(clust, c("true_order", "true_ranks", "simulation", "ranking", "kendall"))
big_simulation <- as.data.frame(parSapply(clust, seq(0, 1000, 20), many_sim))
# Mean tau with a one-standard-deviation band per extra-match count.
big_summary <- data.frame(up = sapply(big_simulation, mean) + sapply(big_simulation, sd),
                          avg = sapply(big_simulation, mean),
                          bott = sapply(big_simulation, mean) - sapply(big_simulation, sd))
plot(big_summary$avg, type = 'l', ylim = c(0, 0.85))
lines(big_summary$up, col = 'red')
lines(big_summary$bott, col = 'red')
|
/第11章 地理空间型图表/深圳地铁线路案例/深圳地铁线路图.R
|
no_license
|
EasyChart/Beautiful-Visualization-with-R
|
R
| false
| false
| 3,674
|
r
| ||
#' Density computation on x axis.
#'
#' Horizontal version of \code{\link[ggplot2]{stat_ydensity}}().
#' @inheritParams ggplot2::stat_ydensity
#' @export
stat_xdensity <- function(mapping = NULL, data = NULL,
                          geom = "violinh", position = "dodgev",
                          ...,
                          bw = "nrd0",
                          adjust = 1,
                          kernel = "gaussian",
                          trim = TRUE,
                          scale = "area",
                          na.rm = FALSE,
                          show.legend = NA,
                          inherit.aes = TRUE) {
  # Validate the violin scaling mode up front (area / count / width).
  scale <- match.arg(scale, c("area", "count", "width"))

  # Standard ggplot2 layer constructor; all stat parameters are forwarded
  # via `params` to StatXdensity$compute_group / compute_panel.
  layer(
    data = data,
    mapping = mapping,
    stat = StatXdensity,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      bw = bw,
      adjust = adjust,
      kernel = kernel,
      trim = trim,
      scale = scale,
      na.rm = na.rm,
      ...
    )
  )
}
# Obtain local copies of the bandwidth and density helpers.
# NOTE(review): `generate()` is presumably a package-local utility that pulls
# unexported ggplot2 functions into this namespace — confirm its definition
# elsewhere in the package.
calc_bw <- generate("calc_bw")
compute_density <- generate("compute_density")
#' @rdname ggstance-ggproto
#' @format NULL
#' @usage NULL
#' @export
# Horizontal counterpart of ggplot2::StatYdensity: the kernel density is
# computed along x within each group and the violin is positioned along y.
StatXdensity <- ggproto("StatXdensity", Stat,
  required_aes = c("x", "y"),
  non_missing_aes = "weight",

  # Per-group kernel density estimate of x (roles of x and y swapped relative
  # to StatYdensity$compute_group).
  compute_group = function(data, scales, width = NULL, bw = "nrd0", adjust = 1,
                           kernel = "gaussian", trim = TRUE, na.rm = FALSE) {
    # A density estimate needs at least 3 points; an empty frame drops the
    # group silently.
    if (nrow(data) < 3) return(data.frame())
    range <- range(data$x, na.rm = TRUE)
    # When not trimming, extend the evaluation range by 3 bandwidths per side.
    modifier <- if (trim) 0 else 3
    bw <- calc_bw(data$x, bw)

    dens <- compute_density(data$x, data$w, from = range[1] - modifier*bw, to = range[2] + modifier*bw,
      bw = bw, adjust = adjust, kernel = kernel)
    # dens$y <- dens$x
    # Collapse the group's y values to the midpoint of their range; the violin
    # is drawn centred at this y position.
    dens$y <- mean(range(data$y))

    # Compute width if x has multiple values
    if (length(unique(data$y)) > 1) {
      width <- diff(range(data$y)) * 0.9
    }
    dens$width <- width

    dens
  },

  compute_panel = function(self, data, scales, width = NULL, bw = "nrd0", adjust = 1,
                           kernel = "gaussian", trim = TRUE, na.rm = FALSE,
                           scale = "area") {
    # Delegate per-group density computation to the parent Stat machinery.
    data <- ggproto_parent(Stat, self)$compute_panel(
      data, scales, width = width, bw = bw, adjust = adjust, kernel = kernel,
      trim = trim, na.rm = na.rm
    )

    # choose how violins are scaled relative to each other
    data$violinwidth <- switch(scale,
      # area : keep the original densities but scale them to a max width of 1
      #        for plotting purposes only
      area = data$density / max(data$density),
      # count: use the original densities scaled to a maximum of 1 (as above)
      #        and then scale them according to the number of observations
      count = data$density / max(data$density) * data$n / max(data$n),
      # width: constant width (density scaled to a maximum of 1)
      width = data$scaled
    )

    data
  }
)
|
/R/stat-xdensity.R
|
no_license
|
mjskay/ggstance
|
R
| false
| false
| 2,971
|
r
|
#' Density computation on x axis.
#'
#' Horizontal version of \code{\link[ggplot2]{stat_ydensity}}().
#' @inheritParams ggplot2::stat_ydensity
#' @export
stat_xdensity <- function(mapping = NULL, data = NULL,
geom = "violinh", position = "dodgev",
...,
bw = "nrd0",
adjust = 1,
kernel = "gaussian",
trim = TRUE,
scale = "area",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE) {
scale <- match.arg(scale, c("area", "count", "width"))
layer(
data = data,
mapping = mapping,
stat = StatXdensity,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
bw = bw,
adjust = adjust,
kernel = kernel,
trim = trim,
scale = scale,
na.rm = na.rm,
...
)
)
}
calc_bw <- generate("calc_bw")
compute_density <- generate("compute_density")
#' @rdname ggstance-ggproto
#' @format NULL
#' @usage NULL
#' @export
StatXdensity <- ggproto("StatXdensity", Stat,
required_aes = c("x", "y"),
non_missing_aes = "weight",
compute_group = function(data, scales, width = NULL, bw = "nrd0", adjust = 1,
kernel = "gaussian", trim = TRUE, na.rm = FALSE) {
if (nrow(data) < 3) return(data.frame())
range <- range(data$x, na.rm = TRUE)
modifier <- if (trim) 0 else 3
bw <- calc_bw(data$x, bw)
dens <- compute_density(data$x, data$w, from = range[1] - modifier*bw, to = range[2] + modifier*bw,
bw = bw, adjust = adjust, kernel = kernel)
# dens$y <- dens$x
dens$y <- mean(range(data$y))
# Compute width if x has multiple values
if (length(unique(data$y)) > 1) {
width <- diff(range(data$y)) * 0.9
}
dens$width <- width
dens
},
compute_panel = function(self, data, scales, width = NULL, bw = "nrd0", adjust = 1,
kernel = "gaussian", trim = TRUE, na.rm = FALSE,
scale = "area") {
data <- ggproto_parent(Stat, self)$compute_panel(
data, scales, width = width, bw = bw, adjust = adjust, kernel = kernel,
trim = trim, na.rm = na.rm
)
# choose how violins are scaled relative to each other
data$violinwidth <- switch(scale,
# area : keep the original densities but scale them to a max width of 1
# for plotting purposes only
area = data$density / max(data$density),
# count: use the original densities scaled to a maximum of 1 (as above)
# and then scale them according to the number of observations
count = data$density / max(data$density) * data$n / max(data$n),
# width: constant width (density scaled to a maximum of 1)
width = data$scaled
)
data
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThreadNet_Misc.R
\name{make_subsets}
\alias{make_subsets}
\title{make_subsets}
\usage{
make_subsets(d, n)
}
\arguments{
\item{d}{data frame with occurrences or events}
\item{n}{number of buckets}
}
\value{
list of smaller data frames
}
\description{
Splits the threads into n approximately equal buckets.
}
|
/man/make_subsets.Rd
|
no_license
|
ThreadNet/ThreadNet
|
R
| false
| true
| 399
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ThreadNet_Misc.R
\name{make_subsets}
\alias{make_subsets}
\title{make_subsets}
\usage{
make_subsets(d, n)
}
\arguments{
\item{d}{data frame with occurrences or events}
\item{n}{number of buckets}
}
\value{
list of smaller data frames
}
\description{
this function is used to split up the threads into n ~equal buckets
}
|
## Draw the "ski slope" plot: number needed to treat (NNT) as a function of
## recurrence score (RS), with cumulative adverse-event (AE) severity bands
## (grades G0-G5) stacked above the single benefitting patient, and a marker
## at the user-selected RS.
## NOTE(review): relies on globals defined elsewhere -- RSvector, nnt and
## boxcolors -- and the polygon x-coordinates 0:100 assume those vectors have
## length 101 (RS values 0..100); confirm against the calling shiny app.
skisloplot <- function (RSinput= 30, ytop = 100) {
  plot(RSvector, nnt, xaxs="i", yaxs="i",  ## log='y',
       xlim=c(0, 50),
       ylim=c(0, ytop), type="l", lwd=5,
       main = "Number Needed to Treat by Recurrence Score",
       xlab="Recurrence score", ylab="Number needed to treat")
  AERiskTable = c(.029, .15, .57, .20, .05, .001) #Data from B20 trial; CMFT
  names(AERiskTable) = c("G0", "G1", "G2", "G3", "G4", "G5")
  # One column per AE grade: (nnt - 1) treated-without-benefit patients,
  # split by the grade probabilities above.
  Gvec = kronecker(matrix(AERiskTable, nrow=1), matrix(nnt-1))
  #str(Gvec)
  # Cumulative band boundaries; +1 leaves room for the benefitting patient.
  Gcum = t(apply(Gvec, 1, cumsum))+1
  colnames(Gcum) <- c("G0", "G1", "G2", "G3", "G4", "G5")
  #str(Gcum)
  for (i in 1:ncol(Gcum))
    lines(x = RSvector, y = Gcum[, i], col = boxcolors[i])
  # The band from 0 to 1 is the single patient who benefits.
  rect(0, 0, 50, 1, col = 'green')
  for (i in 1:ncol(Gcum)) {
    if (i == 1)
      polygon(x = c(0:100, 100:0), y = c(rep(1, 101), Gcum[101:1, 1]),
              border = FALSE, col = boxcolors[1])
    else
      polygon(x = c(0:100, 100:0), y = c(Gcum[1:101, i-1], Gcum[101:1, i]),
              border = FALSE, col = boxcolors[i])
  }
  # Vertical marker and label at the user-selected recurrence score.
  points(x = RSinput, y = nnt[RSinput + 1], type = "h", lwd = 3, col = "CornflowerBlue")
  text(x = RSinput, y = nnt[RSinput + 1], "RS", col = "CornflowerBlue", cex = 2, adj = c(0,0))
  legend("topright", legend =
           c("NNT", "User selected RS", "Benefitted", "No AE", "Mild", "Moderate", "Severe", "Life-threatening", "Died"),
         text.col = c("black", "CornflowerBlue", "green", boxcolors), cex=1.2,
         lwd=9, col = c("black", "CornflowerBlue", "green", boxcolors)
  )
  ### Remember to remove the helped patient!  NNT-1.
}
|
/inst/shinyAE/skislope.R
|
no_license
|
lilyokc/NNTbiomarkerHome
|
R
| false
| false
| 1,629
|
r
|
skisloplot <- function (RSinput= 30, ytop = 100) {
plot(RSvector, nnt, xaxs="i", yaxs="i", ## log='y',
xlim=c(0, 50),
ylim=c(0, ytop), type="l", lwd=5,
main = "Number Needed to Treat by Recurrence Score",
xlab="Recurrence score", ylab="Number needed to treat")
AERiskTable = c(.029, .15, .57, .20, .05, .001) #Data from B20 trial; CMFT
names(AERiskTable) = c("G0", "G1", "G2", "G3", "G4", "G5")
Gvec = kronecker(matrix(AERiskTable, nrow=1), matrix(nnt-1))
#str(Gvec)
Gcum = t(apply(Gvec, 1, cumsum))+1
colnames(Gcum) <- c("G0", "G1", "G2", "G3", "G4", "G5")
#str(Gcum)
for (i in 1:ncol(Gcum))
lines(x = RSvector, y = Gcum[, i], col = boxcolors[i])
rect(0, 0, 50, 1, col = 'green')
for (i in 1:ncol(Gcum)) {
if (i == 1)
polygon(x = c(0:100, 100:0), y = c(rep(1, 101), Gcum[101:1, 1]),
border = FALSE, col = boxcolors[1])
else
polygon(x = c(0:100, 100:0), y = c(Gcum[1:101, i-1], Gcum[101:1, i]),
border = FALSE, col = boxcolors[i])
}
points(x = RSinput, y = nnt[RSinput + 1], type = "h", lwd = 3, col = "CornflowerBlue")
text(x = RSinput, y = nnt[RSinput + 1], "RS", col = "CornflowerBlue", cex = 2, adj = c(0,0))
legend("topright", legend =
c("NNT", "User selected RS", "Benefitted", "No AE", "Mild", "Moderate", "Severe", "Life-threatening", "Died"),
text.col = c("black", "CornflowerBlue", "green", boxcolors), cex=1.2,
lwd=9, col = c("black", "CornflowerBlue", "green", boxcolors)
)
### Remember to remove the helped patient! NNT-1.
}
|
/etc/howm-doc/README.ja.rd
|
no_license
|
rsvdpbr/emacs-config
|
R
| false
| false
| 65,506
|
rd
| ||
# Differential exon usage (DEU) analysis of RNA-seq batch 1, comparing the
# "long" and "short" survival groups with DEXSeq. Counts were produced by
# DEXSeq's per-exon HTSeq counting step against a flattened exon annotation.
library(DEXSeq)
library(multicore)
source('~/Documents/Rscripts/120704-sortDataFrame.R')
setwd('~/Documents/RNAdata/danBatch1/dexSeq_count/')
samples = c('long1', 'long2', 'long3', 'short1', 'short2', 'short3')
# Read in design matrix
dm = read.csv('../bowtieGem/revHTSeq/designMatrix.csv')
dm = dm[,c(1,2,4,5)]
condition = as.factor(c('long', 'long', 'long', 'short', 'short', 'short'))
# Read the output of DEXSeq count into the appropriate object
data = read.HTSeqCounts(c('GIC_011.countExons.txt', 'GIC_020.countExons.txt', 'GIC_034.countExons.txt', 'GIC_035.countExons.txt',
                          'GIC_039.countExons.txt', 'GIC_041.countExons.txt'), condition,
                        flattenedfile='~/Documents/RNAdata/pilotRNAseq/121121_TopHatNovelAlignment/121203_countExons/exonAnnotation.gtf')
head(fData(data))
# Scale for library size
data = estimateSizeFactors(data)
sizeFactors(data)
# Estimate dispersion, then fit the mean-dispersion trend
data = estimateDispersions(data, minCount=20)
data = fitDispersionFunction(data)
head(fData(data)$dispBeforeSharing)
# Restrict testing to exons where at least one sample exceeds 50 counts.
# NOTE(review): rowSums(counts(data) > 50) counts samples above 50, not total
# counts -- confirm this matches the intended "more than 50 counts" filter.
fData(data)$testable = ifelse((rowSums(counts(data) > 50)), TRUE, FALSE)
data@dispFitCoefs
head(fData(data)$dispFitted)
# Can't find the fuction -> plotDispEsts(data)
data@phenoData@data$condition
# Test for differential exon usage using the TRT method
data = testForDEUTRT(data)
data = estimatelog2FoldChanges(data)
result = DEUresultTable(data)
# BUG FIX: the original `table(result$padjust != NA)` always evaluates to NA
# (any comparison with NA yields NA), so it could never count tested exons.
# Use is.na() to tabulate exons with / without an adjusted p-value.
table(!is.na(result$padjust))
# Sort the dataframe by adjusted p-value (ascending)
resultSorted = sort.dataframe(result, 5, highFirst=FALSE)
colnames(resultSorted) = c("geneID","exonID","dispersion","pvalue","padjust",'meanBase', 'lfc_short' )
# Subset the dataframe for interesting genes: FDR < 0.1 and |log2 FC| >= 1
sigGenes = resultSorted[(resultSorted$padjust < 0.1) & (abs(resultSorted$lfc_short) >= 1), ]
# MA-style plot of fold change vs mean expression (this plot function doesn't work well)
plot(resultSorted$meanBase, resultSorted$lfc_short, log = "x",
     col = ifelse(resultSorted$padjust < 0.1, "red", "black"), ylim = c(-4,4), main = "CD133 MvsA")
# Write the results out to file
write.table(resultSorted, '131114_dexSeqResultSorted.txt', sep ='\t')
write.table(sigGenes, '131114_dexSeqSigGenes.txt', sep ='\t')
# Per-gene exon-usage plots for selected loci of interest
plotDEXSeq(data, 'ENSG00000206503+ENSG00000227766', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, main='HLA-A + HLA complex group 4')
plotDEXSeq(data, 'ENSG00000214940+ENSG00000205746+ENSG00000233024+ENSG00000254681', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, main='Various psuedogenes')
plotDEXSeq(data, 'ENSG00000221988+ENSG00000241404+ENSG00000204314+ENSG00000204310+ENSG00000258388', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, main='PPT2 + EGFL8 + PRRT1 + AGPAT1 + PPT2-EGFL8 readthrough')
plotDEXSeq(data, 'ENSG00000126822+ENSG00000070182', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, norCounts=F, main='PLEKHG3 + SPTB')
plotDEXSeq(data, 'ENSG00000227199+ENSG00000226367+ENSG00000214188+ENSG00000004866', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, norCounts=F, main='ST7 antisense RNA 1 + 2 + ST7 overlapping transcript 4 + ST7')
plotDEXSeq(data, 'ENSG00000129675', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, norCounts=F, main='Rac/Cdc42 guanine nucleotide exchange factor (GEF) 6')
plotDEXSeq(data, 'ENSG00000163617+ENSG00000151576+ENSG00000184307', legend=TRUE, color=c('darkgreen', 'blue'),
           names=TRUE, expression=TRUE, norCounts=F, main='KIAA1407 + QTRTD1 + ZDHHC23')
|
/PhD/rnaSeqBatch1/131105_DEXSeq.R
|
no_license
|
dvbrown/Rscripts
|
R
| false
| false
| 3,613
|
r
|
library(DEXSeq)
library(multicore)
source('~/Documents/Rscripts/120704-sortDataFrame.R')
setwd('~/Documents/RNAdata/danBatch1/dexSeq_count/')
samples = c('long1', 'long2', 'long3', 'short1', 'short2', 'short3')
# Read in design matrix
dm = read.csv('../bowtieGem/revHTSeq/designMatrix.csv')
dm = dm[,c(1,2,4,5)]
condition = as.factor(c('long', 'long', 'long', 'short', 'short', 'short'))
# Read the output of DEXSeq count into the appropriate object
data = read.HTSeqCounts(c('GIC_011.countExons.txt', 'GIC_020.countExons.txt', 'GIC_034.countExons.txt', 'GIC_035.countExons.txt',
'GIC_039.countExons.txt', 'GIC_041.countExons.txt'), condition,
flattenedfile='~/Documents/RNAdata/pilotRNAseq/121121_TopHatNovelAlignment/121203_countExons/exonAnnotation.gtf')
head(fData(data))
data = estimateSizeFactors(data)
# Scale for library size
sizeFactors(data)
# Estimate dispersion
data = estimateDispersions(data, minCount=20)
data = fitDispersionFunction(data)
head(fData(data)$dispBeforeSharing)
fData(data)$testable = ifelse((rowSums(counts(data) > 50)), TRUE, FALSE) #only test exon usage for genes with more than 50 counts
data@dispFitCoefs
head(fData(data)$dispFitted)
# Can't find the fuction -> plotDispEsts(data)
data@phenoData@data$condition
# Test for differential expression using the TRT method
data = testForDEUTRT(data)
data= estimatelog2FoldChanges(data)
result = DEUresultTable(data)
table(result$padjust != NA)
# Sort the dataframe for p-value
resultSorted = sort.dataframe(result, 5, highFirst=FALSE)
colnames(resultSorted) = c("geneID","exonID","dispersion","pvalue","padjust",'meanBase', 'lfc_short' )
# Subset the dataframe for interesting genes
sigGenes = resultSorted[(resultSorted$padjust < 0.1) & (abs(resultSorted$lfc_short) >= 1), ]
# This plot function doesn't work well
plot(resultSorted$meanBase, resultSorted$lfc_short, log = "x",
col = ifelse(resultSorted$padjust < 0.1, "red", "black"), ylim = c(-4,4), main = "CD133 MvsA")
# Write the results out to file
write.table(resultSorted, '131114_dexSeqResultSorted.txt', sep ='\t')
write.table(sigGenes, '131114_dexSeqSigGenes.txt', sep ='\t')
plotDEXSeq(data, 'ENSG00000206503+ENSG00000227766', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, main='HLA-A + HLA complex group 4')
plotDEXSeq(data, 'ENSG00000214940+ENSG00000205746+ENSG00000233024+ENSG00000254681', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, main='Various psuedogenes')
plotDEXSeq(data, 'ENSG00000221988+ENSG00000241404+ENSG00000204314+ENSG00000204310+ENSG00000258388', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, main='PPT2 + EGFL8 + PRRT1 + AGPAT1 + PPT2-EGFL8 readthrough')
plotDEXSeq(data, 'ENSG00000126822+ENSG00000070182', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, norCounts=F, main='PLEKHG3 + SPTB')
plotDEXSeq(data, 'ENSG00000227199+ENSG00000226367+ENSG00000214188+ENSG00000004866', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, norCounts=F, main='ST7 antisense RNA 1 + 2 + ST7 overlapping transcript 4 + ST7')
plotDEXSeq(data, 'ENSG00000129675', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, norCounts=F, main='Rac/Cdc42 guanine nucleotide exchange factor (GEF) 6')
plotDEXSeq(data, 'ENSG00000163617+ENSG00000151576+ENSG00000184307', legend=TRUE, color=c('darkgreen', 'blue'),
names=TRUE, expression=TRUE, norCounts=F, main='KIAA1407 + QTRTD1 + ZDHHC23')
|
#Author: Supat Thongjuea, MRC Molecular Haematology Unit, Weatherall Institute of Molecular Medicine, University of Oxford, UK
#Contact email : supat.thongjuea@ndcls.ox.ac.uk or supat.thongjuea@gmail.com
#Maintainer: Supat Thongjuea and Alice Giustacchini
#Title: Single-cell Transcriptomics Uncovers Distinct and Clinically Predictive Molecular Signatures of Stem Cells in Chronic Myeloid Leukemia
#Journal : Nature Medicine
#Year : 2017
###############################################################################
#This R script is used to generate data for the GSEA analysis of Fig_3a and Fig_3b.
#It writes a GSEA expression matrix (.txt) and the matching phenotype file
#(.cls) contrasting normal HSCs against BCR-ABL+ cells at diagnosis.
###############################################################################
# Load the cell annotation and keep only cells flagged for analysis.
my.anno<-read.delim("../Data/CML_PROJECT_ALL_CELLs.Freezed-5.anno.txt",header=T)
my.anno<-subset(my.anno,used4analysis==1)
###############################################################################
load(file="../Data/CML_PROJECT_ALL_CELLs.rdata")
my.cells<-CML_PROJECT_ALL_CELLs[,as.character(my.anno$Cell)]
###############################################################################
S.m<-as.matrix(my.cells)
###############################################################################
# Define the two phenotype groups for the GSEA contrast.
groupA.cells<-subset(my.anno,Stage_2=="normal_hsc")
groupA<-as.character(groupA.cells$Cell)
groupB.cells<-subset(my.anno,BCR_ABL=="positive" & Stage_2=="diagnosis")
groupB<-as.character(groupB.cells$Cell)
##########################Get All Data#########################################
groupA.m<-S.m[,colnames(S.m) %in% groupA]
groupB.m<-S.m[,colnames(S.m) %in% groupB]
my.phenotype.s1<-rep("Normal_HSCs",ncol(groupA.m))
my.phenotype.s2<-rep("Diagnosis+",ncol(groupB.m))
my.phenotype<-c(my.phenotype.s1,my.phenotype.s2)
# Assemble the GSEA .txt matrix: NAME, DESCRIPTION, then one column per cell.
my.data<-cbind(groupA.m,groupB.m)
my.info<-data.frame(NAME=rownames(my.data))
my.info$DESCRIPTION<-"na"
my.final<-cbind(my.info,my.data)
###############################################################################
# Build the three lines of the GSEA .cls phenotype file:
#   line 1: <n samples> <n classes> 1
#   line 2: # <class names>
#   line 3: space-separated class label for every sample, in column order
h1<-paste(ncol(my.data),"2","1",sep=" ")
h2<-paste("#","Normal_HSCs","Diagnosis+",sep=" ")
# BUG FIX: in the original, `sep=" "` was misplaced inside c(), which made
# paste() a per-element no-op and appended a stray " " element to the vector.
# Use collapse to produce the single space-separated label line.
h3<-paste(c(rep("Normal_HSCs",length(groupA)),rep("Diagnosis+",length(groupB))),collapse=" ")
cat(h1,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),sep="\n")
cat(h2,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),sep="\n",append=TRUE)
cat(h3,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),append=TRUE)
write.table(my.final,file="NormalHSCs-Diagnosis+_GSEA.txt",
            append=FALSE, sep="\t", quote=FALSE,row.names=FALSE, col.names=TRUE)
|
/Fig3a_and_3b/Script-to-generate-data-for-GSEA-analysis.R
|
no_license
|
supatt-lab/Giustacchini-Thongjuea-et-al.-Nat.Med.2017
|
R
| false
| false
| 2,897
|
r
|
#Author: Supat Thongjuea, MRC Molecular Haematology Unit, Weatherall Institute of Molecular Medicine, University of Oxford, UK
#Contact email : supat.thongjuea@ndcls.ox.ac.uk or supat.thongjuea@gmail.com
#Maintainer: Supat Thongjuea and Alice Giustacchini
#Title: Single-cell Transcriptomics Uncovers Distinct and Clinically Predictive Molecular Signatures of Stem Cells in Chronic Myeloid Leukemia
#Journal : Nature Medicine
#Year : 2017
###############################################################################
#This R script is used to generate data for the GSEA analysis of Fig_3a and Fig_3b.
###############################################################################
###############################################################################
###############################################################################
my.anno<-read.delim("../Data/CML_PROJECT_ALL_CELLs.Freezed-5.anno.txt",header=T)
my.anno<-subset(my.anno,used4analysis==1)
###############################################################################
load(file="../Data/CML_PROJECT_ALL_CELLs.rdata")
my.cells<-CML_PROJECT_ALL_CELLs[,as.character(my.anno$Cell)]
###############################################################################
###############################################################################
S.m<-as.matrix(my.cells)
###############################################################################
###############################################################################
groupA.cells<-subset(my.anno,Stage_2=="normal_hsc")
groupA<-as.character(groupA.cells$Cell)
groupB.cells<-subset(my.anno,BCR_ABL=="positive" & Stage_2=="diagnosis")
groupB<-as.character(groupB.cells$Cell)
##########################Get All Data#########################################
groupA.m<-S.m[,colnames(S.m) %in% groupA]
groupB.m<-S.m[,colnames(S.m) %in% groupB]
my.phenotype.s1<-rep("Normal_HSCs",ncol(groupA.m))
my.phenotype.s2<-rep("Diagnosis+",ncol(groupB.m))
my.phenotype<-c(my.phenotype.s1,my.phenotype.s2)
my.data<-cbind(groupA.m,groupB.m)
my.info<-data.frame(NAME=rownames(my.data))
my.info$DESCRIPTION<-"na"
my.final<-cbind(my.info,my.data)
###############################################################################
##############################################################################
h1<-paste(ncol(my.data),"2","1",sep=" ")
h2<-paste("#","Normal_HSCs","Diagnosis+",sep=" ")
h3<-paste(c(rep("Normal_HSCs",length(groupA)),rep("Diagnosis+",length(groupB)),sep=" "))
cat(h1,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),sep="\n")
cat(h2,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),sep="\n",append=TRUE)
cat(h3,file=paste("NormalHSCs-Diagnosis+_GSEA-phenotype",".cls",sep=""),append=TRUE)
write.table(my.final,file="NormalHSCs-Diagnosis+_GSEA.txt",
append=FALSE, sep="\t", quote=FALSE,row.names=FALSE, col.names=TRUE)
|
# Plot 2: Global Active Power over time for 1-2 Feb 2007, written to plot2.png.
png(filename='plot2.png',bg='transparent')
# Read enough rows to cover the two target dates; every column comes in as
# character and must be converted explicitly below.
data<-read.table('household_power_consumption.txt',header=TRUE,sep=';',nrows=71000,colClasses='character')
power<-subset(data,data$Date=='1/2/2007'|data$Date=='2/2/2007')
# Combine the Date and Time columns into a single POSIXct timestamp.
power$DateTime <- as.POSIXct(paste(power$Date, power$Time), format="%d/%m/%Y %H:%M:%S")
# BUG FIX: the column was read as character (colClasses='character') and was
# never converted, so plot(ap~dt) treated the response as a factor instead of
# drawing the intended numeric line chart. Convert to numeric ('?' -> NA).
ap<-as.numeric(power$'Global_active_power')
dt<-power$DateTime
par(mfrow=c(1,1))
# Draw an empty frame first, then add the time series as a line.
plot(ap~dt,type='n',xlab='',ylab='Global Active Power (kilowatts)')
lines(dt,ap)
dev.off()
|
/plot2.R
|
no_license
|
MisterNi/ExData_Plotting1
|
R
| false
| false
| 462
|
r
|
png(filename='plot2.png',bg='transparent')
data<-read.table('household_power_consumption.txt',header=TRUE,sep=';',nrows=71000,colClasses='character')
power<-subset(data,data$Date=='1/2/2007'|data$Date=='2/2/2007')
power$DateTime <- as.POSIXct(paste(power$Date, power$Time), format="%d/%m/%Y %H:%M:%S")
ap<-power$'Global_active_power'
dt<-power$DateTime
par(mfrow=c(1,1))
plot(ap~dt,type='n',xlab='',ylab='Global Active Power (kilowatts)')
lines(dt,ap)
dev.off()
|
library(caret); library(kernlab); data("spam") # Load the "caret" package and the "spam" dataset (from kernlab)
folds <- createFolds(y=spam$type, k = 10, list = TRUE, returnTrain = TRUE) # Create the k-folds: 10 folds stratified on spam$type; each element holds training indices (returnTrain = TRUE)
sapply(folds, length) # Size of each fold's training set
|
/cross-validation.r
|
no_license
|
diegofsousa/ExampleOfK-fold
|
R
| false
| false
| 233
|
r
|
library(caret); library(kernlab); data("spam") # Importação do pacote "caret" e do dateset "spam"
folds <- createFolds(y=spam$type, k = 10, list = TRUE, returnTrain = TRUE) # Criação dos k-folds, 10 k-folds
sapply(folds, length)
|
#' @param opt.criterion Optimality criterion that bandwidth is designed to
#' optimize. The options are:
#'
#' \describe{
#'
#' \item{\code{"MSE"}}{Finite-sample maximum MSE}
#'
#' \item{\code{"FLCI"}}{Length of (fixed-length) two-sided
#' confidence intervals.}
#'
#' \item{\code{"OCI"}}{Given quantile of excess length of one-sided
#' confidence intervals}
#'
#' }
#'
#' The methods use conditional variance given by \code{sigma2}, if supplied.
#' Otherwise, for the purpose of estimating the optimal bandwidth,
#' conditional variance is estimated using the method specified by
#' \code{se.initial}.
#' @param beta Determines quantile of excess length to optimize, if bandwidth
#' optimizes given quantile of excess length of one-sided confidence
#' intervals; otherwise ignored.
#' @param alpha Determines the confidence level, \code{1-alpha}, used for
#'   constructing/optimizing confidence intervals.
|
/man-roxygen/RDoptBW.R
|
no_license
|
mdroste/RDHonest
|
R
| false
| false
| 958
|
r
|
#' @param opt.criterion Optimality criterion that bandwidth is designed to
#' optimize. The options are:
#'
#' \describe{
#'
#' \item{\code{"MSE"}}{Finite-sample maximum MSE}
#'
#' \item{\code{"FLCI"}}{Length of (fixed-length) two-sided
#' confidence intervals.}
#'
#' \item{\code{"OCI"}}{Given quantile of excess length of one-sided
#' confidence intervals}
#'
#' }
#'
#' The methods use conditional variance given by \code{sigma2}, if supplied.
#' Otherwise, for the purpose of estimating the optimal bandwidth,
#' conditional variance is estimated using the method specified by
#' \code{se.initial}.
#' @param beta Determines quantile of excess length to optimize, if bandwidth
#' optimizes given quantile of excess length of one-sided confidence
#' intervals; otherwise ignored.
#' @param alpha determines confidence level, \code{1-alpha} for
#' constructing/optimizing confidence intervals.
|
#' Get population data
#'
#' Look up the age-structured population for a country, by name or by
#' ISO 3166-1 alpha-3 code. Supply exactly one of \code{country} or
#' \code{iso3c}; if both are given, the \code{iso3c} lookup overwrites the
#' country lookup.
#'
#' @param country Country name
#' @param iso3c ISO 3C Country Code
#' @param simple_SEIR Logical. Is the population for the \code{simple_SEIR}.
#'   Default = FALSE
#'
#' @return Population data.frame
#' @importFrom utils head tail
#' @export
get_population <- function(country = NULL, iso3c = NULL, simple_SEIR = FALSE){
  ## country route
  if(!is.null(country)) {
    assert_string(country)
    if(!country %in% unique(squire::population$country)){
      stop("Country not found")
    }
    pc <- squire::population[squire::population$country == country, ] %>%
      dplyr::arrange(.data$age_group)
  }
  # iso3c route
  if(!is.null(iso3c)) {
    assert_string(iso3c)
    if(!iso3c %in% unique(squire::population$iso3c)){
      stop("iso3c not found")
    }
    pc <- squire::population[squire::population$iso3c == iso3c, ] %>%
      dplyr::arrange(.data$age_group)
  }
  if (simple_SEIR) {
    # Collapse the top two age groups into one "75+" bucket: the penultimate
    # row takes the combined count and is relabelled, the final (now zero)
    # row is dropped.
    pc$n <- c(head(pc$n, -2), sum(tail(pc$n, 2)), 0)
    pc$age_group <- as.character(pc$age_group)
    pc$age_group[length(pc$n)-1] <- "75+"
    pc <- head(pc, -1)
  }
  return(pc)
}
#' Get mixing matrix
#'
#' Look up the age-structured contact (mixing) matrix for a country.
#'
#' @param country Country name (must appear in
#'   \code{squire::population$country}).
#'
#' @return Age mixing matrix
#' @export
get_mixing_matrix <- function(country){
  if(!country %in% unique(squire::population$country)){
    stop("Country not found")
  }
  # NOTE: removed a dead call to get_population(country) whose result was
  # never used; the country has already been validated by the guard above.
  # Map the country to its contact-matrix key, then fetch the matrix itself.
  mm <- squire::population$matrix[match(country, squire::population$country)]
  mm <- squire::contact_matrices[[mm]]
  return(mm)
}
#'
#' Get healthcare capacity data
#'
#' Return hospital and ICU bed capacity for a country. Country-specific
#' figures are used when available; otherwise capacity is imputed from the
#' country's World Bank income stratum.
#'
#' @param country Country name
#' @param simple_SEIR Logical. Is the population for the \code{simple_SEIR}.
#'   Default = FALSE
#'
#' @return Healthcare capacity data: a list with elements \code{hosp_beds}
#'   and \code{ICU_beds}
#' @importFrom utils head tail
#' @export
get_healthcare_capacity <- function(country, simple_SEIR = FALSE){
  if(!country %in% unique(squire::population$country)){
    stop("Country not found")
  }
  if(country %in% unique(squire::country_specific_healthcare_capacity$country)) {
    # Country-specific capacity is available: use it directly.
    beds <- squire::country_specific_healthcare_capacity[match(country, squire::country_specific_healthcare_capacity$country), ]
    hosp_beds <- beds$hosp_beds
    ICU_beds <- beds$ICU_beds
    hc <- list(hosp_beds = hosp_beds, ICU_beds = ICU_beds)
  } else {
    # Fall back to income-stratum averages; error if the country has no
    # income-group classification either.
    income_group <- squire::income_group$income_group[match(country, squire::income_group$country)]
    if (is.na(income_group)) {
      stop("healthcare capacity data not available for this country - specify hospital and ICU beds in the run_explicit_SEEIR call manually")
    }
    beds <- squire::income_strata_healthcare_capacity[squire::income_strata_healthcare_capacity$income_group == income_group, ]
    hosp_beds <- as.vector(beds$hosp_beds)
    ICU_beds <- as.vector(beds$ICU_beds)
    hc <- list(hosp_beds = hosp_beds, ICU_beds = ICU_beds)
  }
  return(hc)
}
|
/R/population.R
|
permissive
|
tdm32/squire
|
R
| false
| false
| 2,830
|
r
|
#' Get population data
#'
#' @param country Country name
#' @param iso3c ISO 3C Country Code
#' @param simple_SEIR Logical. Is the population for the \code{simple_SEIR}.
#' Default = FALSE
#'
#' @return Population data.frame
#' @importFrom utils head tail
#' @export
get_population <- function(country = NULL, iso3c = NULL, simple_SEIR = FALSE){
## country route
if(!is.null(country)) {
assert_string(country)
if(!country %in% unique(squire::population$country)){
stop("Country not found")
}
pc <- squire::population[squire::population$country == country, ] %>%
dplyr::arrange(.data$age_group)
}
# iso3c route
if(!is.null(iso3c)) {
assert_string(iso3c)
if(!iso3c %in% unique(squire::population$iso3c)){
stop("iso3c not found")
}
pc <- squire::population[squire::population$iso3c == iso3c, ] %>%
dplyr::arrange(.data$age_group)
}
if (simple_SEIR) {
pc$n <- c(head(pc$n, -2), sum(tail(pc$n, 2)), 0)
pc$age_group <- as.character(pc$age_group)
pc$age_group[length(pc$n)-1] <- "75+"
pc <- head(pc, -1)
}
return(pc)
}
#' Get mixing matrix
#'
#' @param country Country name
#'
#' @return Age mixing matrix
#' @export
get_mixing_matrix <- function(country){
if(!country %in% unique(squire::population$country)){
stop("Country not found")
}
pop <- get_population(country)
mm <- squire::population$matrix[match(country, squire::population$country)]
mm <- squire::contact_matrices[[mm]]
return(mm)
}
#'
#' Get healthcare capacity data
#'
#' @param country Country name
#' @param simple_SEIR Logical. Is the population for the \code{simple_SEIR}.
#' Default = FALSE
#'
#' @return Healthcare capacity data
#' @importFrom utils head tail
#' @export
get_healthcare_capacity <- function(country, simple_SEIR = FALSE){
if(!country %in% unique(squire::population$country)){
stop("Country not found")
}
if(country %in% unique(squire::country_specific_healthcare_capacity$country)) {
beds <- squire::country_specific_healthcare_capacity[match(country, squire::country_specific_healthcare_capacity$country), ]
hosp_beds <- beds$hosp_beds
ICU_beds <- beds$ICU_beds
hc <- list(hosp_beds = hosp_beds, ICU_beds = ICU_beds)
} else {
income_group <- squire::income_group$income_group[match(country, squire::income_group$country)]
if (is.na(income_group)) {
stop("healthcare capacity data not available for this country - specify hospital and ICU beds in the run_explicit_SEEIR call manually")
}
beds <- squire::income_strata_healthcare_capacity[squire::income_strata_healthcare_capacity$income_group == income_group, ]
hosp_beds <- as.vector(beds$hosp_beds)
ICU_beds <- as.vector(beds$ICU_beds)
hc <- list(hosp_beds = hosp_beds, ICU_beds = ICU_beds)
}
return(hc)
}
|
## last modified June 2002
## Compute the matrix of per-interval probabilities for a finite mixture
## fitted to grouped data: entry (j, i) is the probability that component i
## falls in the j-th grouping interval. Column 1 of mixdat appears to hold
## the interval boundaries (the last row acting as the open upper tail --
## verify against callers); mixpar columns 2-3 hold each component's mean
## (mu) and standard deviation (sigma), which are moment-matched to each
## distribution's native parameterisation below.
grpintprob <- function(mixdat, mixpar, dist, constr)
{
    m <- nrow(mixdat)  # number of grouping intervals
    k <- nrow(mixpar)  # number of mixture components
    mu <- mixpar[, 2]
    sigma <- mixpar[, 3]
    if (dist == "norm") {
        # Normal: mean and sd used directly.
        par1 <- mu
        par2 <- sigma
        mixcdf <- t(sapply(mixdat[-m, 1], pnorm, par1, par2))
    }
    else if (dist == "lnorm") {
        # Lognormal: convert (mu, sigma) to (meanlog, sdlog).
        par2 <- sqrt(log((sigma/mu)^2 + 1))
        par1 <- log(mu) - (par2^2)/2
        mixcdf <- t(sapply(mixdat[-m, 1], plnorm, par1, par2))
    }
    else if (dist == "gamma") {
        # Gamma: shape = (mu/sigma)^2, rate = mu/sigma^2.
        par1 <- (mu/sigma)^2
        par2 <- mu/(sigma^2)
        mixcdf <- t(sapply(mixdat[-m, 1], pgamma, par1, par2))
    }
    else if (dist == "weibull") {
        # Weibull: shape/scale obtained from (mu, sigma) via weibullpar().
        par <- weibullpar(mu, sigma)
        par1 <- par$shape
        par2 <- par$scale
        mixcdf <- t(sapply(mixdat[-m, 1], pweibull, par1, par2))
    }
    else if (dist == "binom") {
        # Binomial: size comes from the constraint, prob = mu/size.
        par1 <- constr$size
        par2 <- mu/constr$size
        mixcdf <- t(sapply(mixdat[-m, 1], pbinom, par1, par2))
    }
    else if (dist == "nbinom") {
        # Negative binomial: size either fixed by the constraint or derived
        # from the mean/variance relation sigma^2 = mu + mu^2/size.
        if (constr$consigma == "NBINOM")
            par1 <- constr$size
        else par1 <- mu^2/(sigma^2 - mu)
        mixcdf <- t(sapply(mixdat[-m, 1], pnbinom, par1, mu = mu))
    }
    else if (dist == "pois") {
        # Poisson: single parameter, the mean.
        par <- mu
        mixcdf <- t(sapply(mixdat[-m, 1], ppois, par))
    }
    # With a single component, sapply collapses to a vector; restore shape.
    if (k == 1)
        mixcdf <- t(mixcdf)
    # Difference successive CDF values to get interval probabilities; the
    # final interval receives 1 - F(last boundary).
    rbind(mixcdf, 1) - rbind(0, mixcdf)
}
|
/R/grpintprob.R
|
no_license
|
cran/mixdist
|
R
| false
| false
| 1,471
|
r
|
## last modified June 2002
grpintprob <- function(mixdat, mixpar, dist, constr)
{
m <- nrow(mixdat)
k <- nrow(mixpar)
mu <- mixpar[, 2]
sigma <- mixpar[, 3]
if (dist == "norm") {
par1 <- mu
par2 <- sigma
mixcdf <- t(sapply(mixdat[-m, 1], pnorm, par1, par2))
}
else if (dist == "lnorm") {
par2 <- sqrt(log((sigma/mu)^2 + 1))
par1 <- log(mu) - (par2^2)/2
mixcdf <- t(sapply(mixdat[-m, 1], plnorm, par1, par2))
}
else if (dist == "gamma") {
par1 <- (mu/sigma)^2
par2 <- mu/(sigma^2)
mixcdf <- t(sapply(mixdat[-m, 1], pgamma, par1, par2))
}
else if (dist == "weibull") {
par <- weibullpar(mu, sigma)
par1 <- par$shape
par2 <- par$scale
mixcdf <- t(sapply(mixdat[-m, 1], pweibull, par1, par2))
}
else if (dist == "binom") {
par1 <- constr$size
par2 <- mu/constr$size
mixcdf <- t(sapply(mixdat[-m, 1], pbinom, par1, par2))
}
else if (dist == "nbinom") {
if (constr$consigma == "NBINOM")
par1 <- constr$size
else par1 <- mu^2/(sigma^2 - mu)
mixcdf <- t(sapply(mixdat[-m, 1], pnbinom, par1, mu = mu))
}
else if (dist == "pois") {
par <- mu
mixcdf <- t(sapply(mixdat[-m, 1], ppois, par))
}
if (k == 1)
mixcdf <- t(mixcdf)
rbind(mixcdf, 1) - rbind(0, mixcdf)
}
|
library(vlad)
### Name: racusum_arl_sim
### Title: Compute ARLs of RA-CUSUM control charts using simulation
### Aliases: racusum_arl_sim
### ** Examples
## Not run:
##D library("vlad")
##D library("spcadjust")
##D set.seed(1234)
##D data("cardiacsurgery")
##D df1 <- subset(cardiacsurgery, select=c(Parsonnet, status))
##D coeff1 <- round(coef(glm(status ~ Parsonnet, data=df1, family="binomial")), 3)
##D
##D ## Parallel Simulation 1: y = random (10^4 runs, RA=2)
##D m <- 10^4; h_vec <- 2.7; yemp <- FALSE
##D no_cores <- parallel::detectCores()
##D cl <- parallel::makeCluster(no_cores)
##D parallel::clusterExport(cl, c("h_vec", "racusum_arl_sim", "coeff1", "df1", "yemp"))
##D time <- system.time( {
##D ARL <- array(NA, dim=c( length(h_vec), m))
##D for (h in h_vec) {
##D ARL[which(h_vec==h), ] <- parallel::parSapply(cl, 1:m, racusum_arl_sim, h=h, coeff=coeff1,
##D df=df1, yemp=yemp, USE.NAMES=FALSE) }
##D } )
##D simMean <- apply(ARL, c(1), mean)
##D simSE <- sqrt(apply(ARL, c(1), var)/m)
##D print(list(simMean, simSE, time))
##D parallel::stopCluster(cl)
##D df.sim1 <- data.frame("RA"=2, "h"=h, "ARL"=simMean, "ARLSE"=simSE, "nsim"=m)
##D
##D ## Parallel Simulation 2: y = empirical (10^4 runs, RA=2)
##D m <- 10^4; h_vec <- 2.7
##D no_cores <- parallel::detectCores()
##D cl <- parallel::makeCluster(no_cores)
##D parallel::clusterExport(cl, c("h_vec", "racusum_arl_sim", "coeff1", "df1"))
##D time <- system.time( {
##D ARL <- array(NA, dim=c( length(h_vec), m))
##D for (h in h_vec) {
##D ARL[which(h_vec==h), ] <- parallel::parSapply(cl, 1:m, racusum_arl_sim, h=h, coeff=coeff1,
##D df=df1, USE.NAMES=FALSE) }
##D } )
##D simMean <- apply(ARL, c(1), mean)
##D simSE <- sqrt(apply(ARL, c(1), var)/m)
##D print(list(simMean, simSE, time))
##D parallel::stopCluster(cl)
##D df.sim2 <- data.frame("RA"=2, "h"=h, "ARL"=simMean, "ARLSE"=simSE, "nsim"=m)
##D
##D rbind(df.sim1, df.sim2)
## End(Not run)
|
/data/genthat_extracted_code/vlad/examples/racusum_arl_sim.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,040
|
r
|
library(vlad)
### Name: racusum_arl_sim
### Title: Compute ARLs of RA-CUSUM control charts using simulation
### Aliases: racusum_arl_sim
### ** Examples
## Not run:
##D library("vlad")
##D library("spcadjust")
##D set.seed(1234)
##D data("cardiacsurgery")
##D df1 <- subset(cardiacsurgery, select=c(Parsonnet, status))
##D coeff1 <- round(coef(glm(status ~ Parsonnet, data=df1, family="binomial")), 3)
##D
##D ## Parallel Simulation 1: y = random (10^4 runs, RA=2)
##D m <- 10^4; h_vec <- 2.7; yemp <- FALSE
##D no_cores <- parallel::detectCores()
##D cl <- parallel::makeCluster(no_cores)
##D parallel::clusterExport(cl, c("h_vec", "racusum_arl_sim", "coeff1", "df1", "yemp"))
##D time <- system.time( {
##D ARL <- array(NA, dim=c( length(h_vec), m))
##D for (h in h_vec) {
##D ARL[which(h_vec==h), ] <- parallel::parSapply(cl, 1:m, racusum_arl_sim, h=h, coeff=coeff1,
##D df=df1, yemp=yemp, USE.NAMES=FALSE) }
##D } )
##D simMean <- apply(ARL, c(1), mean)
##D simSE <- sqrt(apply(ARL, c(1), var)/m)
##D print(list(simMean, simSE, time))
##D parallel::stopCluster(cl)
##D df.sim1 <- data.frame("RA"=2, "h"=h, "ARL"=simMean, "ARLSE"=simSE, "nsim"=m)
##D
##D ## Parallel Simulation 2: y = empirical (10^4 runs, RA=2)
##D m <- 10^4; h_vec <- 2.7
##D no_cores <- parallel::detectCores()
##D cl <- parallel::makeCluster(no_cores)
##D parallel::clusterExport(cl, c("h_vec", "racusum_arl_sim", "coeff1", "df1"))
##D time <- system.time( {
##D ARL <- array(NA, dim=c( length(h_vec), m))
##D for (h in h_vec) {
##D ARL[which(h_vec==h), ] <- parallel::parSapply(cl, 1:m, racusum_arl_sim, h=h, coeff=coeff1,
##D df=df1, USE.NAMES=FALSE) }
##D } )
##D simMean <- apply(ARL, c(1), mean)
##D simSE <- sqrt(apply(ARL, c(1), var)/m)
##D print(list(simMean, simSE, time))
##D parallel::stopCluster(cl)
##D df.sim2 <- data.frame("RA"=2, "h"=h, "ARL"=simMean, "ARLSE"=simSE, "nsim"=m)
##D
##D rbind(df.sim1, df.sim2)
## End(Not run)
|
# This script tests the change_speed() function.
# The known input signal is loaded once and reused by every test below;
# the expected outputs were precomputed and stored under data/change_speed/.
input_signal <- read.csv("data/bark.csv", colClasses = c('numeric'))[[1]]
# Mean squared error between two equal-length numeric vectors.
# Factored out so the tolerance comparison is written only once.
mse <- function(expected, actual) {
  mean((expected - actual)^2)
}
test_that("Result of changing the speed of a known input signal with a rate of 2 matches the expected output", {
  expected_output <- read.table("data/change_speed/bark_double_speed.csv", colClasses = c('numeric'))[[1]]
  output_signal <- change_speed(input_signal, 2.0)
  # Allow a small numerical tolerance between implementations.
  expect_lt(mse(expected_output, output_signal), 0.002)
})
test_that("Result of changing the speed of a known input signal with a rate of 0.5 matches the expected output", {
  expected_output <- read.table("data/change_speed/bark_half_speed.csv", colClasses = c('numeric'))[[1]]
  output_signal <- change_speed(input_signal, 0.5)
  expect_lt(mse(expected_output, output_signal), 0.002)
})
test_that("Exception is raised for invalid zero rate argument", {
  expect_error(change_speed(input_signal, 0), "Error: rate must be a positive number")
})
# Fixed typo in the description below ("Eexception" -> "Exception").
test_that("Exception is raised for unsupported input_signal argument type", {
  expect_error(change_speed("test", 3), "Error: input_signal must be numeric")
})
|
/tests/testthat/test_change_speed.R
|
permissive
|
UBC-MDS/AudioFilters_R
|
R
| false
| false
| 1,276
|
r
|
# This script tests the change_speed() function.
# The known input signal is loaded once and reused by every test below;
# the expected outputs were precomputed and stored under data/change_speed/.
input_signal <- read.csv("data/bark.csv", colClasses = c('numeric'))[[1]]
# Mean squared error between two equal-length numeric vectors.
# Factored out so the tolerance comparison is written only once.
mse <- function(expected, actual) {
  mean((expected - actual)^2)
}
test_that("Result of changing the speed of a known input signal with a rate of 2 matches the expected output", {
  expected_output <- read.table("data/change_speed/bark_double_speed.csv", colClasses = c('numeric'))[[1]]
  output_signal <- change_speed(input_signal, 2.0)
  # Allow a small numerical tolerance between implementations.
  expect_lt(mse(expected_output, output_signal), 0.002)
})
test_that("Result of changing the speed of a known input signal with a rate of 0.5 matches the expected output", {
  expected_output <- read.table("data/change_speed/bark_half_speed.csv", colClasses = c('numeric'))[[1]]
  output_signal <- change_speed(input_signal, 0.5)
  expect_lt(mse(expected_output, output_signal), 0.002)
})
test_that("Exception is raised for invalid zero rate argument", {
  expect_error(change_speed(input_signal, 0), "Error: rate must be a positive number")
})
# Fixed typo in the description below ("Eexception" -> "Exception").
test_that("Exception is raised for unsupported input_signal argument type", {
  expect_error(change_speed("test", 3), "Error: input_signal must be numeric")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_squared_poisson.R
\name{r_squared_poisson}
\alias{r_squared_poisson}
\title{Pseudo R-Squared regarding Poisson deviance}
\usage{
r_squared_poisson(actual, predicted, w = NULL, ...)
}
\arguments{
\item{actual}{Observed values.}
\item{predicted}{Predicted values.}
\item{w}{Optional case weights.}
\item{...}{Further arguments passed to \code{r_squared}.}
}
\value{
A numeric vector of length one.
}
\description{
Wrapper to \code{r_squared} with \code{deviance_function = deviance_poisson}.
}
\examples{
r_squared(0:2, c(0.1, 1, 2), w = rep(1, 3), deviance_function = deviance_poisson)
r_squared_poisson(0:2, c(0.1, 1, 2), w = rep(1, 3))
}
\seealso{
\code{\link{r_squared}}.
}
|
/release/MetricsWeighted/man/r_squared_poisson.Rd
|
no_license
|
JosepER/MetricsWeighted
|
R
| false
| true
| 759
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_squared_poisson.R
\name{r_squared_poisson}
\alias{r_squared_poisson}
\title{Pseudo R-Squared regarding Poisson deviance}
\usage{
r_squared_poisson(actual, predicted, w = NULL, ...)
}
\arguments{
\item{actual}{Observed values.}
\item{predicted}{Predicted values.}
\item{w}{Optional case weights.}
\item{...}{Further arguments passed to \code{r_squared}.}
}
\value{
A numeric vector of length one.
}
\description{
Wrapper to \code{r_squared} with \code{deviance_function = deviance_poisson}.
}
\examples{
r_squared(0:2, c(0.1, 1, 2), w = rep(1, 3), deviance_function = deviance_poisson)
r_squared_poisson(0:2, c(0.1, 1, 2), w = rep(1, 3))
}
\seealso{
\code{\link{r_squared}}.
}
|
# SplitPlaysByWeek: helpers to slice NFL play/tracking data by week.
# NOTE(review): setwd() with an absolute path makes this script
# machine-specific; a project-relative path would be more portable.
setwd('F:/BigDataBowl2021')
# games.csv must provide at least the gameId and week columns used below.
games = read.csv('games.csv')
# FinalPlays.Rdata is expected to define `playsToUse` (referenced by
# splitPlaysByWeek) -- TODO confirm the object name it loads.
load('FinalPlays.Rdata')
# load('Combined2018PassingData.RData')
# w1Plays = plays[plays$gameId %in% games[games$week == 1,'gameId'],]
# w2Plays = plays[plays$gameId %in% games[games$week == 2,'gameId'],]
# w3Plays = plays[plays$gameId %in% games[games$week == 3,'gameId'],]
# w4Plays = plays[plays$gameId %in% games[games$week == 4,'gameId'],]
# w5Plays = plays[plays$gameId %in% games[games$week == 5,'gameId'],]
# w6Plays = plays[plays$gameId %in% games[games$week == 6,'gameId'],]
# w7Plays = plays[plays$gameId %in% games[games$week == 7,'gameId'],]
# w8Plays = plays[plays$gameId %in% games[games$week == 8,'gameId'],]
# w9Plays = plays[plays$gameId %in% games[games$week == 9,'gameId'],]
# w10Plays = plays[plays$gameId %in% games[games$week == 10,'gameId'],]
# w11Plays = plays[plays$gameId %in% games[games$week == 11,'gameId'],]
# w12Plays = plays[plays$gameId %in% games[games$week == 12,'gameId'],]
# w13Plays = plays[plays$gameId %in% games[games$week == 13,'gameId'],]
# w14Plays = plays[plays$gameId %in% games[games$week == 14,'gameId'],]
# w15Plays = plays[plays$gameId %in% games[games$week == 15,'gameId'],]
# w16Plays = plays[plays$gameId %in% games[games$week == 16,'gameId'],]
# w17Plays = plays[plays$gameId %in% games[games$week == 17,'gameId'],]
# Look up the NFL week number for a given game identifier.
# Relies on the `games` data frame loaded at the top of this script,
# which must contain gameId and week columns.
findWeekNum <- function(gameNum) {
  games$week[games$gameId == gameNum]
}
# Subset `playsToUse` to the plays belonging to games in a given NFL week.
# Both `playsToUse` and `games` are loaded at the top of this script.
splitPlaysByWeek <- function(weekNum) {
  week_games <- games$gameId[games$week == weekNum]
  playsToUse[playsToUse$gameId %in% week_games, ]
}
# Read the raw tracking data for a given NFL week from 'week<N>.csv' in the
# working directory (set at the top of this script).
# Uses paste0() instead of paste(..., sep='') -- identical output, idiomatic.
splitTrackingByWeek <- function(weekNum) {
  read.csv(paste0("week", weekNum, ".csv"))
}
|
/SplitPlaysByWeek.R
|
no_license
|
prestonbiro/BigDataBowl2020
|
R
| false
| false
| 1,715
|
r
|
# SplitPlaysByWeek: helpers to slice NFL play/tracking data by week.
# NOTE(review): setwd() with an absolute path makes this script
# machine-specific; a project-relative path would be more portable.
setwd('F:/BigDataBowl2021')
# games.csv must provide at least the gameId and week columns used below.
games = read.csv('games.csv')
# FinalPlays.Rdata is expected to define `playsToUse` (referenced by
# splitPlaysByWeek) -- TODO confirm the object name it loads.
load('FinalPlays.Rdata')
# load('Combined2018PassingData.RData')
# w1Plays = plays[plays$gameId %in% games[games$week == 1,'gameId'],]
# w2Plays = plays[plays$gameId %in% games[games$week == 2,'gameId'],]
# w3Plays = plays[plays$gameId %in% games[games$week == 3,'gameId'],]
# w4Plays = plays[plays$gameId %in% games[games$week == 4,'gameId'],]
# w5Plays = plays[plays$gameId %in% games[games$week == 5,'gameId'],]
# w6Plays = plays[plays$gameId %in% games[games$week == 6,'gameId'],]
# w7Plays = plays[plays$gameId %in% games[games$week == 7,'gameId'],]
# w8Plays = plays[plays$gameId %in% games[games$week == 8,'gameId'],]
# w9Plays = plays[plays$gameId %in% games[games$week == 9,'gameId'],]
# w10Plays = plays[plays$gameId %in% games[games$week == 10,'gameId'],]
# w11Plays = plays[plays$gameId %in% games[games$week == 11,'gameId'],]
# w12Plays = plays[plays$gameId %in% games[games$week == 12,'gameId'],]
# w13Plays = plays[plays$gameId %in% games[games$week == 13,'gameId'],]
# w14Plays = plays[plays$gameId %in% games[games$week == 14,'gameId'],]
# w15Plays = plays[plays$gameId %in% games[games$week == 15,'gameId'],]
# w16Plays = plays[plays$gameId %in% games[games$week == 16,'gameId'],]
# w17Plays = plays[plays$gameId %in% games[games$week == 17,'gameId'],]
# Look up the NFL week number for a given game identifier.
# Relies on the `games` data frame loaded at the top of this script,
# which must contain gameId and week columns.
findWeekNum <- function(gameNum) {
  games$week[games$gameId == gameNum]
}
# Subset `playsToUse` to the plays belonging to games in a given NFL week.
# Both `playsToUse` and `games` are loaded at the top of this script.
splitPlaysByWeek <- function(weekNum) {
  week_games <- games$gameId[games$week == weekNum]
  playsToUse[playsToUse$gameId %in% week_games, ]
}
# Read the raw tracking data for a given NFL week from 'week<N>.csv' in the
# working directory (set at the top of this script).
# Uses paste0() instead of paste(..., sep='') -- identical output, idiomatic.
splitTrackingByWeek <- function(weekNum) {
  read.csv(paste0("week", weekNum, ".csv"))
}
|
# 1.0 Loading Libraries ---------------------------------------------------
library(shiny)
library(argonR)
library(argonDash)
# library(tidyverse)
library(shinycssloaders)
library(shinyWidgets)
library(tidyverse)
library(readxl)
# library(pander)
library(highcharter)
library(DT)
# 1.1 Loading Data --------------------------------------------------------
mfl_data <- read_excel(path = 'data/MFL_v28Abril2020.xlsx', sheet = 'RSP2020')
# mfl_data <- as_data_frame(mfl_data)
# # examine the data
# mfl_data %>% glimpse()
#
# mfl_data[is.na(mfl_data$PROVINCIA), ]
# rename the variables
# mfl_tbl <- mfl_data
colnames(mfl_data) <- c('CODIGO', 'PROVINCIA', 'DISTRITO', 'UNIDADE_SANITARIA', 'CLASSIFICACAO', 'NIVEL', 'TIPO_US', 'TIPO', 'MATERNIDADE', 'NUM_CAMAS_MATERNIDADE', 'NUM_CAMAS_INTERNAMENTO', 'TOTAL', 'BS')
# # 1.2 convert all character columns to factor: --------------------------
# mfl_data <- mutate_if(mfl_data, is.character, as.factor)
# adicionar mais uma variavel para agrupar os dados por Unidades Sanitarias
mfl_data$TIPO_US_GRUPO <- NA
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US %in%
# c('Hospital Central',
# 'Hospital Geral',
# 'Hospital Provincial',
# 'Hospital Distrital',
# 'Hospital Rural',
# 'Hospital Militar',
# 'Hospital Psiquiatrico'))] <- 'Hospitais'
#
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US %in% c('Centro de Saúde Urbano',
# 'Centro de Saúde Rural'))] <- 'Centros_Saude'
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US == 'Posto de Saúde')] <- 'Postos_Saude'
# Map the raw facility type codes onto one of three reporting groups via a
# named lookup vector; codes not listed here leave TIPO_US_GRUPO as the NA
# initialised above. which() is kept deliberately so that NA entries in TIPO
# are skipped rather than used as subscripts.
grupo_por_tipo <- c(
  HC = 'Hospitais', HG = 'Hospitais', HP = 'Hospitais', HD = 'Hospitais',
  HR = 'Hospitais', HM = 'Hospitais', HPsi = 'Hospitais',
  CS = 'Centros_Saude', PS = 'Postos_Saude'
)
idx_tipo <- which(mfl_data$TIPO %in% names(grupo_por_tipo))
mfl_data$TIPO_US_GRUPO[idx_tipo] <- unname(grupo_por_tipo[mfl_data$TIPO[idx_tipo]])
# # calcular o número de unidades sanitárias por província
# mfl_data[is.na(mfl_data$PROVINCIA), ]
# Count facilities per province/district and facility group, then spread the
# groups into one column each; districts lacking a group get a 0 count via
# values_fill. ungroup() drops the residual PROVINCIA grouping.
grupos_unid_sanitaria_tbl <-
mfl_data %>%
group_by(PROVINCIA, DISTRITO) %>%
count(TIPO_US_GRUPO, name = "numero_US") %>%
pivot_wider(names_from = TIPO_US_GRUPO, values_from = numero_US, values_fill = list(numero_US = 0)) %>%
ungroup()
# Row total across the group-count columns (everything except the first two
# columns, PROVINCIA and DISTRITO).
grupos_unid_sanitaria_tbl$Total <- rowSums(grupos_unid_sanitaria_tbl[, -c(1, 2)])
# 3.0 Mapa ----------------------------------------------------------------
# preparing mapdata
# Harmonise the upper-case province names with the spellings used by the map
# data. NOTE(review): both 'MAPUTO PROVÍNCIA' and 'MAPUTO CIDADE' map onto
# 'Maputo' and are therefore merged by the aggregation below -- confirm this
# is intended.
df_mapdata <-
grupos_unid_sanitaria_tbl %>%
group_by(PROVINCIA) %>%
mutate(province_name = case_when(
as.character(PROVINCIA) == 'CABO DELGADO' ~ 'Cabo Delgado',
as.character(PROVINCIA) == 'NIASSA' ~ 'Niassa',
as.character(PROVINCIA) == 'NAMPULA' ~ 'Nampula',
as.character(PROVINCIA) == 'ZAMBÉZIA' ~ 'Zambezia',
as.character(PROVINCIA) == 'TETE' ~ 'Tete',
as.character(PROVINCIA) == 'MANICA' ~ 'Manica',
as.character(PROVINCIA) == 'SOFALA' ~ 'Sofala',
as.character(PROVINCIA) == 'INHAMBANE' ~ 'Inhambane',
as.character(PROVINCIA) == 'GAZA' ~ 'Gaza',
as.character(PROVINCIA) == 'MAPUTO PROVÍNCIA' ~ 'Maputo',
as.character(PROVINCIA) == 'MAPUTO CIDADE' ~ 'Maputo',
TRUE ~ as.character(PROVINCIA))
)
# Aggregate the numeric facility counts per harmonised province name.
# NOTE(review): summarise_if() is superseded in current dplyr -- consider
# summarise(across(where(is.numeric), sum)) when touching this next.
df_mapdata <-
df_mapdata %>%
select(-DISTRITO) %>%
group_by(province_name) %>%
# select(2:11) %>%
summarise_if(is.numeric, sum)
# summarise_all(sum)
# The map join presumably expects the total under the name 'value' -- hence
# the rename; verify against the highcharter map call.
df_mapdata <-
df_mapdata %>%
rename(value = Total)
|
/global.R
|
no_license
|
rzezela77/MFL_project
|
R
| false
| false
| 3,921
|
r
|
# 1.0 Loading Libraries ---------------------------------------------------
library(shiny)
library(argonR)
library(argonDash)
# library(tidyverse)
library(shinycssloaders)
library(shinyWidgets)
library(tidyverse)
library(readxl)
# library(pander)
library(highcharter)
library(DT)
# 1.1 Loading Data --------------------------------------------------------
mfl_data <- read_excel(path = 'data/MFL_v28Abril2020.xlsx', sheet = 'RSP2020')
# mfl_data <- as_data_frame(mfl_data)
# # examine the data
# mfl_data %>% glimpse()
#
# mfl_data[is.na(mfl_data$PROVINCIA), ]
# rename the variables
# mfl_tbl <- mfl_data
colnames(mfl_data) <- c('CODIGO', 'PROVINCIA', 'DISTRITO', 'UNIDADE_SANITARIA', 'CLASSIFICACAO', 'NIVEL', 'TIPO_US', 'TIPO', 'MATERNIDADE', 'NUM_CAMAS_MATERNIDADE', 'NUM_CAMAS_INTERNAMENTO', 'TOTAL', 'BS')
# # 1.2 convert all character columns to factor: --------------------------
# mfl_data <- mutate_if(mfl_data, is.character, as.factor)
# adicionar mais uma variavel para agrupar os dados por Unidades Sanitarias
mfl_data$TIPO_US_GRUPO <- NA
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US %in%
# c('Hospital Central',
# 'Hospital Geral',
# 'Hospital Provincial',
# 'Hospital Distrital',
# 'Hospital Rural',
# 'Hospital Militar',
# 'Hospital Psiquiatrico'))] <- 'Hospitais'
#
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US %in% c('Centro de Saúde Urbano',
# 'Centro de Saúde Rural'))] <- 'Centros_Saude'
# mfl_data$TIPO_US_GRUPO[which(mfl_data$TIPO_US == 'Posto de Saúde')] <- 'Postos_Saude'
# Map the raw facility type codes onto one of three reporting groups via a
# named lookup vector; codes not listed here leave TIPO_US_GRUPO as the NA
# initialised above. which() is kept deliberately so that NA entries in TIPO
# are skipped rather than used as subscripts.
grupo_por_tipo <- c(
  HC = 'Hospitais', HG = 'Hospitais', HP = 'Hospitais', HD = 'Hospitais',
  HR = 'Hospitais', HM = 'Hospitais', HPsi = 'Hospitais',
  CS = 'Centros_Saude', PS = 'Postos_Saude'
)
idx_tipo <- which(mfl_data$TIPO %in% names(grupo_por_tipo))
mfl_data$TIPO_US_GRUPO[idx_tipo] <- unname(grupo_por_tipo[mfl_data$TIPO[idx_tipo]])
# # calcular o número de unidades sanitárias por província
# mfl_data[is.na(mfl_data$PROVINCIA), ]
# Count facilities per province/district and facility group, then spread the
# groups into one column each; districts lacking a group get a 0 count via
# values_fill. ungroup() drops the residual PROVINCIA grouping.
grupos_unid_sanitaria_tbl <-
mfl_data %>%
group_by(PROVINCIA, DISTRITO) %>%
count(TIPO_US_GRUPO, name = "numero_US") %>%
pivot_wider(names_from = TIPO_US_GRUPO, values_from = numero_US, values_fill = list(numero_US = 0)) %>%
ungroup()
# Row total across the group-count columns (everything except the first two
# columns, PROVINCIA and DISTRITO).
grupos_unid_sanitaria_tbl$Total <- rowSums(grupos_unid_sanitaria_tbl[, -c(1, 2)])
# 3.0 Mapa ----------------------------------------------------------------
# preparing mapdata
# Harmonise the upper-case province names with the spellings used by the map
# data. NOTE(review): both 'MAPUTO PROVÍNCIA' and 'MAPUTO CIDADE' map onto
# 'Maputo' and are therefore merged by the aggregation below -- confirm this
# is intended.
df_mapdata <-
grupos_unid_sanitaria_tbl %>%
group_by(PROVINCIA) %>%
mutate(province_name = case_when(
as.character(PROVINCIA) == 'CABO DELGADO' ~ 'Cabo Delgado',
as.character(PROVINCIA) == 'NIASSA' ~ 'Niassa',
as.character(PROVINCIA) == 'NAMPULA' ~ 'Nampula',
as.character(PROVINCIA) == 'ZAMBÉZIA' ~ 'Zambezia',
as.character(PROVINCIA) == 'TETE' ~ 'Tete',
as.character(PROVINCIA) == 'MANICA' ~ 'Manica',
as.character(PROVINCIA) == 'SOFALA' ~ 'Sofala',
as.character(PROVINCIA) == 'INHAMBANE' ~ 'Inhambane',
as.character(PROVINCIA) == 'GAZA' ~ 'Gaza',
as.character(PROVINCIA) == 'MAPUTO PROVÍNCIA' ~ 'Maputo',
as.character(PROVINCIA) == 'MAPUTO CIDADE' ~ 'Maputo',
TRUE ~ as.character(PROVINCIA))
)
# Aggregate the numeric facility counts per harmonised province name.
# NOTE(review): summarise_if() is superseded in current dplyr -- consider
# summarise(across(where(is.numeric), sum)) when touching this next.
df_mapdata <-
df_mapdata %>%
select(-DISTRITO) %>%
group_by(province_name) %>%
# select(2:11) %>%
summarise_if(is.numeric, sum)
# summarise_all(sum)
# The map join presumably expects the total under the name 'value' -- hence
# the rename; verify against the highcharter map call.
df_mapdata <-
df_mapdata %>%
rename(value = Total)
|
## ----echo = FALSE, message = FALSE--------------------------------------------
# Purled from the TestDesign 'constraints' vignette: load the packages and
# blank out NA cells so they render as empty strings in the HTML tables below.
library(knitr)
library(kableExtra)
library(TestDesign)
constraints_science_data[is.na(constraints_science_data)] <- ""
constraints_reading_data[is.na(constraints_reading_data)] <- ""
constraints_fatigue_data[is.na(constraints_fatigue_data)] <- ""
constraints_bayes_data[is.na(constraints_bayes_data)] <- ""
## ----echo = FALSE-------------------------------------------------------------
# Every chunk below renders a slice of a constraints table with the same
# seven column widths, highlighting the column(s) under discussion in cyan.
# The 14 near-identical kable()/kable_styling()/column_spec() pipelines are
# factored into one helper; each top-level call auto-prints its return value
# exactly as the original inline pipelines did.

# Render `data` as a styled table.
#   highlight: integer vector of 1-based column indices to shade cyan.
#   row.names: passed through to knitr::kable() (NA = kable's default).
render_constraints <- function(data, highlight = integer(0), row.names = FALSE) {
  widths <- c("5em", "5em", "5em", "10em", "3em", "3em", "3em")
  out <- knitr::kable(data, row.names = row.names) %>%
    kableExtra::kable_styling(
      bootstrap_options = c("striped", "hover", "condensed", "responsive")
    )
  # Apply the fixed width to every column, in order, shading where requested.
  for (i in seq_along(widths)) {
    if (i %in% highlight) {
      out <- column_spec(out, i, widths[i], background = "cyan")
    } else {
      out <- column_spec(out, i, widths[i])
    }
  }
  out
}

# Blank the ONOFF field of a one-row slice before rendering (used for the
# Bayesian rows, where ONOFF was cleared in the original chunks).
blank_onoff <- function(row) {
  row$ONOFF <- ""
  row
}

# Overview: first five rows (kable's default row-name handling, as before).
render_constraints(constraints_science_data[1:5, ], row.names = NA)

# Examples highlighting column 2.
render_constraints(constraints_science_data[1, ], highlight = 2)
render_constraints(blank_onoff(constraints_bayes_data[2, ]), highlight = 2)
render_constraints(constraints_science_data[32, ], highlight = 2)
render_constraints(constraints_science_data[33, ], highlight = 2)
render_constraints(constraints_science_data[34, ], highlight = 2)
render_constraints(constraints_science_data[35, ], highlight = 2)
render_constraints(constraints_science_data[36, ], highlight = 2)

# Examples highlighting column 4.
render_constraints(blank_onoff(constraints_bayes_data[2, ]), highlight = 4)
render_constraints(blank_onoff(constraints_bayes_data[3, ]), highlight = 4)
render_constraints(constraints_reading_data[3, ], highlight = 4)

# Examples highlighting columns 5 and 6.
render_constraints(constraints_fatigue_data[1, ], highlight = c(5, 6))
render_constraints(constraints_reading_data[17, ], highlight = c(5, 6))

# Example highlighting column 7.
render_constraints(constraints_reading_data[18, ], highlight = 7)
|
/TestDesign/inst/doc/constraints.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 6,980
|
r
|
## ----echo = FALSE, message = FALSE--------------------------------------------
# Purled from the TestDesign 'constraints' vignette: load the packages and
# blank out NA cells so they render as empty strings in the HTML tables below.
library(knitr)
library(kableExtra)
library(TestDesign)
constraints_science_data[is.na(constraints_science_data)] <- ""
constraints_reading_data[is.na(constraints_reading_data)] <- ""
constraints_fatigue_data[is.na(constraints_fatigue_data)] <- ""
constraints_bayes_data[is.na(constraints_bayes_data)] <- ""
## ----echo = FALSE-------------------------------------------------------------
# Every chunk below renders a slice of a constraints table with the same
# seven column widths, highlighting the column(s) under discussion in cyan.
# The 14 near-identical kable()/kable_styling()/column_spec() pipelines are
# factored into one helper; each top-level call auto-prints its return value
# exactly as the original inline pipelines did.

# Render `data` as a styled table.
#   highlight: integer vector of 1-based column indices to shade cyan.
#   row.names: passed through to knitr::kable() (NA = kable's default).
render_constraints <- function(data, highlight = integer(0), row.names = FALSE) {
  widths <- c("5em", "5em", "5em", "10em", "3em", "3em", "3em")
  out <- knitr::kable(data, row.names = row.names) %>%
    kableExtra::kable_styling(
      bootstrap_options = c("striped", "hover", "condensed", "responsive")
    )
  # Apply the fixed width to every column, in order, shading where requested.
  for (i in seq_along(widths)) {
    if (i %in% highlight) {
      out <- column_spec(out, i, widths[i], background = "cyan")
    } else {
      out <- column_spec(out, i, widths[i])
    }
  }
  out
}

# Blank the ONOFF field of a one-row slice before rendering (used for the
# Bayesian rows, where ONOFF was cleared in the original chunks).
blank_onoff <- function(row) {
  row$ONOFF <- ""
  row
}

# Overview: first five rows (kable's default row-name handling, as before).
render_constraints(constraints_science_data[1:5, ], row.names = NA)

# Examples highlighting column 2.
render_constraints(constraints_science_data[1, ], highlight = 2)
render_constraints(blank_onoff(constraints_bayes_data[2, ]), highlight = 2)
render_constraints(constraints_science_data[32, ], highlight = 2)
render_constraints(constraints_science_data[33, ], highlight = 2)
render_constraints(constraints_science_data[34, ], highlight = 2)
render_constraints(constraints_science_data[35, ], highlight = 2)
render_constraints(constraints_science_data[36, ], highlight = 2)

# Examples highlighting column 4.
render_constraints(blank_onoff(constraints_bayes_data[2, ]), highlight = 4)
render_constraints(blank_onoff(constraints_bayes_data[3, ]), highlight = 4)
render_constraints(constraints_reading_data[3, ], highlight = 4)

# Examples highlighting columns 5 and 6.
render_constraints(constraints_fatigue_data[1, ], highlight = c(5, 6))
render_constraints(constraints_reading_data[17, ], highlight = c(5, 6))

# Example highlighting column 7.
render_constraints(constraints_reading_data[18, ], highlight = 7)
|
#' crypto_correlation
#'
#' Function to calculate the correlation between two crypto currencies
#'
#' This function is designed to calculate the correlation between two
#' cryptocurrencies in a given timeframe. For each currency the midpoint of
#' the high and low prices per period is used as the price series, and the
#' correlation of the two series is returned.
#'
#' @param firstDay first day to analyse in dd/mm/yyyy format
#' @param lastDay last day to analyse in dd/mm/yyyy format
#' @param cryptoA first cryptocurrency to correlate
#' @param cryptoB second cryptocurrency to correlate
#'
#' @return correlation between crypto A and crypto B
#' @importFrom dplyr select mutate
#' @importFrom stats cor
#'
#' @examples
#' \dontrun{
#' crypto_correlation("01/09/2018", "01/10/2018", "BTC", "ETH")
#'}
#' @export
crypto_correlation <- function(firstDay, lastDay, cryptoA, cryptoB) {
  # day_hour() is a package-local helper; "hour" presumably selects hourly
  # granularity for the window [firstDay, lastDay] -- TODO confirm.
  # avg = midpoint of the high/low prices for each period.
  cryptoAData <- day_hour("hour", firstDay, lastDay, cryptoA) %>%
    mutate(avg = (high + low) / 2) %>%
    select(avg)
  cryptoBData <- day_hour("hour", firstDay, lastDay, cryptoB) %>%
    mutate(avg = (high + low) / 2) %>%
    select(avg)
  # cor() on two one-column data frames returns a 1x1 matrix; [1] unwraps it
  # to a plain numeric scalar.
  return(cor(cryptoAData, cryptoBData)[1])
}
|
/CryptoShiny/R/crypto_correlation.R
|
permissive
|
fernandopf/ThinkRProject
|
R
| false
| false
| 1,016
|
r
|
#' crypto_correlation
#'
#' Function to calculate the correlation between two crypto currencies
#'
#' This function is designed to calculate the correlation between two
#' cryptocurrencies in a given timeframe. For each currency the midpoint of
#' the high and low prices per period is used as the price series, and the
#' correlation of the two series is returned.
#'
#' @param firstDay first day to analyse in dd/mm/yyyy format
#' @param lastDay last day to analyse in dd/mm/yyyy format
#' @param cryptoA first cryptocurrency to correlate
#' @param cryptoB second cryptocurrency to correlate
#'
#' @return correlation between crypto A and crypto B
#' @importFrom dplyr select mutate
#' @importFrom stats cor
#'
#' @examples
#' \dontrun{
#' crypto_correlation("01/09/2018", "01/10/2018", "BTC", "ETH")
#'}
#' @export
crypto_correlation <- function(firstDay, lastDay, cryptoA, cryptoB) {
  # day_hour() is a package-local helper; "hour" presumably selects hourly
  # granularity for the window [firstDay, lastDay] -- TODO confirm.
  # avg = midpoint of the high/low prices for each period.
  cryptoAData <- day_hour("hour", firstDay, lastDay, cryptoA) %>%
    mutate(avg = (high + low) / 2) %>%
    select(avg)
  cryptoBData <- day_hour("hour", firstDay, lastDay, cryptoB) %>%
    mutate(avg = (high + low) / 2) %>%
    select(avg)
  # cor() on two one-column data frames returns a 1x1 matrix; [1] unwraps it
  # to a plain numeric scalar.
  return(cor(cryptoAData, cryptoBData)[1])
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/siber.MVN.R
\name{siber.MVN}
\alias{siber.MVN}
\title{Fit Bayesian bivariate normal distributions to each group in each community}
\usage{
siber.MVN(siber, parms, priors)
}
\arguments{
\item{siber}{a siber object as created by \code{\link{create.siber.object}}}
\item{parms}{a list containing four items providing details of the
\code{\link[rjags]{rjags}} run to be sampled.
\itemize{
\item {n.iter}{The number of iterations to sample}
\item {n.burnin}{The number of iterations to discard as a burnin from the
start of sampling.}
\item {n.thin}{The number of samples to thin by.}
\item {n.chains}{The number of chains to fit.}
}}
\item{priors}{a list of three items specifying the priors to be passed to
the jags model.
\itemize{
\item {R}{The scaling vector for the diagonal of Inverse Wishart
distribution prior on the covariance matrix Sigma. Typically
set to a 2x2 matrix matrix(c(1, 0, 0, 1), 2, 2).}
\item {k}{The degrees of freedom of the Inverse Wishart distribution for
the covariance matrix Sigma. Typically set to the dimensionality of Sigma,
which in this bivariate case is 2.}
\item {tau}{The precision on the normal prior on the means mu.}
}}
}
\value{
A list of length equal to the total number of groups in all
communities. Each entry is named 1.1 1.2... 2.1.. with the first number
designating the community, and the second number the group within that
community. So, 2.3 would be the third group within the second community.
Each list entry is a 6 x n matrix representing the back-transformed posterior
distributions of the bivariate normal distribution, where n is the number of
posterior draws in the saved sample. The first two columns are the back-
transformed means, and the remaining four columns are the covariance matrix
Sigma in vector format. This vector converts to the covariance matrix as
\code{matrix(v[1:4], nrow = 2, ncol = 2)}.
}
\description{
This function loops over each community and then loops over each group
member, fitting a Bayesian multivariate (bivariate in this case) normal
distribution to each group of data. Not intended for direct calling by users.
}
|
/man/siber.MVN.Rd
|
no_license
|
andrewcparnell/SIBER
|
R
| false
| false
| 2,246
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/siber.MVN.R
\name{siber.MVN}
\alias{siber.MVN}
\title{Fit Bayesian bivariate normal distributions to each group in each community}
\usage{
siber.MVN(siber, parms, priors)
}
\arguments{
\item{siber}{a siber object as created by \code{\link{create.siber.object}}}
\item{parms}{a list containing four items providing details of the
\code{\link[rjags]{rjags}} run to be sampled.
\itemize{
\item {n.iter}{The number of iterations to sample}
\item {n.burnin}{The number of iterations to discard as a burnin from the
start of sampling.}
\item {n.thin}{The number of samples to thin by.}
\item {n.chains}{The number of chains to fit.}
}}
\item{priors}{a list of three items specifying the priors to be passed to
the jags model.
\itemize{
\item {R}{The scaling vector for the diagonal of Inverse Wishart
distribution prior on the covariance matrix Sigma. Typically
set to a 2x2 matrix matrix(c(1, 0, 0, 1), 2, 2).}
\item {k}{The degrees of freedom of the Inverse Wishart distribution for
the covariance matrix Sigma. Typically set to the dimensionality of Sigma,
which in this bivariate case is 2.}
\item {tau}{The precision on the normal prior on the means mu.}
}}
}
\value{
A list of length equal to the total number of groups in all
communities. Each entry is named 1.1 1.2... 2.1.. with the first number
designating the community, and the second number the group within that
community. So, 2.3 would be the third group within the second community.
Each list entry is a 6 x n matrix representing the back-transformed posterior
distributions of the bivariate normal distribution, where n is the number of
posterior draws in the saved sample. The first two columns are the back-
transformed means, and the remaining four columns are the covariance matrix
Sigma in vector format. This vector converts to the covariance matrix as
\code{matrix(v[1:4], nrow = 2, ncol = 2)}.
}
\description{
This function loops over each community and then loops over each group
member, fitting a Bayesian multivariate (bivariate in this case) normal
distribution to each group of data. Not intended for direct calling by users.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{cmdstan_model}
\alias{cmdstan_model}
\title{Create a new CmdStanModel object}
\usage{
cmdstan_model(stan_file, compile = TRUE, ...)
}
\arguments{
\item{stan_file}{The path to a \code{.stan} file containing a Stan program. The
helper function \code{\link[=write_stan_file]{write_stan_file()}} is provided for cases when it is
more convenient to specify the Stan program as a string.}
\item{compile}{Do compilation? The default is \code{TRUE}. If \code{FALSE}
compilation can be done later via the \code{\link[=model-method-compile]{$compile()}}
method.}
\item{...}{Optionally, additional arguments to pass to the
\code{\link[=model-method-compile]{$compile()}} method if \code{compile=TRUE}.}
}
\value{
A \code{\link{CmdStanModel}} object.
}
\description{
\if{html}{\figure{logo.png}{options: width="25px" alt="https://mc-stan.org/about/logo/"}}
Create a new \code{\link{CmdStanModel}} object from a file containing a Stan program.
}
\examples{
\dontrun{
library(cmdstanr)
library(posterior)
library(bayesplot)
color_scheme_set("brightblue")
# Set path to CmdStan
# (Note: if you installed CmdStan via install_cmdstan() with default settings
# then setting the path is unnecessary but the default below should still work.
# Otherwise use the `path` argument to specify the location of your
# CmdStan installation.)
set_cmdstan_path(path = NULL)
# Create a CmdStanModel object from a Stan program,
# here using the example model that comes with CmdStan
file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.stan")
mod <- cmdstan_model(file)
mod$print()
# Data as a named list (like RStan)
stan_data <- list(N = 10, y = c(0,1,0,0,0,0,0,0,0,1))
# Run MCMC using the 'sample' method
fit_mcmc <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
parallel_chains = 2
)
# Use 'posterior' package for summaries
fit_mcmc$summary()
# Get posterior draws
draws <- fit_mcmc$draws()
print(draws)
# Convert to data frame using posterior::as_draws_df
as_draws_df(draws)
# Plot posterior using bayesplot (ggplot2)
mcmc_hist(fit_mcmc$draws("theta"))
# Call CmdStan's diagnose and stansummary utilities
fit_mcmc$cmdstan_diagnose()
fit_mcmc$cmdstan_summary()
# For models fit using MCMC, if you like working with RStan's stanfit objects
# then you can create one with rstan::read_stan_csv()
# stanfit <- rstan::read_stan_csv(fit_mcmc$output_files())
# Run 'optimize' method to get a point estimate (default is Stan's LBFGS algorithm)
# and also demonstrate specifying data as a path to a file instead of a list
my_data_file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.data.json")
fit_optim <- mod$optimize(data = my_data_file, seed = 123)
fit_optim$summary()
# Run 'variational' method to approximate the posterior (default is meanfield ADVI)
fit_vb <- mod$variational(data = stan_data, seed = 123)
fit_vb$summary()
# Plot approximate posterior using bayesplot
mcmc_hist(fit_vb$draws("theta"))
# Specifying initial values as a function
fit_mcmc_w_init_fun <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function() list(theta = runif(1))
)
fit_mcmc_w_init_fun_2 <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function(chain_id) {
# silly but demonstrates optional use of chain_id
list(theta = 1 / (chain_id + 1))
}
)
fit_mcmc_w_init_fun_2$init()
# Specifying initial values as a list of lists
fit_mcmc_w_init_list <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = list(
list(theta = 0.75), # chain 1
list(theta = 0.25) # chain 2
)
)
fit_optim_w_init_list <- mod$optimize(
data = stan_data,
seed = 123,
init = list(
list(theta = 0.75)
)
)
fit_optim_w_init_list$init()
}
}
\seealso{
\code{\link[=install_cmdstan]{install_cmdstan()}}, \code{\link[=model-method-compile]{$compile()}},
\code{\link[=model-method-check_syntax]{$check_syntax()}}
The CmdStanR website
(\href{https://mc-stan.org/cmdstanr/}{mc-stan.org/cmdstanr}) for online
documentation and tutorials.
The Stan and CmdStan documentation:
\itemize{
\item Stan documentation: \href{https://mc-stan.org/users/documentation/}{mc-stan.org/users/documentation}
\item CmdStan User’s Guide: \href{https://mc-stan.org/docs/cmdstan-guide/}{mc-stan.org/docs/cmdstan-guide}
}
}
|
/man/cmdstan_model.Rd
|
permissive
|
spinkney/cmdstanr
|
R
| false
| true
| 4,411
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{cmdstan_model}
\alias{cmdstan_model}
\title{Create a new CmdStanModel object}
\usage{
cmdstan_model(stan_file, compile = TRUE, ...)
}
\arguments{
\item{stan_file}{The path to a \code{.stan} file containing a Stan program. The
helper function \code{\link[=write_stan_file]{write_stan_file()}} is provided for cases when it is
more convenient to specify the Stan program as a string.}
\item{compile}{Do compilation? The default is \code{TRUE}. If \code{FALSE}
compilation can be done later via the \code{\link[=model-method-compile]{$compile()}}
method.}
\item{...}{Optionally, additional arguments to pass to the
\code{\link[=model-method-compile]{$compile()}} method if \code{compile=TRUE}.}
}
\value{
A \code{\link{CmdStanModel}} object.
}
\description{
\if{html}{\figure{logo.png}{options: width="25px" alt="https://mc-stan.org/about/logo/"}}
Create a new \code{\link{CmdStanModel}} object from a file containing a Stan program.
}
\examples{
\dontrun{
library(cmdstanr)
library(posterior)
library(bayesplot)
color_scheme_set("brightblue")
# Set path to CmdStan
# (Note: if you installed CmdStan via install_cmdstan() with default settings
# then setting the path is unnecessary but the default below should still work.
# Otherwise use the `path` argument to specify the location of your
# CmdStan installation.)
set_cmdstan_path(path = NULL)
# Create a CmdStanModel object from a Stan program,
# here using the example model that comes with CmdStan
file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.stan")
mod <- cmdstan_model(file)
mod$print()
# Data as a named list (like RStan)
stan_data <- list(N = 10, y = c(0,1,0,0,0,0,0,0,0,1))
# Run MCMC using the 'sample' method
fit_mcmc <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
parallel_chains = 2
)
# Use 'posterior' package for summaries
fit_mcmc$summary()
# Get posterior draws
draws <- fit_mcmc$draws()
print(draws)
# Convert to data frame using posterior::as_draws_df
as_draws_df(draws)
# Plot posterior using bayesplot (ggplot2)
mcmc_hist(fit_mcmc$draws("theta"))
# Call CmdStan's diagnose and stansummary utilities
fit_mcmc$cmdstan_diagnose()
fit_mcmc$cmdstan_summary()
# For models fit using MCMC, if you like working with RStan's stanfit objects
# then you can create one with rstan::read_stan_csv()
# stanfit <- rstan::read_stan_csv(fit_mcmc$output_files())
# Run 'optimize' method to get a point estimate (default is Stan's LBFGS algorithm)
# and also demonstrate specifying data as a path to a file instead of a list
my_data_file <- file.path(cmdstan_path(), "examples/bernoulli/bernoulli.data.json")
fit_optim <- mod$optimize(data = my_data_file, seed = 123)
fit_optim$summary()
# Run 'variational' method to approximate the posterior (default is meanfield ADVI)
fit_vb <- mod$variational(data = stan_data, seed = 123)
fit_vb$summary()
# Plot approximate posterior using bayesplot
mcmc_hist(fit_vb$draws("theta"))
# Specifying initial values as a function
fit_mcmc_w_init_fun <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function() list(theta = runif(1))
)
fit_mcmc_w_init_fun_2 <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = function(chain_id) {
# silly but demonstrates optional use of chain_id
list(theta = 1 / (chain_id + 1))
}
)
fit_mcmc_w_init_fun_2$init()
# Specifying initial values as a list of lists
fit_mcmc_w_init_list <- mod$sample(
data = stan_data,
seed = 123,
chains = 2,
refresh = 0,
init = list(
list(theta = 0.75), # chain 1
list(theta = 0.25) # chain 2
)
)
fit_optim_w_init_list <- mod$optimize(
data = stan_data,
seed = 123,
init = list(
list(theta = 0.75)
)
)
fit_optim_w_init_list$init()
}
}
\seealso{
\code{\link[=install_cmdstan]{install_cmdstan()}}, \code{\link[=model-method-compile]{$compile()}},
\code{\link[=model-method-check_syntax]{$check_syntax()}}
The CmdStanR website
(\href{https://mc-stan.org/cmdstanr/}{mc-stan.org/cmdstanr}) for online
documentation and tutorials.
The Stan and CmdStan documentation:
\itemize{
\item Stan documentation: \href{https://mc-stan.org/users/documentation/}{mc-stan.org/users/documentation}
\item CmdStan User’s Guide: \href{https://mc-stan.org/docs/cmdstan-guide/}{mc-stan.org/docs/cmdstan-guide}
}
}
|
### Model 1
# a ~ Cue
#
### modelSpec is a list containing:
# 1. The parameters to fit, and the factors they depend on
# 2. constants in the model
# 3. The factors from (1), and their levels
# NOTE(review): each variablePars entry appears to give the number of
# levels a parameter varies over (1 = one shared value across conditions)
# -- confirm against the fitting code that consumes modelSpec.
modelSpec = list('variablePars'=list('B' = 1,
                                     't0' = 1,
                                     'eta1' = 1,
                                     'b0' = 1,
                                     'b1' = 1,
                                     'bU' = 1),
                 'constants'=c('st0'=0, 'A'=0, 'b2'=0, 'eta2'=-Inf, 'startingValue'=0.00001),
                 'condition'=c('SPD', 'ACC'),
                 'learningRule'='SARSA')
# Objective function used for fitting (defined elsewhere in the project).
obj <- objRLRWMultiCond
### transformLearningRate: expand the fitted "global" learning-rate
### parameter into per-trial vectors (one entry per trial; this model
### variant uses the same rate for both eta1 and eta2).
transformLearningRate <- function(pars, condition) {
  nTrials <- length(condition)
  # A single fitted learning rate is shared by both eta vectors.
  rate <- rep(pars[['eta1']], nTrials)
  list(eta1 = rate, eta2 = rate)
}
### the following function gets trial-by-trial DDM pars
### pars:      named list/vector of fitted parameters
### condition: per-trial condition labels (length = number of trials)
### ev:        expected-value matrix, one row per trial, two columns
###            (one per choice option)
transformChoicePars <- function(pars, condition, ev) {
  ### Gets trial-by-trial DDM parameters ###
  nTrials <- length(condition)
  # No trial-to-trial variability in A, t0 or st0: constant vectors.
  # (Removed dead preallocation of B/t0/A and the never-used `m`.)
  A <- rep(pars[['A']], nTrials)
  t0 <- rep(pars[['t0']], nTrials)
  st0 <- rep(pars[['st0']], nTrials)
  # Accumulating advantages (Van Ravenzwaaij paper): each accumulator's
  # drift is an intercept (b0) plus a linear trial drift (bU * trial)
  # plus the weighted value difference (b1).
  trialN <- seq_len(nrow(ev))  # seq_len is safe for zero-row ev, unlike 1:nrow(ev)
  v1 <- pars[['b0']] + pars[['bU']] * trialN + pars[['b1']] * (ev[, 1] - ev[, 2]) #+ pars[['b2']]*(ev[,1]+ev[,2])
  v2 <- pars[['b0']] + pars[['bU']] * trialN + pars[['b1']] * (ev[, 2] - ev[, 1]) #+ pars[['b2']]*(ev[,1]+ev[,2])
  # Threshold B is shared across conditions in this model variant; the
  # commented-out lines show the per-condition alternative.
  B <- pars[['B']]
  # B[condition=='SPD'] <- pars[['B.SPD']]
  # B[condition=='ACC'] <- pars[['B.ACC']]
  list(t0 = t0, B = B, A = A, v1 = v1, v2 = v2, st0 = st0)
}
# Default box constraints for every fittable parameter.
# Returns a named list; each element is c(lower, upper).
defaultBounds <- function() {
  lower <- list(a = 1e-3, A = 0, B = 1e-3, v1 = 1e-3, v2 = 1e-3, v = 1e-3,
                m = 1e-3, z = 0.2, t0 = 0, eta1 = 0, eta2 = 0, sz = 0,
                sv = 0, s = 0, vmax = 0, k = 0, st0 = 0, beta = 0,
                startValue = 0, b0 = 0, b1 = -100, b2 = -100, bU = 0)
  upper <- list(a = 5, A = 5, B = 10, v1 = 5, v2 = 5, v = 5, m = 50,
                z = 0.8, t0 = 0.5, eta1 = 1, eta2 = 1, sz = 5, sv = 5,
                s = 5, vmax = 100, k = 50, st0 = 0.5, beta = 50,
                startValue = 2, b0 = 10, b1 = 100, b2 = 100, bU = 0.05)
  # Zip the two parallel lists into c(lower, upper) pairs.
  Map(function(lo, hi) c(lo, hi), lower, upper)
}
|
/analysis/models/old_models/modelracingWaldAccAdvantagesU.R
|
permissive
|
StevenM1/RLDDM
|
R
| false
| false
| 2,462
|
r
|
### Model 1
# a ~ Cue
#
### modelSpec is a list containing:
# 1. The parameters to fit, and the factors they depend on
# 2. constants in the model
# 3. The factors from (1), and their levels
# NOTE(review): each variablePars entry appears to give the number of
# levels a parameter varies over (1 = shared across conditions).
modelSpec = list('variablePars'=list('B' = 1,
                                     't0' = 1,
                                     'eta1' = 1,
                                     'b0' = 1,
                                     'b1' = 1,
                                     'bU' = 1),
                 'constants'=c('st0'=0, 'A'=0, 'b2'=0, 'eta2'=-Inf, 'startingValue'=0.00001),
                 'condition'=c('SPD', 'ACC'),
                 'learningRule'='SARSA')
# Objective function used for fitting (defined elsewhere in the project).
obj <- objRLRWMultiCond
### transformLearningRate is a function transforming
### "global" parameters to trial-by-trial values, dependent
### on the condition
transformLearningRate <- function(pars, condition) {
  # "Declare"
  # Both eta vectors take the single fitted eta1 value, one entry per trial.
  eta1 <- eta2 <- rep(pars[['eta1']], length(condition))
  return(list(eta1=eta1, eta2=eta2))
}
### the following function gets trial-by-trial DDM pars
transformChoicePars <- function(pars, condition, ev) {
  ### Gets trial-by-trial DDM parameters ###
  nTrials = length(condition)
  # Preallocated vectors; note `m` is never used below.
  B <- t0 <- A <- m <- rep(NA, nTrials)
  # all current models have no variability in sz, sv, s, t0
  A = rep(pars[['A']], nTrials)
  t0 = rep(pars[['t0']], nTrials)
  st0 <- rep(pars[['st0']], nTrials)
  # Accumulating advantages (Van Ravenzwaaij paper)
  trialN <- 1:nrow(ev)
  # Drift rates: intercept (b0) + linear trial drift (bU) + weighted
  # value difference (b1); the b2 "sum" term is disabled.
  v1 = pars[['b0']] + pars[['bU']]*trialN + pars[['b1']]*(ev[,1]-ev[,2]) #+ pars[['b2']]*(ev[,1]+ev[,2])
  v2 = pars[['b0']] + pars[['bU']]*trialN + pars[['b1']]*(ev[,2]-ev[,1]) #+ pars[['b2']]*(ev[,1]+ev[,2])
  # B differs by condition
  # (per-condition thresholds are commented out in this model variant)
  B <- pars[['B']]
  # B[condition=='SPD'] <- pars[['B.SPD']]
  # B[condition=='ACC'] <- pars[['B.ACC']]
  # rescale z from [0, 1] to [0, a]
  return(list(t0=t0, B=B, A=A, v1=v1, v2=v2, st0=st0))
}
# Default box constraints for each fittable parameter: c(lower, upper).
defaultBounds <- function() {
  list('a'=c(1e-3, 5),
       'A'=c(0, 5),
       'B'=c(1e-3, 10),
       'v1'=c(1e-3, 5),
       'v2'=c(1e-3, 5),
       'v'=c(1e-3, 5),
       'm'=c(1e-3, 50),
       'z'=c(.2, .8),
       't0'=c(0, .5),
       'eta1'=c(0, 1),
       'eta2'=c(0, 1),
       'sz'=c(0, 5),
       'sv'=c(0, 5),
       's'=c(0, 5),
       'vmax'=c(0, 100),
       'k'=c(0, 50),
       'st0'=c(0, .5),
       'beta'=c(0, 50),
       'startValue'=c(0, 2),
       'b0'=c(0, 10),
       'b1'=c(-100, 100),
       'b2'=c(-100, 100),
       'bU'=c(0, 0.05))
}
|
##' Compute the F score, max diff ratio difference.
##' @title F score computation
##' @param geno.df a data.frame of one row with the genotype information for each sample.
##' @param tre.dist a distance object from the transcript relative expression.
##' @param tre.df a data.frame with the transcript relative expression.
##' @param svQTL should svQTL test be performed in addition to sQTL. Default is FALSE.
##' @return a data.frame with columns:
##' \item{F}{the F score.}
##' \item{nb.groups}{the number of groups created by the genotypes.}
##' \item{md}{the maximum difference in splicing ratios between genotype groups.}
##' \item{tr.first, tr.second}{the two transcripts that change the most.}
##' @author Jean Monlong
##' @keywords internal
compFscore <- function(geno.df, tre.dist, tre.df, svQTL=FALSE){
  ## inherits() is the robust class test: class(x) != "dist" yields a
  ## length > 1 condition (an error in if() since R 4.2) when the object
  ## carries several classes.
  if(!inherits(tre.dist, "dist")){
    stop("'tre.dist' must be a distance object.")
  }
  if(nrow(geno.df) > 1){
    stop(geno.df$snpId[1], " SNP is duplicated in the genotype file.")
  }
  if(!any(colnames(geno.df) %in% labels(tre.dist))){
    stop("No common samples between genotype and transcript ratios.")
  }
  ## Align genotypes to the samples present in the distance object.
  geno.snp <- geno.df[, labels(tre.dist)]
  ## -1 encodes a missing genotype: drop those samples from both the
  ## genotype vector and the distance matrix.
  if(any(geno.snp == -1)){
    non.na <- geno.snp > -1
    geno.snp <- geno.snp[non.na]
    tre.dist <- as.dist(as.matrix(tre.dist)[non.na, non.na])
  }
  groups.snp.f <- factor(as.numeric(geno.snp))
  ## sQTL F score: association between genotype groups and the
  ## transcript-ratio distances (svQTL=FALSE here by design).
  F.snp <- adonis.comp(tre.dist, groups.snp.f, permutations=2, svQTL=FALSE)
  mdt <- md.trans(tre.df, groups.snp.f, labels(tre.dist))
  res.df <- data.frame(F=F.snp,
                       nb.groups=nlevels(groups.snp.f),
                       md=mdt$md,
                       tr.first=mdt$tr.first,
                       tr.second=mdt$tr.second,
                       stringsAsFactors=FALSE)
  if(svQTL){
    ## Optional svQTL score on the same grouping.
    res.df$F.svQTL <- adonis.comp(tre.dist, groups.snp.f, permutations=2, svQTL=TRUE)
  }
  return(res.df)
}
|
/R/compFscore.R
|
no_license
|
DuyDN/sQTLseekeR
|
R
| false
| false
| 1,788
|
r
|
##' Compute the F score, max diff ratio difference.
##' @title F score computation
##' @param geno.df a data.frame of one row with the genotype information for each sample.
##' @param tre.dist a distance object from the transcript relative expression.
##' @param tre.df a data.frame with the transcript relative expression.
##' @param svQTL should svQTL test be performed in addition to sQTL. Default is FALSE.
##' @return a data.frame with columns:
##' \item{F}{the F score.}
##' \item{nb.groups}{the number of groups created by the genotypes.}
##' \item{md}{the maximum difference in splicing ratios between genotype groups.}
##' \item{tr.first, tr.second}{the two transcripts that change the most.}
##' @author Jean Monlong
##' @keywords internal
compFscore <- function(geno.df, tre.dist, tre.df,svQTL=FALSE){
## Input validation.
if(class(tre.dist)!="dist"){
stop("'tre.dist' must be a distance object.")
}
if(nrow(geno.df)>1){
stop(geno.df$snpId[1], " SNP is duplicated in the genotype file.")
}
if(!any(colnames(geno.df) %in% labels(tre.dist))){
stop("No common samples between genotype and transcript ratios.")
}
## Align genotypes to the samples present in the distance object.
geno.snp = geno.df[,labels(tre.dist)]
## -1 encodes a missing genotype: drop those samples from both the
## genotype vector and the distance matrix.
if(any(geno.snp==-1)){
non.na = geno.snp > -1
geno.snp = geno.snp[non.na]
tre.dist = as.dist(as.matrix(tre.dist)[non.na, non.na])
}
groups.snp.f = factor(as.numeric(geno.snp))
## sQTL F score on the genotype grouping (svQTL=FALSE here by design).
F.snp = adonis.comp(tre.dist,groups.snp.f,permutations=2,svQTL=FALSE)
mdt = md.trans(tre.df, groups.snp.f, labels(tre.dist))
res.df = data.frame(F=F.snp,
nb.groups=nlevels(groups.snp.f) ,
md=mdt$md,
tr.first=mdt$tr.first,
tr.second=mdt$tr.second,
stringsAsFactors=FALSE)
if(svQTL){
## Optional svQTL score on the same grouping.
res.df$F.svQTL = adonis.comp(tre.dist,groups.snp.f,permutations=2,svQTL=TRUE)
}
return(res.df)
}
|
#' AI Platform Training & Prediction API
#' An API to enable creating and using machine learning models.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2022-07-13 10:40:00
#' filename: /Users/justin/Sync/projects/github.com/justinjm/autoGoogleAPI/googlemlv1.auto/R/ml_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' @docType package
#' @name ml_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @param x Any object; typically a (possibly nested) list.
#' @return TRUE if `x` is NULL, an empty list, or a list whose elements
#'   are all NULL; FALSE otherwise.
#' @keywords internal
is.NullOb <- function(x) {
  # `||` (scalar, short-circuiting) instead of vectorized `|`: both
  # operands are scalars here, and short-circuiting skips the vapply
  # when x itself is NULL. vapply replaces sapply for a guaranteed
  # logical result.
  is.null(x) || all(vapply(x, is.null, logical(1)))
}
#' Recursively step down into list, removing all such objects
#'
#' Drops every element for which is.NullOb() is TRUE, then recurses
#' into any remaining sub-lists.
#'
#' @keywords internal
rmNullObs <- function(x) {
  kept <- Filter(function(el) !is.NullOb(el), x)
  lapply(kept, function(el) {
    if (is.list(el)) rmNullObs(el) else el
  })
}
#' Get the service account information associated with your project. You need
#' this information in order to grant the service account permissions for the
#' Google Cloud Storage location where you put your model training code for
#' training the model with Google Cloud Machine Learning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.getConfig <- function(name) {
  # ml.projects.getConfig
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:getConfig", name)
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' Performs explanation on the data in the request.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param GoogleCloudMlV1__ExplainRequest The \link{GoogleCloudMlV1__ExplainRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__ExplainRequest functions
#' @export
projects.explain <- function(GoogleCloudMlV1__ExplainRequest, name) {
  # ml.projects.explain
  # Validate the request body class before touching the network.
  stopifnot(inherits(GoogleCloudMlV1__ExplainRequest, "gar_GoogleCloudMlV1__ExplainRequest"))
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:explain", name)
  post <- googleAuthR::gar_api_generator(endpoint, "POST",
                                         data_parse_function = function(x) x)
  post(the_body = GoogleCloudMlV1__ExplainRequest)
}
#' Performs online prediction on the data in the request.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param GoogleCloudMlV1__PredictRequest The \link{GoogleCloudMlV1__PredictRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__PredictRequest functions
#' @export
projects.predict <- function(GoogleCloudMlV1__PredictRequest, name) {
  # ml.projects.predict
  # Validate the request body class before touching the network.
  stopifnot(inherits(GoogleCloudMlV1__PredictRequest, "gar_GoogleCloudMlV1__PredictRequest"))
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:predict", name)
  post <- googleAuthR::gar_api_generator(endpoint, "POST",
                                         data_parse_function = function(x) x)
  post(the_body = GoogleCloudMlV1__PredictRequest)
}
#' List all locations that provides at least one type of CMLE capability.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} or
#' \code{https://www.googleapis.com/auth/cloud-platform.read-only} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param parent Required
#' @param pageSize Optional
#' @param pageToken Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.list <- function(parent, pageSize = NULL, pageToken = NULL) {
  # ml.projects.locations.list
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/locations", parent)
  # Drop unset (NULL) query parameters before building the request.
  query <- rmNullObs(list(pageSize = pageSize, pageToken = pageToken))
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET", pars_args = query,
                                          data_parse_function = function(x) x)
  fetch()
}
#' Get the complete list of CMLE capabilities in a location, along with their
#' location-specific properties.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} or
#' \code{https://www.googleapis.com/auth/cloud-platform.read-only} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.get <- function(name) {
  # ml.projects.locations.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' Lists all the studies in a region for an associated project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.list <- function(parent) {
  # ml.projects.locations.studies.list
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/studies", parent)
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' Creates a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param GoogleCloudMlV1__Study The \link{GoogleCloudMlV1__Study} object to pass to this method
#' @param parent Required
#' @param studyId Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Study functions
#' @export
projects.locations.studies.create <- function(GoogleCloudMlV1__Study, parent, studyId = NULL) {
  # ml.projects.locations.studies.create
  # Validate the request body class before touching the network.
  stopifnot(inherits(GoogleCloudMlV1__Study, "gar_GoogleCloudMlV1__Study"))
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/studies", parent)
  # Drop unset (NULL) query parameters before building the request.
  query <- rmNullObs(list(studyId = studyId))
  post <- googleAuthR::gar_api_generator(endpoint, "POST", pars_args = query,
                                         data_parse_function = function(x) x)
  post(the_body = GoogleCloudMlV1__Study)
}
#' Gets a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.get <- function(name) {
  # ml.projects.locations.studies.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  fetch <- googleAuthR::gar_api_generator(endpoint, "GET",
                                          data_parse_function = function(x) x)
  fetch()
}
#' Deletes a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Requires the \code{https://www.googleapis.com/auth/cloud-platform} scope;
#' run \code{googleAuthR::gar_auth()} to authenticate
#' (see \code{\link[googleAuthR]{gar_auth}} for details).
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.delete <- function(name) {
  # ml.projects.locations.studies.delete
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  remove_study <- googleAuthR::gar_api_generator(endpoint, "DELETE",
                                                 data_parse_function = function(x) x)
  remove_study()
}
#' Adds one or more trials to a study, with parameter values suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of trial suggestions. When this long-running operation succeeds, it will contain a SuggestTrialsResponse.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__SuggestTrialsRequest The \link{GoogleCloudMlV1__SuggestTrialsRequest} object to pass to this method
#' @param parent Required. Resource name of the study the trials belong to.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__SuggestTrialsRequest functions
#' @export
projects.locations.studies.trials.suggest <- function(GoogleCloudMlV1__SuggestTrialsRequest,
    parent) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__SuggestTrialsRequest, "gar_GoogleCloudMlV1__SuggestTrialsRequest"))
  # ml.projects.locations.studies.trials.suggest
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/trials:suggest", parent)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__SuggestTrialsRequest)
}
#' Lists the trials associated with a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required. Resource name of the study whose trials are listed.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.list <- function(parent) {
  # ml.projects.locations.studies.trials.list
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/trials", parent)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "GET",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Stops a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__StopTrialRequest The \link{GoogleCloudMlV1__StopTrialRequest} object to pass to this method
#' @param name Required. Resource name of the trial to stop.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__StopTrialRequest functions
#' @export
projects.locations.studies.trials.stop <- function(GoogleCloudMlV1__StopTrialRequest,
    name) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__StopTrialRequest, "gar_GoogleCloudMlV1__StopTrialRequest"))
  # ml.projects.locations.studies.trials.stop
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:stop", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__StopTrialRequest)
}
#' Adds a measurement of the objective metrics to a trial. This measurement is assumed to have been taken before the trial is complete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__AddTrialMeasurementRequest The \link{GoogleCloudMlV1__AddTrialMeasurementRequest} object to pass to this method
#' @param name Required. Resource name of the trial the measurement is added to.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__AddTrialMeasurementRequest functions
#' @export
projects.locations.studies.trials.addMeasurement <- function(GoogleCloudMlV1__AddTrialMeasurementRequest,
    name) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__AddTrialMeasurementRequest, "gar_GoogleCloudMlV1__AddTrialMeasurementRequest"))
  # ml.projects.locations.studies.trials.addMeasurement
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:addMeasurement", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__AddTrialMeasurementRequest)
}
#' Adds a user provided trial to a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Trial The \link{GoogleCloudMlV1__Trial} object to pass to this method
#' @param parent Required. Resource name of the study the trial belongs to.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Trial functions
#' @export
projects.locations.studies.trials.create <- function(GoogleCloudMlV1__Trial, parent) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__Trial, "gar_GoogleCloudMlV1__Trial"))
  # ml.projects.locations.studies.trials.create
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/trials", parent)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__Trial)
}
#' Lists the pareto-optimal trials for multi-objective study or the optimal trials for single-objective study. The definition of pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__ListOptimalTrialsRequest The \link{GoogleCloudMlV1__ListOptimalTrialsRequest} object to pass to this method
#' @param parent Required. Resource name of the study whose optimal trials are listed.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__ListOptimalTrialsRequest functions
#' @export
projects.locations.studies.trials.listOptimalTrials <- function(GoogleCloudMlV1__ListOptimalTrialsRequest,
    parent) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__ListOptimalTrialsRequest, "gar_GoogleCloudMlV1__ListOptimalTrialsRequest"))
  # ml.projects.locations.studies.trials.listOptimalTrials
  endpoint <- sprintf(
    "https://ml.googleapis.com/v1/{+parent}/trials:listOptimalTrials",
    parent
  )
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__ListOptimalTrialsRequest)
}
#' Gets a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the trial to fetch.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.get <- function(name) {
  # ml.projects.locations.studies.trials.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "GET",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Checks whether a trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a CheckTrialEarlyStoppingStateResponse.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest The \link{GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest} object to pass to this method
#' @param name Required. Resource name of the trial to check.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest functions
#' @export
projects.locations.studies.trials.checkEarlyStoppingState <- function(GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest,
    name) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest, "gar_GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest"))
  # ml.projects.locations.studies.trials.checkEarlyStoppingState
  endpoint <- sprintf(
    "https://ml.googleapis.com/v1/{+name}:checkEarlyStoppingState",
    name
  )
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest)
}
#' Deletes a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the trial to delete.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.delete <- function(name) {
  # ml.projects.locations.studies.trials.delete
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "DELETE",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Marks a trial as complete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CompleteTrialRequest The \link{GoogleCloudMlV1__CompleteTrialRequest} object to pass to this method
#' @param name Required. Resource name of the trial to mark complete.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CompleteTrialRequest functions
#' @export
projects.locations.studies.trials.complete <- function(GoogleCloudMlV1__CompleteTrialRequest,
    name) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__CompleteTrialRequest, "gar_GoogleCloudMlV1__CompleteTrialRequest"))
  # ml.projects.locations.studies.trials.complete
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:complete", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__CompleteTrialRequest)
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.operations.cancel <- function(name) {
  # ml.projects.locations.operations.cancel
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:cancel", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.operations.get <- function(name) {
  # ml.projects.locations.operations.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "GET",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param resource REQUIRED: The resource for which the policy is being requested
#' @param options.requestedPolicyVersion Optional. Sent as the
#'   \code{options.requestedPolicyVersion} query parameter when non-NULL.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.getIamPolicy <- function(resource, options.requestedPolicyVersion = NULL) {
  url <- sprintf("https://ml.googleapis.com/v1/{+resource}:getIamPolicy", resource)
  # ml.projects.models.getIamPolicy
  # Only explicitly-set query parameters are sent; NULLs are stripped by rmNullObs().
  pars <- list(options.requestedPolicyVersion = options.requestedPolicyVersion)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
      data_parse_function = function(x) x)
  f()
}
#' Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__SetIamPolicyRequest The \link{GoogleIamV1__SetIamPolicyRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy is being specified
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__SetIamPolicyRequest functions
#' @export
projects.models.setIamPolicy <- function(GoogleIamV1__SetIamPolicyRequest, resource) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleIamV1__SetIamPolicyRequest, "gar_GoogleIamV1__SetIamPolicyRequest"))
  # ml.projects.models.setIamPolicy
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+resource}:setIamPolicy", resource)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleIamV1__SetIamPolicyRequest)
}
#' Lists the models in a project. Each project can contain multiple models, and each model can have multiple versions. If there are no models that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required. Name of the project whose models are to be listed.
#' @param pageSize Optional. Sent as the \code{pageSize} query parameter when non-NULL.
#' @param pageToken Optional. Sent as the \code{pageToken} query parameter when non-NULL.
#' @param filter Optional. Sent as the \code{filter} query parameter when non-NULL.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.list <- function(parent, pageSize = NULL, pageToken = NULL, filter = NULL) {
  url <- sprintf("https://ml.googleapis.com/v1/{+parent}/models", parent)
  # ml.projects.models.list
  # Only explicitly-set query parameters are sent; NULLs are stripped by rmNullObs().
  pars <- list(pageSize = pageSize, pageToken = pageToken, filter = filter)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
      data_parse_function = function(x) x)
  f()
}
#' Updates a specific model resource. Currently the only supported fields to update are `description` and `default_version.name`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Model The \link{GoogleCloudMlV1__Model} object to pass to this method
#' @param name Required. Resource name of the model to update.
#' @param updateMask Required by the API; sent as the \code{updateMask} query
#'   parameter when non-NULL.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Model functions
#' @export
projects.models.patch <- function(GoogleCloudMlV1__Model, name, updateMask = NULL) {
  url <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  # ml.projects.models.patch
  # Only explicitly-set query parameters are sent; NULLs are stripped by rmNullObs().
  pars <- list(updateMask = updateMask)
  f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
      data_parse_function = function(x) x)
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__Model, "gar_GoogleCloudMlV1__Model"))
  f(the_body = GoogleCloudMlV1__Model)
}
#' Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may 'fail open' without warning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__TestIamPermissionsRequest The \link{GoogleIamV1__TestIamPermissionsRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy detail is being requested
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__TestIamPermissionsRequest functions
#' @export
projects.models.testIamPermissions <- function(GoogleIamV1__TestIamPermissionsRequest,
    resource) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleIamV1__TestIamPermissionsRequest, "gar_GoogleIamV1__TestIamPermissionsRequest"))
  # ml.projects.models.testIamPermissions
  endpoint <- sprintf(
    "https://ml.googleapis.com/v1/{+resource}:testIamPermissions",
    resource
  )
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleIamV1__TestIamPermissionsRequest)
}
#' Gets information about a model, including its name, the description (if set), and the default version (if at least one version of the model has been deployed).
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the model to fetch.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.get <- function(name) {
  # ml.projects.models.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "GET",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Creates a model which will later contain one or more versions. You must add at least one version before you can request predictions from the model. Add versions by calling projects.models.versions.create.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Model The \link{GoogleCloudMlV1__Model} object to pass to this method
#' @param parent Required. Name of the project the model belongs to.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Model functions
#' @export
projects.models.create <- function(GoogleCloudMlV1__Model, parent) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__Model, "gar_GoogleCloudMlV1__Model"))
  # ml.projects.models.create
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+parent}/models", parent)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__Model)
}
#' Deletes a model. You can only delete a model if there are no versions in it. You can delete versions by calling projects.models.versions.delete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the model to delete.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.delete <- function(name) {
  # ml.projects.models.delete
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "DELETE",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Gets information about a model version. Models can have multiple versions. You can call projects.models.versions.list to get the same information that this method returns for all of the versions of a model.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the version to fetch.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.get <- function(name) {
  # ml.projects.models.versions.get
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "GET",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Gets basic information about all the versions of a model. If you expect that a model has many versions, or if you need to handle only a limited number of results at a time, you can request that the list be retrieved in batches (called pages). If there are no versions that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required. Name of the model whose versions are to be listed.
#' @param pageToken Optional. Sent as the \code{pageToken} query parameter when non-NULL.
#' @param filter Optional. Sent as the \code{filter} query parameter when non-NULL.
#' @param pageSize Optional. Sent as the \code{pageSize} query parameter when non-NULL.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.list <- function(parent, pageToken = NULL, filter = NULL,
    pageSize = NULL) {
  url <- sprintf("https://ml.googleapis.com/v1/{+parent}/versions", parent)
  # ml.projects.models.versions.list
  # Only explicitly-set query parameters are sent; NULLs are stripped by rmNullObs().
  pars <- list(pageToken = pageToken, filter = filter, pageSize = pageSize)
  f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
      data_parse_function = function(x) x)
  f()
}
#' Deletes a model version. Each model can have multiple versions deployed and in use at any given time. Use this method to remove a single version. Note: You cannot delete the version that is set as the default version of the model unless it is the only remaining version.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required. Resource name of the version to delete.
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.delete <- function(name) {
  # ml.projects.models.versions.delete
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "DELETE",
    data_parse_function = function(x) x
  )
  api_call()
}
#' Designates a version to be the default for the model. The default version is used for prediction requests made against the model that don't specify a version. The first version to be created for a model is automatically set as the default. You must make any subsequent changes to the default version setting manually using this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__SetDefaultVersionRequest The \link{GoogleCloudMlV1__SetDefaultVersionRequest} object to pass to this method
#' @param name Required. Resource name of the version to make the default.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__SetDefaultVersionRequest functions
#' @export
projects.models.versions.setDefault <- function(GoogleCloudMlV1__SetDefaultVersionRequest,
    name) {
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__SetDefaultVersionRequest, "gar_GoogleCloudMlV1__SetDefaultVersionRequest"))
  # ml.projects.models.versions.setDefault
  endpoint <- sprintf("https://ml.googleapis.com/v1/{+name}:setDefault", name)
  api_call <- googleAuthR::gar_api_generator(
    endpoint, "POST",
    data_parse_function = function(x) x
  )
  api_call(the_body = GoogleCloudMlV1__SetDefaultVersionRequest)
}
#' Updates the specified Version resource. Currently the only update-able fields are `description`, `requestLoggingConfig`, `autoScaling.minNodes`, and `manualScaling.nodes`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Version The \link{GoogleCloudMlV1__Version} object to pass to this method
#' @param name Required. Resource name of the version to update.
#' @param updateMask Required by the API; sent as the \code{updateMask} query
#'   parameter when non-NULL.
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Version functions
#' @export
projects.models.versions.patch <- function(GoogleCloudMlV1__Version, name, updateMask = NULL) {
  url <- sprintf("https://ml.googleapis.com/v1/{+name}", name)
  # ml.projects.models.versions.patch
  # Only explicitly-set query parameters are sent; NULLs are stripped by rmNullObs().
  pars <- list(updateMask = updateMask)
  f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
      data_parse_function = function(x) x)
  # Validate the request body class before issuing the call.
  stopifnot(inherits(GoogleCloudMlV1__Version, "gar_GoogleCloudMlV1__Version"))
  f(the_body = GoogleCloudMlV1__Version)
}
#' Creates a new version of a model from a trained TensorFlow model. If the version created in the cloud by this call is the first deployed version of the specified model, it will be made the default version of the model. When you add a version to a model that already has one or more versions, the default version does not automatically change. If you want a new version to be the default, you must call projects.models.versions.setDefault.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Version The \link{GoogleCloudMlV1__Version} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Version functions
#' @export
projects.models.versions.create <- function(GoogleCloudMlV1__Version, parent) {
    # %s expands the parent model resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/versions", parent)
    # ml.projects.models.versions.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Version, "gar_GoogleCloudMlV1__Version"))
    f(the_body = GoogleCloudMlV1__Version)
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.cancel <- function(name) {
    # %s expands the operation resource name; the generated "{+name}"
    # template had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:cancel", name)
    # ml.projects.operations.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    f()
}
#' Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `'/v1/{name=users/*}/operations'` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation's parent resource
#' @param filter The standard list filter
#' @param pageToken The standard list page token
#' @param pageSize The standard list page size
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.list <- function(name, filter = NULL, pageToken = NULL, pageSize = NULL) {
    # %s expands the parent resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/operations", name)
    # ml.projects.operations.list
    # Optional query parameters; rmNullObs() strips entries left as NULL.
    pars <- list(filter = filter, pageToken = pageToken, pageSize = pageSize)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.get <- function(name) {
    # %s expands the operation resource name; the generated "{+name}"
    # template had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.operations.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Describes a job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.get <- function(name) {
    # %s expands the job resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.jobs.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Cancels a running job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CancelJobRequest The \link{GoogleCloudMlV1__CancelJobRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CancelJobRequest functions
#' @export
projects.jobs.cancel <- function(GoogleCloudMlV1__CancelJobRequest, name) {
    # %s expands the job resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:cancel", name)
    # ml.projects.jobs.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__CancelJobRequest, "gar_GoogleCloudMlV1__CancelJobRequest"))
    f(the_body = GoogleCloudMlV1__CancelJobRequest)
}
#' Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__SetIamPolicyRequest The \link{GoogleIamV1__SetIamPolicyRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy is being specified
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__SetIamPolicyRequest functions
#' @export
projects.jobs.setIamPolicy <- function(GoogleIamV1__SetIamPolicyRequest, resource) {
    # %s expands the resource path; the generated "{+resource}" template
    # had no format specifier, so sprintf silently ignored `resource`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:setIamPolicy", resource)
    # ml.projects.jobs.setIamPolicy
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleIamV1__SetIamPolicyRequest, "gar_GoogleIamV1__SetIamPolicyRequest"))
    f(the_body = GoogleIamV1__SetIamPolicyRequest)
}
#' Updates a specific job resource. Currently the only supported fields to update are `labels`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Job The \link{GoogleCloudMlV1__Job} object to pass to this method
#' @param name Required
#' @param updateMask Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Job functions
#' @export
projects.jobs.patch <- function(GoogleCloudMlV1__Job, name, updateMask = NULL) {
    # %s expands the job resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.jobs.patch
    # Optional query parameters; rmNullObs() strips entries left as NULL.
    pars <- list(updateMask = updateMask)
    f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Job, "gar_GoogleCloudMlV1__Job"))
    f(the_body = GoogleCloudMlV1__Job)
}
#' Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may 'fail open' without warning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__TestIamPermissionsRequest The \link{GoogleIamV1__TestIamPermissionsRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy detail is being requested
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__TestIamPermissionsRequest functions
#' @export
projects.jobs.testIamPermissions <- function(GoogleIamV1__TestIamPermissionsRequest,
    resource) {
    # %s expands the resource path; the generated "{+resource}" template
    # had no format specifier, so sprintf silently ignored `resource`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:testIamPermissions", resource)
    # ml.projects.jobs.testIamPermissions
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleIamV1__TestIamPermissionsRequest, "gar_GoogleIamV1__TestIamPermissionsRequest"))
    f(the_body = GoogleIamV1__TestIamPermissionsRequest)
}
#' Creates a training or a batch prediction job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Job The \link{GoogleCloudMlV1__Job} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Job functions
#' @export
projects.jobs.create <- function(GoogleCloudMlV1__Job, parent) {
    # %s expands the parent project resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/jobs", parent)
    # ml.projects.jobs.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Job, "gar_GoogleCloudMlV1__Job"))
    f(the_body = GoogleCloudMlV1__Job)
}
#' Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param resource REQUIRED: The resource for which the policy is being requested
#' @param options.requestedPolicyVersion Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.getIamPolicy <- function(resource, options.requestedPolicyVersion = NULL) {
    # %s expands the resource path; the generated "{+resource}" template
    # had no format specifier, so sprintf silently ignored `resource`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:getIamPolicy", resource)
    # ml.projects.jobs.getIamPolicy
    # Optional query parameters; rmNullObs() strips entries left as NULL.
    pars <- list(options.requestedPolicyVersion = options.requestedPolicyVersion)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Lists the jobs in the project. If there are no jobs that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/cloud-platform.read-only)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @param pageSize Optional
#' @param pageToken Optional
#' @param filter Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.list <- function(parent, pageSize = NULL, pageToken = NULL, filter = NULL) {
    # %s expands the parent project resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/jobs", parent)
    # ml.projects.jobs.list
    # Optional query parameters; rmNullObs() strips entries left as NULL.
    pars <- list(pageSize = pageSize, pageToken = pageToken, filter = filter)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
|
/googlemlv1.auto/R/ml_functions.R
|
no_license
|
justinjm/autoGoogleAPI
|
R
| false
| false
| 60,402
|
r
|
#' AI Platform Training & Prediction API
#' An API to enable creating and using machine learning models.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2022-07-13 10:40:00
#' filename: /Users/justin/Sync/projects/github.com/justinjm/autoGoogleAPI/googlemlv1.auto/R/ml_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' @docType package
#' @name ml_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @keywords internal
is.NullOb <- function(x) is.null(x) | all(sapply(x, is.null))
#' Recursively step down into list, removing all such objects
#'
#' @keywords internal
rmNullObs <- function(x) {
    # Drop every element of `x` that is NULL (or a list made entirely of
    # NULLs), then descend into any surviving sub-lists and prune them the
    # same way.  Used to strip unset optional API parameters before a call.
    kept <- Filter(function(el) !is.NullOb(el), x)
    lapply(kept, function(el) {
        if (is.list(el)) rmNullObs(el) else el
    })
}
#' Get the service account information associated with your project. You need this information in order to grant the service account permissions for the Google Cloud Storage location where you put your model training code for training the model with Google Cloud Machine Learning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.getConfig <- function(name) {
    # %s expands the project resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:getConfig", name)
    # ml.projects.getConfig
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Performs explanation on the data in the request. {% dynamic include '/ai-platform/includes/___explain-request' %}
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__ExplainRequest The \link{GoogleCloudMlV1__ExplainRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__ExplainRequest functions
#' @export
projects.explain <- function(GoogleCloudMlV1__ExplainRequest, name) {
    # %s expands the model/version resource name; the generated "{+name}"
    # template had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:explain", name)
    # ml.projects.explain
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__ExplainRequest, "gar_GoogleCloudMlV1__ExplainRequest"))
    f(the_body = GoogleCloudMlV1__ExplainRequest)
}
#' Performs online prediction on the data in the request. {% dynamic include '/ai-platform/includes/___predict-request' %}
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__PredictRequest The \link{GoogleCloudMlV1__PredictRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__PredictRequest functions
#' @export
projects.predict <- function(GoogleCloudMlV1__PredictRequest, name) {
    # %s expands the model/version resource name; the generated "{+name}"
    # template had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:predict", name)
    # ml.projects.predict
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__PredictRequest, "gar_GoogleCloudMlV1__PredictRequest"))
    f(the_body = GoogleCloudMlV1__PredictRequest)
}
#' List all locations that provides at least one type of CMLE capability.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @param pageSize Optional
#' @param pageToken Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.list <- function(parent, pageSize = NULL, pageToken = NULL) {
    # %s expands the parent project resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/locations", parent)
    # ml.projects.locations.list
    # Optional paging parameters; rmNullObs() strips entries left as NULL.
    pars <- list(pageSize = pageSize, pageToken = pageToken)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Get the complete list of CMLE capabilities in a location, along with their location-specific properties.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/cloud-platform.read-only)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.get <- function(name) {
    # %s expands the location resource name; the generated "{+name}"
    # template had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Lists all the studies in a region for an associated project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.list <- function(parent) {
    # %s expands the parent location resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/studies", parent)
    # ml.projects.locations.studies.list
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Creates a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Study The \link{GoogleCloudMlV1__Study} object to pass to this method
#' @param parent Required
#' @param studyId Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Study functions
#' @export
projects.locations.studies.create <- function(GoogleCloudMlV1__Study, parent, studyId = NULL) {
    # %s expands the parent location resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/studies", parent)
    # ml.projects.locations.studies.create
    # Query parameters; rmNullObs() strips entries left as NULL.
    pars <- list(studyId = studyId)
    f <- googleAuthR::gar_api_generator(url, "POST", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Study, "gar_GoogleCloudMlV1__Study"))
    f(the_body = GoogleCloudMlV1__Study)
}
#' Gets a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.get <- function(name) {
    # %s expands the study resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.studies.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Deletes a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.delete <- function(name) {
    # %s expands the study resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.studies.delete
    f <- googleAuthR::gar_api_generator(url, "DELETE", data_parse_function = function(x) x)
    f()
}
#' Adds one or more trials to a study, with parameter values suggested by AI Platform Vizier. Returns a long-running operation associated with the generation of trial suggestions. When this long-running operation succeeds, it will contain a SuggestTrialsResponse.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__SuggestTrialsRequest The \link{GoogleCloudMlV1__SuggestTrialsRequest} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__SuggestTrialsRequest functions
#' @export
projects.locations.studies.trials.suggest <- function(GoogleCloudMlV1__SuggestTrialsRequest,
    parent) {
    # %s expands the parent study resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/trials:suggest", parent)
    # ml.projects.locations.studies.trials.suggest
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__SuggestTrialsRequest, "gar_GoogleCloudMlV1__SuggestTrialsRequest"))
    f(the_body = GoogleCloudMlV1__SuggestTrialsRequest)
}
#' Lists the trials associated with a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.list <- function(parent) {
    # %s expands the parent study resource name; the generated "{+parent}"
    # template had no format specifier, so sprintf silently ignored `parent`.
    url <- sprintf("https://ml.googleapis.com/v1/%s/trials", parent)
    # ml.projects.locations.studies.trials.list
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Stops a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__StopTrialRequest The \link{GoogleCloudMlV1__StopTrialRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__StopTrialRequest functions
#' @export
projects.locations.studies.trials.stop <- function(GoogleCloudMlV1__StopTrialRequest,
    name) {
    # %s expands the trial resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:stop", name)
    # ml.projects.locations.studies.trials.stop
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__StopTrialRequest, "gar_GoogleCloudMlV1__StopTrialRequest"))
    f(the_body = GoogleCloudMlV1__StopTrialRequest)
}
#' Adds a measurement of the objective metrics to a trial. This measurement is assumed to have been taken before the trial is complete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__AddTrialMeasurementRequest The \link{GoogleCloudMlV1__AddTrialMeasurementRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__AddTrialMeasurementRequest functions
#' @export
projects.locations.studies.trials.addMeasurement <- function(GoogleCloudMlV1__AddTrialMeasurementRequest,
    name) {
    # %s expands the trial resource name; the generated "{+name}" template
    # had no format specifier, so sprintf silently ignored `name`.
    url <- sprintf("https://ml.googleapis.com/v1/%s:addMeasurement", name)
    # ml.projects.locations.studies.trials.addMeasurement
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__AddTrialMeasurementRequest, "gar_GoogleCloudMlV1__AddTrialMeasurementRequest"))
    f(the_body = GoogleCloudMlV1__AddTrialMeasurementRequest)
}
#' Adds a user provided trial to a study.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Trial The \link{GoogleCloudMlV1__Trial} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Trial functions
#' @export
projects.locations.studies.trials.create <- function(GoogleCloudMlV1__Trial, parent) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/trials", parent)
    # ml.projects.locations.studies.trials.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Trial, "gar_GoogleCloudMlV1__Trial"))
    f(the_body = GoogleCloudMlV1__Trial)
}
#' Lists the pareto-optimal trials for multi-objective study or the optimal trials for single-objective study. The definition of pareto-optimal can be checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__ListOptimalTrialsRequest The \link{GoogleCloudMlV1__ListOptimalTrialsRequest} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__ListOptimalTrialsRequest functions
#' @export
projects.locations.studies.trials.listOptimalTrials <- function(GoogleCloudMlV1__ListOptimalTrialsRequest,
    parent) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/trials:listOptimalTrials",
        parent)
    # ml.projects.locations.studies.trials.listOptimalTrials
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__ListOptimalTrialsRequest, "gar_GoogleCloudMlV1__ListOptimalTrialsRequest"))
    f(the_body = GoogleCloudMlV1__ListOptimalTrialsRequest)
}
#' Gets a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.get <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.studies.trials.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Checks whether a trial should stop or not. Returns a long-running operation. When the operation is successful, it will contain a CheckTrialEarlyStoppingStateResponse.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest The \link{GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest functions
#' @export
projects.locations.studies.trials.checkEarlyStoppingState <- function(GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest,
    name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:checkEarlyStoppingState",
        name)
    # ml.projects.locations.studies.trials.checkEarlyStoppingState
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest, "gar_GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest"))
    f(the_body = GoogleCloudMlV1__CheckTrialEarlyStoppingStateRequest)
}
#' Deletes a trial.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.studies.trials.delete <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.studies.trials.delete
    f <- googleAuthR::gar_api_generator(url, "DELETE", data_parse_function = function(x) x)
    f()
}
#' Marks a trial as complete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CompleteTrialRequest The \link{GoogleCloudMlV1__CompleteTrialRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CompleteTrialRequest functions
#' @export
projects.locations.studies.trials.complete <- function(GoogleCloudMlV1__CompleteTrialRequest,
    name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:complete", name)
    # ml.projects.locations.studies.trials.complete
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__CompleteTrialRequest, "gar_GoogleCloudMlV1__CompleteTrialRequest"))
    f(the_body = GoogleCloudMlV1__CompleteTrialRequest)
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.operations.cancel <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:cancel", name)
    # ml.projects.locations.operations.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    f()
}
#' Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.locations.operations.get <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.locations.operations.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param resource REQUIRED: The resource for which the policy is being requested
#' @param options.requestedPolicyVersion Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.getIamPolicy <- function(resource, options.requestedPolicyVersion = NULL) {
    # Fixed: the autogenerated "{+resource}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:getIamPolicy", resource)
    # ml.projects.models.getIamPolicy
    pars = list(options.requestedPolicyVersion = options.requestedPolicyVersion)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__SetIamPolicyRequest The \link{GoogleIamV1__SetIamPolicyRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy is being specified
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__SetIamPolicyRequest functions
#' @export
projects.models.setIamPolicy <- function(GoogleIamV1__SetIamPolicyRequest, resource) {
    # Fixed: the autogenerated "{+resource}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:setIamPolicy", resource)
    # ml.projects.models.setIamPolicy
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleIamV1__SetIamPolicyRequest, "gar_GoogleIamV1__SetIamPolicyRequest"))
    f(the_body = GoogleIamV1__SetIamPolicyRequest)
}
#' Lists the models in a project. Each project can contain multiple models, and each model can have multiple versions. If there are no models that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @param pageSize Optional
#' @param pageToken Optional
#' @param filter Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.list <- function(parent, pageSize = NULL, pageToken = NULL, filter = NULL) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/models", parent)
    # ml.projects.models.list
    pars = list(pageSize = pageSize, pageToken = pageToken, filter = filter)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Updates a specific model resource. Currently the only supported fields to update are `description` and `default_version.name`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Model The \link{GoogleCloudMlV1__Model} object to pass to this method
#' @param name Required
#' @param updateMask Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Model functions
#' @export
projects.models.patch <- function(GoogleCloudMlV1__Model, name, updateMask = NULL) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.patch
    pars = list(updateMask = updateMask)
    f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Model, "gar_GoogleCloudMlV1__Model"))
    f(the_body = GoogleCloudMlV1__Model)
}
#' Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may 'fail open' without warning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__TestIamPermissionsRequest The \link{GoogleIamV1__TestIamPermissionsRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy detail is being requested
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__TestIamPermissionsRequest functions
#' @export
projects.models.testIamPermissions <- function(GoogleIamV1__TestIamPermissionsRequest,
    resource) {
    # Fixed: the autogenerated "{+resource}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:testIamPermissions",
        resource)
    # ml.projects.models.testIamPermissions
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleIamV1__TestIamPermissionsRequest, "gar_GoogleIamV1__TestIamPermissionsRequest"))
    f(the_body = GoogleIamV1__TestIamPermissionsRequest)
}
#' Gets information about a model, including its name, the description (if set), and the default version (if at least one version of the model has been deployed).
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.get <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Creates a model which will later contain one or more versions. You must add at least one version before you can request predictions from the model. Add versions by calling projects.models.versions.create.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Model The \link{GoogleCloudMlV1__Model} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Model functions
#' @export
projects.models.create <- function(GoogleCloudMlV1__Model, parent) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/models", parent)
    # ml.projects.models.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Model, "gar_GoogleCloudMlV1__Model"))
    f(the_body = GoogleCloudMlV1__Model)
}
#' Deletes a model. You can only delete a model if there are no versions in it. You can delete versions by calling projects.models.versions.delete.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.delete <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.delete
    f <- googleAuthR::gar_api_generator(url, "DELETE", data_parse_function = function(x) x)
    f()
}
#' Gets information about a model version. Models can have multiple versions. You can call projects.models.versions.list to get the same information that this method returns for all of the versions of a model.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.get <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.versions.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Gets basic information about all the versions of a model. If you expect that a model has many versions, or if you need to handle only a limited number of results at a time, you can request that the list be retrieved in batches (called pages). If there are no versions that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#'   \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @param pageToken Optional
#' @param filter Optional
#' @param pageSize Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.list <- function(parent, pageToken = NULL, filter = NULL,
    pageSize = NULL) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/versions", parent)
    # ml.projects.models.versions.list
    pars = list(pageToken = pageToken, filter = filter, pageSize = pageSize)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Deletes a model version. Each model can have multiple versions deployed and in use at any given time. Use this method to remove a single version. Note: You cannot delete the version that is set as the default version of the model unless it is the only remaining version.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.models.versions.delete <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.versions.delete
    f <- googleAuthR::gar_api_generator(url, "DELETE", data_parse_function = function(x) x)
    f()
}
#' Designates a version to be the default for the model. The default version is used for prediction requests made against the model that don't specify a version. The first version to be created for a model is automatically set as the default. You must make any subsequent changes to the default version setting manually using this method.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__SetDefaultVersionRequest The \link{GoogleCloudMlV1__SetDefaultVersionRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__SetDefaultVersionRequest functions
#' @export
projects.models.versions.setDefault <- function(GoogleCloudMlV1__SetDefaultVersionRequest,
    name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:setDefault", name)
    # ml.projects.models.versions.setDefault
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__SetDefaultVersionRequest, "gar_GoogleCloudMlV1__SetDefaultVersionRequest"))
    f(the_body = GoogleCloudMlV1__SetDefaultVersionRequest)
}
#' Updates the specified Version resource. Currently the only update-able fields are `description`, `requestLoggingConfig`, `autoScaling.minNodes`, and `manualScaling.nodes`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Version The \link{GoogleCloudMlV1__Version} object to pass to this method
#' @param name Required
#' @param updateMask Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Version functions
#' @export
projects.models.versions.patch <- function(GoogleCloudMlV1__Version, name, updateMask = NULL) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.models.versions.patch
    pars = list(updateMask = updateMask)
    f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Version, "gar_GoogleCloudMlV1__Version"))
    f(the_body = GoogleCloudMlV1__Version)
}
#' Creates a new version of a model from a trained TensorFlow model. If the version created in the cloud by this call is the first deployed version of the specified model, it will be made the default version of the model. When you add a version to a model that already has one or more versions, the default version does not automatically change. If you want a new version to be the default, you must call projects.models.versions.setDefault.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Version The \link{GoogleCloudMlV1__Version} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Version functions
#' @export
projects.models.versions.create <- function(GoogleCloudMlV1__Version, parent) {
    # Fixed: the autogenerated "{+parent}" placeholder was left literal because
    # the sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/versions", parent)
    # ml.projects.models.versions.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(GoogleCloudMlV1__Version, "gar_GoogleCloudMlV1__Version"))
    f(the_body = GoogleCloudMlV1__Version)
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.cancel <- function(name) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s:cancel", name)
    # ml.projects.operations.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    f()
}
#' Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `'/v1/{name=users/*}/operations'` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#'   \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation's parent resource
#' @param filter The standard list filter
#' @param pageToken The standard list page token
#' @param pageSize The standard list page size
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.list <- function(name, filter = NULL, pageToken = NULL, pageSize = NULL) {
    # Fixed: the autogenerated "{+name}" placeholder was left literal because the
    # sprintf format had no %s specifier (extra args are silently ignored).
    url <- sprintf("https://ml.googleapis.com/v1/%s/operations", name)
    # ml.projects.operations.list
    pars = list(filter = filter, pageToken = pageToken, pageSize = pageSize)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.operations.get <- function(name) {
    # FIX: replace the literal URI template `{+name}` with %s so sprintf()
    # actually interpolates the operation resource name into the URL.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.operations.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Describes a job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.get <- function(name) {
    # FIX: replace the literal URI template `{+name}` with %s so sprintf()
    # actually interpolates the job resource name into the URL.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.jobs.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
#' Cancels a running job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__CancelJobRequest The \link{GoogleCloudMlV1__CancelJobRequest} object to pass to this method
#' @param name Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__CancelJobRequest functions
#' @export
projects.jobs.cancel <- function(GoogleCloudMlV1__CancelJobRequest, name) {
    # FIX: replace the literal URI template `{+name}` with %s so sprintf()
    # interpolates the job resource name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s:cancel", name)
    # ml.projects.jobs.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Validate the request object class before POSTing it as the body.
    stopifnot(inherits(GoogleCloudMlV1__CancelJobRequest, "gar_GoogleCloudMlV1__CancelJobRequest"))
    f(the_body = GoogleCloudMlV1__CancelJobRequest)
}
#' Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__SetIamPolicyRequest The \link{GoogleIamV1__SetIamPolicyRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy is being specified
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__SetIamPolicyRequest functions
#' @export
projects.jobs.setIamPolicy <- function(GoogleIamV1__SetIamPolicyRequest, resource) {
    # FIX: replace the literal URI template `{+resource}` with %s so sprintf()
    # interpolates the resource name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s:setIamPolicy", resource)
    # ml.projects.jobs.setIamPolicy
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Validate the request object class before POSTing it as the body.
    stopifnot(inherits(GoogleIamV1__SetIamPolicyRequest, "gar_GoogleIamV1__SetIamPolicyRequest"))
    f(the_body = GoogleIamV1__SetIamPolicyRequest)
}
#' Updates a specific job resource. Currently the only supported fields to update are `labels`.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Job The \link{GoogleCloudMlV1__Job} object to pass to this method
#' @param name Required
#' @param updateMask Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Job functions
#' @export
projects.jobs.patch <- function(GoogleCloudMlV1__Job, name, updateMask = NULL) {
    # FIX: replace the literal URI template `{+name}` with %s so sprintf()
    # interpolates the job resource name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s", name)
    # ml.projects.jobs.patch
    pars = list(updateMask = updateMask)
    # Drop NULL query parameters, then build the PATCH request.
    f <- googleAuthR::gar_api_generator(url, "PATCH", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    # Validate the job object class before sending it as the body.
    stopifnot(inherits(GoogleCloudMlV1__Job, "gar_GoogleCloudMlV1__Job"))
    f(the_body = GoogleCloudMlV1__Job)
}
#' Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may 'fail open' without warning.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleIamV1__TestIamPermissionsRequest The \link{GoogleIamV1__TestIamPermissionsRequest} object to pass to this method
#' @param resource REQUIRED: The resource for which the policy detail is being requested
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleIamV1__TestIamPermissionsRequest functions
#' @export
projects.jobs.testIamPermissions <- function(GoogleIamV1__TestIamPermissionsRequest,
    resource) {
    # FIX: replace the literal URI template `{+resource}` with %s so sprintf()
    # interpolates the resource name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s:testIamPermissions",
        resource)
    # ml.projects.jobs.testIamPermissions
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Validate the request object class before POSTing it as the body.
    stopifnot(inherits(GoogleIamV1__TestIamPermissionsRequest, "gar_GoogleIamV1__TestIamPermissionsRequest"))
    f(the_body = GoogleIamV1__TestIamPermissionsRequest)
}
#' Creates a training or a batch prediction job.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param GoogleCloudMlV1__Job The \link{GoogleCloudMlV1__Job} object to pass to this method
#' @param parent Required
#' @importFrom googleAuthR gar_api_generator
#' @family GoogleCloudMlV1__Job functions
#' @export
projects.jobs.create <- function(GoogleCloudMlV1__Job, parent) {
    # FIX: replace the literal URI template `{+parent}` with %s so sprintf()
    # interpolates the parent project name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s/jobs", parent)
    # ml.projects.jobs.create
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    # Validate the job object class before POSTing it as the body.
    stopifnot(inherits(GoogleCloudMlV1__Job, "gar_GoogleCloudMlV1__Job"))
    f(the_body = GoogleCloudMlV1__Job)
}
#' Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param resource REQUIRED: The resource for which the policy is being requested
#' @param options.requestedPolicyVersion Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.getIamPolicy <- function(resource, options.requestedPolicyVersion = NULL) {
    # FIX: replace the literal URI template `{+resource}` with %s so sprintf()
    # interpolates the resource name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s:getIamPolicy", resource)
    # ml.projects.jobs.getIamPolicy
    pars = list(options.requestedPolicyVersion = options.requestedPolicyVersion)
    # Drop NULL query parameters, then issue the GET request.
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
#' Lists the jobs in the project. If there are no jobs that match the request parameters, the list request returns an empty response body: {}.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/ml/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/cloud-platform.read-only
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only"))}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param parent Required
#' @param pageSize Optional
#' @param pageToken Optional
#' @param filter Optional
#' @importFrom googleAuthR gar_api_generator
#' @export
projects.jobs.list <- function(parent, pageSize = NULL, pageToken = NULL, filter = NULL) {
    # FIX: replace the literal URI template `{+parent}` with %s so sprintf()
    # interpolates the parent project name instead of sending the placeholder.
    url <- sprintf("https://ml.googleapis.com/v1/%s/jobs", parent)
    # ml.projects.jobs.list
    pars = list(pageSize = pageSize, pageToken = pageToken, filter = filter)
    # Drop NULL query parameters, then issue the GET request.
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
        data_parse_function = function(x) x)
    f()
}
|
require(ggplot2)
require(shiny)
require(dplyr)
require(wordcloud)
# Load the dataset
loadData <- function() {
    # Read the three CSV inputs and return them bundled as a list:
    # [[1]] main lookup table, [[2]] popularity time series, [[3]] word-cloud counts.
    list(
        read.csv("./data/maintable.csv"),
        read.csv("./data/popullariteti.csv"),
        read.csv("./data/wordcloud.csv")
    )
}
# # Plot the wordcloud
# Draw a word cloud of the most frequent names for the selected
# state/year/sex combination carried in `reaction`.
getWordCloud <- function(df, reaction){
    # df[[3]] is the word-cloud table; columns used: State, Year, Sex, Name, Number.
    cloud_data <- df[[3]]
    chosen_state <- reaction$stateName
    chosen_year <- reaction$year
    n_words <- reaction$wordNumber
    # Translate the UI label into the single-letter code used in the data.
    sex_code <- if (reaction$sexChoose == "Femër") "F" else "M"
    if (chosen_state == "All") {
        # Pool counts across every state for the chosen year/sex, then keep
        # the n_words most frequent names.
        pooled <- cloud_data[which(cloud_data$Year == chosen_year & cloud_data$Sex == sex_code), ]
        pooled <- aggregate(Number ~ Sex+Year+Name, pooled, sum)
        top_names <- head(pooled[sort.list(pooled$Number, decreasing=TRUE), ], n_words)
    } else {
        # Single state: rank its rows by count and keep the top n_words.
        state_rows <- cloud_data[which(cloud_data$State == chosen_state &
                                       cloud_data$Year == chosen_year &
                                       cloud_data$Sex == sex_code), ]
        top_names <- head(state_rows[sort.list(state_rows$Number, decreasing=TRUE), ], n_words)
    }
    set.seed(375) # fixed seed keeps the cloud layout reproducible
    # Render and return the word cloud.
    # NOTE(review): brewer.pal() is from RColorBrewer (a wordcloud
    # dependency) — confirm it is attached in the app environment.
    return(wordcloud(words = top_names$Name, freq = top_names$Number,
                     scale = c(8, 0.4),
                     min.freq = 1,
                     random.order = FALSE,
                     rot.per = 0.15,
                     colors = brewer.pal(8, "Dark2"),
                     random.color = TRUE,
                     use.r.layout = FALSE
    ))
} # end getWordCloud
# Plot comparision
getCompare<- function(df, reaction){
    # Stacked bar chart of the most popular name per year and sex for the
    # district selected via reaction$radio, using the word-cloud table df[[3]].
    dfs<-df[[3]]
    radio = reaction$radio
    # Keep only rows for the selected district.
    new_df1 <- filter(dfs, State == radio)
    # For every year/sex pair keep the single row with the highest Percent.
    new_df2 <- new_df1 %>% group_by(Year, Sex) %>% slice(which.max(Percent))
    # FIX: `width` is a geom parameter, not an aesthetic; passing it inside
    # aes() makes ggplot2 warn about unknown aesthetics.
    g2 <- ggplot(new_df2, aes(x=Year, y= Percent, fill= Name)) +
        geom_bar(width = .80, stat = "identity") +
        xlab("Viti") +
        ylab("Përqindja")
    return(g2)
}
# Plot popularity
# Line chart of yearly counts for the requested name, split by sex.
# df[[2]] is the popularity table (columns used: name, year, n, sex).
getGraph <- function(df, reaction){
    pop <- df[[2]]
    # Keep only the rows for the requested name.
    selected <- filter(pop, name == reaction$name)
    # Decade tick marks spanning the full range of years in the data.
    decade_breaks <- round(seq(min(pop$year), max(pop$year), by = 10), 1)
    plot_obj <- ggplot(selected, aes(year, n)) +
        geom_line(aes(color = sex), lwd = 1) +
        theme_bw() +
        ggtitle(reaction$name) +
        scale_colour_discrete(name = "Gjinia", labels = c("Femra", "Meshkuj")) +
        xlab("Viti") +
        ylab("Numri") +
        scale_x_continuous(breaks = decade_breaks) +
        theme(
            axis.text = element_text(size = 14),
            legend.key = element_rect(fill = "white"),
            legend.background = element_rect(fill = "white"),
            legend.position = c(0.14, 0.80),
            panel.grid.major = element_line(colour = "grey40"),
            panel.grid.minor = element_blank(),
            panel.background = element_rect(fill = "navy")
        )
    return(plot_obj)
}
# Plot the results
getResult <- function(df, reaction){
    # Look up a name/sex pair in the main table (df[[1]], columns Emri,
    # Gjinia, Frekuenca) and return an Albanian status message.
    maintable <- df[[1]]
    kerkimemri <- reaction$kerkimemri
    # Map the UI label to the single-letter sex code used in the data.
    gender <- if (reaction$gender == "Femër") "F" else "M"
    # Base-R subset via which() mirrors dplyr::filter() semantics (drops NA
    # matches) while removing this helper's dependency on dplyr.
    new_df <- maintable[which(maintable$Emri == kerkimemri & maintable$Gjinia == gender), ]
    # FIX: aggregate with sum() so the if() condition stays scalar even when
    # the name/sex pair matches more than one row (a length > 1 condition
    # in if() is an error since R 4.2).
    total <- sum(new_df$Frekuenca)
    if (nrow(new_df) == 0) {
        text1 <- "Nuk u gjet asnjë person me këtë emër"
    } else if (total <= 9) {
        text1 <- "Ka më pak se 10 persona me këtë emër"
    } else {
        text1 <- paste("U gjendën", total, "persona me emrin", kerkimemri)
    }
    return(text1)
}
##### GLOBAL OBJECTS #####
# Shared data
globalData <- loadData()
##### SHINY SERVER #####
# Create shiny server.
shinyServer(function(input, output) {
    cat("Press \"ESC\" to exit...\n")
    # Local alias of the shared data list loaded at startup.
    localFrame <- globalData
    # Reactive bundles of the UI inputs that each output depends on.
    getReactionWordCloud <- reactive({
        list(stateName = input$stateName1,
             year = input$year1,
             sexChoose = input$sexChoose1,
             wordNumber = input$wordNumber1)
    })
    getReactionComp <- reactive({
        list(stateName = input$stateName1,
             radio = input$radio)
    })
    getReactionSearch <- reactive({
        list(name = input$nameSearch2)
    })
    getReactionMain <- reactive({
        list(gender = input$kerkimgjinia,
             kerkimemri = input$kerkimemri)
    })
    # Wire each reactive bundle to its renderer.
    output$wordCloud <- renderPlot({
        print(getWordCloud(localFrame, getReactionWordCloud()))
    }, width = 1000, height = 800)                                 # word cloud
    output$g2 <- renderPlot({
        print(getCompare(localFrame, getReactionComp()))
    }, width = 1000, height = 600)                                 # comparison
    output$g1 <- renderPlot({
        print(getGraph(localFrame, getReactionSearch()))
    }, width = 1000, height = 600)                                 # name search
    output$text1 <- renderText({
        print(getResult(localFrame, getReactionMain()))
    })                                                             # name search
    # NOTE(review): `data` below is base R's data() function, not a data
    # frame, so write.csv(data, con) will fail at download time; the
    # filename also embeds an .xlsx path while the content is CSV.
    # Preserved as-is (behaviour unchanged) — needs a real fix upstream.
    output$downloadData1 <- downloadHandler(
        filename = function() {
            paste('data-', Sys.Date(), './data/top10v2014.xlsx', sep='')
        },
        content = function(con) {
            write.csv(data, con)
        }
    )
    output$downloadData2<- downloadHandler(
        filename = function() {
            paste('data-', Sys.Date(), './data/top10qark.xlsx', sep='')
        },
        content = function(con) {
            write.csv(data, con)
        }
    )
}) # shinyServer
|
/server.R
|
no_license
|
endri81/albaniannames
|
R
| false
| false
| 5,117
|
r
|
require(ggplot2)
require(shiny)
require(dplyr)
require(wordcloud)
# Load the dataset
loadData <- function() {
    # Read the three CSV inputs and return them bundled as a list:
    # [[1]] main lookup table, [[2]] popularity time series, [[3]] word-cloud counts.
    list(
        read.csv("./data/maintable.csv"),
        read.csv("./data/popullariteti.csv"),
        read.csv("./data/wordcloud.csv")
    )
}
# # Plot the wordcloud
# Draw a word cloud of the most frequent names for the selected
# state/year/sex combination carried in `reaction`.
getWordCloud <- function(df, reaction){
    # df[[3]] is the word-cloud table; columns used: State, Year, Sex, Name, Number.
    cloud_data <- df[[3]]
    chosen_state <- reaction$stateName
    chosen_year <- reaction$year
    n_words <- reaction$wordNumber
    # Translate the UI label into the single-letter code used in the data.
    sex_code <- if (reaction$sexChoose == "Femër") "F" else "M"
    if (chosen_state == "All") {
        # Pool counts across every state for the chosen year/sex, then keep
        # the n_words most frequent names.
        pooled <- cloud_data[which(cloud_data$Year == chosen_year & cloud_data$Sex == sex_code), ]
        pooled <- aggregate(Number ~ Sex+Year+Name, pooled, sum)
        top_names <- head(pooled[sort.list(pooled$Number, decreasing=TRUE), ], n_words)
    } else {
        # Single state: rank its rows by count and keep the top n_words.
        state_rows <- cloud_data[which(cloud_data$State == chosen_state &
                                       cloud_data$Year == chosen_year &
                                       cloud_data$Sex == sex_code), ]
        top_names <- head(state_rows[sort.list(state_rows$Number, decreasing=TRUE), ], n_words)
    }
    set.seed(375) # fixed seed keeps the cloud layout reproducible
    # Render and return the word cloud.
    # NOTE(review): brewer.pal() is from RColorBrewer (a wordcloud
    # dependency) — confirm it is attached in the app environment.
    return(wordcloud(words = top_names$Name, freq = top_names$Number,
                     scale = c(8, 0.4),
                     min.freq = 1,
                     random.order = FALSE,
                     rot.per = 0.15,
                     colors = brewer.pal(8, "Dark2"),
                     random.color = TRUE,
                     use.r.layout = FALSE
    ))
} # end getWordCloud
# Plot comparision
getCompare<- function(df, reaction){
    # Stacked bar chart of the most popular name per year and sex for the
    # district selected via reaction$radio, using the word-cloud table df[[3]].
    dfs<-df[[3]]
    radio = reaction$radio
    # Keep only rows for the selected district.
    new_df1 <- filter(dfs, State == radio)
    # For every year/sex pair keep the single row with the highest Percent.
    new_df2 <- new_df1 %>% group_by(Year, Sex) %>% slice(which.max(Percent))
    # FIX: `width` is a geom parameter, not an aesthetic; passing it inside
    # aes() makes ggplot2 warn about unknown aesthetics.
    g2 <- ggplot(new_df2, aes(x=Year, y= Percent, fill= Name)) +
        geom_bar(width = .80, stat = "identity") +
        xlab("Viti") +
        ylab("Përqindja")
    return(g2)
}
# Plot popularity
# Line chart of yearly counts for the requested name, split by sex.
# df[[2]] is the popularity table (columns used: name, year, n, sex).
getGraph <- function(df, reaction){
    pop <- df[[2]]
    # Keep only the rows for the requested name.
    selected <- filter(pop, name == reaction$name)
    # Decade tick marks spanning the full range of years in the data.
    decade_breaks <- round(seq(min(pop$year), max(pop$year), by = 10), 1)
    plot_obj <- ggplot(selected, aes(year, n)) +
        geom_line(aes(color = sex), lwd = 1) +
        theme_bw() +
        ggtitle(reaction$name) +
        scale_colour_discrete(name = "Gjinia", labels = c("Femra", "Meshkuj")) +
        xlab("Viti") +
        ylab("Numri") +
        scale_x_continuous(breaks = decade_breaks) +
        theme(
            axis.text = element_text(size = 14),
            legend.key = element_rect(fill = "white"),
            legend.background = element_rect(fill = "white"),
            legend.position = c(0.14, 0.80),
            panel.grid.major = element_line(colour = "grey40"),
            panel.grid.minor = element_blank(),
            panel.background = element_rect(fill = "navy")
        )
    return(plot_obj)
}
# Plot the results
getResult <- function(df, reaction){
    # Look up a name/sex pair in the main table (df[[1]], columns Emri,
    # Gjinia, Frekuenca) and return an Albanian status message.
    maintable <- df[[1]]
    kerkimemri <- reaction$kerkimemri
    # Map the UI label to the single-letter sex code used in the data.
    gender <- if (reaction$gender == "Femër") "F" else "M"
    # Base-R subset via which() mirrors dplyr::filter() semantics (drops NA
    # matches) while removing this helper's dependency on dplyr.
    new_df <- maintable[which(maintable$Emri == kerkimemri & maintable$Gjinia == gender), ]
    # FIX: aggregate with sum() so the if() condition stays scalar even when
    # the name/sex pair matches more than one row (a length > 1 condition
    # in if() is an error since R 4.2).
    total <- sum(new_df$Frekuenca)
    if (nrow(new_df) == 0) {
        text1 <- "Nuk u gjet asnjë person me këtë emër"
    } else if (total <= 9) {
        text1 <- "Ka më pak se 10 persona me këtë emër"
    } else {
        text1 <- paste("U gjendën", total, "persona me emrin", kerkimemri)
    }
    return(text1)
}
##### GLOBAL OBJECTS #####
# Shared data
globalData <- loadData()
##### SHINY SERVER #####
# Create shiny server.
shinyServer(function(input, output) {
    cat("Press \"ESC\" to exit...\n")
    # Local alias of the shared data list loaded at startup.
    localFrame <- globalData
    # Reactive bundles of the UI inputs that each output depends on.
    getReactionWordCloud <- reactive({
        list(stateName = input$stateName1,
             year = input$year1,
             sexChoose = input$sexChoose1,
             wordNumber = input$wordNumber1)
    })
    getReactionComp <- reactive({
        list(stateName = input$stateName1,
             radio = input$radio)
    })
    getReactionSearch <- reactive({
        list(name = input$nameSearch2)
    })
    getReactionMain <- reactive({
        list(gender = input$kerkimgjinia,
             kerkimemri = input$kerkimemri)
    })
    # Wire each reactive bundle to its renderer.
    output$wordCloud <- renderPlot({
        print(getWordCloud(localFrame, getReactionWordCloud()))
    }, width = 1000, height = 800)                                 # word cloud
    output$g2 <- renderPlot({
        print(getCompare(localFrame, getReactionComp()))
    }, width = 1000, height = 600)                                 # comparison
    output$g1 <- renderPlot({
        print(getGraph(localFrame, getReactionSearch()))
    }, width = 1000, height = 600)                                 # name search
    output$text1 <- renderText({
        print(getResult(localFrame, getReactionMain()))
    })                                                             # name search
    # NOTE(review): `data` below is base R's data() function, not a data
    # frame, so write.csv(data, con) will fail at download time; the
    # filename also embeds an .xlsx path while the content is CSV.
    # Preserved as-is (behaviour unchanged) — needs a real fix upstream.
    output$downloadData1 <- downloadHandler(
        filename = function() {
            paste('data-', Sys.Date(), './data/top10v2014.xlsx', sep='')
        },
        content = function(con) {
            write.csv(data, con)
        }
    )
    output$downloadData2<- downloadHandler(
        filename = function() {
            paste('data-', Sys.Date(), './data/top10qark.xlsx', sep='')
        },
        content = function(con) {
            write.csv(data, con)
        }
    )
}) # shinyServer
|
# Fuzzer-generated valgrind regression input for meteor:::ET0_Makkink.
# Rs/atmp/relh are empty and temp carries extreme doubles: the point is to
# probe the compiled routine for memory errors, not to compute meaningful
# evapotranspiration values.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
# Print the structure of whatever the compiled routine returned.
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615855020-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 932
|
r
|
# Fuzzer-generated valgrind regression input for meteor:::ET0_Makkink.
# Rs/atmp/relh are empty and temp carries extreme doubles: the point is to
# probe the compiled routine for memory errors, not to compute meaningful
# evapotranspiration values.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482504e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_Makkink,testlist)
# Print the structure of whatever the compiled routine returned.
str(result)
|
backProp <- function(y, layers, alpha){
    # Backpropagation driver: update the output layer against the target `y`,
    # then walk the remaining layers from last-but-one down to the first,
    # updating each against its (already updated) successor.
    #alpha <- cnn$lrate
    outlayer <- layers[[length(layers)]]
    #y <- cnn$y
    outlayer <- updateWeight(outlayer, y, alpha)
    layers[[length(layers)]] <- outlayer
    # FIX: (length(layers) - 1):1 evaluates to 0:1 when there is a single
    # layer, indexing out of bounds; rev(seq_len(...)) yields an empty
    # sequence in that case and is identical for two or more layers.
    for(i in rev(seq_len(length(layers) - 1))){
        print(layers[[i]]$type)
        layers[[i]] <- updateWeight(layers[[i]], layers[[i + 1]], alpha)
    }
    # NOTE(review): the updated list is also stored inside itself under
    # $layer — presumably callers read result$layer; confirm this is intended.
    layers$layer <- layers
    return (layers)
}
|
/backProp.R
|
no_license
|
LinHungShi/ConvolutionalNeuralNetwork
|
R
| false
| false
| 418
|
r
|
backProp <- function(y, layers, alpha){
    # Backpropagation driver: update the output layer against the target `y`,
    # then walk the remaining layers from last-but-one down to the first,
    # updating each against its (already updated) successor.
    #alpha <- cnn$lrate
    outlayer <- layers[[length(layers)]]
    #y <- cnn$y
    outlayer <- updateWeight(outlayer, y, alpha)
    layers[[length(layers)]] <- outlayer
    # FIX: (length(layers) - 1):1 evaluates to 0:1 when there is a single
    # layer, indexing out of bounds; rev(seq_len(...)) yields an empty
    # sequence in that case and is identical for two or more layers.
    for(i in rev(seq_len(length(layers) - 1))){
        print(layers[[i]]$type)
        layers[[i]] <- updateWeight(layers[[i]], layers[[i + 1]], alpha)
    }
    # NOTE(review): the updated list is also stored inside itself under
    # $layer — presumably callers read result$layer; confirm this is intended.
    layers$layer <- layers
    return (layers)
}
|
## Summarize per-transcript read counts from SAM files (reads mapped to cDNAs).
## Run from the directory containing the .sam files; adjust the pattern below
## if your files use a different extension.

# All SAM files in the working directory ("$" anchors the end of the name).
files <- list.files(pattern="\\.sam$")
# Echo the file list so the user can sanity-check it.
print(files)

# Accumulator for the merged count table (NULL until the first file).
results <- NULL
for (f in files) {
    print(f) # progress indicator
    # Read only the third SAM column (reference/gene name); header lines
    # starting with "@" are skipped via comment.char.
    tmp <- scan(f, what=list(NULL,NULL,""),
                comment.char="@", sep="\t", flush=T)[[3]]
    # Count occurrences of each reference name.
    tmp.table <- as.data.frame(table(tmp))
    colnames(tmp.table) <- c("gene", f)
    # Merge the per-file counts into the running table, joining on gene.
    if (is.null(results)) {
        # first file: the table becomes the initial result
        results <- as.data.frame(tmp.table)
    } else {
        results <- merge(results, tmp.table, all=T,
                         by="gene")
    }
}
rm(list=c("tmp","tmp.table")) # tidy up loop temporaries

# Summarize mapped vs unmapped reads ("*" is the SAM convention for unmapped).
print("unmapped")
unmapped <- results[results$gene=="*",-1]
unmapped
results.map <- results[results$gene!="*",]
print("mapped")
mapped <- apply(results.map[-1],2,sum,na.rm=T)
mapped
print("percent mapped")
round(mapped/(mapped+unmapped)*100,1)
write.table(results.map,file="sam2countsResults.tsv",sep="\t",row.names=F)
|
/RNAseq/scripts/sam2counts.R
|
permissive
|
iamciera/Scripts-and-Protocols
|
R
| false
| false
| 2,028
|
r
|
## Summarize per-transcript read counts from SAM files (reads mapped to cDNAs).
## Run from the directory containing the .sam files; adjust the pattern below
## if your files use a different extension.

# All SAM files in the working directory ("$" anchors the end of the name).
files <- list.files(pattern="\\.sam$")
# Echo the file list so the user can sanity-check it.
print(files)

# Accumulator for the merged count table (NULL until the first file).
results <- NULL
for (f in files) {
    print(f) # progress indicator
    # Read only the third SAM column (reference/gene name); header lines
    # starting with "@" are skipped via comment.char.
    tmp <- scan(f, what=list(NULL,NULL,""),
                comment.char="@", sep="\t", flush=T)[[3]]
    # Count occurrences of each reference name.
    tmp.table <- as.data.frame(table(tmp))
    colnames(tmp.table) <- c("gene", f)
    # Merge the per-file counts into the running table, joining on gene.
    if (is.null(results)) {
        # first file: the table becomes the initial result
        results <- as.data.frame(tmp.table)
    } else {
        results <- merge(results, tmp.table, all=T,
                         by="gene")
    }
}
rm(list=c("tmp","tmp.table")) # tidy up loop temporaries

# Summarize mapped vs unmapped reads ("*" is the SAM convention for unmapped).
print("unmapped")
unmapped <- results[results$gene=="*",-1]
unmapped
results.map <- results[results$gene!="*",]
print("mapped")
mapped <- apply(results.map[-1],2,sum,na.rm=T)
mapped
print("percent mapped")
round(mapped/(mapped+unmapped)*100,1)
write.table(results.map,file="sam2countsResults.tsv",sep="\t",row.names=F)
|
\name{nicheoverlap}
\Rdversion{1.1}
\alias{nicheoverlap}
\alias{nichedispl}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Metrics to compare pairs of resource niches
}
\description{
Functions \code{nicheoverlap} and \code{nichedispl} compute the overlap and centroid distance between pairs of resource distributions. In both cases resource relationships are given in the distance matrix \code{D} and the resource use data are given in data frame \code{P1} (and in some modes also \code{P2}).
}
\usage{
nicheoverlap(P1, P2 = NULL, D = NULL, q1 = NULL,q2 = NULL, mode = "multiple",
Np1 = NULL, Np2 = NULL, Nq1 = NULL, Nq2 = NULL,nboot = 1000, alpha=0.05)
nichedispl(P1, P2 = NULL, D = NULL, q1 = NULL, q2 = NULL, mode = "multiple",
Np1 = NULL, Np2 = NULL, Nq1 = NULL, Nq2 = NULL, nboot = 1000, alpha=0.05)
}
\arguments{
\item{P1}{Data frame containing the amount of usage that a set of species (in rows) make of a first set of resources (in columns).}
\item{P2}{Data frame containing the amount of usage that a set of species (in rows) make of a second set of resources (in columns). Not used if \code{mode = "pairwise"}.}
\item{D}{Object of type \code{\link{dist}} containing distance values between resources. If no distance matrix is provided (i.e. if \code{D==NULL}), the distances between resources is assumed to be maximum.}
\item{q1}{Vector with the availability of each resource corresponding to P1.}
\item{q2}{Vector with the availability of each resource corresponding to P2.}
\item{mode}{Either \code{mode = "single"} (rows of matrices P1 and P2 are individual observations to be pooled, for example to compare the niche of two species each with its individual observations), \code{mode = "multiple"} (each row in P1 is compared to the corresponding row of P2, for example, to compare seasonal niche shifts in each species) or \code{mode = "pairwise"} (all rows in P1 are compared pairwise).}
\item{Np1}{Vector with the number of observations per species from which the values in \code{P1} come (in \code{mode = "multiple"} or \code{mode = "pairwise"}).}
\item{Np2}{Vector with the number of observations per species from which the values in \code{P2} come (only in \code{mode = "multiple"}).}
\item{Nq1}{The number of observations from which the values in \code{q1} come.}
\item{Nq2}{The number of observations from which the values in \code{q2} come.}
  \item{nboot}{Number of bootstrap samples used to compute bias-corrected percentile confidence intervals.}
\item{alpha}{Used to set the confidence level (i.e. \code{alpha = 0.05} means 95 percent confidence interval).}
}
\details{
The method is described in De Caceres et al. (2011). If the distance matrix is not specified (i.e. if \code{D=NULL}) the function assumes that all resources are at a maximum distance (\code{d=1}). If the resource availability vector \code{q1} (and \code{q2} if supplied) is specified, then the values in \code{P1} (and \code{P2} if supplied) are taken as assessments of resource use and the species preference is calculated taking into account resource availability. Otherwise, resource use is equated to resource preference (i.e. all resources are considered equally available). The functions can compute bootstrap confidence intervals following the bias-corrected percentile method (Manly 2007). If \code{mode = "multiple"} and \code{Np1} and \code{Np2} are not null, bootstrap samples for a given niche are generated assuming a multinomial distribution with the proportions calculated from the corresponding row values in \code{P1} (resp. \code{P2}), and the number of observations comes from the corresponding element in \code{Np1} (resp. \code{Np2}). Similarly, if \code{mode = "pairwise"} and \code{Np1} is not null, bootstrap samples for each niche are generated assuming a multinomial distribution with the proportions calculated from the corresponding row values in \code{P1}, and the number of observations comes from the corresponding element in \code{Np1}. Finally, if \code{mode = "single"} then the bootstrapped units are the rows of matrices \code{P1} and \code{P2}. In both cases, if \code{Nq1} (and \code{Nq2}) is indicated, the availability of resources is also bootstrapped. The bias-corrected percentile method is described for overlap niche measures in Mueller and Altenberg (1985).
}
\value{
Function \code{nicheoverlap} (resp. \code{nichedispl}) returns the overlap (resp. the distance between centroids) between the each pair of rows in \code{P1} and \code{P2}. If \code{mode = "multiple"} or \code{mode = "single"} the values are returned as a data frame. If \code{mode = "pairwise"} a matrix of values is returned instead. If bootstrap confidence intervals are asked then the functions also compute the lower and upper bounds of a confidence interval obtained following the bias-corrected percentile method. Upper and lower bounds are returned as additional columns of the data frame in \code{mode = "multiple"} or \code{mode = "single"} or as additional matrices of a list in \code{mode = "pairwise"}.
}
\references{
Mueller, L.D. and L. Altenberg. 1985. Statistical Inference on Measures of Niche Overlap. Ecology 66:1204-1210.
Manly, B.F.J. 2007. Randomization, bootstrap and Monte Carlo methods in biology. Chapman and Hall texts in statistical science series. 2nd edition.
De Caceres, M., Sol, D., Lapiedra, O. and P. Legendre. (2011) A framework for estimating niche metrics using the resemblance between qualitative resources. Oikos 120: 1341-1350.
}
\author{
Miquel De Caceres Ainsa
}
\seealso{
See \code{\link{nichevar}} for descriptors of single niches.
}
\examples{
# Loads example data
data(birds)
# The overlap and displacement metrics using distances among
# resources and assuming equal availability of resources
nicheoverlap(birdsbreed, birdswinter, D = resourceD, mode="multiple")
nichedispl(birdsbreed, birdswinter, D = resourceD, mode="multiple")
# The overlap and displacement metrics using distances among resources
# and computes 95 percent confidence intervals
nicheoverlap(birdsbreed, birdswinter, D = resourceD, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter), Nq1 = 100, Nq2 = 100)
nichedispl(birdsbreed, birdswinter, D = resourceD, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter), Nq1 = 100, Nq2 = 100)
# Same computations with different resource availability
q = c(0.18, 0.24, 0.22, 0.21, 0.15)
nicheoverlap(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple")
nichedispl(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple")
nicheoverlap(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter),
Nq1 = 100, Nq2 = 100)
nichedispl(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter),
Nq1 = 100, Nq2 = 100)
# The overlap metrics using distances among rows of 'birdsbreed'
nicheoverlap(birdsbreed, D = resourceD, mode="pairwise")
}
|
/man/nicheoverlap.Rd
|
no_license
|
HaleyArnold33/indicspecies
|
R
| false
| false
| 7,090
|
rd
|
\name{nicheoverlap}
\Rdversion{1.1}
\alias{nicheoverlap}
\alias{nichedispl}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Metrics to compare pairs of resource niches
}
\description{
Functions \code{nicheoverlap} and \code{nichedispl} compute the overlap and centroid distance between pairs of resource distributions. In both cases resource relationships are given in the distance matrix \code{D} and the resource use data are given in data frame \code{P1} (and in some modes also \code{P2}).
}
\usage{
nicheoverlap(P1, P2 = NULL, D = NULL, q1 = NULL,q2 = NULL, mode = "multiple",
Np1 = NULL, Np2 = NULL, Nq1 = NULL, Nq2 = NULL,nboot = 1000, alpha=0.05)
nichedispl(P1, P2 = NULL, D = NULL, q1 = NULL, q2 = NULL, mode = "multiple",
Np1 = NULL, Np2 = NULL, Nq1 = NULL, Nq2 = NULL, nboot = 1000, alpha=0.05)
}
\arguments{
\item{P1}{Data frame containing the amount of usage that a set of species (in rows) make of a first set of resources (in columns).}
\item{P2}{Data frame containing the amount of usage that a set of species (in rows) make of a second set of resources (in columns). Not used if \code{mode = "pairwise"}.}
\item{D}{Object of type \code{\link{dist}} containing distance values between resources. If no distance matrix is provided (i.e. if \code{D==NULL}), the distances between resources is assumed to be maximum.}
\item{q1}{Vector with the availability of each resource corresponding to P1.}
\item{q2}{Vector with the availability of each resource corresponding to P2.}
\item{mode}{Either \code{mode = "single"} (rows of matrices P1 and P2 are individual observations to be pooled, for example to compare the niche of two species each with its individual observations), \code{mode = "multiple"} (each row in P1 is compared to the corresponding row of P2, for example, to compare seasonal niche shifts in each species) or \code{mode = "pairwise"} (all rows in P1 are compared pairwise).}
\item{Np1}{Vector with the number of observations per species from which the values in \code{P1} come (in \code{mode = "multiple"} or \code{mode = "pairwise"}).}
\item{Np2}{Vector with the number of observations per species from which the values in \code{P2} come (only in \code{mode = "multiple"}).}
\item{Nq1}{The number of observations from which the values in \code{q1} come.}
\item{Nq2}{The number of observations from which the values in \code{q2} come.}
  \item{nboot}{Number of bootstrap samples used to compute bias-corrected percentile confidence intervals.}
\item{alpha}{Used to set the confidence level (i.e. \code{alpha = 0.05} means 95 percent confidence interval).}
}
\details{
The method is described in De Caceres et al. (2011). If the distance matrix is not specified (i.e. if \code{D=NULL}) the function assumes that all resources are at a maximum distance (\code{d=1}). If the resource availability vector \code{q1} (and \code{q2} if supplied) is specified, then the values in \code{P1} (and \code{P2} if supplied) are taken as assessments of resource use and the species preference is calculated taking into account resource availability. Otherwise, resource use is equated to resource preference (i.e. all resources are considered equally available). The functions can compute bootstrap confidence intervals following the bias-corrected percentile method (Manly 2007). If \code{mode = "multiple"} and \code{Np1} and \code{Np2} are not null, bootstrap samples for a given niche are generated assuming a multinomial distribution with the proportions calculated from the corresponding row values in \code{P1} (resp. \code{P2}), and the number of observations comes from the corresponding element in \code{Np1} (resp. \code{Np2}). Similarly, if \code{mode = "pairwise"} and \code{Np1} is not null, bootstrap samples for each niche are generated assuming a multinomial distribution with the proportions calculated from the corresponding row values in \code{P1}, and the number of observations comes from the corresponding element in \code{Np1}. Finally, if \code{mode = "single"} then the bootstrapped units are the rows of matrices \code{P1} and \code{P2}. In both cases, if \code{Nq1} (and \code{Nq2}) is indicated, the availability of resources is also bootstrapped. The bias-corrected percentile method is described for overlap niche measures in Mueller and Altenberg (1985).
}
\value{
Function \code{nicheoverlap} (resp. \code{nichedispl}) returns the overlap (resp. the distance between centroids) between the each pair of rows in \code{P1} and \code{P2}. If \code{mode = "multiple"} or \code{mode = "single"} the values are returned as a data frame. If \code{mode = "pairwise"} a matrix of values is returned instead. If bootstrap confidence intervals are asked then the functions also compute the lower and upper bounds of a confidence interval obtained following the bias-corrected percentile method. Upper and lower bounds are returned as additional columns of the data frame in \code{mode = "multiple"} or \code{mode = "single"} or as additional matrices of a list in \code{mode = "pairwise"}.
}
\references{
Mueller, L.D. and L. Altenberg. 1985. Statistical Inference on Measures of Niche Overlap. Ecology 66:1204-1210.
Manly, B.F.J. 2007. Randomization, bootstrap and Monte Carlo methods in biology. Chapman and Hall texts in statistical science series. 2nd edition.
De Caceres, M., Sol, D., Lapiedra, O. and P. Legendre. (2011) A framework for estimating niche metrics using the resemblance between qualitative resources. Oikos 120: 1341-1350.
}
\author{
Miquel De Caceres Ainsa
}
\seealso{
See \code{\link{nichevar}} for descriptors of single niches.
}
\examples{
# Loads example data
data(birds)
# The overlap and displacement metrics using distances among
# resources and assuming equal availability of resources
nicheoverlap(birdsbreed, birdswinter, D = resourceD, mode="multiple")
nichedispl(birdsbreed, birdswinter, D = resourceD, mode="multiple")
# The overlap and displacement metrics using distances among resources
# and computes 95 percent confidence intervals
nicheoverlap(birdsbreed, birdswinter, D = resourceD, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter), Nq1 = 100, Nq2 = 100)
nichedispl(birdsbreed, birdswinter, D = resourceD, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter), Nq1 = 100, Nq2 = 100)
# Same computations with different resource availability
q = c(0.18, 0.24, 0.22, 0.21, 0.15)
nicheoverlap(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple")
nichedispl(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple")
nicheoverlap(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter),
Nq1 = 100, Nq2 = 100)
nichedispl(birdsbreed, birdswinter, D = resourceD,
q1 = q, q2 = q, mode="multiple",
Np1 = rowSums(birdsbreed), Np2 = rowSums(birdswinter),
Nq1 = 100, Nq2 = 100)
# The overlap metrics using distances among rows of 'birdsbreed'
nicheoverlap(birdsbreed, D = resourceD, mode="pairwise")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sstable.R
\name{sstable.ae}
\alias{sstable.ae}
\title{Create an adverse event summary table}
\usage{
sstable.ae(ae_data, fullid_data, id.var, aetype.var, grade.var = NULL,
arm.var, digits = 0, test = TRUE, pdigits = 3, pcutoff = 0.001,
chisq.test = FALSE, correct = FALSE, simulate.p.value = FALSE,
B = 2000, workspace = 1e+06, hybrid = FALSE, footer = NULL,
flextable = TRUE, bg = "#F2EFEE")
}
\arguments{
\item{ae_data}{a data frame containing adverse event data.}
\item{fullid_data}{a data frame containing treatment arm data of all participants (not just those who had adverse events).}
\item{id.var}{a character specifying the name of the study id variable (exists in both adverse event data and treatment arm data).}
\item{aetype.var}{a character specifying the name of the adverse event type variable (exists in adverse event data).}
\item{grade.var}{a character specifying the name of the adverse event grade variable (exists in adverse event data).}
\item{arm.var}{a character specifying the name of the treatment arm variable (exists in treatment arm data).}
\item{digits}{a number specifies number of significant digits for numeric statistics.}
\item{test}{a logical value specifies whether a statistical test will be performed to compare between treatment arms.}
\item{pdigits}{a number specifies number of significant digits for p value.}
\item{pcutoff}{a number specifies threshold value of p value to be displayed as "< pcutoff".}
\item{chisq.test}{a logical value specifies whether Chi-squared test or Fisher's exact test will be used to compare between treatment arms.}
\item{correct}{a parameter for chisq.test().}
\item{simulate.p.value}{a parameter for chisq.test() and fisher.test().}
\item{B}{a parameter for chisq.test() and fisher.test().}
\item{workspace}{a parameter for fisher.test().}
\item{hybrid}{a parameter for fisher.test().}
\item{footer}{a vector of strings to be used as footnote of table.}
\item{flextable}{a logical value specifies whether output will be a flextable-type table.}
\item{bg}{a character specifies color of the odd rows in the body of flextable-type table.}
}
\value{
a flextable-type table or a list with values/headers/footers
}
\description{
A function to create a simple adverse event summary table.
}
|
/man/sstable.ae.Rd
|
no_license
|
choisy/C306
|
R
| false
| true
| 2,317
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sstable.R
\name{sstable.ae}
\alias{sstable.ae}
\title{Create an adverse event summary table}
\usage{
sstable.ae(ae_data, fullid_data, id.var, aetype.var, grade.var = NULL,
arm.var, digits = 0, test = TRUE, pdigits = 3, pcutoff = 0.001,
chisq.test = FALSE, correct = FALSE, simulate.p.value = FALSE,
B = 2000, workspace = 1e+06, hybrid = FALSE, footer = NULL,
flextable = TRUE, bg = "#F2EFEE")
}
\arguments{
\item{ae_data}{a data frame containing adverse event data.}
\item{fullid_data}{a data frame containing treatment arm data of all participants (not just those who had adverse events).}
\item{id.var}{a character specifying the name of the study id variable (exists in both adverse event data and treatment arm data).}
\item{aetype.var}{a character specifying the name of the adverse event type variable (exists in adverse event data).}
\item{grade.var}{a character specifying the name of the adverse event grade variable (exists in adverse event data).}
\item{arm.var}{a character specifying the name of the treatment arm variable (exists in treatment arm data).}
\item{digits}{a number specifies number of significant digits for numeric statistics.}
\item{test}{a logical value specifies whether a statistical test will be performed to compare between treatment arms.}
\item{pdigits}{a number specifies number of significant digits for p value.}
\item{pcutoff}{a number specifies threshold value of p value to be displayed as "< pcutoff".}
\item{chisq.test}{a logical value specifies whether Chi-squared test or Fisher's exact test will be used to compare between treatment arms.}
\item{correct}{a parameter for chisq.test().}
\item{simulate.p.value}{a parameter for chisq.test() and fisher.test().}
\item{B}{a parameter for chisq.test() and fisher.test().}
\item{workspace}{a parameter for fisher.test().}
\item{hybrid}{a parameter for fisher.test().}
\item{footer}{a vector of strings to be used as footnote of table.}
\item{flextable}{a logical value specifies whether output will be a flextable-type table.}
\item{bg}{a character specifies color of the odd rows in the body of flextable-type table.}
}
\value{
a flextable-type table or a list with values/headers/footers
}
\description{
A function to create a simple adverse event summary table.
}
|
# Generate a jittered (simulated) example dataset from the original
# Paucarpata survey data, using the spatcontrol model machinery.
source("spatcontrol/spatcontrol.R",local=TRUE,chdir=TRUE)
# load the raw survey data and recode "NULL" entries as 0
db<-read.csv("OriginalDataPaucarpata.csv")
db<-set_to(db,init=c("NULL"),final=0)
# drop records without coordinates
db<-db[which(!is.na(db$easting)),]
# rename columns to the names expected by spatcontrol
db<-changeNameCol(db,"easting","X") # from "easting" to "X"
db<-changeNameCol(db,"northing","Y")
db<-changeNameCol(db,"infested","positive")
db<-changeNameCol(db,"open","observed")
db<-changeNameCol(db,"cityBlockNum","GroupNum")
db<-changeNameCol(db,"inspector","IdObserver")
## corresponding random map
# cofactors
# fixed cofactor effect sizes used for the simulation
# (CU, PE, oanimal, I.NO, P.NO -- see spatcontrol docs for meanings)
Tc.val<-c(0.68,0.47,0.21,-1.14,-0.28)
names(Tc.val)<-c("CU","PE","oanimal","I.NO","P.NO")
# inspectors
# seedNum<-sample(1:10000,1)
# cat("seedNum:",seedNum)
# fixed seed kept for reproducibility (originally drawn at random, above)
seedNum<-4786
set.seed(seedNum)
# draw one quality value per inspector from a Beta(4.5, 2) distribution
nbObs<-length(levels(as.factor(db$IdObserver)))
obs.qual<-rbeta(nbObs,4.5,2)
names(obs.qual)<-levels(as.factor(db$IdObserver))
# simulate a map with the given model parameters
# (parameter semantics follow gen.map() in spatcontrol -- TODO confirm)
dbFit<-gen.map(db,mu=-1.3,Ku=0.48,Kv=169,f=9.0,T=0.3,c.val=Tc.val,obs.qual=obs.qual)
dbFit$fitSet<-db$fitSet
# side-by-side maps: original data (left) vs simulated data (right);
# points colored by fitting/validation set, infested houses overlaid in yellow
par(mfrow=c(1,2))
plot(db$X,db$Y,col=db$fitSet+1,asp=1)
with(db[which(db$positive==1),],lines(X,Y,col="yellow",type="p"))
legend("bottomleft",c("fitting dataset","validation dataset","infested"),col=c("red","black","yellow"),pch=1)
plot(dbFit$X,dbFit$Y,col=dbFit$fitSet+1,asp=1,main=paste("Seed:",seedNum))
with(dbFit[which(dbFit$positive==1),],lines(X,Y,col="yellow",type="p"))
legend("bottomleft",c("fitting dataset","validation dataset","infested"),col=c("red","black","yellow"),pch=1)
# save the simulated dataset
write.csv(dbFit,"JitteredDataPaucarpata.csv",row.names=FALSE)
|
/example_generation.R
|
no_license
|
JavierQC/spatcontrol
|
R
| false
| false
| 1,476
|
r
|
# Generate a jittered (simulated) example dataset from the original
# Paucarpata survey data, using the spatcontrol model machinery.
source("spatcontrol/spatcontrol.R",local=TRUE,chdir=TRUE)
# load the raw survey data and recode "NULL" entries as 0
db<-read.csv("OriginalDataPaucarpata.csv")
db<-set_to(db,init=c("NULL"),final=0)
# drop records without coordinates
db<-db[which(!is.na(db$easting)),]
# rename columns to the names expected by spatcontrol
db<-changeNameCol(db,"easting","X") # from "easting" to "X"
db<-changeNameCol(db,"northing","Y")
db<-changeNameCol(db,"infested","positive")
db<-changeNameCol(db,"open","observed")
db<-changeNameCol(db,"cityBlockNum","GroupNum")
db<-changeNameCol(db,"inspector","IdObserver")
## corresponding random map
# cofactors
# fixed cofactor effect sizes used for the simulation
# (CU, PE, oanimal, I.NO, P.NO -- see spatcontrol docs for meanings)
Tc.val<-c(0.68,0.47,0.21,-1.14,-0.28)
names(Tc.val)<-c("CU","PE","oanimal","I.NO","P.NO")
# inspectors
# seedNum<-sample(1:10000,1)
# cat("seedNum:",seedNum)
# fixed seed kept for reproducibility (originally drawn at random, above)
seedNum<-4786
set.seed(seedNum)
# draw one quality value per inspector from a Beta(4.5, 2) distribution
nbObs<-length(levels(as.factor(db$IdObserver)))
obs.qual<-rbeta(nbObs,4.5,2)
names(obs.qual)<-levels(as.factor(db$IdObserver))
# simulate a map with the given model parameters
# (parameter semantics follow gen.map() in spatcontrol -- TODO confirm)
dbFit<-gen.map(db,mu=-1.3,Ku=0.48,Kv=169,f=9.0,T=0.3,c.val=Tc.val,obs.qual=obs.qual)
dbFit$fitSet<-db$fitSet
# side-by-side maps: original data (left) vs simulated data (right);
# points colored by fitting/validation set, infested houses overlaid in yellow
par(mfrow=c(1,2))
plot(db$X,db$Y,col=db$fitSet+1,asp=1)
with(db[which(db$positive==1),],lines(X,Y,col="yellow",type="p"))
legend("bottomleft",c("fitting dataset","validation dataset","infested"),col=c("red","black","yellow"),pch=1)
plot(dbFit$X,dbFit$Y,col=dbFit$fitSet+1,asp=1,main=paste("Seed:",seedNum))
with(dbFit[which(dbFit$positive==1),],lines(X,Y,col="yellow",type="p"))
legend("bottomleft",c("fitting dataset","validation dataset","infested"),col=c("red","black","yellow"),pch=1)
# save the simulated dataset
write.csv(dbFit,"JitteredDataPaucarpata.csv",row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ecr_operations.R
\name{create_repository}
\alias{create_repository}
\title{Creates an image repository}
\usage{
create_repository(repositoryName, tags = NULL)
}
\arguments{
\item{repositoryName}{[required] The name to use for the repository. The repository name may be specified on its own (such as \code{nginx-web-app}) or it can be prepended with a namespace to group the repository into a category (such as \code{project-a/nginx-web-app}).}
\item{tags}{}
}
\description{
Creates an image repository.
}
\section{Accepted Parameters}{
\preformatted{create_repository(
repositoryName = "string",
tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\examples{
# This example creates a repository called nginx-web-app inside the
# project-a namespace in the default registry for an account.
\donttest{create_repository(
repositoryName = "project-a/nginx-web-app"
)}
}
|
/service/paws.ecr/man/create_repository.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 990
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ecr_operations.R
\name{create_repository}
\alias{create_repository}
\title{Creates an image repository}
\usage{
create_repository(repositoryName, tags = NULL)
}
\arguments{
\item{repositoryName}{[required] The name to use for the repository. The repository name may be specified on its own (such as \code{nginx-web-app}) or it can be prepended with a namespace to group the repository into a category (such as \code{project-a/nginx-web-app}).}
\item{tags}{}
}
\description{
Creates an image repository.
}
\section{Accepted Parameters}{
\preformatted{create_repository(
repositoryName = "string",
tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\examples{
# This example creates a repository called nginx-web-app inside the
# project-a namespace in the default registry for an account.
\donttest{create_repository(
repositoryName = "project-a/nginx-web-app"
)}
}
|
# This script can be used to replicate the analysis undertaken in the summer
# project "Where are the missing grasses?" based at RBG Kew in summer 2016.
#
# For a full explanation of the script and the methods it employs, as well as
# how to use it yourself please see the readme in this repository.
#
# Jonathan Williams, 2016
# jonvw28@gmail.com
#
################################################################################
#
# This script looks at the user-set taxonomist method in RunMe.R and sets the
# filtering flags consumed by the author data scripts:
#
#   tx.tax.stat   - filter names on taxonomic status
#   tx.hyb.stat   - filter out hybrid names
#   tx.rnk.stat   - filter names on taxonomic rank
#   tx.basio.year - use the basionym's publication year (always FALSE here)
#   tx.basio.filt - restrict the analysis to basionyms only
#
# It also appends a method-specific suffix to the run identifier.
# (Flag meanings inferred from their names and the original per-branch
# comments -- see the repository readme for the authoritative definitions.)
#
# Map each supported method onto its flag settings and identifier suffix;
# switch() returns NULL for an unrecognised method, which is caught below.
tx.settings <- switch(taxonomist.method,
        "all" =
                list(tax = FALSE, filt = FALSE, basio = FALSE,
                     suffix = "_tax_all"),
        "filtered - not status" =
                list(tax = FALSE, filt = TRUE, basio = FALSE,
                     suffix = "_tax_filt_not_stat"),
        "filtered - status" =
                list(tax = TRUE, filt = TRUE, basio = FALSE,
                     suffix = "_tax_filt_stat"),
        "all basionyms" =
                list(tax = FALSE, filt = FALSE, basio = TRUE,
                     suffix = "_tax_all_basio"),
        "basionyms filtered - not status" =
                list(tax = FALSE, filt = TRUE, basio = TRUE,
                     suffix = "_tax_filt_basio_not_stat"),
        "basionyms filtered - status" =
                list(tax = TRUE, filt = TRUE, basio = TRUE,
                     suffix = "_tax_filt_basio_stat")
)
if (is.null(tx.settings)) {
        #
        # Not a valid choice.  (The original message wrongly said "species
        # data handling method"; this script validates the taxonomist method.)
        #
        stop("Invalid taxonomist method entered")
}
tx.tax.stat <- tx.settings$tax
# the hybrid and rank filters are always switched on/off together
tx.hyb.stat <- tx.settings$filt
tx.rnk.stat <- tx.settings$filt
# the publication year of each combination is always used as-is
tx.basio.year <- FALSE
tx.basio.filt <- tx.settings$basio
# Modify the ID string with the method-specific suffix
identifier <- paste(identifier, tx.settings$suffix, sep = "")
rm(tx.settings)
|
/Support/Data_Processing/author_method.R
|
no_license
|
fdbesanto2/kew_grasses
|
R
| false
| false
| 3,683
|
r
|
# This script can be used to replicate the analysis undertaken in the summer
# project "Where are the missing grasses?" based at RBG Kew in summer 2016.
#
# For a full explanation of the script and the methods it employs, as well as
# how to use it yourself please see the readme in this repository.
#
# Jonathan Williams, 2016
# jonvw28@gmail.com
#
################################################################################
#
# This script looks at the user-set taxonomist method in RunMe.R and sets the
# filtering flags consumed by the author data scripts:
#
#   tx.tax.stat   - filter names on taxonomic status
#   tx.hyb.stat   - filter out hybrid names
#   tx.rnk.stat   - filter names on taxonomic rank
#   tx.basio.year - use the basionym's publication year (always FALSE here)
#   tx.basio.filt - restrict the analysis to basionyms only
#
# It also appends a method-specific suffix to the run identifier.
# (Flag meanings inferred from their names and the original per-branch
# comments -- see the repository readme for the authoritative definitions.)
#
# Map each supported method onto its flag settings and identifier suffix;
# switch() returns NULL for an unrecognised method, which is caught below.
tx.settings <- switch(taxonomist.method,
        "all" =
                list(tax = FALSE, filt = FALSE, basio = FALSE,
                     suffix = "_tax_all"),
        "filtered - not status" =
                list(tax = FALSE, filt = TRUE, basio = FALSE,
                     suffix = "_tax_filt_not_stat"),
        "filtered - status" =
                list(tax = TRUE, filt = TRUE, basio = FALSE,
                     suffix = "_tax_filt_stat"),
        "all basionyms" =
                list(tax = FALSE, filt = FALSE, basio = TRUE,
                     suffix = "_tax_all_basio"),
        "basionyms filtered - not status" =
                list(tax = FALSE, filt = TRUE, basio = TRUE,
                     suffix = "_tax_filt_basio_not_stat"),
        "basionyms filtered - status" =
                list(tax = TRUE, filt = TRUE, basio = TRUE,
                     suffix = "_tax_filt_basio_stat")
)
if (is.null(tx.settings)) {
        #
        # Not a valid choice.  (The original message wrongly said "species
        # data handling method"; this script validates the taxonomist method.)
        #
        stop("Invalid taxonomist method entered")
}
tx.tax.stat <- tx.settings$tax
# the hybrid and rank filters are always switched on/off together
tx.hyb.stat <- tx.settings$filt
tx.rnk.stat <- tx.settings$filt
# the publication year of each combination is always used as-is
tx.basio.year <- FALSE
tx.basio.filt <- tx.settings$basio
# Modify the ID string with the method-specific suffix
identifier <- paste(identifier, tx.settings$suffix, sep = "")
rm(tx.settings)
|
#-------------------------------------- HEADER --------------------------------------------#
#' @title Distributed Student's t-Test
#' @description Computes one and two sample t-tests on vectors of data
#' @details If paired is TRUE then both x and y must be specified and they must be the same length. Missing values are silently
#' removed (in pairs if paired is TRUE). If var.equal is TRUE then the pooled estimate of the variance is used. By default, if
#' var.equal is FALSE then the variance is estimated separately for both groups and the Welch modification to the degrees of freedom is used.
#'
#' @param x a character, the name of a numeric vector of data values.
#' @param y a character, the name of an optional numeric vector of data values.
#' @param mu a number indicating the true value of the mean (or difference in means if you are performing a two sample test).
#' @param paired a logical indicating whether you want a paired t-test.
#' @param conf.level confidence level of the interval.
#' @param var.equal a logical variable indicating whether to treat the two variances as being equal.
#' If TRUE then the pooled variance is used to estimate the variance otherwise the Welch (or Satterthwaite) approximation to the degrees of freedom is used.
#' @param datasources a list of opal object(s) obtained after login in to opal servers;
#' these objects hold also the data assign to R, as \code{data frame}, from opal datasources.
#'
#' @return A list containing the following components:
#' \item{statistic}{the value of the t-statistic.}
#' \item{parameter}{the degrees of freedom for the t-statistic.}
#' \item{p.value}{the p-value for the test.}
#' \item{conf.int}{a confidence interval for the mean appropriate to the specified alternative hypothesis.}
#' \item{estimate}{the estimated mean or difference in means depending on whether it was a one-sample test or a two-sample test.}
#' \item{null.value}{the specified hypothesized value of the mean or mean difference depending on whether it was a one-sample test or a two-sample test.}
#' \item{method}{a character string indicating what type of t-test was performed.}
#'
#' @author Paula Raissa Costa e Silva
#' @section Dependencies:
#' \code{\link{getLength}}, \code{\link{getVariance}}
#' @export
#' @examples {
#' ds.tTest(x)
#' ds.tTest(x, y)
#' }
#'
ds.tTest <- function(x, y=NULL, mu=0, paired=FALSE, conf.level=0.95, var.equal=FALSE, datasources=NULL) {
#-------------------------------------- BASIC CHECKS ----------------------------------------------#
if(is.null(x)){
stop("Please provide the name of the input vector", call. = FALSE)
}
if (!missing(mu) && (length(mu)!=1 || is.na(mu)))
stop("Please provide a single value as mu")
if (!missing(conf.level) && (length(conf.level) != 1 || !is.finite(conf.level) || conf.level < 0 || conf.level > 1))
stop("Please provide a single number between 0 and 1")
# if no opal login details are provided look for 'opal' objects in the environment
if(is.null(datasources)){
datasources <- findLoginObjects()
}
xnames <- extract(x)
varname <- xnames$elements
obj2lookfor <- xnames$holders
if(is.na(obj2lookfor)){
defined <- isDefined(datasources, varname)
} else {
defined <- isDefined(datasources, obj2lookfor)
}
xLenght <- ds.length(x, datasources=datasources)
xMean <- ds.arMean(x, datasources=datasources)
xVar <- ds.var(x, datasources=datasources)
if (!is.null(y)) {
yLenght <- ds.length(y, datasources=datasources)
if(xLenght < 1 || (!var.equal && xLenght < 2)) stop("not enough 'x' observations")
if(yLenght < 1 || (!var.equal && yLenght < 2)) stop("not enough 'y' observations")
if(var.equal && xLenght+yLenght < 3) stop("not enough observations")
yMean <- ds.arMean(y, datasources=datasources)
yVar <- ds.var(y, datasources=datasources)
method <- paste(if(!var.equal) "Welch", "Two Sample t-test")
estimate <- c(xMean, yMean)
names(estimate) <- c("mean of x", "mean of y")
if(var.equal) {
df <- xLenght + yLenght - 2
variance <- 0
if(xLenght > 1) variance <- variance + (xLenght-1) * xVar
if(yLenght > 1) variance <- variance + (yLenght-1) * yVar
variance <- variance / df
stderr <- sqrt(variance * (1/xLenght + 1/yLenght))
} else {
stderrx <- sqrt(xVar / xLenght)
stderry <- sqrt(yVar / yLenght)
stderr <- sqrt(stderrx^2 + stderry^2)
df <- stderr^4 / (stderrx^4/(xLenght-1) + stderry^4 / (yLenght-1))
}
if(stderr < 10 * .Machine$double.eps * max(abs(xMean), abs(yMean))) stop("data are essentially constant")
tstat <- (xMean - yMean - mu)/stderr
} else {
if(xLenght < 2) stop("not enough 'x' observations")
df <- xLenght - 1
stderr <- sqrt(xVar / xLenght)
if(stderr < 10 * .Machine$double.eps * abs(xMean)) stop("data are essentially constant")
tstat <- (xMean - mu) / stderr
method <- if (paired) "Paired t-test" else "One Sample t-test"
estimate <- setNames(xMean, if(paired) "mean of the differences" else "mean of x")
}
# mean <- ds.arMean(x, type, datasources)
# sem <- ds.sem(x, type, datasources)
#
# t <- (mean - mu) / sem
pval <- 2 * pt(-abs(tstat), df)
alpha <- 1 - conf.level
# Confidence interval
confidence <- qt(1 - alpha/2, df)
confidence <- tstat + c(-confidence, confidence)
confidence <- mu + confidence * stderr
names(tstat) <- "t"
names(df) <- "df"
names(mu) <- if(paired || !is.null(y)) "difference in means" else "mean"
attr(confidence, "conf.level") <- conf.level
t <- list(statistic = tstat, parameter = df, p.value = pval,
conf.int = confidence, estimate = estimate, null.value = mu,
method = method)
return(t)
}
|
/R/ds.tTest.R
|
no_license
|
paularaissa/distStatsClient
|
R
| false
| false
| 5,744
|
r
|
#-------------------------------------- HEADER --------------------------------------------#
#' @title Distributed Student's t-Test
#' @description Runs one- and two-sample t-tests on server-held numeric vectors,
#' using only non-disclosive summaries (length, arithmetic mean, variance).
#' @details With \code{var.equal = TRUE} a pooled variance estimate is used;
#' otherwise the Welch (Satterthwaite) degrees-of-freedom approximation applies.
#' @param x a character, the name of a numeric vector of data values.
#' @param y a character, the name of an optional second numeric vector.
#' @param mu hypothesised mean (or difference in means for a two-sample test).
#' @param paired logical; request a paired t-test.
#' @param conf.level confidence level of the reported interval.
#' @param var.equal logical; treat the two group variances as equal.
#' @param datasources a list of opal connection objects; when NULL they are
#' looked up in the calling environment via \code{findLoginObjects()}.
#' @return A list with components statistic, parameter, p.value, conf.int,
#' estimate, null.value and method.
#' @author Paula Raissa Costa e Silva
#' @export
ds.tTest <- function(x, y=NULL, mu=0, paired=FALSE, conf.level=0.95, var.equal=FALSE, datasources=NULL) {
  # ---- argument validation ----
  if (is.null(x)) {
    stop("Please provide the name of the input vector", call. = FALSE)
  }
  bad.mu <- !missing(mu) && (length(mu) != 1 || is.na(mu))
  if (bad.mu)
    stop("Please provide a single value as mu")
  bad.cl <- !missing(conf.level) && (length(conf.level) != 1 || !is.finite(conf.level) || conf.level < 0 || conf.level > 1)
  if (bad.cl)
    stop("Please provide a single number between 0 and 1")
  # ---- resolve connections and verify the variable exists on every server ----
  if (is.null(datasources)) {
    datasources <- findLoginObjects()
  }
  parsed <- extract(x)
  if (is.na(parsed$holders)) {
    defined <- isDefined(datasources, parsed$elements)
  } else {
    defined <- isDefined(datasources, parsed$holders)
  }
  # ---- server-side summaries for x ----
  n.x    <- ds.length(x, datasources = datasources)
  mean.x <- ds.arMean(x, datasources = datasources)
  var.x  <- ds.var(x, datasources = datasources)
  if (is.null(y)) {
    # ---- one-sample branch ----
    if (n.x < 2) stop("not enough 'x' observations")
    df <- n.x - 1
    stderr <- sqrt(var.x / n.x)
    if (stderr < 10 * .Machine$double.eps * abs(mean.x)) stop("data are essentially constant")
    tstat <- (mean.x - mu) / stderr
    method <- if (paired) "Paired t-test" else "One Sample t-test"
    estimate <- setNames(mean.x, if (paired) "mean of the differences" else "mean of x")
  } else {
    # ---- two-sample branch ----
    n.y <- ds.length(y, datasources = datasources)
    if (n.x < 1 || (!var.equal && n.x < 2)) stop("not enough 'x' observations")
    if (n.y < 1 || (!var.equal && n.y < 2)) stop("not enough 'y' observations")
    if (var.equal && n.x + n.y < 3) stop("not enough observations")
    mean.y <- ds.arMean(y, datasources = datasources)
    var.y  <- ds.var(y, datasources = datasources)
    method <- paste(if(!var.equal) "Welch", "Two Sample t-test")
    estimate <- setNames(c(mean.x, mean.y), c("mean of x", "mean of y"))
    if (var.equal) {
      # Pooled variance; each group contributes only if it has >= 2 observations.
      df <- n.x + n.y - 2
      pooled <- 0
      if (n.x > 1) pooled <- pooled + (n.x - 1) * var.x
      if (n.y > 1) pooled <- pooled + (n.y - 1) * var.y
      pooled <- pooled / df
      stderr <- sqrt(pooled * (1 / n.x + 1 / n.y))
    } else {
      # Welch-Satterthwaite degrees of freedom.
      se.x <- sqrt(var.x / n.x)
      se.y <- sqrt(var.y / n.y)
      stderr <- sqrt(se.x^2 + se.y^2)
      df <- stderr^4 / (se.x^4 / (n.x - 1) + se.y^4 / (n.y - 1))
    }
    if (stderr < 10 * .Machine$double.eps * max(abs(mean.x), abs(mean.y))) stop("data are essentially constant")
    tstat <- (mean.x - mean.y - mu) / stderr
  }
  # ---- two-sided p-value and confidence interval ----
  pval <- 2 * pt(-abs(tstat), df)
  crit <- qt(1 - (1 - conf.level) / 2, df)
  ci <- mu + (tstat + c(-crit, crit)) * stderr
  names(tstat) <- "t"
  names(df) <- "df"
  names(mu) <- if (paired || !is.null(y)) "difference in means" else "mean"
  attr(ci, "conf.level") <- conf.level
  list(statistic = tstat, parameter = df, p.value = pval,
       conf.int = ci, estimate = estimate, null.value = mu,
       method = method)
}
|
###########################################################################################
# 1) Part one: clear the workspace and set the working directory
setwd("/home/nazarov/02-fmlab.hse.ru/ТЗ до 29.07.2015/With zero/")
#setwd("J:/12 - ЛАФР/02 - Декомпозиция")
#clear working environment
rm(list=ls())
# Strategy-return helpers: ret(), ret.winner(), ret.loser(), P_R() come from here
source("reality_func.R")
###########################################################################################
# 4) Part four: reality check with a breakdown by percentiles (50%, 30%, 20%, 10%)
library(XLConnect)
# Load the previously saved closing prices
resultDataFull<- readWorksheet(loadWorkbook("stocks_russia.xlsx"),sheet=1)
row.names(resultDataFull) <- resultDataFull[,1]
resultDataFull <-resultDataFull[,-1]
# Grid search: formation period 1 to 12 months, wait 0 to 8 weeks, hold 1 to 12 months;
# this way the last 25 months are only needed by a single model
# Step used when computing the differences between winner and loser groups
STEP=1
# Formation period (months), waiting period (weeks), investment period (months)
UP1=12
UP2=8
UP3=12
#UP1=2
#UP2=2
#UP3=2
# N: number of usable windows, allowing for the offset at the end of the sample
N <- (nrow(resultDataFull)-(2+UP3*4))%/%STEP
#N 227
######################## Constants for the reality check (White-style bootstrap)
R <- 1
#
#temp <- ret(4, 1, 4, STEP, N, resultDataFull[,-1], UP1, UP2, 0.3)
#T <- 164
# NOTE(review): T shadows R's TRUE shorthand (as R above shadows nothing harmful,
# but any later bare use of T as a logical would now be 593) — consider renaming.
T <- 593
N_rc <- 500
Q <- 0.1
#n <- T-R+1
set.seed(42)
# Pre-draw N_rc columns of bootstrap resampling indices (P_R is defined in reality_func.R)
P_R_ind <- data.frame(P_R(R,T,Q))
for (i in 2:N_rc){
P_R_ind[,i] <- P_R(R,T,Q)
}
V_star <- c()
f_bar <- c()
V_star_sharp <- c()
f_sharp <- c()
########################
m <- 1
realityCheckData <- data.frame(1,1,1,1,1,1,1,1,1,1)
colnames(realityCheckData) <-c("mean","t","p-value","hist_per","moment_per","invest_per","percent","winners","losers", "Amount_of_negative")
# Exhaustive grid over percentile cutoff and the three strategy periods
for (percent in c(0.5,0.3,0.2,0.1) ){
for (p1 in 1:UP1 ){
for (p2 in 0:UP2 ){
for (p3 in 1:UP3 ){
# vector of deltas (per-window strategy returns for this parameter combination)
temp <- ret(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
return.winner<- ret.winner(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
return.loser<- ret.loser(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
n <- length(temp)
#realityCheckData[m, ] <- list(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, percent,
# mean(return.winner), mean(return.loser))
#cat(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, n, percent, mean(return.winner), mean(return.loser), "\n")
# Record mean return, t-statistic, two-sided p-value, the three periods (converted
# to weeks), percentile cutoff, winner/loser means and the count of negative returns.
realityCheckData[m, ] <- list(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, percent,
mean(return.winner), mean(return.loser),length(temp[temp<0]))
cat(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, n, percent, mean(return.winner), mean(return.loser),length(temp[temp<0]), "\n")
#############################################################REALITY_CHECK##########################################################################
# Excess return over a 6% annual risk-free rate (converted to monthly)
temp <- temp-0.06/12
f_bar <- mean(temp)
# Running maximum of the scaled mean statistic across all models seen so far,
# and the same maximum over each bootstrap replication.
if(m==1){
V_bar <- sqrt(n)*mean(temp)
for (k in 1:N_rc){
V_star[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
}
}
else {
V_bar <- max(sqrt(n)*f_bar,V_bar)
for (k in 1:N_rc){
t <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
V_star[k] <-max(t,V_star[k])
}
}
retBoot <- c(V_star,V_bar)
# NOTE(review): the hard-coded 501 / 500 assume N_rc == 500 — derive from N_rc instead.
realityCheckData[m, "pBoot"] <- 1-(rank(retBoot, ties.method = "first")[501]-1)/500
# list(V_bar,V_star)
####################################################################################################################################################
#############################################################REALITY_CHECK - SHARP##################################################################
# Same procedure, but using the Sharpe-ratio statistic mean/sd instead of the mean
f_sharp <- mean(temp)/sd(temp)
if(m==1){
V_bar_sharp <- sqrt(n)*f_sharp
for (k in 1:N_rc){
# V_star[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
V_star_sharp[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]])/sd(temp[P_R_ind[,k]]) - f_sharp)
}
}
else {
V_bar_sharp <- max(sqrt(n)* f_sharp ,V_bar_sharp)
for (k in 1:N_rc){
# t <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
t <- sqrt(n)*(mean(temp[P_R_ind[,k]])/sd(temp[P_R_ind[,k]]) - f_sharp)
V_star_sharp[k] <-max(t,V_star_sharp[k])
}
}
retBoot.sharp <- c(V_star_sharp,V_bar_sharp)
# NOTE(review): same hard-coded 501 / 500 assumption as above.
realityCheckData[m, "pBoot-Sharp"] <- 1-(rank(retBoot.sharp, ties.method = "first")[501]-1)/500
# list(V_bar,V_star)
####################################################################################################################################################
m <- m+1
}
}
}
}
###########################################################################################
# 5) Part five: save the results to disk
# persist the results of the run
stuff <- list(data=realityCheckData, num=N, V_bar=V_bar,V_star=V_star, V_bar_sharp=V_bar_sharp,V_star_sharp=V_star_sharp) # list of objects worth keeping
saveRDS(file = "russia_zero_v1.RDS",stuff) # save everything valuable to a file
mylist <- readRDS("tz_india_v1.RDS") # read back what the file contains
#res_sh <- head(mylist$data[order(-mylist$data[,2]) ,],20) # inspect the best results
#res_sh
###########################################################################################
# testing / ad-hoc inspection of saved runs
setwd("~/Рабочий стол/Reality check/v6")
dd <- realityCheckData[ (realityCheckData$percent==0.5) ,]
head( dd[order((- dd[,1]) ) ,],30)
head( realityCheckData[order(- realityCheckData[,1]) ,],10)
head( realityCheckData[order(- realityCheckData[,2]) ,],10)
head( realityCheckData[order(- realityCheckData[,1]) ,-8],20)
plot ( realityCheckData$mean)
plot( realityCheckData$pBoot)
V_bar/sqrt(74)+0.06/12
ret_p <- c(V_star,V_bar)
1-(rank(ret_p, ties.method = "first")[501]-1)/500
res_sh <- head( realityCheckData[order(- realityCheckData[,1]) ,],20) # inspect the best results
res_sh
mylist <- readRDS("res_rts_20-05-2015.RDS") # read back what the file contains
res_sh2 <- head(mylist$data[order(-mylist$data[,1]) ,],20) # inspect the best results
res_sh2
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
ret_p <- c(V_star,V_bar)
1-(rank(ret_p, ties.method = "first")[501]-1)/500
mylist <- readRDS("res_rts_19-05-2015.RDS") # read back what the file contains
res_sh3 <- head(mylist$data[order(-mylist$data[,1]) ,],20) # inspect the best results
res_sh3
#ret(11, 2, 1, STEP, N, resultDataFull[,-1], UP1, UP2, 0.5)
mylist <- readRDS("tz_29_v1(Q=0.1).RDS") # read back what the file contains
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
result.data <- mylist[[1]]
sort.table<- result.data[order(-result.data[,1]),]
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
|
/03 - данные с нулями/russia_zero.R
|
no_license
|
nicknazarov/02-fmlab.hse.ru
|
R
| false
| false
| 8,155
|
r
|
###########################################################################################
# 1) Part one: clear the workspace and set the working directory
setwd("/home/nazarov/02-fmlab.hse.ru/ТЗ до 29.07.2015/With zero/")
#setwd("J:/12 - ЛАФР/02 - Декомпозиция")
#clear working environment
rm(list=ls())
# Strategy-return helpers: ret(), ret.winner(), ret.loser(), P_R() come from here
source("reality_func.R")
###########################################################################################
# 4) Part four: reality check with a breakdown by percentiles (50%, 30%, 20%, 10%)
library(XLConnect)
# Load the previously saved closing prices
resultDataFull<- readWorksheet(loadWorkbook("stocks_russia.xlsx"),sheet=1)
row.names(resultDataFull) <- resultDataFull[,1]
resultDataFull <-resultDataFull[,-1]
# Grid search: formation period 1 to 12 months, wait 0 to 8 weeks, hold 1 to 12 months;
# this way the last 25 months are only needed by a single model
# Step used when computing the differences between winner and loser groups
STEP=1
# Formation period (months), waiting period (weeks), investment period (months)
UP1=12
UP2=8
UP3=12
#UP1=2
#UP2=2
#UP3=2
# N: number of usable windows, allowing for the offset at the end of the sample
N <- (nrow(resultDataFull)-(2+UP3*4))%/%STEP
#N 227
######################## Constants for the reality check (White-style bootstrap)
R <- 1
#
#temp <- ret(4, 1, 4, STEP, N, resultDataFull[,-1], UP1, UP2, 0.3)
#T <- 164
# NOTE(review): T shadows R's TRUE shorthand — any later bare use of T as a
# logical would now be 593; consider renaming.
T <- 593
N_rc <- 500
Q <- 0.1
#n <- T-R+1
set.seed(42)
# Pre-draw N_rc columns of bootstrap resampling indices (P_R is defined in reality_func.R)
P_R_ind <- data.frame(P_R(R,T,Q))
for (i in 2:N_rc){
P_R_ind[,i] <- P_R(R,T,Q)
}
V_star <- c()
f_bar <- c()
V_star_sharp <- c()
f_sharp <- c()
########################
m <- 1
realityCheckData <- data.frame(1,1,1,1,1,1,1,1,1,1)
colnames(realityCheckData) <-c("mean","t","p-value","hist_per","moment_per","invest_per","percent","winners","losers", "Amount_of_negative")
# Exhaustive grid over percentile cutoff and the three strategy periods
for (percent in c(0.5,0.3,0.2,0.1) ){
for (p1 in 1:UP1 ){
for (p2 in 0:UP2 ){
for (p3 in 1:UP3 ){
# vector of deltas (per-window strategy returns for this parameter combination)
temp <- ret(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
return.winner<- ret.winner(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
return.loser<- ret.loser(p1, p2, p3, STEP, N, resultDataFull, UP1, UP2, percent)
n <- length(temp)
#realityCheckData[m, ] <- list(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, percent,
# mean(return.winner), mean(return.loser))
#cat(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, n, percent, mean(return.winner), mean(return.loser), "\n")
# Record mean return, t-statistic, two-sided p-value, the three periods (converted
# to weeks), percentile cutoff, winner/loser means and the count of negative returns.
realityCheckData[m, ] <- list(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, percent,
mean(return.winner), mean(return.loser),length(temp[temp<0]))
cat(mean(temp),abs(mean(temp))/sd(temp)*sqrt(n), (1-pt(q = abs(mean(temp))/sd(temp)*sqrt(n),df = n-1))*2 ,p1*4, p2, p3*4, n, percent, mean(return.winner), mean(return.loser),length(temp[temp<0]), "\n")
#############################################################REALITY_CHECK##########################################################################
# Excess return over a 6% annual risk-free rate (converted to monthly)
temp <- temp-0.06/12
f_bar <- mean(temp)
# Running maximum of the scaled mean statistic across all models seen so far,
# and the same maximum over each bootstrap replication.
if(m==1){
V_bar <- sqrt(n)*mean(temp)
for (k in 1:N_rc){
V_star[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
}
}
else {
V_bar <- max(sqrt(n)*f_bar,V_bar)
for (k in 1:N_rc){
t <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
V_star[k] <-max(t,V_star[k])
}
}
retBoot <- c(V_star,V_bar)
# NOTE(review): the hard-coded 501 / 500 assume N_rc == 500 — derive from N_rc instead.
realityCheckData[m, "pBoot"] <- 1-(rank(retBoot, ties.method = "first")[501]-1)/500
# list(V_bar,V_star)
####################################################################################################################################################
#############################################################REALITY_CHECK - SHARP##################################################################
# Same procedure, but using the Sharpe-ratio statistic mean/sd instead of the mean
f_sharp <- mean(temp)/sd(temp)
if(m==1){
V_bar_sharp <- sqrt(n)*f_sharp
for (k in 1:N_rc){
# V_star[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
V_star_sharp[k] <- sqrt(n)*(mean(temp[P_R_ind[,k]])/sd(temp[P_R_ind[,k]]) - f_sharp)
}
}
else {
V_bar_sharp <- max(sqrt(n)* f_sharp ,V_bar_sharp)
for (k in 1:N_rc){
# t <- sqrt(n)*(mean(temp[P_R_ind[,k]]) - f_bar)
t <- sqrt(n)*(mean(temp[P_R_ind[,k]])/sd(temp[P_R_ind[,k]]) - f_sharp)
V_star_sharp[k] <-max(t,V_star_sharp[k])
}
}
retBoot.sharp <- c(V_star_sharp,V_bar_sharp)
# NOTE(review): same hard-coded 501 / 500 assumption as above.
realityCheckData[m, "pBoot-Sharp"] <- 1-(rank(retBoot.sharp, ties.method = "first")[501]-1)/500
# list(V_bar,V_star)
####################################################################################################################################################
m <- m+1
}
}
}
}
###########################################################################################
# 5) Part five: save the results to disk
# persist the results of the run
stuff <- list(data=realityCheckData, num=N, V_bar=V_bar,V_star=V_star, V_bar_sharp=V_bar_sharp,V_star_sharp=V_star_sharp) # list of objects worth keeping
saveRDS(file = "russia_zero_v1.RDS",stuff) # save everything valuable to a file
mylist <- readRDS("tz_india_v1.RDS") # read back what the file contains
#res_sh <- head(mylist$data[order(-mylist$data[,2]) ,],20) # inspect the best results
#res_sh
###########################################################################################
# testing / ad-hoc inspection of saved runs
setwd("~/Рабочий стол/Reality check/v6")
dd <- realityCheckData[ (realityCheckData$percent==0.5) ,]
head( dd[order((- dd[,1]) ) ,],30)
head( realityCheckData[order(- realityCheckData[,1]) ,],10)
head( realityCheckData[order(- realityCheckData[,2]) ,],10)
head( realityCheckData[order(- realityCheckData[,1]) ,-8],20)
plot ( realityCheckData$mean)
plot( realityCheckData$pBoot)
V_bar/sqrt(74)+0.06/12
ret_p <- c(V_star,V_bar)
1-(rank(ret_p, ties.method = "first")[501]-1)/500
res_sh <- head( realityCheckData[order(- realityCheckData[,1]) ,],20) # inspect the best results
res_sh
mylist <- readRDS("res_rts_20-05-2015.RDS") # read back what the file contains
res_sh2 <- head(mylist$data[order(-mylist$data[,1]) ,],20) # inspect the best results
res_sh2
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
ret_p <- c(V_star,V_bar)
1-(rank(ret_p, ties.method = "first")[501]-1)/500
mylist <- readRDS("res_rts_19-05-2015.RDS") # read back what the file contains
res_sh3 <- head(mylist$data[order(-mylist$data[,1]) ,],20) # inspect the best results
res_sh3
#ret(11, 2, 1, STEP, N, resultDataFull[,-1], UP1, UP2, 0.5)
mylist <- readRDS("tz_29_v1(Q=0.1).RDS") # read back what the file contains
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
result.data <- mylist[[1]]
sort.table<- result.data[order(-result.data[,1]),]
V_star <- mylist[[4]]
V_bar <- mylist[[3]]
|
## Essay auto-grading: engineer simple features (word/letter counts), fit a
## boosted-tree model and a random-forest baseline on normalized grades, and
## write out a submission file.
##
## Fixes over the previous revision:
##  - require() -> library() so missing packages fail loudly;
##  - the word-count histogram and the submission write-out previously ran
##    BEFORE the objects they used were created; statements are now in
##    dependency order;
##  - removed a stray `str_count(testRegex, ...)` line (testRegex was never
##    defined) and a malformed nested predict() call;
##  - removed an empty argument in write.table().

library(stringr)
library(plyr)
library(ggplot2)
library(mboost)
library(randomForest)

## Load Data ----
sampleSub <- read.csv(file="sample_submission_file.csv",
                      stringsAsFactors=FALSE)
train <- read.delim(file="train.tsv",
                    stringsAsFactors=FALSE,
                    fill=FALSE)
test <- read.delim(file="test.tsv",
                   stringsAsFactors=FALSE,
                   fill=FALSE)

## Normalize grades ----
# Each essay "set" is graded on its own scale; rescale every set onto the
# scale of the set with the largest maximum grade.
train[which(is.na(train$grade)), ]  # inspect rows with missing grades
norm.gradeMax <- numeric(5)
for (i in 1:5) {
  norm.gradeMax[i] <- max(train$grade[which(train$set == i)])
}
norm.factor <- max(norm.gradeMax) / norm.gradeMax
train$factor <- norm.factor[train$set]
test$factor <- norm.factor[test$set]
train$normalizedGrade <- train$grade * train$factor
ggplot(train, aes(x=normalizedGrade)) + geom_histogram()

## Add Features ----
# Word count, approximated by the number of spaces in the essay text.
train$wc <- str_count(train$essay, "[ ]")
test$wc <- str_count(test$essay, "[ ]")
ggplot(train, aes(x=wc)) + geom_histogram()
# Essay length in characters.
train$essayLength <- nchar(train$essay)
ggplot(train, aes(x=normalizedGrade, y=essayLength)) +
  geom_point(shape=1) +
  geom_smooth(method=lm) +
  ggtitle("Goodness of fit for linear model") +
  xlab("grade normalized on 12 point scale") +
  ylab("letter count")

## Train and Validate models ----
# Gradient-boosted trees (mboost::blackboost) on word count alone.
model <- blackboost(normalizedGrade ~ wc, train)
summary(model)
ggplot(train, aes(x=normalizedGrade, y=wc)) +
  geom_point(shape=1) +
  geom_smooth(method=lm) +
  ggtitle("Goodness of fit for linear model") +
  xlab("grade normalized on 12 point scale") +
  ylab("word count")
plot(normalizedGrade ~ wc, data=train)
lines(train$wc, predict(model), col = "red")
plot(resid(model))
names(model)
head(predict(model))
head(train$normalizedGrade)
head(predict(model, newdata=test))
# Predict on the test set, then map back to each set's original grade scale.
test$normalizedGrade <- round(predict(model, newdata=test), digits = 0)
test$grade <- test$normalizedGrade / test$factor
train$predictedGrade <- floor(predict(model))
table(train$normalizedGrade == train$predictedGrade)  # in-sample exact-match rate

# Random forest baseline on the same single feature.
model.rf <- randomForest(normalizedGrade ~ wc, data=train)
nrow(predict(model.rf, newdata=test))
model.rf[["type"]]

## Write out results ----
# Build the submission BEFORE writing it (the old code wrote an undefined object).
submission1 <- data.frame(test$id, test$set, sampleSub$weight, test$grade)
names(submission1) <- names(sampleSub)
write.table(submission1, file="submission1.csv", sep=",", row.names=FALSE,
            na="NA", eol="\n", quote=FALSE)
|
/final-project.r
|
no_license
|
littlemerman/pproject
|
R
| false
| false
| 2,518
|
r
|
## Essay auto-grading: engineer simple features (word/letter counts), fit a
## boosted-tree model and a random-forest baseline on normalized grades, and
## write out a submission file.
##
## Fixes over the previous revision:
##  - require() -> library() so missing packages fail loudly;
##  - the word-count histogram and the submission write-out previously ran
##    BEFORE the objects they used were created; statements are now in
##    dependency order;
##  - removed a stray `str_count(testRegex, ...)` line (testRegex was never
##    defined) and a malformed nested predict() call;
##  - removed an empty argument in write.table().

library(stringr)
library(plyr)
library(ggplot2)
library(mboost)
library(randomForest)

## Load Data ----
sampleSub <- read.csv(file="sample_submission_file.csv",
                      stringsAsFactors=FALSE)
train <- read.delim(file="train.tsv",
                    stringsAsFactors=FALSE,
                    fill=FALSE)
test <- read.delim(file="test.tsv",
                   stringsAsFactors=FALSE,
                   fill=FALSE)

## Normalize grades ----
# Each essay "set" is graded on its own scale; rescale every set onto the
# scale of the set with the largest maximum grade.
train[which(is.na(train$grade)), ]  # inspect rows with missing grades
norm.gradeMax <- numeric(5)
for (i in 1:5) {
  norm.gradeMax[i] <- max(train$grade[which(train$set == i)])
}
norm.factor <- max(norm.gradeMax) / norm.gradeMax
train$factor <- norm.factor[train$set]
test$factor <- norm.factor[test$set]
train$normalizedGrade <- train$grade * train$factor
ggplot(train, aes(x=normalizedGrade)) + geom_histogram()

## Add Features ----
# Word count, approximated by the number of spaces in the essay text.
train$wc <- str_count(train$essay, "[ ]")
test$wc <- str_count(test$essay, "[ ]")
ggplot(train, aes(x=wc)) + geom_histogram()
# Essay length in characters.
train$essayLength <- nchar(train$essay)
ggplot(train, aes(x=normalizedGrade, y=essayLength)) +
  geom_point(shape=1) +
  geom_smooth(method=lm) +
  ggtitle("Goodness of fit for linear model") +
  xlab("grade normalized on 12 point scale") +
  ylab("letter count")

## Train and Validate models ----
# Gradient-boosted trees (mboost::blackboost) on word count alone.
model <- blackboost(normalizedGrade ~ wc, train)
summary(model)
ggplot(train, aes(x=normalizedGrade, y=wc)) +
  geom_point(shape=1) +
  geom_smooth(method=lm) +
  ggtitle("Goodness of fit for linear model") +
  xlab("grade normalized on 12 point scale") +
  ylab("word count")
plot(normalizedGrade ~ wc, data=train)
lines(train$wc, predict(model), col = "red")
plot(resid(model))
names(model)
head(predict(model))
head(train$normalizedGrade)
head(predict(model, newdata=test))
# Predict on the test set, then map back to each set's original grade scale.
test$normalizedGrade <- round(predict(model, newdata=test), digits = 0)
test$grade <- test$normalizedGrade / test$factor
train$predictedGrade <- floor(predict(model))
table(train$normalizedGrade == train$predictedGrade)  # in-sample exact-match rate

# Random forest baseline on the same single feature.
model.rf <- randomForest(normalizedGrade ~ wc, data=train)
nrow(predict(model.rf, newdata=test))
model.rf[["type"]]

## Write out results ----
# Build the submission BEFORE writing it (the old code wrote an undefined object).
submission1 <- data.frame(test$id, test$set, sampleSub$weight, test$grade)
names(submission1) <- names(sampleSub)
write.table(submission1, file="submission1.csv", sep=",", row.names=FALSE,
            na="NA", eol="\n", quote=FALSE)
|
\name{Many simple quantile regressions using logistic regressions}
\alias{logiquant.regs}
\title{
Many simple quantile regressions using logistic regressions.
}
\description{
Many simple quantile regressions using logistic regressions.
}
\usage{
logiquant.regs(target, dataset, logged = FALSE)
}
\arguments{
\item{target}{
The dependent variable, a numerical vector.
}
\item{dataset}{
A matrix with the independent variables.
}
\item{logged}{
Should the p-values be returned (FALSE) or their logarithm (TRUE)?
}
}
\details{
Instead of fitting quantile regression models, one for each predictor variable and trying to assess its significance,
Redden et al. (2004) proposed a simple significance test based on logistic regression.
Create an indicator variable I where 1 indicates a response value above its median and 0 elsewhere.
Since I is binary, perform logistic regression for the predictor and assess its significance using the likelihood
ratio test. We perform many logistic regression models since we have many predictors whose univariate association with the
response variable we want to test.
}
\value{
A two-column matrix with the test statistics (likelihood ratio test statistic) and their
associated p-values (or their logarithm).
}
\references{
David T. Redden, Jose R. Fernandez and David B. Allison (2004). A simple significance test for quantile regression.
Statistics in Medicine, 23(16): 2587-2597
}
\author{
Author: Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{bic.regs}, \link{sp.logiregs} }
}
\examples{
y <- rcauchy(100, 3, 2)
x <- matrix( rnorm(100 * 50), ncol = 50 )
a <- MXM::logiquant.regs(y, x)
}
|
/man/logiquant.regs.Rd
|
no_license
|
cran/MXM
|
R
| false
| false
| 1,812
|
rd
|
\name{Many simple quantile regressions using logistic regressions}
\alias{logiquant.regs}
\title{
Many simple quantile regressions using logistic regressions.
}
\description{
Many simple quantile regressions using logistic regressions.
}
\usage{
logiquant.regs(target, dataset, logged = FALSE)
}
\arguments{
\item{target}{
The dependent variable, a numerical vector.
}
\item{dataset}{
A matrix with the independent variables.
}
\item{logged}{
Should the p-values be returned (FALSE) or their logarithm (TRUE)?
}
}
\details{
Instead of fitting quantile regression models, one for each predictor variable and trying to assess its significance,
Redden et al. (2004) proposed a simple significance test based on logistic regression.
Create an indicator variable I where 1 indicates a response value above its median and 0 elsewhere.
Since I is binary, perform logistic regression for the predictor and assess its significance using the likelihood
ratio test. We perform many logistic regression models since we have many predictors whose univariate association with the
response variable we want to test.
}
\value{
A two-column matrix with the test statistics (likelihood ratio test statistic) and their
associated p-values (or their logarithm).
}
\references{
David T. Redden, Jose R. Fernandez and David B. Allison (2004). A simple significance test for quantile regression.
Statistics in Medicine, 23(16): 2587-2597
}
\author{
Author: Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{bic.regs}, \link{sp.logiregs} }
}
\examples{
y <- rcauchy(100, 3, 2)
x <- matrix( rnorm(100 * 50), ncol = 50 )
a <- MXM::logiquant.regs(y, x)
}
|
# --- Telecom churn: load and preprocess the training data ---
# NOTE(review): rm(list = ls()) and an absolute setwd() make this script
# non-portable; left unchanged in this documentation-only pass.
rm(list = ls())
#Load Libraries
x = c('ggplot2', 'corrgram', 'DMwR', 'caret', 'randomForest', 'unbalanced', 'C50', 'dummies',
'e1071', 'Information','MASS','rpart', 'gbm', 'ROSE')
#install.packages(x)
# require() returns FALSE instead of erroring when a package is missing,
# so a failed load only surfaces later as "could not find function".
lapply(x, require, character.only = TRUE)
rm(x)
library(ggplot2)
#Importing the dataset
setwd('C:/Users/chait/Desktop/Project1')
df = read.csv('Train_Data.csv')
#Data preprocessing: count missing values and inspect structure
sum(is.na(df))
head(df)
str(df)
# Coerce the two nominal ID-like columns to factors
df$state = as.factor(as.character(df$state))
df$area.code = as.factor(as.character(df$area.code))
str(df$state)
str(df$area.code)
# phone.number is a unique identifier with no predictive value
df = subset(df,select =-c(phone.number))
#Exploratory data analysis
library(reshape)
table(df$Churn)
pie(table(df$Churn),main = 'Pie chart for Customer Churn',radius = 1)
#Separating the numeric data
numeric_index = sapply(df,is.numeric)
numeric_data = df[,numeric_index]
cnames = colnames(numeric_data)
#Exploratory data analysis: one histogram per numeric column
for(i in 1:ncol(numeric_data))
{
  hist(numeric_data[,i],main = paste("Histogram of" , cnames[i]),col='green',border = 'black')
}
# Variable importance via a throwaway random forest (forest not kept)
imppred <- randomForest(Churn ~ ., data = df,
                     ntree = 100, keep.forest = FALSE, importance = TRUE)
importance(imppred, type = 1)
#Encoding categorical variables as integer-labelled factors
for(i in 1:ncol(df)){
  if(class(df[,i]) == 'factor'){
    df[,i] = factor(df[,i], labels =(1:length(levels(factor(df[,i])))))
  }
}
#Outlier analysis: boxplot of each numeric column grouped by Churn,
# stored as gn1..gn15 via assign() for later grid.arrange calls
for (i in 1:length(cnames)){
  assign(paste0("gn",i),ggplot(aes_string(y = cnames[i],x = "Churn"),data = subset(df))+
           stat_boxplot(geom = "errorbar", width = 0.5)+
           geom_boxplot(outlier.colour = 'red', fill = "green", outlier.shape = 18,
                        outlier.size = 1, notch = FALSE) +
           theme(legend.position = "bottom")+
           labs(y=cnames[i],x = "Churn")+
           ggtitle(paste(" ", cnames[i])))
}
#Plotting plots together
gridExtra::grid.arrange(gn1,gn2,gn3,ncol = 3)
gridExtra::grid.arrange(gn4,gn5,gn6,ncol = 3)
gridExtra::grid.arrange(gn7,gn8,gn9,ncol = 3)
gridExtra::grid.arrange(gn10,gn11,gn12,ncol = 3)
gridExtra::grid.arrange(gn13,gn14,gn15,ncol = 3)
#Imputing the outlier values with knnImputation method:
# first mark boxplot outliers as NA, then impute with kNN (k = 80)
for( i in cnames){
  print(i)
  val = df[,i][df[,i] %in% boxplot.stats(df[,i])$out]
  df[,i][df[,i] %in% val] = NA
}
df = knnImputation(df, k=80)
#Feature Selection | Correlation plot for numeric variables
library(corrplot)
m = cor(df[,numeric_index])
corrplot(m, method="square")
#Feature Selection: chi-square tests of each factor against Churn
factor_index = sapply(df,is.factor)
factor_data = df[,factor_index]
for(i in 1:4)
{
  print(names(factor_data[i]))
  print(chisq.test(table(factor_data$Churn,factor_data[,i])))
}
#Feature Selection or Dimensionality Reduction:
# drop charge columns (perfectly correlated with minutes) and weak factors
df1 = subset(df, select = -c(total.day.charge,total.eve.charge,total.night.charge,total.intl.charge,area.code,voice.mail.plan))
numeric_index = sapply(df1,is.numeric)
numeric_data = df1[,numeric_index]
df_train=df1
#Feature Scaling | Normalizing the data to [0, 1] per column
for( i in colnames(numeric_data))
{
  print(i)
  df_train[,i] = (df_train[,i]- min(df_train[,i]))/
    (max(df_train[,i] - min(df_train[,i])))
}
############ Test Data processing############
#Importing the test data
df = read.csv('Test_data.csv')
sum(is.na(df))
head(df)
str(df)
#Basic pre-processing (mirrors the training pipeline)
df$state = as.factor(as.character(df$state))
df$area.code = as.factor(as.character(df$area.code))
str(df$area.code)
datatypes = sapply(df,class)
for(i in 1:ncol(df)){
  if(class(df[,i]) == 'factor'){
    df[,i] = factor(df[,i], labels =(1:length(levels(factor(df[,i])))))
  }
}
#Feature Selection or Dimensionality Reduction in Test Data
df1 = subset(df, select = -c(total.day.charge,total.eve.charge,total.night.charge,total.intl.charge,phone.number,area.code,voice.mail.plan))
df_test=df1
#Feature scaling | Normalization
# NOTE(review): this rescales with the TEST set's own min/max; standard
# practice is to reuse the training min/max — verify before relying on
# the reported test metrics.
for( i in colnames(numeric_data))
{
  print(i)
  df_test[,i] = (df_test[,i]- min(df_test[,i]))/
    (max(df_test[,i] - min(df_test[,i])))
}
#Building and training a decision tree model
#install.packages('C50')
library(C50)
# FIX: the boosting argument was misspelled "trails"; C5.0() silently
# swallowed it via `...` and fit a single tree (trials = 1). Spelling it
# "trials" enables the intended 100 boosting iterations.
C50_model = C5.0(Churn ~.,df_train, trials = 100, rules = TRUE)
#summary(C50_model)
#Predicting the output
# NOTE(review): df_test has only 14 columns, so -15 is an out-of-range
# negative index that drops nothing; predict() ignores Churn anyway.
C50_predictions = predict(C50_model,df_test[,-15],type = 'class')
#Evaluate the performance of classification model
Confmatrix_C50 = table(df_test$Churn,C50_predictions)
confusionMatrix(Confmatrix_C50)
#Building and training a Random Forest Model
#RF
RF_model = randomForest(Churn ~.,df_train, importance = TRUE, ntree=100)
RF_Predictions = predict(RF_model, df_test[,-15])
Confmatrix_RF = table(df_test$Churn,RF_Predictions)
confusionMatrix(Confmatrix_RF)
#############################################END########################
|
/Project.r
|
no_license
|
chaitu009/Telecom-Customer-Churn
|
R
| false
| false
| 4,643
|
r
|
# --- Telecom churn: load and preprocess the training data ---
# NOTE(review): rm(list = ls()) and an absolute setwd() make this script
# non-portable; left unchanged in this documentation-only pass.
rm(list = ls())
#Load Libraries
x = c('ggplot2', 'corrgram', 'DMwR', 'caret', 'randomForest', 'unbalanced', 'C50', 'dummies',
'e1071', 'Information','MASS','rpart', 'gbm', 'ROSE')
#install.packages(x)
# require() returns FALSE instead of erroring when a package is missing,
# so a failed load only surfaces later as "could not find function".
lapply(x, require, character.only = TRUE)
rm(x)
library(ggplot2)
#Importing the dataset
setwd('C:/Users/chait/Desktop/Project1')
df = read.csv('Train_Data.csv')
#Data preprocessing: count missing values and inspect structure
sum(is.na(df))
head(df)
str(df)
# Coerce the two nominal ID-like columns to factors
df$state = as.factor(as.character(df$state))
df$area.code = as.factor(as.character(df$area.code))
str(df$state)
str(df$area.code)
# phone.number is a unique identifier with no predictive value
df = subset(df,select =-c(phone.number))
#Exploratory data analysis
library(reshape)
table(df$Churn)
pie(table(df$Churn),main = 'Pie chart for Customer Churn',radius = 1)
#Separating the numeric data
numeric_index = sapply(df,is.numeric)
numeric_data = df[,numeric_index]
cnames = colnames(numeric_data)
#Exploratory data analysis: one histogram per numeric column
for(i in 1:ncol(numeric_data))
{
  hist(numeric_data[,i],main = paste("Histogram of" , cnames[i]),col='green',border = 'black')
}
# Variable importance via a throwaway random forest (forest not kept)
imppred <- randomForest(Churn ~ ., data = df,
                     ntree = 100, keep.forest = FALSE, importance = TRUE)
importance(imppred, type = 1)
#Encoding categorical variables as integer-labelled factors
for(i in 1:ncol(df)){
  if(class(df[,i]) == 'factor'){
    df[,i] = factor(df[,i], labels =(1:length(levels(factor(df[,i])))))
  }
}
#Outlier analysis: boxplot of each numeric column grouped by Churn,
# stored as gn1..gn15 via assign() for later grid.arrange calls
for (i in 1:length(cnames)){
  assign(paste0("gn",i),ggplot(aes_string(y = cnames[i],x = "Churn"),data = subset(df))+
           stat_boxplot(geom = "errorbar", width = 0.5)+
           geom_boxplot(outlier.colour = 'red', fill = "green", outlier.shape = 18,
                        outlier.size = 1, notch = FALSE) +
           theme(legend.position = "bottom")+
           labs(y=cnames[i],x = "Churn")+
           ggtitle(paste(" ", cnames[i])))
}
#Plotting plots together
gridExtra::grid.arrange(gn1,gn2,gn3,ncol = 3)
gridExtra::grid.arrange(gn4,gn5,gn6,ncol = 3)
gridExtra::grid.arrange(gn7,gn8,gn9,ncol = 3)
gridExtra::grid.arrange(gn10,gn11,gn12,ncol = 3)
gridExtra::grid.arrange(gn13,gn14,gn15,ncol = 3)
#Imputing the outlier values with knnImputation method:
# first mark boxplot outliers as NA, then impute with kNN (k = 80)
for( i in cnames){
  print(i)
  val = df[,i][df[,i] %in% boxplot.stats(df[,i])$out]
  df[,i][df[,i] %in% val] = NA
}
df = knnImputation(df, k=80)
#Feature Selection | Correlation plot for numeric variables
library(corrplot)
m = cor(df[,numeric_index])
corrplot(m, method="square")
#Feature Selection: chi-square tests of each factor against Churn
factor_index = sapply(df,is.factor)
factor_data = df[,factor_index]
for(i in 1:4)
{
  print(names(factor_data[i]))
  print(chisq.test(table(factor_data$Churn,factor_data[,i])))
}
#Feature Selection or Dimensionality Reduction:
# drop charge columns (perfectly correlated with minutes) and weak factors
df1 = subset(df, select = -c(total.day.charge,total.eve.charge,total.night.charge,total.intl.charge,area.code,voice.mail.plan))
numeric_index = sapply(df1,is.numeric)
numeric_data = df1[,numeric_index]
df_train=df1
#Feature Scaling | Normalizing the data to [0, 1] per column
for( i in colnames(numeric_data))
{
  print(i)
  df_train[,i] = (df_train[,i]- min(df_train[,i]))/
    (max(df_train[,i] - min(df_train[,i])))
}
############ Test Data processing############
#Importing the test data
df = read.csv('Test_data.csv')
sum(is.na(df))
head(df)
str(df)
#Basic pre-processing (mirrors the training pipeline)
df$state = as.factor(as.character(df$state))
df$area.code = as.factor(as.character(df$area.code))
str(df$area.code)
datatypes = sapply(df,class)
for(i in 1:ncol(df)){
  if(class(df[,i]) == 'factor'){
    df[,i] = factor(df[,i], labels =(1:length(levels(factor(df[,i])))))
  }
}
#Feature Selection or Dimensionality Reduction in Test Data
df1 = subset(df, select = -c(total.day.charge,total.eve.charge,total.night.charge,total.intl.charge,phone.number,area.code,voice.mail.plan))
df_test=df1
#Feature scaling | Normalization
# NOTE(review): this rescales with the TEST set's own min/max; standard
# practice is to reuse the training min/max — verify before relying on
# the reported test metrics.
for( i in colnames(numeric_data))
{
  print(i)
  df_test[,i] = (df_test[,i]- min(df_test[,i]))/
    (max(df_test[,i] - min(df_test[,i])))
}
#Building and training a decision tree model
#install.packages('C50')
library(C50)
# FIX: the boosting argument was misspelled "trails"; C5.0() silently
# swallowed it via `...` and fit a single tree (trials = 1). Spelling it
# "trials" enables the intended 100 boosting iterations.
C50_model = C5.0(Churn ~.,df_train, trials = 100, rules = TRUE)
#summary(C50_model)
#Predicting the output
# NOTE(review): df_test has only 14 columns, so -15 is an out-of-range
# negative index that drops nothing; predict() ignores Churn anyway.
C50_predictions = predict(C50_model,df_test[,-15],type = 'class')
#Evaluate the performance of classification model
Confmatrix_C50 = table(df_test$Churn,C50_predictions)
confusionMatrix(Confmatrix_C50)
#Building and training a Random Forest Model
#RF
RF_model = randomForest(Churn ~.,df_train, importance = TRUE, ntree=100)
RF_Predictions = predict(RF_model, df_test[,-15])
Confmatrix_RF = table(df_test$Churn,RF_Predictions)
confusionMatrix(Confmatrix_RF)
#############################################END########################
|
# --- knitr setup for the lecture slides: chunks are echoed but NOT run
# (eval=FALSE), so the deliberate errors below never execute at knit time.
library(knitr)
opts_chunk$set(prompt=TRUE, eval=FALSE, tidy=FALSE, strip.white=FALSE, comment=NA, highlight=FALSE, message=FALSE, warning=FALSE, size='scriptsize', fig.width=6, fig.height=5)
options(width=60, dev='pdf')
options(digits=3)
# Use the "acid" syntax-highlighting theme for code listings
thm <- knit_theme$get("acid")
knit_theme$set(thm)
# --- Demo: function definitions, lexical lookup, NULL returns, invisible() ---
# Define a function with two arguments
test_func <- function(first_arg, second_arg) { # Body
  first_arg + second_arg # Returns last evaluated statement
} # end test_func
test_func(1, 2) # Apply the function
args(test_func) # Display argument
# Define function that uses variable from enclosure environment
# (free variables are resolved lexically, at CALL time, not definition time)
test_func <- function(first_arg, second_arg) {
  first_arg + second_arg + glob_var
} # end test_func
test_func(3, 2) # error - glob_var doesn't exist yet!
glob_var <- 10 # Create glob_var
test_func(3, 2) # Now works
# Define function that returns NULL for non-numeric argument
test_func <- function(in_put) {
  if (!is.numeric(in_put)) {
    warning(paste("argument", in_put, "isn't numeric"))
    return(NULL)
  }
  2*in_put
} # end test_func
test_func(2)
test_func("hello")
# Define a function that returns invisibly
# (value is usable but not auto-printed at the prompt)
return_invisible <- function(in_put) {
  invisible(in_put)
} # end return_invisible
return_invisible(2)
glob_var <- return_invisible(2)
glob_var
rm(list=ls()) # Remove all objects
# Load objects from file
loaded <- load(file="C:/Develop/data/my_data.RData")
loaded # Vector of loaded objects
ls() # List objects
# --- Demo: argument binding by name, partial name, and position ---
test_func <- function(first_arg, second_arg) {
  # Last statement of function is return value
  first_arg + 2*second_arg
} # end test_func
test_func(first_arg=3, second_arg=2) # Bind by name
test_func(first=3, second=2) # Partial name binding
test_func(3, 2) # Bind by position
test_func(second_arg=2, 3) # mixed binding
test_func(3, 2, 1) # Too many arguments
test_func(2) # Not enough arguments
# Function "paste" has two arguments with default values
str(paste)
# Default values of arguments can be specified in argument list
test_func <- function(first_arg, fac_tor=1) {
  fac_tor*first_arg
} # end test_func
test_func(3) # Default value used for second argument
test_func(3, 2) # Default value over-ridden
# Default values can be a vector of strings
# (match.arg() picks the first element when the argument is missing)
test_func <- function(in_put=c("first_val", "second_val")) {
  in_put <- match.arg(in_put) # Match to arg list
  in_put
} # end test_func
test_func("second_val")
test_func("se") # Partial name binding
test_func("some_val") # Invalid string
# DAX percentage returns
re_turns <- rutils::diff_it(log(EuStockMarkets[, 1]))
# calc_skew() calculates skew of time series of returns
# Default is normal time series
# Sample skewness of a vector of returns (bias-corrected third moment).
# The default argument draws a standard normal sample, whose skewness
# is close to zero. Returns a single numeric value.
calc_skew <- function(se_ries=rnorm(1000)) {
  n_obs <- NROW(se_ries)
  # Center and scale to zero mean and unit standard deviation
  z_scores <- (se_ries - mean(se_ries))/sd(se_ries)
  # Bias-corrected average cubed z-score (returned implicitly)
  n_obs/((n_obs - 1)*(n_obs - 2))*sum(z_scores^3)
} # end calc_skew
# Calculate skew of DAX returns
# Bind arguments by name
calc_skew(se_ries=re_turns)
# Bind arguments by position
calc_skew(re_turns)
# Use default value of arguments
calc_skew()
# --- Demo: the '...' (dots) argument ---
str(plot) # Dots for additional plot parameters
bind_dots <- function(in_put, ...) {
  paste0("in_put=", in_put,
         ", dots=", paste(..., sep=", "))
} # end bind_dots
bind_dots(1, 2, 3) # "in_put" bound by position
bind_dots(2, in_put=1, 3) # "in_put" bound by name
bind_dots(1, 2, 3, foo=10) # Named argument bound to dots
bind_dots <- function(arg1, arg2, ...) {
  arg1 + 2*arg2 + sum(...)
} # end bind_dots
bind_dots(3, 2) # Bind arguments by position
bind_dots(3, 2, 5, 8) # Extra arguments bound to dots
str(sum) # Dots before other arguments
sum(1, 2, 3) # Dots bind before other arguments
sum(1, 2, NA, 3, na.rm=TRUE)
# When dots come first, later arguments can't be bound by position
bind_dots <- function(..., in_put) {
  paste0("in_put=", in_put,
         ", dots=", paste(..., sep=", "))
} # end bind_dots
# Arguments after dots must be bound by full name
bind_dots(1, 2, 3, in_put=10)
bind_dots(1, 2, 3, in_put=10, foo=4) # Dots bound
bind_dots(1, 2, 3) # "in_put" not bound
bind_dots <- function(..., in_put=10) {
  paste0("in_put=", in_put,
         ", dots=", paste(..., sep=", "))
} # end bind_dots
bind_dots(1, 2, 3) # "in_put" not bound, but has default
# Wrapper for mean() with default na.rm=TRUE
my_mean <- function(x, na.rm=TRUE, ...) {
  mean(x=x, na.rm=na.rm, ...)
} # end my_mean
# --- Demo: wrapper functions, stop() defaults, input validation ---
foo <- sample(c(1:10, NA, rep(0.1, t=5)))
mean(c(foo, NA))
mean(c(foo, NA), na.rm=TRUE)
my_mean(c(foo, NA))
my_mean(c(foo, NA), trim=0.4) # Pass extra argument
# Wrapper for saving data into default directory
# (stop() as a default makes "file" effectively mandatory: the default is
# only evaluated — and the error raised — if the caller omits "file")
save_data <- function(...,
                file=stop("error: no file name"),
                my_dir="C:/Develop/data") {
  # Create file path
  file <- file.path(my_dir, file)
  save(..., file=file)
} # end save_data
foo <- 1:10
save_data(foo, file="scratch.RData")
save_data(foo, file="scratch.RData", my_dir="C:/Develop")
# Wrapper for testing negative arguments
stop_if_neg <- function(in_put) {
  if (!is.numeric(in_put) || in_put<0)
    stop("argument not numeric or negative")
} # end stop_if_neg
# Wrapper for sqrt()
my_sqrt <- function(in_put) {
  stop_if_neg(in_put)
  sqrt(in_put)
} # end my_sqrt
my_sqrt(2)
my_sqrt(-2)
my_sqrt(NA) # NOTE: NA<0 is NA, so the guard errors on "missing value" here
# Recursive function sums its argument list
# (missing(...) is TRUE when no arguments remain in the dots)
sum_dots <- function(in_put, ...) {
  if (missing(...)) { # Check if dots are empty
    return(in_put) # just one argument left
  } else {
    in_put + sum_dots(...) # Sum remaining arguments
  } # end if
} # end sum_dots
sum_dots(1, 2, 3, 4)
# Recursive sum of the argument list; this variant tests emptiness of
# the dots by counting the elements of list(...).
sum_dots <- function(in_put, ...) {
  # Base case: no arguments remain in the dots
  if (NROW(list(...)) == 0)
    return(in_put)
  # Recursive case: peel off one argument and recurse on the rest
  in_put + sum_dots(...)
} # end sum_dots
sum_dots(1, 2, 3, 4)
# Return the first len_gth Fibonacci numbers, seeded with c(0, 1).
# Iterative implementation; like the original recursive version it
# returns c(0, 1) for any len_gth <= 2.
fibo_nacci <- function(len_gth) {
  fib_seq <- c(0, 1) # Seed values
  while (NROW(fib_seq) < len_gth) {
    # Append the sum of the last two elements
    fib_seq <- c(fib_seq, sum(tail(fib_seq, 2)))
  } # end while
  fib_seq
} # end fibo_nacci
fibo_nacci(10)
tail(fibo_nacci(9), 2)
# --- Demo: viewing function code, generics/methods, lazy evaluation,
# and function environments ---
# Show the function code
plot.default
# Display function
getAnywhere(plot.default)
# Sum() is a compiled primitive function
sum
# mean() is a generic function
mean
# Show all methods of mean()
methods(generic.function=mean)
# Show code for mean.default()
mean.default
# Get all methods for generic function "plot"
methods("plot")
getAnywhere(plot) # Display function
rm(list=ls())
# Arguments are promises, evaluated only when used (lazy evaluation)
lazy_func <- function(arg1, arg2) { # Define function lazy_func
  2*arg1 # just multiply first argument
} # end lazy_func
lazy_func(3, 2) # Bind arguments by position
lazy_func(3) # Second argument was never evaluated!
lazy_func <- function(arg1, arg2) { # Define function lazy_func
  cat(arg1, '\n') # Write to output
  cat(arg2) # Write to output
} # end lazy_func
lazy_func(3, 2) # Bind arguments by position
lazy_func(3) # First argument written to output
rm(list=ls())
glob_var <- 1 # Define a global variable
ls(environment()) # Get all variables in environment
func_env <- function() { # Explore function environments
  loc_var <- 1 # Define a local variable
  cat('objects in evaluation environment:\t',
      ls(environment()), '\n')
  cat('objects in enclosing environment:\t',
      ls(parent.env(environment())), '\n')
  cat('this is the enclosing environment:')
  parent.env(environment()) # Return enclosing environment
} # end func_env
func_env()
environment(func_env)
environment(print) # Package namespace is the enclosure
rm(list=ls())
glob_var <- 1 # Define a global variable
probe_scope <- function() { # Explore function scope
  loc_var <- 2*glob_var # Define a local variable
  new_globvar <<- 11 # Define a global variable
  cat('objects in evaluation environment:\t',
      ls(environment()), '\n')
  cat('this is a local loc_var:\t', loc_var, '\n')
  cat('objects in enclosing environment:\n',
      ls(parent.env(environment())), '\n')
  cat('this is glob_var:\t', glob_var, '\n')
  glob_var <- 10 # Define local glob_var
  cat('this is the local glob_var:\t', glob_var, '\n')
} # end probe_scope
probe_scope()
glob_var # Global variable is unaffected
new_globvar # new_globvar is preserved
loc_var # Local variable is gone!
# --- Demo: copy-on-modify, side effects, infix and replacement functions ---
a <- 1 # Define a variable
# New variable "b" points to value of "a"
b <- a # Define a new variable
# When "b" is modified, R makes a copy of it
b <- b+1
# Function doubles its argument and returns it
# (arguments are passed by value: the caller's object is never mutated)
double_it <- function(in_put) {
  in_put <- 2*in_put
  cat("input argument was doubled to:", in_put, "\n")
  in_put
}
double_it(a)
a # variable "a" is unchanged
setwd("C:/Develop/lecture_slides/data")
rm(list=ls()) # Remove all objects
ls() # List objects
# Load objects from file (side effect)
load(file="my_data.RData")
ls() # List objects
glob_var <- 1 # Define a global variable
# Explore function scope and side effects
side_effect <- function() {
  cat("global glob_var:\t", glob_var, "\n")
  # Define local "glob_var" variable
  glob_var <- 10
  # Re-define the global "glob_var"
  glob_var <<- 2
  cat("local glob_var:\t", glob_var, "\n")
} # end side_effect
side_effect()
# Global variable was modified as side effect
glob_var
# Standard infix operator call syntax
2 + 3
# Infix operator applied using prefix syntax
"+"(2, 3)
# Standard bracket operator
vec_tor <- c(4, 3, 5, 6)
vec_tor[2]
# Bracket operator applied using prefix syntax
"["(vec_tor, 2)
# Define infix operator that returns string
'%+%' <- function(a, b) paste(a, b, sep=" + ")
2 %+% 3
2 %+% 3 %+% 4
"hello" %+% 2 %+% 3 %+% "bye"
obj_string <- "hello"
class(obj_string)
# Assign to value returned by "class" function
class(obj_string) <- "string"
class(obj_string)
# Define function last()
last <- function(vec_tor) {
  vec_tor[NROW(vec_tor)]
} # end last
last(1:10)
# Define replacement function last()
# (a "fun<-" function makes "last(x) <- value" syntax work)
'last<-' <- function(vec_tor, value) {
  vec_tor[NROW(vec_tor)] <- value
  vec_tor
} # end last
x <- 1:5
last(x) <- 11
x
# Create functional that accepts a function as input argument
func_tional <- function(func_name) {
  # Calculates statistic on random numbers
  set.seed(1)
  func_name(runif(1e4)) # Apply the function name
} # end func_tional
func_tional(mean)
func_tional(sd)
# Define a power function factory
# Function factory: returns a closure that raises its argument to the
# power arg_param, which is captured in the factory's environment.
make_func <- function(arg_param) {
  power_func <- function(in_put) in_put^arg_param
  power_func # Return the closure
} # end make_func
square_func <- make_func(2) # Define square function
square_func(4)
cube_func <- make_func(3) # Define cube function
cube_func(2)
cube_root_func <- make_func(1/3) # Define cube root function
cube_root_func(8)
# --- Demo: closures with mutable state via '<<-' ---
make_counter <- function() {
  # Counter function with mutable state
  counter <- 0 # Initialize counter
  cat('counter = ', counter)
  function() { # Return anonymous advance function
    counter <<- counter + 1 # Advance counter
    cat('counter = ', counter)
  } # end advance function
} # end make_counter
# Each factory call creates an independent counter environment
advance_counter <- make_counter() # Create new counter
advance_counter() # Advance counter
advance_counter() # Advance counter
advance_counter_two <- make_counter() # Create another counter
advance_counter_two() # Advance counter two
advance_counter() # Advance counter one
advance_counter_two() # Advance counter two
advance_counter() # Advance counter one
# Returns the pseudo-random generating function random_generator
# the formal argument 'seed' persists in the evaluation environment of seed_random
# Factory for a logistic-map pseudo-random generator. The state
# variable persists in the factory's environment, so successive calls
# to the returned closure continue the same sequence.
seed_random <- function(seed) { # Seed must be an integer
  # Map the integer seed into the interval (0, 1)
  state <- as.numeric(paste0('0.', seed))
  # Closure: return a vector of length_rand pseudo-random numbers
  random_generator <- function(length_rand=1) {
    state <<- 4*state*(1 - state) # Logistic map update
    if (length_rand == 1)
      state
    else
      c(state, random_generator(length_rand - 1)) # Recurse for the rest
  } # end random_generator
} # end seed_random
# Create a random number generating function and set seed
make_random <- seed_random(88)
make_random(10) # calculate vector of 10 pseudo-random numbers
ls(environment(make_random)) # List objects in scope of make_random
rm(list=ls())
# The super-assignment operator '<<-' adjusts the balance
# 'balance' exists in open_account evaluation environment
# Bank account example (from Venables) demonstrates mutable states
# 'balance' is persistent between function calls
# Returns a list of three closures sharing one 'balance' environment.
open_account <- function(balance) {
  # Returns function list for account operations
  list(
    deposit = function(amount) { # Make deposit
      if (amount > 0) {
        balance <<- balance + amount # '<<-' super-assignment operator
        cat(amount, "deposited. Your balance is now:",
            balance, "\n")
      } else {
        cat("Deposits must be positive!\n")
      }
    }, # end deposit
    withdraw = function(amount) { # Make withdrawal
      if (amount <= balance) {
        balance <<- balance - amount # '<<-' super-assignment operator
        cat(amount, "withdrawn. Your balance is now:",
            balance, "\n")
      } else {
        cat("You don't have that much money!\n")
      }
    }, # end withdraw
    get_balance = function() { # Get balance
      cat("Your current balance is:", balance, "\n")
    } # end get_balance
  ) # end list
} # end open_account
# Perform account operations
# open an account with 100 deposit
my_account <- open_account(100)
ls(my_account) # my_account is a list
# Add my_account to search path
attach(my_account)
withdraw(30) # Withdrawal to buy groceries
deposit(100) # Deposit paycheck to account
withdraw(200) # Withdrawal to buy Gucci bag
get_balance() # Get account balance
# List objects in scope of get_balance
ls(environment(get_balance))
detach(my_account) # Remove my_account from search path
# --- Demo: functionals (functions that accept functions), match.fun, do.call ---
# Func_tional accepts function name and additional argument
func_tional <- function(func_name, in_put) {
  # Produce function name from argument
  func_name <- match.fun(func_name)
  # Execute function call
  func_name(in_put)
} # end func_tional
func_tional(sqrt, 4)
# String also works because match.fun() converts it to a function
func_tional("sqrt", 4)
str(sum) # Sum() accepts multiple arguments
# Func_tional can't accept indefinite number of arguments
func_tional(sum, 1, 2, 3)
# Func_tional accepts function name and dots '...' argument
func_tional <- function(func_name, ...) {
  func_name <- match.fun(func_name)
  func_name(...) # Execute function call
} # end func_tional
func_tional(sum, 1, 2, 3)
func_tional(sum, 1, 2, NA, 4, 5)
func_tional(sum, 1, 2, NA, 4, 5, na.rm=TRUE)
# Function with three arguments and dots '...' arguments
my_func <- function(in_put, param1, param2, ...) {
  c(input=in_put, param1=param1, param2=param2,
    dots=c(...))
} # end my_func
my_func(1, 2, 3, param2=4, param1=5)
func_tional(my_func, 1, 2, 3, param2=4, param1=5)
func_tional(my_func, 1, 2, 3, 4, 5)
# Simple anonymous function
(function(x) (x + 3)) (10)
# Anonymous function passed to func_tional
func_tional(func_name=(function(x) (x + 3)), 5)
# Anonymous function is default value
func_tional <-
  function(..., func_name=function(x, y, z) {x+y+z}) {
    func_name <- match.fun(func_name)
    func_name(...) # Execute function call
} # end func_tional
func_tional(2, 3, 4) # Use default func_name
func_tional(2, 3, 4, 5)
# Func_name bound by name
func_tional(func_name=sum, 2, 3, 4, 5)
# Pass anonymous function to func_name
func_tional(func_name=function(x, y, z) {x*y*z},
      2, 3, 4)
str(sum) # Sum() accepts multiple arguments
# Sum() can't accept list of arguments
sum(list(1, 2, 3))
str(do.call) # "what" argument is a function
# Do.call passes list elements into "sum" individually
do.call(sum, list(1, 2, 3))
do.call(sum, list(1, 2, NA, 3))
do.call(sum, list(1, 2, NA, 3, na.rm=TRUE))
# Func_tional() accepts list with function name and arguments
func_tional <- function(list_arg) {
  # Produce function name from argument
  func_name <- match.fun(list_arg[[1]])
  # Execute function call using do.call()
  do.call(func_name, list_arg[-1])
} # end func_tional
arg_list <- list("sum", 1, 2, 3)
func_tional(arg_list)
# Do_call() performs same operation as do.call()
all.equal(
  do.call(sum, list(1, 2, NA, 3, na.rm=TRUE)),
  rutils::do_call(sum, list(1, 2, NA, 3), na.rm=TRUE))
# --- Demo: apply() over matrix rows/columns, passing extra arguments ---
rm(list=ls())
str(apply) # Get list of arguments
# Create a matrix
mat_rix <- matrix(6:1, nrow=2, ncol=3)
mat_rix
# Sum the rows and columns
row_sums <- apply(mat_rix, 1, sum)
col_sums <- apply(mat_rix, 2, sum)
# Border the matrix with its row and column sums
mat_rix <- cbind(c(sum(row_sums), row_sums),
           rbind(col_sums, mat_rix))
dimnames(mat_rix) <- list(c("col_sums", "row1", "row2"),
                   c("row_sums", "col1", "col2", "col3"))
mat_rix
str(apply) # Get list of arguments
mat_rix <- matrix(sample(12), nrow=3, ncol=4) # Create a matrix
mat_rix
apply(mat_rix, 2, sort) # Sort matrix columns
apply(mat_rix, 2, sort, decreasing=TRUE) # Sort decreasing order
mat_rix[2, 2] <- NA # Introduce NA value
mat_rix
# Calculate median of columns
apply(mat_rix, 2, median)
# Calculate median of columns with na.rm=TRUE
apply(mat_rix, 2, median, na.rm=TRUE)
rm(list=ls())
# DAX percentage returns
re_turns <- rutils::diff_it(log(EuStockMarkets[, 1]))
library(moments) # Load package moments
str(moment) # Get list of arguments
# Apply moment function
moment(x=re_turns, order=3)
# 4x1 matrix of moment orders
moment_orders <- as.matrix(1:4)
# Anonymous function allows looping over function parameters
apply(X=moment_orders, MARGIN=1,
FUN=function(moment_order) {
  moment(x=re_turns, order=moment_order)
} # end anonymous function
) # end apply
# Another way of passing parameters into moment() function
apply(X=moment_orders, MARGIN=1, FUN=moment,
x=re_turns)
# Function with three arguments
my_func <- function(arg1, arg2, arg3) {
  c(arg1=arg1, arg2=arg2, arg3=arg3)
} # end my_func
my_func(1, 2, 3)
da_ta <- as.matrix(1:4)
# The matrix rows fill whichever argument is NOT bound by name below
# Pass da_ta to arg1
apply(X=da_ta, MAR=1, FUN=my_func, arg2=2, arg3=3)
# Pass da_ta to arg2
apply(X=da_ta, MAR=1, FUN=my_func, arg1=1, arg3=3)
# Pass da_ta to arg3
apply(X=da_ta, MAR=1, FUN=my_func, arg1=1, arg2=2)
# --- Demo: lapply/sapply/vapply and their simplification rules ---
# Vector of means of numeric columns
sapply(iris[, -5], mean)
# List of means of numeric columns
lapply(iris[, -5], mean)
# Lapply using anonymous function
unlist(lapply(iris,
        function(col_umn) {
          if (is.numeric(col_umn)) mean(col_umn)
        } # end anonymous function
) # end lapply
) # end unlist
unlist(sapply(iris, function(col_umn) {
  if (is.numeric(col_umn)) mean(col_umn)}))
sapply(6:10, sqrt) # Sapply on vector
sapply(list(6, 7, 8, 9, 10), sqrt) # Sapply on list
# Calculate means of iris data frame columns
sapply(iris, mean) # Returns NA for Species
# Create a matrix
mat_rix <- matrix(sample(100), ncol=4)
# Calculate column means using apply
apply(mat_rix, 2, mean)
# Calculate column means using sapply, with anonymous function
sapply(1:NCOL(mat_rix),
function(col_index) { # Anonymous function
  mean(mat_rix[, col_index])
} # end anonymous function
) # end sapply
# Vectors form columns of matrix returned by sapply
sapply(2:4, function(num) c(el1=num, el2=2*num))
# Vectors of different lengths returned as list
sapply(2:4, function(num) 1:num)
# vapply is similar to sapply, but the output type is declared up front
vapply(2:4, function(num) c(el1=num, el2=2*num),
 FUN.VALUE=c(row1=0, row2=0))
# vapply produces an error if it can't simplify
vapply(2:4, function(num) 1:num,
 FUN.VALUE=c(row1=0, row2=0))
# --- Demo: S3 generic functions and method dispatch ---
library(zoo) # Load package zoo
# Show the generic function "merge"
merge
# Show the "merge" method dispatched to "zoo" objects
merge.zoo
library(zoo) # Load package zoo
# Get all methods for generic function merge()
methods(generic.function="merge")
# Get generic function methods applied to "zoo" objects
methods(class="zoo")
# Define a generic function
gen_sum <- function(a, b, ...) {
  UseMethod("gen_sum")
} # end gen_sum
# Define method for "numeric" class
gen_sum.numeric <- function(a, b, ...) {
  sum(a, b)
} # end gen_sum.numeric
# Define method for "character" class
gen_sum.character <- function(a, b, ...) {
  paste(a, "plus", b)
} # end gen_sum.character
# Apply gen_sum to "numeric" objects
gen_sum(1, 2)
# Apply gen_sum to "character" objects
gen_sum("a", "b")
# 'cbind' is an internal generic function
cbind
# Define "+" method for "character" class
"+.character" <- function(a, b, ...) {
  paste(a, "plus", b)
} # end +.character
methods("+") # view methods for "+" operator
# Define variables with "character" class
char1 <- "a"
char2 <- "b"
class(char1)
# Dispatch requires an explicit class ATTRIBUTE, not just implicit type
char1 + char2 # Add two "character" objects - doesn't work
attributes(char1) # Doesn't have explicit "character" class - only implicit
char1 <- structure("a", class="character")
char2 <- structure("b", class="character")
attributes(char1) # Now has explicit "character" class
# Add two "character" objects
char1 + char2
# --- Demo: overloading print() and the "+" operator ---
# Define object of class "string"
obj_string <- "how are you today?"
class(obj_string) <- "string"
obj_string
# overload "print" method for string objects
print.string <- function(str_ing) {
  print(
    paste(strsplit(str_ing, split=" ")[[1]],
  collapse=" + "))
} # end print.string
# methods("print") # view new methods for "print" function
print(obj_string)
obj_string
# overwrite "+" operator with a plain function that special-cases
# character inputs, delegating everything else to the primitive
"+" = function(a, b) {
  if (is.character(a) && is.character(b)) {
    paste(a, "plus", b)
  } else {
    .Primitive("+") (a, b)
  }
}
methods("+") # view methods for "+" operator
# Add two "numeric" objects
1 + 2
# Add two "character" objects
"a" + "b"
# overwrite "+" operator with a generic function
"+" <- function(a, b, ...) {
  UseMethod("+")
} # end gen_sum
# Define method for "numeric" class
"+.numeric" <- function(a, b, ...) {
  sum(a, b)
} # end +.numeric
# Define method for "character" class
"+.character" <- function(a, b, ...) {
  paste(a, "plus", b)
} # end +.character
methods("+") # view methods for "+" operator
# Add two "numeric" objects
1 + 2
# Add two "character" objects
"a" + "b"
# ':::' (or getAnywhere) is needed for unexported registered methods
cbind.ts # Can't view non-visible method
stats::cbind.ts # Can't view non-visible method
stats:::cbind.ts # Display non-visible method
getAnywhere(cbind.ts) # Display non-visible method
# --- Demo: defining methods for a custom class, and writing a generic ---
rm(list=ls())
new_zoo <- zoo(rnorm(10), order.by=(Sys.Date() + 0:9))
# Coerce "zoo" object to new class "zoo_xtra"
class(new_zoo) <- "zoo_xtra"
class(new_zoo)
methods(generic.function="length")
length # Primitive function
# Define "length" method for class "zoo_xtra"
length.zoo_xtra <- function(in_ts) {
  cat("length of zoo_xtra object:\n")
  # Unclass object, then calculate length
  NROW(unclass(in_ts))
} # end length.zoo_xtra
NROW(new_zoo) # Apply "length" method to "zoo_xtra" object
methods(generic.function="length")
# Define "last" method for class "zoo_xtra"
# (a method is useless until a generic named "last" exists to dispatch it)
last.zoo_xtra <- function(in_ts) {
  in_ts[NROW(in_ts)]
} # end last.zoo_xtra
last(new_zoo) # Doesn't work
last.zoo_xtra(new_zoo) # Works
# Define a generic function
last <- function(a, b, ...) {
  UseMethod("last")
} # end last
last(new_zoo) # Now works
# Define generic "string" class converter
as.string <- function(str_ing, ...)
  UseMethod("as.string")
# Default "string" class converter
as.string.default <- function(str_ing, ...)
  structure(str_ing, class="string", ...)
# Numeric "string" class converter
as.string.numeric <- function(str_ing, ...)
  structure(as.character(str_ing), class="string", ...)
# "string" class checker
is.string <- function(str_ing)
  inherits(x=str_ing, what="string")
# Define "string" object
obj_string <- as.string("how are you today?")
obj_string
is.string(obj_string)
is.string("hello")
as.string(123)
is.string(as.string(123))
# --- Demo: S3 inheritance via the class vector, and NextMethod() ---
rm(list=ls())
library(xts)
new_xts <- xts(rnorm(10), order.by=(Sys.Date() + 0:9))
class(new_xts) # Class attribute is a vector
# "last" is a generic function from package "xts"
last
methods(generic.function="last")
last(new_xts) # Apply "last" method from "xts" class
# Derive object "xts_xtra" from "xts" object
# (prepending to the class vector makes xts_xtra dispatch first)
class(new_xts) <- c("xts_xtra", class(new_xts))
class(new_xts) # Class attribute is a vector
# "xts_xtra" object inherits "last" method from "xts" class
last(new_xts)
# Define new "last" method for class "xts_xtra"
last.xts_xtra <- function(in_ts) {
  cat("last element of xts_xtra object:\n")
  drop(in_ts[NROW(in_ts), ])
} # end last.xts_xtra
last(new_xts) # Apply "last" from "xts_xtra" class
# Define "last" method for class "xts_xtra"
# (NextMethod() delegates to the next method in the class vector: last.xts)
last.xts_xtra <- function(in_ts) {
  cat("last element of xts_xtra object:\n")
  drop(NextMethod())
} # end last.xts_xtra
last(new_xts) # Apply "last" from "xts_xtra" class
|
/functions.R
|
no_license
|
Williamqn/lecture_slides
|
R
| false
| false
| 24,645
|
r
|
library(knitr)
opts_chunk$set(prompt=TRUE, eval=FALSE, tidy=FALSE, strip.white=FALSE, comment=NA, highlight=FALSE, message=FALSE, warning=FALSE, size='scriptsize', fig.width=6, fig.height=5)
options(width=60, dev='pdf')
options(digits=3)
thm <- knit_theme$get("acid")
knit_theme$set(thm)
# Define a function with two arguments
test_func <- function(first_arg, second_arg) { # Body
first_arg + second_arg # Returns last evaluated statement
} # end test_func
test_func(1, 2) # Apply the function
args(test_func) # Display argument
# Define function that uses variable from enclosure environment
test_func <- function(first_arg, second_arg) {
first_arg + second_arg + glob_var
} # end test_func
test_func(3, 2) # error - glob_var doesn't exist yet!
glob_var <- 10 # Create glob_var
test_func(3, 2) # Now works
# Define function that returns NULL for non-numeric argument
test_func <- function(in_put) {
if (!is.numeric(in_put)) {
warning(paste("argument", in_put, "isn't numeric"))
return(NULL)
}
2*in_put
} # end test_func
test_func(2)
test_func("hello")
# Define a function that returns invisibly
return_invisible <- function(in_put) {
invisible(in_put)
} # end return_invisible
return_invisible(2)
glob_var <- return_invisible(2)
glob_var
rm(list=ls()) # Remove all objects
# Load objects from file
loaded <- load(file="C:/Develop/data/my_data.RData")
loaded # Vector of loaded objects
ls() # List objects
test_func <- function(first_arg, second_arg) {
# Last statement of function is return value
first_arg + 2*second_arg
} # end test_func
test_func(first_arg=3, second_arg=2) # Bind by name
test_func(first=3, second=2) # Partial name binding
test_func(3, 2) # Bind by position
test_func(second_arg=2, 3) # mixed binding
test_func(3, 2, 1) # Too many arguments
test_func(2) # Not enough arguments
# Function "paste" has two arguments with default values
str(paste)
# Default values of arguments can be specified in argument list
test_func <- function(first_arg, fac_tor=1) {
fac_tor*first_arg
} # end test_func
test_func(3) # Default value used for second argument
test_func(3, 2) # Default value over-ridden
# Default values can be a vector of strings
test_func <- function(in_put=c("first_val", "second_val")) {
in_put <- match.arg(in_put) # Match to arg list
in_put
} # end test_func
test_func("second_val")
test_func("se") # Partial name binding
test_func("some_val") # Invalid string
# DAX percentage returns
re_turns <- rutils::diff_it(log(EuStockMarkets[, 1]))
# calc_skew() calculates the sample skewness of a time series of returns.
# The default argument is a simulated normal series of 1000 draws.
calc_skew <- function(se_ries=rnorm(1000)) {
  # Count the observations
  n_obs <- NROW(se_ries)
  # Center and scale the series to zero mean and unit variance
  z_scores <- (se_ries - mean(se_ries))/sd(se_ries)
  # Bias-adjusted third moment - value of last expression is returned
  n_obs*sum(z_scores^3)/((n_obs - 1)*(n_obs - 2))
} # end calc_skew
# Calculate skew of DAX returns
# Bind arguments by name
calc_skew(se_ries=re_turns)
# Bind arguments by position
calc_skew(re_turns)
# Use default value of arguments
calc_skew()
str(plot) # Dots for additional plot parameters
bind_dots <- function(in_put, ...) {
paste0("in_put=", in_put,
", dots=", paste(..., sep=", "))
} # end bind_dots
bind_dots(1, 2, 3) # "in_put" bound by position
bind_dots(2, in_put=1, 3) # "in_put" bound by name
bind_dots(1, 2, 3, foo=10) # Named argument bound to dots
bind_dots <- function(arg1, arg2, ...) {
arg1 + 2*arg2 + sum(...)
} # end bind_dots
bind_dots(3, 2) # Bind arguments by position
bind_dots(3, 2, 5, 8) # Extra arguments bound to dots
str(sum) # Dots before other arguments
sum(1, 2, 3) # Dots bind before other arguments
sum(1, 2, NA, 3, na.rm=TRUE)
bind_dots <- function(..., in_put) {
paste0("in_put=", in_put,
", dots=", paste(..., sep=", "))
} # end bind_dots
# Arguments after dots must be bound by full name
bind_dots(1, 2, 3, in_put=10)
bind_dots(1, 2, 3, in_put=10, foo=4) # Dots bound
bind_dots(1, 2, 3) # "in_put" not bound
bind_dots <- function(..., in_put=10) {
paste0("in_put=", in_put,
", dots=", paste(..., sep=", "))
} # end bind_dots
bind_dots(1, 2, 3) # "in_put" not bound, but has default
# Wrapper for mean() with default na.rm=TRUE
my_mean <- function(x, na.rm=TRUE, ...) {
mean(x=x, na.rm=na.rm, ...)
} # end my_mean
foo <- sample(c(1:10, NA, rep(0.1, t=5)))
mean(c(foo, NA))
mean(c(foo, NA), na.rm=TRUE)
my_mean(c(foo, NA))
my_mean(c(foo, NA), trim=0.4) # Pass extra argument
# Wrapper for saving data into default directory
save_data <- function(...,
file=stop("error: no file name"),
my_dir="C:/Develop/data") {
# Create file path
file <- file.path(my_dir, file)
save(..., file=file)
} # end save_data
foo <- 1:10
save_data(foo, file="scratch.RData")
save_data(foo, file="scratch.RData", my_dir="C:/Develop")
# Validator that throws an error for non-numeric, missing (NA), or
# negative arguments; returns NULL invisibly when the input is valid.
stop_if_neg <- function(in_put) {
  # anyNA() guards against NA values, which would otherwise make the
  # comparison yield NA and crash the if() with a cryptic
  # "missing value where TRUE/FALSE needed" error;
  # any() generalizes the check to vectors of length > 1
  if (!is.numeric(in_put) || anyNA(in_put) || any(in_put < 0))
    stop("argument not numeric or negative")
} # end stop_if_neg
# Wrapper for sqrt()
my_sqrt <- function(in_put) {
stop_if_neg(in_put)
sqrt(in_put)
} # end my_sqrt
my_sqrt(2)
my_sqrt(-2)
my_sqrt(NA)
# Recursive function sums its argument list
sum_dots <- function(in_put, ...) {
if (missing(...)) { # Check if dots are empty
return(in_put) # just one argument left
} else {
in_put + sum_dots(...) # Sum remaining arguments
} # end if
} # end sum_dots
sum_dots(1, 2, 3, 4)
# Recursive function that sums all of its arguments
sum_dots <- function(in_put, ...) {
  rest_args <- list(...)
  # Base case: no arguments remain in the dots
  if (NROW(rest_args) == 0)
    return(in_put)
  # Otherwise add the first argument to the sum of the rest
  in_put + do.call(sum_dots, rest_args)
} # end sum_dots
sum_dots(1, 2, 3, 4)
# fibo_nacci() returns the first len_gth Fibonacci numbers,
# starting from c(0, 1), computed by recursion.
# Fixes edge cases: len_gth <= 0 returns an empty vector, and
# len_gth of 1 or 2 returns exactly that many terms
# (the original always returned at least two terms).
fibo_nacci <- function(len_gth) {
  if (len_gth <= 0) {
    numeric(0) # No terms requested
  } else if (len_gth == 1) {
    0 # just the first term
  } else if (len_gth == 2) {
    c(0, 1) # Initialize and return
  } else {
    fib_seq <- fibo_nacci(len_gth-1) # Recursion
    c(fib_seq, sum(tail(fib_seq, 2))) # Append next term and return
  }
} # end fibo_nacci
fibo_nacci(10)
tail(fibo_nacci(9), 2)
# Show the function code
plot.default
# Display function
getAnywhere(plot.default)
# Sum() is a compiled primitive function
sum
# mean() is a generic function
mean
# Show all methods of mean()
methods(generic.function=mean)
# Show code for mean.default()
mean.default
# Get all methods for generic function "plot"
methods("plot")
getAnywhere(plot) # Display function
rm(list=ls())
lazy_func <- function(arg1, arg2) { # Define function lazy_func
2*arg1 # just multiply first argument
} # end lazy_func
lazy_func(3, 2) # Bind arguments by position
lazy_func(3) # Second argument was never evaluated!
lazy_func <- function(arg1, arg2) { # Define function lazy_func
cat(arg1, '\n') # Write to output
cat(arg2) # Write to output
} # end lazy_func
lazy_func(3, 2) # Bind arguments by position
lazy_func(3) # First argument written to output
rm(list=ls())
glob_var <- 1 # Define a global variable
ls(environment()) # Get all variables in environment
func_env <- function() { # Explore function environments
loc_var <- 1 # Define a local variable
cat('objects in evaluation environment:\t',
ls(environment()), '\n')
cat('objects in enclosing environment:\t',
ls(parent.env(environment())), '\n')
cat('this is the enclosing environment:')
parent.env(environment()) # Return enclosing environment
} # end func_env
func_env()
environment(func_env)
environment(print) # Package namespace is the enclosure
rm(list=ls())
glob_var <- 1 # Define a global variable
# Demonstrates R's lexical scoping rules inside a function body:
# reading a global variable, creating a global with '<<-', and
# shadowing a global with a local variable of the same name.
probe_scope <- function() { # Explore function scope
  loc_var <- 2*glob_var # Define a local variable (reads the global glob_var)
  new_globvar <<- 11 # Define a global variable (created by super-assignment)
  # The evaluation environment holds only the locals
  cat('objects in evaluation environment:\t',
    ls(environment()), '\n')
  cat('this is a local loc_var:\t', loc_var, '\n')
  # The enclosing environment is where probe_scope was defined
  cat('objects in enclosing environment:\n',
    ls(parent.env(environment())), '\n')
  cat('this is glob_var:\t', glob_var, '\n')
  glob_var <- 10 # Define local glob_var (shadows the global; global unchanged)
  cat('this is the local glob_var:\t', glob_var, '\n')
} # end probe_scope
probe_scope()
glob_var # Global variable is unaffected
new_globvar # new_globvar is preserved
loc_var # Local variable is gone!
a <- 1 # Define a variable
# New variable "b" points to value of "a"
b <- a # Define a new variable
# When "b" is modified, R makes a copy of it
b <- b+1
# Function doubles its argument and returns it
double_it <- function(in_put) {
in_put <- 2*in_put
cat("input argument was doubled to:", in_put, "\n")
in_put
}
double_it(a)
a # variable "a" is unchanged
setwd("C:/Develop/lecture_slides/data")
rm(list=ls()) # Remove all objects
ls() # List objects
# Load objects from file (side effect)
load(file="my_data.RData")
ls() # List objects
glob_var <- 1 # Define a global variable
# Explore function scope and side effects
side_effect <- function() {
cat("global glob_var:\t", glob_var, "\n")
# Define local "glob_var" variable
glob_var <- 10
# Re-define the global "glob_var"
glob_var <<- 2
cat("local glob_var:\t", glob_var, "\n")
} # end side_effect
side_effect()
# Global variable was modified as side effect
glob_var
# Standard infix operator call syntax
2 + 3
# Infix operator applied using prefix syntax
"+"(2, 3)
# Standard bracket operator
vec_tor <- c(4, 3, 5, 6)
vec_tor[2]
# Bracket operator applied using prefix syntax
"["(vec_tor, 2)
# Define infix operator that returns string
'%+%' <- function(a, b) paste(a, b, sep=" + ")
2 %+% 3
2 %+% 3 %+% 4
"hello" %+% 2 %+% 3 %+% "bye"
obj_string <- "hello"
class(obj_string)
# Assign to value returned by "class" function
class(obj_string) <- "string"
class(obj_string)
# Define function last()
last <- function(vec_tor) {
vec_tor[NROW(vec_tor)]
} # end last
last(1:10)
# Define replacement function last()
'last<-' <- function(vec_tor, value) {
vec_tor[NROW(vec_tor)] <- value
vec_tor
} # end last
x <- 1:5
last(x) <- 11
x
# Create functional that accepts a function as input argument
func_tional <- function(func_name) {
# Calculates statistic on random numbers
set.seed(1)
func_name(runif(1e4)) # Apply the function name
} # end func_tional
func_tional(mean)
func_tional(sd)
# Function factory: returns a closure that raises its argument
# to the power captured from arg_param
make_func <- function(arg_param) {
  # Build the closure over arg_param
  power_func <- function(in_put) in_put^arg_param
  power_func # Return the closure
} # end make_func
square_func <- make_func(2) # Define square function
square_func(4)
cube_func <- make_func(3) # Define cube function
cube_func(2)
cube_root_func <- make_func(1/3) # Define cube root function
cube_root_func(8)
# Factory returning a closure with mutable state: each call to the
# returned function increments a counter kept in the enclosing
# environment, so separate counters advance independently.
make_counter <- function() {
  # Counter function with mutable state
  counter <- 0 # Initialize counter in the enclosing environment
  cat('counter = ', counter)
  function() { # Return anonymous advance function
    counter <<- counter + 1 # Advance counter via '<<-' super-assignment
    cat('counter = ', counter)
  } # end advance function
} # end make_counter
advance_counter <- make_counter() # Create new counter
advance_counter() # Advance counter
advance_counter() # Advance counter
advance_counter_two <- make_counter() # Create another counter
advance_counter_two() # Advance counter two
advance_counter() # Advance counter one
advance_counter_two() # Advance counter two
advance_counter() # Advance counter one
# Returns the pseudo-random generating function random_generator.
# The state variable 'random_number' persists in the evaluation
# environment of seed_random between calls to the generator.
seed_random <- function(seed) { # Seed must be an integer
  random_number <- as.numeric(paste0('0.', seed)) # Initialize state, e.g. 88 -> 0.88
  # random_generator returns a vector of pseudo-random numbers of length length_rand.
  # The iterative loop replaces the original recursion, which overflowed
  # the call stack for large length_rand and never terminated for
  # length_rand = 0; the generated sequence is identical.
  random_generator <- function(length_rand=1) {
    ran_dom <- numeric(length_rand) # Preallocate the output vector
    for (it in seq_len(length_rand)) {
      random_number <<- 4*random_number*(1 - random_number) # Logistic map
      ran_dom[it] <- random_number
    } # end for
    ran_dom
  } # end random_generator
} # end seed_random
# Create a random number generating function and set seed
make_random <- seed_random(88)
make_random(10) # calculate vector of 10 pseudo-random numbers
ls(environment(make_random)) # List objects in scope of make_random
rm(list=ls())
# Bank account example (from Venables) demonstrating mutable state:
# 'balance' persists between function calls because it lives in the
# evaluation environment of open_account, which encloses the three
# returned closures; the super-assignment operator '<<-' modifies
# that shared 'balance'.
open_account <- function(balance) {
  # Returns function list for account operations
  list(
    deposit = function(amount) { # Make deposit
      if (amount > 0) {
balance <<- balance + amount # '<<-' super-assignment operator
cat(amount, "deposited. Your balance is now:",
        balance, "\n")
      } else {
cat("Deposits must be positive!\n")
      }
    }, # end deposit
    withdraw = function(amount) { # Make withdrawal (rejects overdrafts)
      if (amount <= balance) {
balance <<- balance - amount # '<<-' super-assignment operator
cat(amount, "withdrawn. Your balance is now:",
        balance, "\n")
      } else {
cat("You don't have that much money!\n")
      }
    }, # end withdraw
    get_balance = function() { # Get balance (prints, returns nothing useful)
      cat("Your current balance is:", balance, "\n")
    } # end get_balance
  ) # end list
} # end open_account
# Perform account operations
# open an account with 100 deposit
my_account <- open_account(100)
ls(my_account) # my_account is a list
# Add my_account to search path
attach(my_account)
withdraw(30) # Withdrawal to buy groceries
deposit(100) # Deposit paycheck to account
withdraw(200) # Withdrawal to buy Gucci bag
get_balance() # Get account balance
# List objects in scope of get_balance
ls(environment(get_balance))
detach(my_account) # Remove my_account from search path
# Func_tional accepts function name and additional argument
func_tional <- function(func_name, in_put) {
# Produce function name from argument
func_name <- match.fun(func_name)
# Execute function call
func_name(in_put)
} # end func_tional
func_tional(sqrt, 4)
# String also works because match.fun() converts it to a function
func_tional("sqrt", 4)
str(sum) # Sum() accepts multiple arguments
# Func_tional can't accept indefinite number of arguments
func_tional(sum, 1, 2, 3)
# Func_tional accepts function name and dots '...' argument
func_tional <- function(func_name, ...) {
func_name <- match.fun(func_name)
func_name(...) # Execute function call
} # end func_tional
func_tional(sum, 1, 2, 3)
func_tional(sum, 1, 2, NA, 4, 5)
func_tional(sum, 1, 2, NA, 4, 5, na.rm=TRUE)
# Function with three arguments and dots '...' arguments
my_func <- function(in_put, param1, param2, ...) {
c(input=in_put, param1=param1, param2=param2,
dots=c(...))
} # end my_func
my_func(1, 2, 3, param2=4, param1=5)
func_tional(my_func, 1, 2, 3, param2=4, param1=5)
func_tional(my_func, 1, 2, 3, 4, 5)
# Simple anonymous function
(function(x) (x + 3)) (10)
# Anonymous function passed to func_tional
func_tional(func_name=(function(x) (x + 3)), 5)
# Anonymous function is default value
func_tional <-
function(..., func_name=function(x, y, z) {x+y+z}) {
func_name <- match.fun(func_name)
func_name(...) # Execute function call
} # end func_tional
func_tional(2, 3, 4) # Use default func_name
func_tional(2, 3, 4, 5)
# Func_name bound by name
func_tional(func_name=sum, 2, 3, 4, 5)
# Pass anonymous function to func_name
func_tional(func_name=function(x, y, z) {x*y*z},
2, 3, 4)
str(sum) # Sum() accepts multiple arguments
# Sum() can't accept list of arguments
sum(list(1, 2, 3))
str(do.call) # "what" argument is a function
# Do.call passes list elements into "sum" individually
do.call(sum, list(1, 2, 3))
do.call(sum, list(1, 2, NA, 3))
do.call(sum, list(1, 2, NA, 3, na.rm=TRUE))
# Functional that takes a list whose first element is a function
# (or a function name) and whose remaining elements are the arguments
func_tional <- function(list_arg) {
  # Split the list into the function and its arguments
  the_func <- match.fun(list_arg[[1]])
  arg_s <- list_arg[-1]
  # Invoke the function on the argument list using do.call()
  do.call(the_func, arg_s)
} # end func_tional
arg_list <- list("sum", 1, 2, 3)
func_tional(arg_list)
# Do_call() performs same operation as do.call()
all.equal(
do.call(sum, list(1, 2, NA, 3, na.rm=TRUE)),
rutils::do_call(sum, list(1, 2, NA, 3), na.rm=TRUE))
rm(list=ls())
str(apply) # Get list of arguments
# Create a matrix
mat_rix <- matrix(6:1, nrow=2, ncol=3)
mat_rix
# Sum the rows and columns
row_sums <- apply(mat_rix, 1, sum)
col_sums <- apply(mat_rix, 2, sum)
mat_rix <- cbind(c(sum(row_sums), row_sums),
rbind(col_sums, mat_rix))
dimnames(mat_rix) <- list(c("col_sums", "row1", "row2"),
c("row_sums", "col1", "col2", "col3"))
mat_rix
str(apply) # Get list of arguments
mat_rix <- matrix(sample(12), nrow=3, ncol=4) # Create a matrix
mat_rix
apply(mat_rix, 2, sort) # Sort matrix columns
apply(mat_rix, 2, sort, decreasing=TRUE) # Sort decreasing order
mat_rix[2, 2] <- NA # Introduce NA value
mat_rix
# Calculate median of columns
apply(mat_rix, 2, median)
# Calculate median of columns with na.rm=TRUE
apply(mat_rix, 2, median, na.rm=TRUE)
rm(list=ls())
# DAX percentage returns
re_turns <- rutils::diff_it(log(EuStockMarkets[, 1]))
library(moments) # Load package moments
str(moment) # Get list of arguments
# Apply moment function
moment(x=re_turns, order=3)
# 4x1 matrix of moment orders
moment_orders <- as.matrix(1:4)
# Anonymous function allows looping over function parameters
apply(X=moment_orders, MARGIN=1,
FUN=function(moment_order) {
moment(x=re_turns, order=moment_order)
} # end anonymous function
) # end apply
# Another way of passing parameters into moment() function
apply(X=moment_orders, MARGIN=1, FUN=moment,
x=re_turns)
# Function with three arguments
my_func <- function(arg1, arg2, arg3) {
c(arg1=arg1, arg2=arg2, arg3=arg3)
} # end my_func
my_func(1, 2, 3)
da_ta <- as.matrix(1:4)
# Pass da_ta to arg1
apply(X=da_ta, MAR=1, FUN=my_func, arg2=2, arg3=3)
# Pass da_ta to arg2
apply(X=da_ta, MAR=1, FUN=my_func, arg1=1, arg3=3)
# Pass da_ta to arg3
apply(X=da_ta, MAR=1, FUN=my_func, arg1=1, arg2=2)
# Vector of means of numeric columns
sapply(iris[, -5], mean)
# List of means of numeric columns
lapply(iris[, -5], mean)
# Lapply using anonymous function
unlist(lapply(iris,
function(col_umn) {
if (is.numeric(col_umn)) mean(col_umn)
} # end anonymous function
) # end lapply
) # end unlist
unlist(sapply(iris, function(col_umn) {
if (is.numeric(col_umn)) mean(col_umn)}))
sapply(6:10, sqrt) # Sapply on vector
sapply(list(6, 7, 8, 9, 10), sqrt) # Sapply on list
# Calculate means of iris data frame columns
sapply(iris, mean) # Returns NA for Species
# Create a matrix
mat_rix <- matrix(sample(100), ncol=4)
# Calculate column means using apply
apply(mat_rix, 2, mean)
# Calculate column means using sapply, with anonymous function
sapply(1:NCOL(mat_rix),
function(col_index) { # Anonymous function
mean(mat_rix[, col_index])
} # end anonymous function
) # end sapply
# Vectors form columns of matrix returned by sapply
sapply(2:4, function(num) c(el1=num, el2=2*num))
# Vectors of different lengths returned as list
sapply(2:4, function(num) 1:num)
# vapply is similar to sapply
vapply(2:4, function(num) c(el1=num, el2=2*num),
FUN.VALUE=c(row1=0, row2=0))
# vapply produces an error if it can't simplify
vapply(2:4, function(num) 1:num,
FUN.VALUE=c(row1=0, row2=0))
library(zoo) # Load package zoo
# Show the generic function "merge"
merge
# Show the "merge" method dispatched to "zoo" objects
merge.zoo
library(zoo) # Load package zoo
# Get all methods for generic function merge()
methods(generic.function="merge")
# Get generic function methods applied to "zoo" objects
methods(class="zoo")
# Define a generic function
gen_sum <- function(a, b, ...) {
UseMethod("gen_sum")
} # end gen_sum
# Define method for "numeric" class
gen_sum.numeric <- function(a, b, ...) {
sum(a, b)
} # end gen_sum.character
# Define method for "character" class
gen_sum.character <- function(a, b, ...) {
paste(a, "plus", b)
} # end gen_sum.character
# Apply gen_sum to "numeric" objects
gen_sum(1, 2)
# Apply gen_sum to "character" objects
gen_sum("a", "b")
# 'cbind' is an internal generic function
cbind
# Define "+" method for "character" class
"+.character" <- function(a, b, ...) {
paste(a, "plus", b)
} # end +.character
methods("+") # view methods for "+" operator
# Define variables with "character" class
char1 <- "a"
char2 <- "b"
class(char1)
char1 + char2 # Add two "character" objects - doesn't work
attributes(char1) # Doesn't have explicit "character" class - only implicit
char1 <- structure("a", class="character")
char2 <- structure("b", class="character")
attributes(char1) # Now has explicit "character" class
# Add two "character" objects
char1 + char2
# Define object of class "string"
obj_string <- "how are you today?"
class(obj_string) <- "string"
obj_string
# "print" method for objects of class "string":
# displays the words of the string joined by " + "
print.string <- function(str_ing) {
  word_s <- strsplit(str_ing, split=" ")[[1]] # Split into words
  print(paste(word_s, collapse=" + ")) # Re-join with " + " and print
} # end print.string
# methods("print") # view new methods for "print" function
print(obj_string)
obj_string
# overwrite "+" operator
"+" = function(a, b) {
if (is.character(a) && is.character(b)) {
paste(a, "plus", b)
} else {
.Primitive("+") (a, b)
}
}
methods("+") # view methods for "+" operator
# Add two "numeric" objects
1 + 2
# Add two "character" objects
"a" + "b"
# overwrite "+" operator with a generic function
"+" <- function(a, b, ...) {
UseMethod("+")
} # end gen_sum
# Define method for "numeric" class
"+.numeric" <- function(a, b, ...) {
sum(a, b)
} # end gen_sum.character
# Define method for "character" class
"+.character" <- function(a, b, ...) {
paste(a, "plus", b)
} # end gen_sum.character
methods("+") # view methods for "+" operator
# Add two "numeric" objects
1 + 2
# Add two "character" objects
"a" + "b"
cbind.ts # Can't view non-visible method
stats::cbind.ts # Can't view non-visible method
stats:::cbind.ts # Display non-visible method
getAnywhere(cbind.ts) # Display non-visible method
rm(list=ls())
new_zoo <- zoo(rnorm(10), order.by=(Sys.Date() + 0:9))
# Coerce "zoo" object to new class "zoo_xtra"
class(new_zoo) <- "zoo_xtra"
class(new_zoo)
methods(generic.function="length")
length # Primitive function
# Define "length" method for class "zoo_xtra".
# length() is an internal generic (primitive), so this S3 method is
# dispatched for objects whose class attribute is "zoo_xtra".
length.zoo_xtra <- function(in_ts) {
  cat("length of zoo_xtra object:\n")
  # Unclass object first so NROW() measures the underlying data
  # instead of re-dispatching back into this method, then calculate length
  NROW(unclass(in_ts))
} # end length.zoo_xtra
NROW(new_zoo) # Apply "length" method to "zoo_xtra" object
methods(generic.function="length")
# Define "last" method for class "zoo_xtra"
last.zoo_xtra <- function(in_ts) {
in_ts[NROW(in_ts)]
} # end last.zoo_xtra
last(new_zoo) # Doesn't work
last.zoo_xtra(new_zoo) # Works
# Define a generic function
last <- function(a, b, ...) {
UseMethod("last")
} # end last
last(new_zoo) # Now works
# Define generic "string" class converter
as.string <- function(str_ing, ...)
UseMethod("as.string")
# Default "string" class converter
as.string.default <- function(str_ing, ...)
structure(str_ing, class="string", ...)
# Numeric "string" class converter
as.string.numeric <- function(str_ing, ...)
structure(as.character(str_ing), class="string", ...)
# "string" class checker
is.string <- function(str_ing)
inherits(x=str_ing, what="string")
# Define "string" object
obj_string <- as.string("how are you today?")
obj_string
is.string(obj_string)
is.string("hello")
as.string(123)
is.string(as.string(123))
rm(list=ls())
library(xts)
new_xts <- xts(rnorm(10), order.by=(Sys.Date() + 0:9))
class(new_xts) # Class attribute is a vector
# "last" is a generic function from package "xts"
last
methods(generic.function="last")
last(new_xts) # Apply "last" method from "xts" class
# Derive object "xts_xtra" from "xts" object
class(new_xts) <- c("xts_xtra", class(new_xts))
class(new_xts) # Class attribute is a vector
# "xts_xtra" object inherits "last" method from "xts" class
last(new_xts)
# Define new "last" method for class "xts_xtra"
last.xts_xtra <- function(in_ts) {
cat("last element of xts_xtra object:\n")
drop(in_ts[NROW(in_ts), ])
} # end last.xts_xtra
last(new_xts) # Apply "last" from "xts_xtra" class
# Define "last" method for class "xts_xtra".
# NextMethod() forwards dispatch to the next class in the object's
# class vector (here "xts"), so the parent class performs the
# extraction; drop() then removes the dimension attributes.
last.xts_xtra <- function(in_ts) {
  cat("last element of xts_xtra object:\n")
  drop(NextMethod())
} # end last.xts_xtra
last(new_xts) # Apply "last" from "xts_xtra" class
|
\name{transparentColorBase}
\alias{transparentColorBase}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Set Transparency in Base Graphics}
\description{Setting transparency in base graphics is not as easy as in
  \code{Lattice}, so here's a little function to help.}
\usage{
transparentColorBase(color, alphaTrans = alphaTrans)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{color}{The color, or a vector of colors from
\code{\link{colors}()}.}
\item{alphaTrans}{The alpha transparency value between [0,1] with 0
opaque and 1 fully transparent.
}
}
\details{As above.}
\value{The rgb value(s), which can be passed to any base graphics
routine to get transparency.}
\author{Jeffrey H. Gove}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{col2rgb}}, \code{\link{rgb}} }
\examples{
\dontrun{
cols = transparentColorBase('red', alphaTrans=c(0.3,0.6,0.9))
symbols(c(1,1.5,2), c(1,1.5,2), circles=rep(1,3), bg=cols, xlim=c(0,4), ylim=c(0,4))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/transparentColorBase.Rd
|
no_license
|
cran/sampSurf
|
R
| false
| false
| 1,219
|
rd
|
\name{transparentColorBase}
\alias{transparentColorBase}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Set Transparency in Base Graphics}
\description{Setting transparency in base graphics is not as easy as in
  \code{Lattice}, so here's a little function to help.}
\usage{
transparentColorBase(color, alphaTrans = alphaTrans)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{color}{The color, or a vector of colors from
\code{\link{colors}()}.}
\item{alphaTrans}{The alpha transparency value between [0,1] with 0
opaque and 1 fully transparent.
}
}
\details{As above.}
\value{The rgb value(s), which can be passed to any base graphics
routine to get transparency.}
\author{Jeffrey H. Gove}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{col2rgb}}, \code{\link{rgb}} }
\examples{
\dontrun{
cols = transparentColorBase('red', alphaTrans=c(0.3,0.6,0.9))
symbols(c(1,1.5,2), c(1,1.5,2), circles=rep(1,3), bg=cols, xlim=c(0,4), ylim=c(0,4))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#---
# Author: "Ian Hinds"
# Date: 2020-08-13
# Purpose: Day 7 assignment: joining, pivots, splits, plots
#1
# Make a faceted plot of the cumulative cases & deaths by USA region. Your x axis is date, y axis is value/count. Join and pivot the covid 19 data.
# Read the NYT covid-19 county-level data
library(tidyverse)
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
covid = read_csv(url)
head(covid)
# Lookup table mapping each state name to its US Census region (N, W, S, E)
region = data.frame(state = state.name, region = state.region)
head(region)
# Records whose "state" is not one of the 50 state names (territories etc.)
# on the most recent date, with the number of rows for each
covid %>%
  filter(!state %in% state.name) %>%
  filter(date == max(date)) %>%
  count(state)
# Compare join types: inner_join() drops the unmatched territories,
# full_join() keeps them with an NA region, so the totals differ
inner_join(covid, region, by = "state") %>%
  count(region) %>%
  mutate(tot = sum(n))
full_join(covid, region, by = "state") %>%
  count(region) %>%
  mutate(tot = sum(n))
# Join regions, aggregate daily counts by region, and pivot to long form
covid %>%
  right_join(region, by = "state") %>%
  group_by(region, date) %>%
  summarize(cases = sum(cases),
            deaths = sum(deaths)) %>%
  pivot_longer(cols = c('cases', 'deaths')) %>%
  # Faceted time-series plot: one row per measure, one column per region
  ggplot(aes(x = date, y = value)) +
  geom_line(aes(col = region)) +
  # "scales" (not "scale") is the documented facet_grid() argument;
  # the original relied on partial argument matching
  facet_grid(name~region, scales = "free_y") +
  theme_linedraw() +
  theme(legend.position = "bottom")
|
/R/ianhinds-day-07.R
|
no_license
|
Hindstein/geog176A-daily-exercises
|
R
| false
| false
| 1,293
|
r
|
#---
# Author: "Ian Hinds"
# Date: 2020-08-13
# Purpose: Day 7 assignment: joining, pivots, splits, plots
#1
# Make a faceted plot of the cumulative cases & deaths by USA region. Your x axis is date, y axis is value/count. Join and pivot the covid 19 data.
# Read the NYT covid-19 county-level data
library(tidyverse)
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
covid = read_csv(url)
head(covid)
# Lookup table mapping each state name to its US Census region (N, W, S, E)
region = data.frame(state = state.name, region = state.region)
head(region)
# Records whose "state" is not one of the 50 state names (territories etc.)
# on the most recent date, with the number of rows for each
covid %>%
  filter(!state %in% state.name) %>%
  filter(date == max(date)) %>%
  count(state)
# Compare join types: inner_join() drops the unmatched territories,
# full_join() keeps them with an NA region, so the totals differ
inner_join(covid, region, by = "state") %>%
  count(region) %>%
  mutate(tot = sum(n))
full_join(covid, region, by = "state") %>%
  count(region) %>%
  mutate(tot = sum(n))
# Join regions, aggregate daily counts by region, and pivot to long form
covid %>%
  right_join(region, by = "state") %>%
  group_by(region, date) %>%
  summarize(cases = sum(cases),
            deaths = sum(deaths)) %>%
  pivot_longer(cols = c('cases', 'deaths')) %>%
  # Faceted time-series plot: one row per measure, one column per region
  ggplot(aes(x = date, y = value)) +
  geom_line(aes(col = region)) +
  # "scales" (not "scale") is the documented facet_grid() argument;
  # the original relied on partial argument matching
  facet_grid(name~region, scales = "free_y") +
  theme_linedraw() +
  theme(legend.position = "bottom")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runRppsc.R
\name{runRppsc}
\alias{runRppsc}
\title{launch Rppsc shiny app}
\usage{
runRppsc()
}
\value{
null
}
\description{
A function that launches the Rppsc Shiny app, allowing the user to use the features
of the Rppsc package in an interactive way.
}
\examples{
\dontrun{
runRppsc()
}
}
|
/man/runRppsc.Rd
|
permissive
|
dxjasmine/Rppsc
|
R
| false
| true
| 366
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runRppsc.R
\name{runRppsc}
\alias{runRppsc}
\title{launch Rppsc shiny app}
\usage{
runRppsc()
}
\value{
null
}
\description{
A function that launches the Rppsc Shiny app, allowing the user to use the features
of the Rppsc package in an interactive way.
}
\examples{
\dontrun{
runRppsc()
}
}
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# HackerRank "Arrays - DS": echo the input array in reverse order.
# stdin line 1 is the array length, line 2 the space-separated values;
# fill = T pads the shorter first line so both fit into one table.
array <- read.table(file = "stdin", header = F, fill = T, sep = " ")
revArray <- rev(array[2,]) #reverse values (row 2 holds the array elements)
write.table(revArray, row.names = F, col.names = F) #remove overhead variable and row names for printing
|
/[Easy] Arrays - DS.R
|
no_license
|
fardeen-ahmed/HackerRank
|
R
| false
| false
| 287
|
r
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# HackerRank "Arrays - DS": echo the input array in reverse order.
# stdin line 1 is the array length, line 2 the space-separated values;
# fill = T pads the shorter first line so both fit into one table.
array <- read.table(file = "stdin", header = F, fill = T, sep = " ")
revArray <- rev(array[2,]) #reverse values (row 2 holds the array elements)
write.table(revArray, row.names = F, col.names = F) #remove overhead variable and row names for printing
|
# ##############################################################################
# Author: Georgios Kampolis
#
# Description: Creates boxplots to explore seasonality of the data set.
#
# Expects a data frame `wind` (columns dateTime, windSpeed) in scope and a
# project helper saveA5() for saving plots -- both defined elsewhere.
# ##############################################################################
# Derive factor columns so boxplots can be grouped by year, month and hour.
wind %<>% mutate(year = as.factor(year(dateTime)),
month = as.factor(month(dateTime)),
hour = as.factor(hour(dateTime))
)
# Monthly seasonality, pooled over all years.
boxPlotMonthOverall <- wind %>%
ggplot(aes(month, windSpeed)) + geom_boxplot() +
theme(axis.title.x = element_blank()) +
labs(y = "Wind speed (m/s)")
# Monthly seasonality per year; 2018 excluded (presumably an incomplete
# year -- TODO confirm).
boxPlotMonth <- wind %>% filter(year(dateTime) < 2018) %>%
ggplot(aes(month, windSpeed)) +
geom_boxplot() + facet_wrap(~ year) +
labs(x = "Months",
y = "Wind speed (m/s)")
# Stack the pooled and per-year plots into one figure and save it.
boxPlotMonthTotal <- gridExtra::grid.arrange(boxPlotMonthOverall, boxPlotMonth)
saveA5(boxPlotMonthTotal, "BoxPlotMonth", "H")
rm(boxPlotMonth, boxPlotMonthOverall, boxPlotMonthTotal)
# Diurnal pattern, pooled over all months.
boxPlotHourOverall <- wind %>%
ggplot(aes(hour, windSpeed)) +
geom_boxplot() +
labs(y = "Wind speed (m/s)",
x = "Hours")
saveA5(boxPlotHourOverall, "BoxPlotHour", "H")
# Diurnal pattern faceted by month; saved below as an A4-landscape PDF.
boxPlotHourByMonth <- wind %>%
ggplot(aes(hour, windSpeed)) +
geom_boxplot() +
facet_wrap(~ month, ncol = 3) +
labs(y = "Wind speed (m/s)",
x = "Hours")
ggsave("BoxPlotHourByMonth.pdf", plot = boxPlotHourByMonth, path = "plots/",
units = "cm",
width = 29.7,
height = 21,
dpi = 300)
rm(boxPlotHourOverall, boxPlotHourByMonth)
# return the data set in its initial state
wind %<>% select(dateTime, windSpeed)
## Notify that script's end has been reached ##
if (require(beepr)) {beepr::beep(1)}
|
/scripts/3_3EDABoxPlot.R
|
permissive
|
gkampolis/ChilWind
|
R
| false
| false
| 1,675
|
r
|
# ##############################################################################
# Author: Georgios Kampolis
#
# Description: Creates boxplots to explore seasonality of the data set.
#
# Expects a data frame `wind` (columns dateTime, windSpeed) in scope and a
# project helper saveA5() for saving plots -- both defined elsewhere.
# ##############################################################################
# Derive factor columns so boxplots can be grouped by year, month and hour.
wind %<>% mutate(year = as.factor(year(dateTime)),
month = as.factor(month(dateTime)),
hour = as.factor(hour(dateTime))
)
# Monthly seasonality, pooled over all years.
boxPlotMonthOverall <- wind %>%
ggplot(aes(month, windSpeed)) + geom_boxplot() +
theme(axis.title.x = element_blank()) +
labs(y = "Wind speed (m/s)")
# Monthly seasonality per year; 2018 excluded (presumably an incomplete
# year -- TODO confirm).
boxPlotMonth <- wind %>% filter(year(dateTime) < 2018) %>%
ggplot(aes(month, windSpeed)) +
geom_boxplot() + facet_wrap(~ year) +
labs(x = "Months",
y = "Wind speed (m/s)")
# Stack the pooled and per-year plots into one figure and save it.
boxPlotMonthTotal <- gridExtra::grid.arrange(boxPlotMonthOverall, boxPlotMonth)
saveA5(boxPlotMonthTotal, "BoxPlotMonth", "H")
rm(boxPlotMonth, boxPlotMonthOverall, boxPlotMonthTotal)
# Diurnal pattern, pooled over all months.
boxPlotHourOverall <- wind %>%
ggplot(aes(hour, windSpeed)) +
geom_boxplot() +
labs(y = "Wind speed (m/s)",
x = "Hours")
saveA5(boxPlotHourOverall, "BoxPlotHour", "H")
# Diurnal pattern faceted by month; saved below as an A4-landscape PDF.
boxPlotHourByMonth <- wind %>%
ggplot(aes(hour, windSpeed)) +
geom_boxplot() +
facet_wrap(~ month, ncol = 3) +
labs(y = "Wind speed (m/s)",
x = "Hours")
ggsave("BoxPlotHourByMonth.pdf", plot = boxPlotHourByMonth, path = "plots/",
units = "cm",
width = 29.7,
height = 21,
dpi = 300)
rm(boxPlotHourOverall, boxPlotHourByMonth)
# return the data set in its initial state
wind %<>% select(dateTime, windSpeed)
## Notify that script's end has been reached ##
if (require(beepr)) {beepr::beep(1)}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/characterise_episodes.R
\name{episode_varacity}
\alias{episode_varacity}
\title{Summarise Non-Verifiable Episodes}
\usage{
episode_varacity(df)
}
\arguments{
\item{df}{the episode table returned from \code{\link{characterise_episodes}}}
}
\value{
a tibble containing summary information for validation at episode
level
}
\description{
Provides an overview of the reasons for episode invalidation
}
|
/man/episode_varacity.Rd
|
no_license
|
CC-HIC/inspectEHR
|
R
| false
| true
| 478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/characterise_episodes.R
\name{episode_varacity}
\alias{episode_varacity}
\title{Summarise Non-Verifiable Episodes}
\usage{
episode_varacity(df)
}
\arguments{
\item{df}{the episode table returned from \code{\link{characterise_episodes}}}
}
\value{
a tibble containing summary information for validation at episode
level
}
\description{
Provides an overview of the reasons for episode invalidation
}
|
# Visualize mean tip and hourly pay per 250x250 pickup-grid cell for the
# NYC taxi data. Expects data/trips.20160613_19.all.RData to provide the
# `univ` and `univ1month` objects.
load(file="data/trips.20160613_19.all.RData")
univ
# Aggregate per grid cell (px4/py4); keep only cells with >= 200 trips and
# flag cells where at least half the trips are "high" (isHigh).
grouped <- univ1month %>% group_by(px4,py4) %>% summarise( meantip = mean(tip_amount), meanhpay = mean(hpay), medianhpay=median(hpay), highRate= sum(isHigh)/ n(), n = n(), pa = min(pickup_latitude), po = min(pickup_longitude) ) %>% filter( n >= 200 ) %>% mutate( hpayType = ifelse(highRate>=.5, 'HIGH','OTHER'))
sz <- 125  # half a cell width: each rect spans 2*sz = 250 units
# Hide axis tick labels by placing the only break far outside the data range.
# FIX: the original line accidentally assigned twice (`myscale <- myscale <- ...`).
myscale <- list(scale_y_continuous(breaks=-50000), scale_x_continuous(breaks=-50000))
png("EDA/univ1month-meantip-grid.png", width=480, height=960)
ggplot(grouped) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = meantip )) + myscale
dev.off()
# NOTE(review): the plots below are created after dev.off(), so they go to
# the default device and are NOT written into the PNG above.
ggplot(grouped %>% filter(meanhpay<50)) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = meanhpay>16 )) + myscale
# not informative
ggplot(grouped %>% filter(medianhpay<50)) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = medianhpay > 12 )) + myscale
#+ scale_fill_grey(start = .9, end=.1)
|
/R/EDA-visualize_tip.R
|
no_license
|
Sapphirine/Tip_Prediction_and_GPS_Noise_Modeling_on_NYC_Taxi_Dataset
|
R
| false
| false
| 1,127
|
r
|
# Visualize mean tip and hourly pay per 250x250 pickup-grid cell for the
# NYC taxi data. Expects data/trips.20160613_19.all.RData to provide the
# `univ` and `univ1month` objects.
load(file="data/trips.20160613_19.all.RData")
univ
# Aggregate per grid cell (px4/py4); keep only cells with >= 200 trips and
# flag cells where at least half the trips are "high" (isHigh).
grouped <- univ1month %>% group_by(px4,py4) %>% summarise( meantip = mean(tip_amount), meanhpay = mean(hpay), medianhpay=median(hpay), highRate= sum(isHigh)/ n(), n = n(), pa = min(pickup_latitude), po = min(pickup_longitude) ) %>% filter( n >= 200 ) %>% mutate( hpayType = ifelse(highRate>=.5, 'HIGH','OTHER'))
sz <- 125  # half a cell width: each rect spans 2*sz = 250 units
# Hide axis tick labels by placing the only break far outside the data range.
# FIX: the original line accidentally assigned twice (`myscale <- myscale <- ...`).
myscale <- list(scale_y_continuous(breaks=-50000), scale_x_continuous(breaks=-50000))
png("EDA/univ1month-meantip-grid.png", width=480, height=960)
ggplot(grouped) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = meantip )) + myscale
dev.off()
# NOTE(review): the plots below are created after dev.off(), so they go to
# the default device and are NOT written into the PNG above.
ggplot(grouped %>% filter(meanhpay<50)) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = meanhpay>16 )) + myscale
# not informative
ggplot(grouped %>% filter(medianhpay<50)) + geom_rect(aes(xmin=px4-sz, ymin=py4-sz,
xmax=px4+sz, ymax=py4+sz, fill = medianhpay > 12 )) + myscale
#+ scale_fill_grey(start = .9, end=.1)
|
## Aggregate gene lists (per-cell-type ATAC signatures plus published CLL /
## proliferation signatures), run EnrichR enrichment over them, plot a term
## heatmap, and append selected B-cell term gene sets.
require("project.init")
require("Seurat")
require("gplots")
require(methods)
require(pheatmap)
require(gdata)
require(enrichR) #devtools::install_github("definitelysean/enrichR")
out <- "20_AggregatedLists/"
dir.create(dirout(out))
project.init2("cll-time_course")
atacRes <- list()
cell <- "Bcell"
# Up/down gene signatures per cell type from the limma ATAC analysis
# (V1 = gene, V2 = weighted signature score; |score| > 0.5 kept).
for(cell in c("Bcell", "CD4", "CD8", "CLL","Mono", "NK")){
  x <- fread(paste0(getOption("PROCESSED.PROJECT"), "results/", "cll-time_course_peaks.coverage.joint_qnorm.pca_fix.power.diff_timepoint.limma.",cell,"_diff.gene_signature.weighted.csv"))
  atacRes[[paste0(cell, "_up")]] <- x[V2 > 0.5]$V1
  atacRes[[paste0(cell, "_down")]] <- x[V2 < -0.5]$V1
}
# Download published supplementary tables (only when not already cached).
# FIX: the first condition was missing a closing parenthesis
# (`file.exists(dirout(out, "CLL_Signature.xls")` ), which made the whole
# script unparseable.
if(!file.exists(dirout(out, "CLL_Signature.xls"))){
  system(paste0("wget http://genome.cshlp.org/content/suppl/2013/11/21/gr.152132.112.DC1/Supplemental_File2_diffExpGenes.xls -O ", dirout(out, "CLL_Signature.xls")))
}
if(!file.exists(dirout(out, "Proliferation_Signature.xlsx"))){
  system(paste0("wget http://www.impactjournals.com/oncotarget/index.php?journal=oncotarget&page=article&op=downloadSuppFile&path%5B%5D=16961&path%5B%5D=24097 -O ", dirout(out, "Proliferation_Signature.xlsx")))
}
if(!file.exists(dirout(out, "CLL_Signature2.xlsx"))){
  system(paste0("wget https://static-content.springer.com/esm/art%3A10.1186%2Fs13073-014-0125-z/MediaObjects/13073_2014_125_MOESM2_ESM.xlsx -O ", dirout(out, "CLL_Signature2.xlsx")))
}
# Fereira et al, Genome Res 2013
x2 <- data.table(read.xls(dirout(out, "CLL_Signature.xls"), sheet=2))
atacRes[["Fereira_normal"]] <- x2[md.tumor < md.normal]$genename
atacRes[["Fereira_tumor"]] <- x2[md.tumor > md.normal]$genename
x2 <- data.table(read.xls(dirout(out, "CLL_Signature.xls"), sheet=3))
atacRes[["Fereira_C1"]] <- x2[md.hcC1 > md.hcC2]$genename
atacRes[["Fereira_C2"]] <- x2[md.hcC1 < md.hcC2]$genename
# Ramaker et al., Oncotarget 2017
# FIX: `sheet=` was left empty (silently falling through to read.xls's
# default of 1); make the first sheet explicit.
x2 <- data.table(read.xls(dirout(out, "Proliferation_Signature.xlsx"), sheet=1))
atacRes[["Ramaker_Proliferation"]] <- x2[-1,][[1]]
# two groups
# x2 <- data.table(read.xls(dirout(out, "CLL_Signature2.xlsx"), sheet=3))
# Ibrutinib study
x2 <- fread(dirout(out,"ibrutinib_treatment_expression.timepoint_name.csv"))
atacRes[["Ibrutinib_treatment"]] <- x2[padj < 0.05 & log2FoldChange < 0]$gene
cll.lists <- atacRes
save(cll.lists, file=dirout(out, "lists.RData"))
# ENRICHR: enrich every gene list against the selected databases; failed or
# empty queries are skipped.
enrichrDBs <- c("NCI-Nature_2016", "WikiPathways_2016", "Human_Gene_Atlas", "Chromosome_Location")
enrichRes <- data.table()
hitSets <- atacRes
for(grp.x in names(hitSets)){
  ret=try(as.data.table(enrichGeneList(hitSets[[grp.x]],databases = enrichrDBs)),silent = FALSE)
  if(!any(grepl("Error",ret)) && nrow(ret) > 0){
    enrichRes <- rbind(enrichRes, data.table(ret, grp = grp.x))
  }
}
# Keep only terms supported by more than 3 genes; export significant ones.
enrichRes$n <- sapply(strsplit(enrichRes$genes,","), length)
enrichRes <- enrichRes[n > 3]
write.table(enrichRes[qval < 0.05], file=dirout(out, "EnrichR", ".tsv"), sep="\t", quote=F, row.names=F)
# Heatmap of -log10(qval) (terms x gene lists), values capped at 4; UUID
# suffixes are stripped from term names and names truncated to 50 chars.
if(nrow(enrichRes) > 2 & length(unique(enrichRes$grp)) > 1){
  pDat <- dcast.data.table(enrichRes, make.names(category) ~ grp, value.var="qval")
  pDatM <- as.matrix(pDat[,-"category", with=F])
  pDat$category <- gsub("\\_(\\w|\\d){8}-(\\w|\\d){4}-(\\w|\\d){4}-(\\w|\\d){4}-(\\w|\\d){12}", "", pDat$category)
  pDat$category <- substr(pDat$category, 0, 50)
  row.names(pDatM) <- pDat$category
  pDatM[is.na(pDatM)] <- 1
  str(pDatM <- pDatM[apply(pDatM <= 5e-2,1,sum)>=1,apply(pDatM <= 5e-2,2,sum)>=1, drop=F])
  if(nrow(pDatM) >=2 & ncol(pDatM) >= 2){
    pDatM <- -log10(pDatM)
    pDatM[pDatM > 4] <- 4
    # pDatM[pDatM < 1.3] <- 0
    pdf(dirout(out, "EnrichR.pdf"),onefile=FALSE, width=min(29, 6+ ncol(pDatM)*0.3), height=min(29, nrow(pDatM)*0.3 + 4))
    pheatmap(pDatM) #, color=gray.colors(12, start=0, end=1), border_color=NA)
    dev.off()
  }
}
# Bcell Terms ----------------
# Fetch member genes of selected B-cell-related terms directly from the
# EnrichR gene-set library endpoint and append them to the lists.
cll.lists2 <- cll.lists
bcellTerms <- list(
  list(term="CD19\\+_BCells\\(neg._sel.\\)", db="Human_Gene_Atlas"),
  list(term="BCR signaling pathway_Homo sapiens_acbf44e2-618c-11e5-8ac5-06603eb7f303", db="NCI-Nature_2016"),
  list(term="B Cell Receptor Signaling Pathway_Homo sapiens_WP23", db="WikiPathways_2016")
)
bcTermI <- 1
for(bcTermI in 1:length(bcellTerms)){
  fpath <- paste0("http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=",bcellTerms[[bcTermI]]$db)
  fhandle <- file(fpath)
  dblines <- readLines(con=fhandle)
  close(fhandle)
  linex <- dblines[grepl(bcellTerms[[bcTermI]]$term, dblines)]
  print(linex)
  # Split the tab-separated library line; keep each field's text before the
  # first comma (drops EnrichR membership scores).
  genesx <- sapply(strsplit(linex, "\t")[[1]], function(s) return(strsplit(s, ",")[[1]][1]))
  names(genesx) <- NULL
  cll.lists2[[paste0("Bcells_", make.names(bcellTerms[[bcTermI]]$db))]] <- genesx
}
save(cll.lists2, file=dirout(out, "lists_plusBcells.RData"))
|
/src/single_cell_RNA/20_AggregateLists_Enrichr.R
|
no_license
|
nattzy94/cll-ibrutinib_time
|
R
| false
| false
| 4,695
|
r
|
## Aggregate gene lists (per-cell-type ATAC signatures plus published CLL /
## proliferation signatures), run EnrichR enrichment over them, plot a term
## heatmap, and append selected B-cell term gene sets.
require("project.init")
require("Seurat")
require("gplots")
require(methods)
require(pheatmap)
require(gdata)
require(enrichR) #devtools::install_github("definitelysean/enrichR")
out <- "20_AggregatedLists/"
dir.create(dirout(out))
project.init2("cll-time_course")
atacRes <- list()
cell <- "Bcell"
# Up/down gene signatures per cell type from the limma ATAC analysis
# (V1 = gene, V2 = weighted signature score; |score| > 0.5 kept).
for(cell in c("Bcell", "CD4", "CD8", "CLL","Mono", "NK")){
  x <- fread(paste0(getOption("PROCESSED.PROJECT"), "results/", "cll-time_course_peaks.coverage.joint_qnorm.pca_fix.power.diff_timepoint.limma.",cell,"_diff.gene_signature.weighted.csv"))
  atacRes[[paste0(cell, "_up")]] <- x[V2 > 0.5]$V1
  atacRes[[paste0(cell, "_down")]] <- x[V2 < -0.5]$V1
}
# Download published supplementary tables (only when not already cached).
# FIX: the first condition was missing a closing parenthesis
# (`file.exists(dirout(out, "CLL_Signature.xls")` ), which made the whole
# script unparseable.
if(!file.exists(dirout(out, "CLL_Signature.xls"))){
  system(paste0("wget http://genome.cshlp.org/content/suppl/2013/11/21/gr.152132.112.DC1/Supplemental_File2_diffExpGenes.xls -O ", dirout(out, "CLL_Signature.xls")))
}
if(!file.exists(dirout(out, "Proliferation_Signature.xlsx"))){
  system(paste0("wget http://www.impactjournals.com/oncotarget/index.php?journal=oncotarget&page=article&op=downloadSuppFile&path%5B%5D=16961&path%5B%5D=24097 -O ", dirout(out, "Proliferation_Signature.xlsx")))
}
if(!file.exists(dirout(out, "CLL_Signature2.xlsx"))){
  system(paste0("wget https://static-content.springer.com/esm/art%3A10.1186%2Fs13073-014-0125-z/MediaObjects/13073_2014_125_MOESM2_ESM.xlsx -O ", dirout(out, "CLL_Signature2.xlsx")))
}
# Fereira et al, Genome Res 2013
x2 <- data.table(read.xls(dirout(out, "CLL_Signature.xls"), sheet=2))
atacRes[["Fereira_normal"]] <- x2[md.tumor < md.normal]$genename
atacRes[["Fereira_tumor"]] <- x2[md.tumor > md.normal]$genename
x2 <- data.table(read.xls(dirout(out, "CLL_Signature.xls"), sheet=3))
atacRes[["Fereira_C1"]] <- x2[md.hcC1 > md.hcC2]$genename
atacRes[["Fereira_C2"]] <- x2[md.hcC1 < md.hcC2]$genename
# Ramaker et al., Oncotarget 2017
# FIX: `sheet=` was left empty (silently falling through to read.xls's
# default of 1); make the first sheet explicit.
x2 <- data.table(read.xls(dirout(out, "Proliferation_Signature.xlsx"), sheet=1))
atacRes[["Ramaker_Proliferation"]] <- x2[-1,][[1]]
# two groups
# x2 <- data.table(read.xls(dirout(out, "CLL_Signature2.xlsx"), sheet=3))
# Ibrutinib study
x2 <- fread(dirout(out,"ibrutinib_treatment_expression.timepoint_name.csv"))
atacRes[["Ibrutinib_treatment"]] <- x2[padj < 0.05 & log2FoldChange < 0]$gene
cll.lists <- atacRes
save(cll.lists, file=dirout(out, "lists.RData"))
# ENRICHR: enrich every gene list against the selected databases; failed or
# empty queries are skipped.
enrichrDBs <- c("NCI-Nature_2016", "WikiPathways_2016", "Human_Gene_Atlas", "Chromosome_Location")
enrichRes <- data.table()
hitSets <- atacRes
for(grp.x in names(hitSets)){
  ret=try(as.data.table(enrichGeneList(hitSets[[grp.x]],databases = enrichrDBs)),silent = FALSE)
  if(!any(grepl("Error",ret)) && nrow(ret) > 0){
    enrichRes <- rbind(enrichRes, data.table(ret, grp = grp.x))
  }
}
# Keep only terms supported by more than 3 genes; export significant ones.
enrichRes$n <- sapply(strsplit(enrichRes$genes,","), length)
enrichRes <- enrichRes[n > 3]
write.table(enrichRes[qval < 0.05], file=dirout(out, "EnrichR", ".tsv"), sep="\t", quote=F, row.names=F)
# Heatmap of -log10(qval) (terms x gene lists), values capped at 4; UUID
# suffixes are stripped from term names and names truncated to 50 chars.
if(nrow(enrichRes) > 2 & length(unique(enrichRes$grp)) > 1){
  pDat <- dcast.data.table(enrichRes, make.names(category) ~ grp, value.var="qval")
  pDatM <- as.matrix(pDat[,-"category", with=F])
  pDat$category <- gsub("\\_(\\w|\\d){8}-(\\w|\\d){4}-(\\w|\\d){4}-(\\w|\\d){4}-(\\w|\\d){12}", "", pDat$category)
  pDat$category <- substr(pDat$category, 0, 50)
  row.names(pDatM) <- pDat$category
  pDatM[is.na(pDatM)] <- 1
  str(pDatM <- pDatM[apply(pDatM <= 5e-2,1,sum)>=1,apply(pDatM <= 5e-2,2,sum)>=1, drop=F])
  if(nrow(pDatM) >=2 & ncol(pDatM) >= 2){
    pDatM <- -log10(pDatM)
    pDatM[pDatM > 4] <- 4
    # pDatM[pDatM < 1.3] <- 0
    pdf(dirout(out, "EnrichR.pdf"),onefile=FALSE, width=min(29, 6+ ncol(pDatM)*0.3), height=min(29, nrow(pDatM)*0.3 + 4))
    pheatmap(pDatM) #, color=gray.colors(12, start=0, end=1), border_color=NA)
    dev.off()
  }
}
# Bcell Terms ----------------
# Fetch member genes of selected B-cell-related terms directly from the
# EnrichR gene-set library endpoint and append them to the lists.
cll.lists2 <- cll.lists
bcellTerms <- list(
  list(term="CD19\\+_BCells\\(neg._sel.\\)", db="Human_Gene_Atlas"),
  list(term="BCR signaling pathway_Homo sapiens_acbf44e2-618c-11e5-8ac5-06603eb7f303", db="NCI-Nature_2016"),
  list(term="B Cell Receptor Signaling Pathway_Homo sapiens_WP23", db="WikiPathways_2016")
)
bcTermI <- 1
for(bcTermI in 1:length(bcellTerms)){
  fpath <- paste0("http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=",bcellTerms[[bcTermI]]$db)
  fhandle <- file(fpath)
  dblines <- readLines(con=fhandle)
  close(fhandle)
  linex <- dblines[grepl(bcellTerms[[bcTermI]]$term, dblines)]
  print(linex)
  # Split the tab-separated library line; keep each field's text before the
  # first comma (drops EnrichR membership scores).
  genesx <- sapply(strsplit(linex, "\t")[[1]], function(s) return(strsplit(s, ",")[[1]][1]))
  names(genesx) <- NULL
  cll.lists2[[paste0("Bcells_", make.names(bcellTerms[[bcTermI]]$db))]] <- genesx
}
save(cll.lists2, file=dirout(out, "lists_plusBcells.RData"))
|
# Emma Peltomaa
# 18.11.2018
# Creating two datasets, they are downloaded from here: https://archive.ics.uci.edu/ml/datasets/Student+Performance
# Joins the math and Portuguese student tables on background variables,
# averages the duplicated numeric columns, and derives alcohol-use columns.
# Reading the datasets
math <- read.table("student-mat.csv", sep=";", header=TRUE)
por <- read.table("student-por.csv", sep=";", header=TRUE)
# Dimensions of the datasets
dim(math) # 395 rows/observations and 33 columns/variables
dim(por) # 649 rows/observations and 33 columns/variables
# Structure of the datasets
str(math)
str(por)
###################
# Joining the datasets
library(dplyr)
# Background variables used as the join key (assumed to identify a student
# across the two courses).
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# inner_join keeps only students present in both courses.
math_por <- inner_join(math, por, by = join_by, suffix = c(".math", ".por"))
# Dimensions and structure of the jointed data
dim(math_por) # 382 rows/observations and 53 columns/variables
str(math_por)
###################
# create a new data frame with only the joined columns
alc <- select(math_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# select two columns from 'math_por' with the same original name
two_columns <- select(math_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- select(two_columns, 1)[[1]]
}
}
###################
# define a new column alc_use by combining weekday and weekend alcohol use
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# define a new logical column 'high_use'
alc <- mutate(alc, high_use = alc_use > 2)
###################
glimpse(alc)
# Saving the dataset
write.table(alc, file = "alc.csv")
View(alc)
|
/data/create_alc.R
|
no_license
|
peempe/IODS-project
|
R
| false
| false
| 2,247
|
r
|
# Emma Peltomaa
# 18.11.2018
# Creating two datasets, they are downloaded from here: https://archive.ics.uci.edu/ml/datasets/Student+Performance
# Joins the math and Portuguese student tables on background variables,
# averages the duplicated numeric columns, and derives alcohol-use columns.
# Reading the datasets
math <- read.table("student-mat.csv", sep=";", header=TRUE)
por <- read.table("student-por.csv", sep=";", header=TRUE)
# Dimensions of the datasets
dim(math) # 395 rows/observations and 33 columns/variables
dim(por) # 649 rows/observations and 33 columns/variables
# Structure of the datasets
str(math)
str(por)
###################
# Joining the datasets
library(dplyr)
# Background variables used as the join key (assumed to identify a student
# across the two courses).
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# inner_join keeps only students present in both courses.
math_por <- inner_join(math, por, by = join_by, suffix = c(".math", ".por"))
# Dimensions and structure of the jointed data
dim(math_por) # 382 rows/observations and 53 columns/variables
str(math_por)
###################
# create a new data frame with only the joined columns
alc <- select(math_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(math)[!colnames(math) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# select two columns from 'math_por' with the same original name
two_columns <- select(math_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- select(two_columns, 1)[[1]]
}
}
###################
# define a new column alc_use by combining weekday and weekend alcohol use
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# define a new logical column 'high_use'
alc <- mutate(alc, high_use = alc_use > 2)
###################
glimpse(alc)
# Saving the dataset
write.table(alc, file = "alc.csv")
View(alc)
|
## Generated from the coala ABC vignette: fit theta to an observed site
## frequency spectrum via approximate Bayesian computation (rejection).
## ----sfs----------------------------------------------------------------------
# Observed SFS (counts per derived-allele frequency class).
sfs <- c(112, 57, 24, 34, 16, 29, 8, 10, 15)
## ----model setup--------------------------------------------------------------
library(coala)
# Coalescent model: 10 haplotypes, 50 loci, theta ~ Unif(1, 5) prior, with
# the SFS recorded as the summary statistic.
model <- coal_model(10, 50) +
feat_mutation(par_prior("theta", runif(1, 1, 5))) +
sumstat_sfs()
## ----simulate, cache=TRUE-----------------------------------------------------
sim_data <- simulate(model, nsim = 2000, seed = 17)
## -----------------------------------------------------------------------------
# Getting the parameters
sim_param <- create_abc_param(sim_data, model)
head(sim_param, n = 3)
# Getting the summary statistics
sim_sumstat <- create_abc_sumstat(sim_data, model)
head(sim_sumstat, n = 3)
## ----abc, fig.align="center", fig.width=5-------------------------------------
suppressPackageStartupMessages(library(abc))
# Accept the 5% of simulations whose SFS is closest to the observed one.
posterior <- abc(sfs, sim_param, sim_sumstat, 0.05, method = "rejection")
hist(posterior, breaks = 20)
|
/fuzzedpackages/coala/inst/doc/coala-abc.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 975
|
r
|
## Generated from the coala ABC vignette: fit theta to an observed site
## frequency spectrum via approximate Bayesian computation (rejection).
## ----sfs----------------------------------------------------------------------
# Observed SFS (counts per derived-allele frequency class).
sfs <- c(112, 57, 24, 34, 16, 29, 8, 10, 15)
## ----model setup--------------------------------------------------------------
library(coala)
# Coalescent model: 10 haplotypes, 50 loci, theta ~ Unif(1, 5) prior, with
# the SFS recorded as the summary statistic.
model <- coal_model(10, 50) +
feat_mutation(par_prior("theta", runif(1, 1, 5))) +
sumstat_sfs()
## ----simulate, cache=TRUE-----------------------------------------------------
sim_data <- simulate(model, nsim = 2000, seed = 17)
## -----------------------------------------------------------------------------
# Getting the parameters
sim_param <- create_abc_param(sim_data, model)
head(sim_param, n = 3)
# Getting the summary statistics
sim_sumstat <- create_abc_sumstat(sim_data, model)
head(sim_sumstat, n = 3)
## ----abc, fig.align="center", fig.width=5-------------------------------------
suppressPackageStartupMessages(library(abc))
# Accept the 5% of simulations whose SFS is closest to the observed one.
posterior <- abc(sfs, sim_param, sim_sumstat, 0.05, method = "rejection")
hist(posterior, breaks = 20)
|
# Auto-generated fuzz/valgrind regression test for the internal routine
# multivariance:::match_rows: call it with a fixed 1x7 matrix A of extreme
# double values and a 1x1 matrix B, then print the result structure.
testlist <- list(A = structure(c(2.31584307509357e+77, 1.19893625614874e+297, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109058-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 257
|
r
|
# Auto-generated fuzz/valgrind regression test for the internal routine
# multivariance:::match_rows: call it with a fixed 1x7 matrix A of extreme
# double values and a 1x1 matrix B, then print the result structure.
testlist <- list(A = structure(c(2.31584307509357e+77, 1.19893625614874e+297, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#' Compute effective R over the nowcasting estimates returned by NobBS::NobBS
#'
#' @param ncasos vector of new case counts
#' @param datas vector of dates matching `ncasos`
#' @param dia0 day zero (first date considered; defaults to the earliest date)
#' @param delay reporting delay, in days
#' @return the object returned by `estimate.R0`, with the columns of its `$R`
#'   table renamed ("(R)" -> ".R") and start/end calendar dates attached
#' @export
#'
re.com.data <- function(ncasos, datas, dia0 = min(datas), delay = 5) {
  if (length(ncasos) != length(datas)) stop("ncasos e ndatas devem ter o mesmo comprimento")
  ## Index of the first date at or after dia0. Guard against an empty match:
  ## min(integer(0)) returns Inf (with a warning) and would fail obscurely
  ## inside estimate.R0, so fail fast with a clear message instead.
  ## (arr.ind is irrelevant for a plain vector and has been dropped.)
  eligible <- which(datas >= dia0)
  if (length(eligible) == 0) stop("nenhuma data em 'datas' >= dia0")
  day0 <- min(eligible)
  ## estimate.R0 needs at least `delay` days of history before day0.
  if (day0 < delay)
    day0 <- delay + 1
  Re <- estimate.R0(as.integer(na.zero(ncasos)), day0 = day0, delay = delay)
  ## Rename "(R)" columns to ".R" and attach calendar dates for each window.
  names(Re$R) <- gsub("\\(R\\)", ".R", names(Re$R))
  Re$R$data.inicio <- datas[Re$R$t_start]
  Re$R$data.fim <- datas[Re$R$t_end]
  return(Re)
}
|
/R/Re.com.data.R
|
permissive
|
covid19br/now_fcts
|
R
| false
| false
| 754
|
r
|
#' Compute effective R over the nowcasting estimates returned by NobBS::NobBS
#'
#' @param ncasos vector of new case counts
#' @param datas vector of dates matching `ncasos`
#' @param dia0 day zero (first date considered; defaults to the earliest date)
#' @param delay reporting delay, in days
#' @return the object returned by `estimate.R0`, with the columns of its `$R`
#'   table renamed ("(R)" -> ".R") and start/end calendar dates attached
#' @export
#'
re.com.data <- function(ncasos, datas, dia0 = min(datas), delay = 5) {
  if (length(ncasos) != length(datas)) stop("ncasos e ndatas devem ter o mesmo comprimento")
  ## Index of the first date at or after dia0. Guard against an empty match:
  ## min(integer(0)) returns Inf (with a warning) and would fail obscurely
  ## inside estimate.R0, so fail fast with a clear message instead.
  ## (arr.ind is irrelevant for a plain vector and has been dropped.)
  eligible <- which(datas >= dia0)
  if (length(eligible) == 0) stop("nenhuma data em 'datas' >= dia0")
  day0 <- min(eligible)
  ## estimate.R0 needs at least `delay` days of history before day0.
  if (day0 < delay)
    day0 <- delay + 1
  Re <- estimate.R0(as.integer(na.zero(ncasos)), day0 = day0, delay = delay)
  ## Rename "(R)" columns to ".R" and attach calendar dates for each window.
  names(Re$R) <- gsub("\\(R\\)", ".R", names(Re$R))
  Re$R$data.inicio <- datas[Re$R$t_start]
  Re$R$data.fim <- datas[Re$R$t_end]
  return(Re)
}
|
# Static analysis of CRAN packages' LLVM bitcode: find every call to
# Rf_allocVector* and tabulate how its SEXP-type and length arguments are
# computed (compile-time constants vs. runtime values).
# NOTE(review): `bcs` (vector of bitcode file paths) and xgetValue() are
# assumed to be defined before this script runs -- not visible here.
library(Rllvm)
mods = lapply(bcs, readBitcode)
ins = lapply(mods, function(x) unlist(getInstructions(x)))
# Keep only direct calls whose callee is a function named Rf_allocVector*.
alloc = lapply(ins, function(i) i[ sapply(i, function(x) is(x, "CallInst") && is((cf <- getCalledFunction(x)), "Function") && grepl("^Rf_allocVector", Rllvm::getName(cf)) ) ])
ualloc = unlist(alloc)
# First call argument = SEXP type; tabulate how it is produced.
k = sapply(ualloc, function(x) class(x[[1]]))
table(k)
# Argument BinaryOperator CallInst ConstantInt LoadInst PHINode SelectInst
# 8 25 53 11958 10 17 15
# Second call argument = requested vector length.
klen = sapply(ualloc, function(x) class(x[[2]]))
# Argument BinaryOperator CallInst ConstantInt FPToSIInst FPToUIInst LoadInst PHINode SelectInst SExtInst ZExtInst
# 15 374 178 4973 5 8 183 214 62 5313 761
# List (VECSXP) allocations: type argument is the constant 19.
isList = sapply(ualloc, function(x) is(x[[1]], "ConstantInt") && getValue(x[[1]]) == 19L)
klist = sapply(ualloc[isList], function(x) class(x[[2]]))
lapply(ualloc[isList] [ klist == "SelectInst" ], `[[`, 2)
# There are 11 calls where the length is a SelectInst, so a variable number.
#vv = lapply(ualloc[isList] [ klist == "SelectInst" ], function(x) sapply(x[[2]][-1], getValue))
# List allocations whose length resolves to more than one known value.
a = lapply(ualloc[isList], function(x) unique(unlist(xgetValue(x[[2]]))))
vv = a[sapply(a, length) > 1 & sapply(a, function(x) !any(is.na(x)))]
saveRDS(vv, "ListAllocWithVariableLength.rds")
cat(sapply(strsplit(unique(names(vv)), "\\."), function(x) paste(x[1:2], collapse = "::")), sep = "\n")
#vv = lapply(ualloc[isList] [ klist == "PHINode" ], function(x) sapply(x[[2]], getValue))
#which(sapply(ualloc[isList] [ klist == "ZExtInst" ], function(x) class(x[[2]][[1]])) == "SelectInst")
################
# Where we have different type of R object
a = lapply(ualloc, function(x) unique(unlist(xgetValue(x[[2]]))))
vv = a[sapply(a, length) > 1 & sapply(a, function(x) !any(is.na(x)))]
cat(sapply(strsplit(unique(names(vv)), "\\."), function(x) paste(x[1:2], collapse = "::")), sep = "\n")
# The ones where we don't have a known compile time type.
w = sapply(a, function(x) !any(is.na(x)))
table(sapply(ualloc[w], function(x) class(x[[1]])))
# Argument BinaryOperator CallInst LoadInst PHINode SelectInst
# 8 25 53 10 13 1
# Of the 53 calls, 46 are to TYPEOF and the other 7 to routines that map to a SEXPTYPE
table(sapply(ualloc[w][sapply(ualloc[w], function(x) is(x[[1]], "CallInst"))], function(x) Rllvm::getName(getCalledFunction(x[[1]]))))
# Which packages do these come from?
table(sapply(strsplit(names(ualloc[w]), "\\."), `[`,1))
|
/CRAN/listAllocWithDifferentLengths.R
|
no_license
|
duncantl/NativeCodeAnalysis
|
R
| false
| false
| 2,806
|
r
|
# Static analysis of CRAN packages' LLVM bitcode: find every call to
# Rf_allocVector* and tabulate how its SEXP-type and length arguments are
# computed (compile-time constants vs. runtime values).
# NOTE(review): `bcs` (vector of bitcode file paths) and xgetValue() are
# assumed to be defined before this script runs -- not visible here.
library(Rllvm)
mods = lapply(bcs, readBitcode)
ins = lapply(mods, function(x) unlist(getInstructions(x)))
# Keep only direct calls whose callee is a function named Rf_allocVector*.
alloc = lapply(ins, function(i) i[ sapply(i, function(x) is(x, "CallInst") && is((cf <- getCalledFunction(x)), "Function") && grepl("^Rf_allocVector", Rllvm::getName(cf)) ) ])
ualloc = unlist(alloc)
# First call argument = SEXP type; tabulate how it is produced.
k = sapply(ualloc, function(x) class(x[[1]]))
table(k)
# Argument BinaryOperator CallInst ConstantInt LoadInst PHINode SelectInst
# 8 25 53 11958 10 17 15
# Second call argument = requested vector length.
klen = sapply(ualloc, function(x) class(x[[2]]))
# Argument BinaryOperator CallInst ConstantInt FPToSIInst FPToUIInst LoadInst PHINode SelectInst SExtInst ZExtInst
# 15 374 178 4973 5 8 183 214 62 5313 761
# List (VECSXP) allocations: type argument is the constant 19.
isList = sapply(ualloc, function(x) is(x[[1]], "ConstantInt") && getValue(x[[1]]) == 19L)
klist = sapply(ualloc[isList], function(x) class(x[[2]]))
lapply(ualloc[isList] [ klist == "SelectInst" ], `[[`, 2)
# There are 11 calls where the length is a SelectInst, so a variable number.
#vv = lapply(ualloc[isList] [ klist == "SelectInst" ], function(x) sapply(x[[2]][-1], getValue))
# List allocations whose length resolves to more than one known value.
a = lapply(ualloc[isList], function(x) unique(unlist(xgetValue(x[[2]]))))
vv = a[sapply(a, length) > 1 & sapply(a, function(x) !any(is.na(x)))]
saveRDS(vv, "ListAllocWithVariableLength.rds")
cat(sapply(strsplit(unique(names(vv)), "\\."), function(x) paste(x[1:2], collapse = "::")), sep = "\n")
#vv = lapply(ualloc[isList] [ klist == "PHINode" ], function(x) sapply(x[[2]], getValue))
#which(sapply(ualloc[isList] [ klist == "ZExtInst" ], function(x) class(x[[2]][[1]])) == "SelectInst")
################
# Where we have different type of R object
a = lapply(ualloc, function(x) unique(unlist(xgetValue(x[[2]]))))
vv = a[sapply(a, length) > 1 & sapply(a, function(x) !any(is.na(x)))]
cat(sapply(strsplit(unique(names(vv)), "\\."), function(x) paste(x[1:2], collapse = "::")), sep = "\n")
# The ones where we don't have a known compile time type.
w = sapply(a, function(x) !any(is.na(x)))
table(sapply(ualloc[w], function(x) class(x[[1]])))
# Argument BinaryOperator CallInst LoadInst PHINode SelectInst
# 8 25 53 10 13 1
# Of the 53 calls, 46 are to TYPEOF and the other 7 to routines that map to a SEXPTYPE
table(sapply(ualloc[w][sapply(ualloc[w], function(x) is(x[[1]], "CallInst"))], function(x) Rllvm::getName(getCalledFunction(x[[1]]))))
# Which packages do these come from?
table(sapply(strsplit(names(ualloc[w]), "\\."), `[`,1))
|
####### read in data from the coso geothermal field and plot
###### using swig
# RSEIS demo: load the bundled GH seismic data set, sort traces by the
# vertical-component pick order, and open the interactive swig viewer.
options(demo.ask=FALSE)
data("GH")
#########
####
# Button labels for the swig interactive window.
STDLAB = c("DONE", "zoom in", "zoom out", "refresh", "restore",
"XTR", "SPEC", "SGRAM" ,"3COMP", "FILT", "Pinfo")
###sel = which(GH$COMPS=="V")
# Order traces by first arrivals on the vertical components.
gsel = getvertsorder(GH$pickfile, GH)
###swig(GH, sel=sel, STDLAB=STDLAB)
# Convert the UW-format pick file into the pick structure swig plots (APIX).
upix = uwpfile2ypx(GH$pickfile)
######### Repeat, this time sort the traces, plot the archive picks with errors
######### and
######### select only a few buttons,
#########
pickgeninfo()
#########
swig(GH, sel=gsel$sel, APIX =upix, STDLAB =STDLAB, WIN =c(4,13) )
|
/demo/COSO.R
|
no_license
|
cran/RSEIS
|
R
| false
| false
| 653
|
r
|
####### read in data from the coso geothermal field and plot
###### using swig
# RSEIS demo: load the bundled GH seismic data set, sort traces by the
# vertical-component pick order, and open the interactive swig viewer.
options(demo.ask=FALSE)
data("GH")
#########
####
# Button labels for the swig interactive window.
STDLAB = c("DONE", "zoom in", "zoom out", "refresh", "restore",
"XTR", "SPEC", "SGRAM" ,"3COMP", "FILT", "Pinfo")
###sel = which(GH$COMPS=="V")
# Order traces by first arrivals on the vertical components.
gsel = getvertsorder(GH$pickfile, GH)
###swig(GH, sel=sel, STDLAB=STDLAB)
# Convert the UW-format pick file into the pick structure swig plots (APIX).
upix = uwpfile2ypx(GH$pickfile)
######### Repeat, this time sort the traces, plot the archive picks with errors
######### and
######### select only a few buttons,
#########
pickgeninfo()
#########
swig(GH, sel=gsel$sel, APIX =upix, STDLAB =STDLAB, WIN =c(4,13) )
|
# Test download, building and querying random seq.files from GenBank
# Vars
n <- 2 # n per genbank type
wd <- '.'
restez_lib_path <- '~/Coding/restez'
to_download <- TRUE   # toggle: fetch fresh files from GenBank
to_build <- TRUE      # toggle: load downloaded files into the local db
# restez setup — load the dev copy of the package and point it at wd
devtools::load_all(restez_lib_path)
restez_path_set(wd)
if (to_download) {
  # delete any old files
  db_delete(everything = TRUE)
  restez_path_set(wd)
  # Identify random seq files
  latest_genbank_release_notes()
  downloadable_table <- identify_downloadable_files()
  colnames(downloadable_table)
  # Draw n random seq files from every GenBank division ('descripts' category)
  cats <- as.character(unique(downloadable_table[['descripts']]))
  seq_files <- unlist(lapply(X = cats, FUN = function(x) {
    indxs <- which(x == downloadable_table[['descripts']])
    rand_indxs <- sample(indxs, n)
    as.character(downloadable_table[rand_indxs, 'seq_files'])
  }))
  #seq_files <- sample(seq_files, 3)
  # Total advertised size of the selection, converted to GB below
  stated_size <- sum(as.numeric(downloadable_table[
    downloadable_table[['seq_files']] %in% seq_files, 'filesizes']))
  (stated_size <- stated_size/1E9)
  # Download them
  for (i in seq_along(seq_files)) {
    fl <- seq_files[[i]]
    stat_i <- paste0(i, '/', length(seq_files))
    cat_line('... ', char(fl), ' (', stat(stat_i), ')')
    # TODO: move overwrite to here
    success <- file_download(fl, overwrite = FALSE)
    if (!success) {
      # NOTE(review): any_fails is recorded but never read later in this
      # script — presumably a leftover from a fuller test harness.
      cat_line('... Hmmmm, unable to download that file.')
      any_fails <- TRUE
    }
  }
}
# Create db: parse every downloaded flatfile and load its records into SQL
if (to_build) {
  restez_connect()
  on.exit(restez_disconnect())
  dpth <- dwnld_path_get()
  seq_files <- list.files(path = dpth, pattern = '.seq.gz$')
  cat_line('Adding ', stat(length(seq_files)), ' file(s) to the database ...')
  for (i in seq_along(seq_files)) {
    seq_file <- seq_files[[i]]
    cat_line('... ', char(seq_file), '(', stat(i, '/', length(seq_files)), ')')
    flpth <- file.path(dpth, seq_file)
    records <- flatfile_read(flpth = flpth)
    if (length(records) > 0) {
      # No length filtering: accept every record regardless of sequence size
      df <- gb_df_generate(records = records, min_length = 0,
                           max_length = NULL)
      gb_sql_add(df = df)
    } else {
      # NOTE(review): read_errors is set but never consulted afterwards.
      read_errors <- TRUE
      cat_line('... ... Hmmmm... no records found in that file.')
    }
    add_rcrd_log(fl = seq_file)
  }
}
# Compare expected vs. observed disk usage of the downloads and the database.
if (to_download) {
  status_obj <- status_class()
  # Empirical conversion factors (hard-coded from a previous run; the
  # commented lines below show how they were derived).
  cnvfctr1 <- 0.2374462
  cnvfctr2 <- 6.066667
  # cnvfctr1 <- status_obj$Download$`N. GBs` / stated_size
  # cnvfctr2 <- status_obj$Database$`N. GBs` / status_obj$Download$`N. GBs`
  cat_line('Expected:')
  (estmd_downloads <- stated_size * cnvfctr1)
  (estmd_database <- estmd_downloads * cnvfctr2)
  (estmd_total <- estmd_downloads + estmd_database)
  cat_line('Observed:')
  (status_obj$Download$`N. GBs`)
  (status_obj$Database$`N. GBs`)
  (status_obj$Download$`N. GBs` + status_obj$Database$`N. GBs`)
}
# Query: spot-check the gb_*_get() retrieval functions on a random 10%
# sample of the ids in the freshly built database, in batches of <= 1000.
restez_connect()
on.exit(restez_disconnect())
ids <- list_db_ids(n = NULL)
ids <- sample(ids, round(length(ids) * .1))
# Process ids in non-overlapping batches of up to 1000.
# (The previous seq()-based index loop double-counted each batch boundary,
# dropped any ids past the last multiple of 1000, and failed outright for
# fewer than 1001 ids because 2:length(index) ran backwards.)
starts <- seq(1, length(ids), by = 1000)
for (i in seq_along(starts)) {
  print(i)
  id <- ids[starts[i]:min(starts[i] + 999, length(ids))]
  definition <- gb_definition_get(id)
  fasta <- gb_fasta_get(id)
  organism <- gb_organism_get(id)
  rcrd <- gb_record_get(id)
  vrsn <- gb_version_get(id)
}
cat('Completed.\n')
|
/other/random_file_tester.R
|
permissive
|
ropensci/restez
|
R
| false
| false
| 3,141
|
r
|
# Test download, building and querying random seq.files from GenBank
# Vars
n <- 2 # n per genbank type
wd <- '.'
restez_lib_path <- '~/Coding/restez'
to_download <- TRUE   # toggle: fetch fresh files from GenBank
to_build <- TRUE      # toggle: load downloaded files into the local db
# restez setup — load the dev copy of the package and point it at wd
devtools::load_all(restez_lib_path)
restez_path_set(wd)
if (to_download) {
  # delete any old files
  db_delete(everything = TRUE)
  restez_path_set(wd)
  # Identify random seq files
  latest_genbank_release_notes()
  downloadable_table <- identify_downloadable_files()
  colnames(downloadable_table)
  # Draw n random seq files from every GenBank division ('descripts' category)
  cats <- as.character(unique(downloadable_table[['descripts']]))
  seq_files <- unlist(lapply(X = cats, FUN = function(x) {
    indxs <- which(x == downloadable_table[['descripts']])
    rand_indxs <- sample(indxs, n)
    as.character(downloadable_table[rand_indxs, 'seq_files'])
  }))
  #seq_files <- sample(seq_files, 3)
  # Total advertised size of the selection, converted to GB below
  stated_size <- sum(as.numeric(downloadable_table[
    downloadable_table[['seq_files']] %in% seq_files, 'filesizes']))
  (stated_size <- stated_size/1E9)
  # Download them
  for (i in seq_along(seq_files)) {
    fl <- seq_files[[i]]
    stat_i <- paste0(i, '/', length(seq_files))
    cat_line('... ', char(fl), ' (', stat(stat_i), ')')
    # TODO: move overwrite to here
    success <- file_download(fl, overwrite = FALSE)
    if (!success) {
      # NOTE(review): any_fails is recorded but never read later in this script.
      cat_line('... Hmmmm, unable to download that file.')
      any_fails <- TRUE
    }
  }
}
# Create db: parse every downloaded flatfile and load its records into SQL
if (to_build) {
  restez_connect()
  on.exit(restez_disconnect())
  dpth <- dwnld_path_get()
  seq_files <- list.files(path = dpth, pattern = '.seq.gz$')
  cat_line('Adding ', stat(length(seq_files)), ' file(s) to the database ...')
  for (i in seq_along(seq_files)) {
    seq_file <- seq_files[[i]]
    cat_line('... ', char(seq_file), '(', stat(i, '/', length(seq_files)), ')')
    flpth <- file.path(dpth, seq_file)
    records <- flatfile_read(flpth = flpth)
    if (length(records) > 0) {
      # No length filtering: accept every record regardless of sequence size
      df <- gb_df_generate(records = records, min_length = 0,
                           max_length = NULL)
      gb_sql_add(df = df)
    } else {
      # NOTE(review): read_errors is set but never consulted afterwards.
      read_errors <- TRUE
      cat_line('... ... Hmmmm... no records found in that file.')
    }
    add_rcrd_log(fl = seq_file)
  }
}
# Compare expected vs. observed disk usage of the downloads and the database.
if (to_download) {
  status_obj <- status_class()
  # Empirical conversion factors hard-coded from a previous run (derivation
  # shown in the commented lines below).
  cnvfctr1 <- 0.2374462
  cnvfctr2 <- 6.066667
  # cnvfctr1 <- status_obj$Download$`N. GBs` / stated_size
  # cnvfctr2 <- status_obj$Database$`N. GBs` / status_obj$Download$`N. GBs`
  cat_line('Expected:')
  (estmd_downloads <- stated_size * cnvfctr1)
  (estmd_database <- estmd_downloads * cnvfctr2)
  (estmd_total <- estmd_downloads + estmd_database)
  cat_line('Observed:')
  (status_obj$Download$`N. GBs`)
  (status_obj$Database$`N. GBs`)
  (status_obj$Download$`N. GBs` + status_obj$Database$`N. GBs`)
}
# Query: spot-check the gb_*_get() retrieval functions on a random 10%
# sample of the ids in the freshly built database, in batches of <= 1000.
restez_connect()
on.exit(restez_disconnect())
ids <- list_db_ids(n = NULL)
ids <- sample(ids, round(length(ids) * .1))
# Process ids in non-overlapping batches of up to 1000.
# (The previous seq()-based index loop double-counted each batch boundary,
# dropped any ids past the last multiple of 1000, and failed outright for
# fewer than 1001 ids because 2:length(index) ran backwards.)
starts <- seq(1, length(ids), by = 1000)
for (i in seq_along(starts)) {
  print(i)
  id <- ids[starts[i]:min(starts[i] + 999, length(ids))]
  definition <- gb_definition_get(id)
  fasta <- gb_fasta_get(id)
  organism <- gb_organism_get(id)
  rcrd <- gb_record_get(id)
  vrsn <- gb_version_get(id)
}
cat('Completed.\n')
|
library(devtools)
# resubmit the source package to cran
# Choose the project directory by platform (Windows dev box vs. *nix home).
if (.Platform$OS.type == "windows") {
setwd("C:/Academia/Cornell/Research/Conditional Mean Independence")
} else {
setwd("~")
}
# Build and upload the CMDMeasure source package to CRAN (interactive).
submit_cran("CMDMeasure")
|
/dev/resubmit_package.R
|
no_license
|
zejin/CMDMeasure
|
R
| false
| false
| 217
|
r
|
library(devtools)
# resubmit the source package to cran
# Choose the project directory by platform (Windows dev box vs. *nix home).
if (.Platform$OS.type == "windows") {
setwd("C:/Academia/Cornell/Research/Conditional Mean Independence")
} else {
setwd("~")
}
# Build and upload the CMDMeasure source package to CRAN (interactive).
submit_cran("CMDMeasure")
|
## Customer reviews from IMDB on the movie "AQUAMAN" and performed wordcloud and Sentimental analysis on the same
library(rvest)
library(XML)
library(magrittr)
library(tm)
library(wordcloud)
library(wordcloud2)
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# IMDB Reviews: scrape 10 pages of user reviews for the movie.
aurl <- "https://www.imdb.com/title/tt1477834/reviews?ref_=tt_ov_rt"
IMDB_reviews <- NULL
for (i in 1:10){
  # paste(..., sep="=") appends "=<page>" to the base URL
  murl <- read_html(as.character(paste(aurl,i,sep="=")))
  rev <- murl %>%
    html_nodes(".show-more__control") %>%
    html_text()
  # Growing a vector in a loop; fine for 10 pages of reviews
  IMDB_reviews <- c(IMDB_reviews,rev)
}
length(IMDB_reviews)
# Persist the raw reviews, then read them back as a data frame
setwd("C:/PRATIK/Data Science/Assignment/Completed/Text Mining/IMDB Analysis")
write.table(IMDB_reviews,"Aquaman.txt",row.names = F)
Aquaman <- read.delim('Aquaman.txt')
str(Aquaman)
View(Aquaman)
# Build Corpus and DTM/TDM: turn the raw reviews into a cleaned tm corpus.
library(tm)
corpus <- Aquaman[-1,]   # drop the header row carried over from read.delim
head(corpus)
class(corpus)
corpus <- Corpus(VectorSource(corpus))
inspect(corpus[1:5])
# Clean the text: lower-case, then strip punctuation, digits and extra spaces.
corpus <- tm_map(corpus, tolower)
inspect(corpus[1:5])
corpus <- tm_map(corpus, removePunctuation) # To remove the Punctuation
inspect(corpus[1:5])
corpus <- tm_map(corpus, removeNumbers) # To remove the Number
inspect(corpus[1:5])
# FIX: the stripWhitespace result was previously assigned to an unused
# variable (corpus_clean) and silently discarded; apply it to the pipeline.
corpus <- tm_map(corpus, stripWhitespace)
inspect(corpus[1:5])
cleanset <- tm_map(corpus, removeWords, stopwords('english'))
inspect(cleanset[1:5])
# Strip URLs before further word removal
removeURL <- function(x) gsub('http[[:alnum:]]*','',x)
cleanset <- tm_map(cleanset, content_transformer(removeURL))
inspect(cleanset[1:5])
cleanset <- tm_map(cleanset, removeWords, c('can','film'))
# "can" is a common English word and "film" is implied by the corpus topic.
# we can pull back the word "can" if needed.
cleanset <- tm_map(cleanset, removeWords, c('movie','movies')) # Removing the word movie and movies on similar grounds - as unnecessary.
# FIX: merge the plural into the singular so 'character'/'characters' count
# as one term. (The previous direction — replacing 'character' with
# 'characters' — turned existing 'characters' into the bogus token
# 'characterss'.)
cleanset <- tm_map(cleanset, gsub, pattern = 'characters', replacement = 'character')
inspect(cleanset[1:5])
cleanset <- tm_map(cleanset, stripWhitespace)
inspect(cleanset[1:5])
#Term Document Matrix :
# Convert the unstructured data to structured data :
tdm <- TermDocumentMatrix(cleanset)
tdm
tdm <- as.matrix(tdm)
tdm[1:10,1:20]
# Bar Plot
w <- rowSums(tdm) # provides the no of times a particular word has been used.
w <- subset(w, w>= 50) # Keep only words used at least 50 times.
barplot(w, las = 2, col = rainbow(50))
# The word Aquaman,Like and James as the highest frequency. This implies that Movie Aquaman has got more reviews about the James and most of them liked the movie.
# Word Cloud :
library(wordcloud)
w <- sort(rowSums(tdm), decreasing = TRUE) # Sort words in decreasing order.
set.seed(123)   # reproducible cloud layout
wordcloud(words = names(w), freq = w,
max.words = 250,random.order = F,
min.freq = 3,
colors = brewer.pal(8, 'Dark2'),
scale = c(5,0.3),
rot.per = 0.6)
library(wordcloud2)
# wordcloud2 wants a (word, freq) data frame rather than a named vector
w <- data.frame(names(w),w)
colnames(w) <- c('word','freq')
wordcloud2(w,size = 0.8, shape = 'triangle', rotateRatio = 0.5, minSize = 1)
# lettercloud: draw the cloud in the shape of the letter 'A'
# NOTE(review): frequency(5) here looks like a misuse of stats::frequency —
# verify the intended letterCloud() argument.
letterCloud(w,word = 'A',frequency(5), size=1)
# Sentiment Analysis for tweets:
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# Read File: re-read the saved reviews, dropping the header row again
IMDB_reviews <- read.delim('Aquaman.TXT')
reviews <- as.character(IMDB_reviews[-1,])
class(reviews)
# Obtain Sentiment scores (NRC emotion lexicon: 8 emotions + pos/neg)
s <- get_nrc_sentiment(reviews)
head(s)
reviews[4]
get_nrc_sentiment('splendid')
# Splendid has one Joy and one positive
get_nrc_sentiment('no words') #1 Anger and 1 Negative
# barplot of total counts per emotion across all reviews
barplot(colSums(s), las = 2.5, col = rainbow(10),ylab = 'Count',main= 'Sentiment scores for IMDB Reviewsfor Aquaman')
|
/IMDB.R
|
no_license
|
pratiksawant24/Excelr-Assignments
|
R
| false
| false
| 4,073
|
r
|
## Customer reviews from IMDB on the movie "AQUAMAN" and performed wordcloud and Sentimental analysis on the same
library(rvest)
library(XML)
library(magrittr)
library(tm)
library(wordcloud)
library(wordcloud2)
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# IMDB Reviews: scrape 10 pages of user reviews for the movie.
aurl <- "https://www.imdb.com/title/tt1477834/reviews?ref_=tt_ov_rt"
IMDB_reviews <- NULL
for (i in 1:10){
  # paste(..., sep="=") appends "=<page>" to the base URL
  murl <- read_html(as.character(paste(aurl,i,sep="=")))
  rev <- murl %>%
    html_nodes(".show-more__control") %>%
    html_text()
  IMDB_reviews <- c(IMDB_reviews,rev)
}
length(IMDB_reviews)
# Persist the raw reviews, then read them back as a data frame
setwd("C:/PRATIK/Data Science/Assignment/Completed/Text Mining/IMDB Analysis")
write.table(IMDB_reviews,"Aquaman.txt",row.names = F)
Aquaman <- read.delim('Aquaman.txt')
str(Aquaman)
View(Aquaman)
# Build Corpus and DTM/TDM
library(tm)
corpus <- Aquaman[-1,]   # drop the header row carried over from read.delim
head(corpus)
class(corpus)
corpus <- Corpus(VectorSource(corpus))
inspect(corpus[1:5])
# Clean the text
corpus <- tm_map(corpus,tolower)
inspect(corpus[1:5])
corpus <- tm_map(corpus,removePunctuation) # To remove the Punctuation
inspect(corpus[1:5])
corpus <- tm_map(corpus,removeNumbers) # To remove the Number
inspect(corpus[1:5])
# NOTE(review): corpus_clean is assigned but never used downstream, so this
# whitespace stripping is silently discarded — confirm intent.
corpus_clean<-tm_map(corpus,stripWhitespace)
inspect(corpus[1:5])
cleanset<-tm_map(corpus,removeWords, stopwords('english'))
inspect(cleanset[1:5])
# Strip URLs before further word removal
removeURL <- function(x) gsub('http[[:alnum:]]*','',x)
cleanset <- tm_map(cleanset, content_transformer(removeURL))
inspect(cleanset[1:5])
cleanset<-tm_map(cleanset,removeWords, c('can','film'))
# "can" is a common English word and "film" is implied by the corpus topic.
# we can pull back the word "can" if needed.
cleanset<-tm_map(cleanset,removeWords, c('movie','movies')) # Removing the word movie and movies on similar grounds - as unnecessary.
# NOTE(review): replacing 'character' with 'characters' turns existing
# 'characters' into 'characterss'; merging plural into singular would be safer.
cleanset <- tm_map(cleanset, gsub,pattern = 'character', replacement = 'characters')
inspect(cleanset[1:5])
cleanset <- tm_map(cleanset,stripWhitespace)
inspect(cleanset[1:5])
#Term Document Matrix :
# Convert the unstructured data to structured data :
tdm <- TermDocumentMatrix(cleanset)
tdm
tdm <- as.matrix(tdm)
tdm[1:10,1:20]
# Bar Plot
w <- rowSums(tdm) # provides the no of times a particular word has been used.
w <- subset(w, w>= 50) # Keep only words used at least 50 times.
barplot(w, las = 2, col = rainbow(50))
# The word Aquaman,Like and James as the highest frequency. This implies that Movie Aquaman has got more reviews about the James and most of them liked the movie.
# Word Cloud :
library(wordcloud)
w <- sort(rowSums(tdm), decreasing = TRUE) # Sort words in decreasing order.
set.seed(123)   # reproducible cloud layout
wordcloud(words = names(w), freq = w,
max.words = 250,random.order = F,
min.freq = 3,
colors = brewer.pal(8, 'Dark2'),
scale = c(5,0.3),
rot.per = 0.6)
library(wordcloud2)
# wordcloud2 wants a (word, freq) data frame rather than a named vector
w <- data.frame(names(w),w)
colnames(w) <- c('word','freq')
wordcloud2(w,size = 0.8, shape = 'triangle', rotateRatio = 0.5, minSize = 1)
# lettercloud: draw the cloud in the shape of the letter 'A'
letterCloud(w,word = 'A',frequency(5), size=1)
# Sentiment Analysis for tweets:
library(syuzhet)
library(lubridate)
library(ggplot2)
library(scales)
library(reshape2)
library(dplyr)
# Read File: re-read the saved reviews, dropping the header row again
IMDB_reviews <- read.delim('Aquaman.TXT')
reviews <- as.character(IMDB_reviews[-1,])
class(reviews)
# Obtain Sentiment scores (NRC emotion lexicon: 8 emotions + pos/neg)
s <- get_nrc_sentiment(reviews)
head(s)
reviews[4]
get_nrc_sentiment('splendid')
# Splendid has one Joy and one positive
get_nrc_sentiment('no words') #1 Anger and 1 Negative
# barplot of total counts per emotion across all reviews
barplot(colSums(s), las = 2.5, col = rainbow(10),ylab = 'Count',main= 'Sentiment scores for IMDB Reviewsfor Aquaman')
|
# Auto-generated bootstrap-replicate results (seed 273): model fit summary
# statistics and fitted coefficients, to be source()'d by downstream analysis.
seed <- 273
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
# Interval boundaries (presumably seconds; recv doubles from 56 s to ~2 years)
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null and residual deviance with their degrees of freedom
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 224861.9574970215
df.resid <- 35402
df <- 165
# Fitted coefficient vector (length matches df = 165)
coefs <- c(6.486155048655423, 6.081292936907802, 5.914483970169756, 5.288654609885637, 5.121940163395935, 4.8949692769305475, 4.826978053129348, 4.606036457185742, 4.359994014517566, 4.278827227129895, 4.3408977624492024, 4.181824610339704, 4.001482676256818, 3.9870569929934554, 3.746089564210102, 3.534317195082676, 3.2560095556052455, 2.960362845371825, 2.5149922459905496, 2.045250093439653, 1.6693160207696565, 0.9047881725672514, 0.997361547532587, 0.7460690289419128, 0.6036050403180498, -1.069580104102183, -0.3213029675855193, 0.9965797653805248, 1.2029564684587042, -1.077360699970919, -2.3785180170757263, -2.0404765625435326, -0.2500039092846394, 0.7390028824882291, 1.3133677261041412, -1.1165402710261718, -0.5724526687013826, -1.8544354181703935, 0.10688838069654867, -1.3125527292751191, 1.046138037683145, 0.7262726618173525, -0.5763955633881628, -2.1839847865579194, -1.1811554122742882, -0.7267651206945985, -1.2319716517965937, 0.3739221340271025, 3.334625265406106e-2, -0.603374784319179, 0.19539220974241597, 0.7838893763706083, -2.6792701978413214, 1.6371496924615738, 0.778999289647117, 1.0495242428940739, -1.9207855678876258, 0.1290228876333876, -0.45833549541202623, 1.3038242324697062, 1.2032434442693034, 0.9029862911180795, -1.8619064391315527, -0.5229295940767382, -0.8421641905695528, 0.5103891866530953, 0.561595414449155, -0.5665926864783497, -1.1273145361635317, -0.5569463593731021, -2.065712842977486, -3.290661299526122e-2, 0.5117749704742182, 1.0575362641666761, 0.682997142940741, -1.003707727545621, -1.4845419697481732, -1.4903913452337108, 0.12498118700909788, 0.7497036852097821, 1.2025505339651972, 8.38552530575871e-2, 0.27319129397233066, -1.8472520437732332, -0.473723216343913, 0.41175928446350224, 1.229087093855848, 0.27304008921367734, 1.0188459794439977, -1.6888399629582704, 0.4764592494922771, 0.7316449259168081, 0.7733841575018368, 0.3691319002525878, 0.18141247484518044, 1.4321463068181501, -0.9280472469878053, 0.3637568367254318, 
-0.3166965277404483, 9.222861771078948e-2, 0.45546907352582255, -0.5833705613510493, 0.8003910677760192, -1.4908879079000459e-2, 0.5617505777551088, 0.828129637731366, 1.133404188689521, -1.2797569065102938, -0.21273923513978765, -0.816691520817463, 0.4115786699490818, 0.5419275018497706, 1.669353304268939, -0.4565728023628269, -0.36964684337969994, -0.7456369084634157, 0.7691384609963544, -0.36495336859973965, 0.49925457817382046, 0.5826611488340596, -0.47635387681806207, -0.46611895078055504, -0.7490039047910264, -0.3130787299060497, 0.45728086073375446, 0.8487539102945555, -0.10820685563022762, 0.896875528908897, -0.45019463290209516, -0.45442800698080277, 0.28847406489160593, 0.9880116553692947, 0.9403294868446098, 0.5957876622432725, -8.908915327232955e-2, 1.095243934997213, -0.3675840855613586, 0.9455274781292918, 0.7430512040983904, 0.8877761452544611, 0.7313458153673134, -0.810255431192085, -0.9242689407501417, 0.7591646196761602, 0.241079086081833, 0.5222879127246892, -0.4486645337595534, -0.5437648085523862, -2.103957370360871, 1.1755135387476396, 0.14160947001103802, 1.2173439228042942, -0.32979847833801884, -0.14058244689010355, -6.176943272928126e-3, -1.9093432918314337, -0.8700620366511488, 0.9560865267481379, 1.1607200290958228, -8.687569616669788e-2, 1.6274740899670384, -0.5470710962268294, -0.19431960300935622, 1.8731372133888532e-2, 1.2210829912643943)
|
/analysis/boot/boot273.R
|
no_license
|
patperry/interaction-proc
|
R
| false
| false
| 3,743
|
r
|
# Auto-generated bootstrap-replicate results (seed 273): model fit summary
# statistics and fitted coefficients, to be source()'d by downstream analysis.
seed <- 273
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
# Interval boundaries (presumably seconds; recv doubles from 56 s to ~2 years)
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null and residual deviance with their degrees of freedom
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 224861.9574970215
df.resid <- 35402
df <- 165
# Fitted coefficient vector (length matches df = 165)
coefs <- c(6.486155048655423, 6.081292936907802, 5.914483970169756, 5.288654609885637, 5.121940163395935, 4.8949692769305475, 4.826978053129348, 4.606036457185742, 4.359994014517566, 4.278827227129895, 4.3408977624492024, 4.181824610339704, 4.001482676256818, 3.9870569929934554, 3.746089564210102, 3.534317195082676, 3.2560095556052455, 2.960362845371825, 2.5149922459905496, 2.045250093439653, 1.6693160207696565, 0.9047881725672514, 0.997361547532587, 0.7460690289419128, 0.6036050403180498, -1.069580104102183, -0.3213029675855193, 0.9965797653805248, 1.2029564684587042, -1.077360699970919, -2.3785180170757263, -2.0404765625435326, -0.2500039092846394, 0.7390028824882291, 1.3133677261041412, -1.1165402710261718, -0.5724526687013826, -1.8544354181703935, 0.10688838069654867, -1.3125527292751191, 1.046138037683145, 0.7262726618173525, -0.5763955633881628, -2.1839847865579194, -1.1811554122742882, -0.7267651206945985, -1.2319716517965937, 0.3739221340271025, 3.334625265406106e-2, -0.603374784319179, 0.19539220974241597, 0.7838893763706083, -2.6792701978413214, 1.6371496924615738, 0.778999289647117, 1.0495242428940739, -1.9207855678876258, 0.1290228876333876, -0.45833549541202623, 1.3038242324697062, 1.2032434442693034, 0.9029862911180795, -1.8619064391315527, -0.5229295940767382, -0.8421641905695528, 0.5103891866530953, 0.561595414449155, -0.5665926864783497, -1.1273145361635317, -0.5569463593731021, -2.065712842977486, -3.290661299526122e-2, 0.5117749704742182, 1.0575362641666761, 0.682997142940741, -1.003707727545621, -1.4845419697481732, -1.4903913452337108, 0.12498118700909788, 0.7497036852097821, 1.2025505339651972, 8.38552530575871e-2, 0.27319129397233066, -1.8472520437732332, -0.473723216343913, 0.41175928446350224, 1.229087093855848, 0.27304008921367734, 1.0188459794439977, -1.6888399629582704, 0.4764592494922771, 0.7316449259168081, 0.7733841575018368, 0.3691319002525878, 0.18141247484518044, 1.4321463068181501, -0.9280472469878053, 0.3637568367254318, 
-0.3166965277404483, 9.222861771078948e-2, 0.45546907352582255, -0.5833705613510493, 0.8003910677760192, -1.4908879079000459e-2, 0.5617505777551088, 0.828129637731366, 1.133404188689521, -1.2797569065102938, -0.21273923513978765, -0.816691520817463, 0.4115786699490818, 0.5419275018497706, 1.669353304268939, -0.4565728023628269, -0.36964684337969994, -0.7456369084634157, 0.7691384609963544, -0.36495336859973965, 0.49925457817382046, 0.5826611488340596, -0.47635387681806207, -0.46611895078055504, -0.7490039047910264, -0.3130787299060497, 0.45728086073375446, 0.8487539102945555, -0.10820685563022762, 0.896875528908897, -0.45019463290209516, -0.45442800698080277, 0.28847406489160593, 0.9880116553692947, 0.9403294868446098, 0.5957876622432725, -8.908915327232955e-2, 1.095243934997213, -0.3675840855613586, 0.9455274781292918, 0.7430512040983904, 0.8877761452544611, 0.7313458153673134, -0.810255431192085, -0.9242689407501417, 0.7591646196761602, 0.241079086081833, 0.5222879127246892, -0.4486645337595534, -0.5437648085523862, -2.103957370360871, 1.1755135387476396, 0.14160947001103802, 1.2173439228042942, -0.32979847833801884, -0.14058244689010355, -6.176943272928126e-3, -1.9093432918314337, -0.8700620366511488, 0.9560865267481379, 1.1607200290958228, -8.687569616669788e-2, 1.6274740899670384, -0.5470710962268294, -0.19431960300935622, 1.8731372133888532e-2, 1.2210829912643943)
|
\name{lmImpute}
\alias{lmImpute}
\title{Locally Weighted Linear Imputation}
\usage{
lmImpute(x, ...)
}
\arguments{
\item{x}{a data frame or matrix where each row represents
a different record}
\item{...}{additional parameters passed to locfit}
}
\description{
Fill missing values in a column by running a locally
weighted least squares regression against the row number.
Good for large data (large number of records)
}
\examples{
x = matrix(rnorm(100),10,10)
x.missing = x > 1
x[x.missing] = NA
lmImpute(x)
}
|
/man/lmImpute.Rd
|
no_license
|
JMoon1/imputation
|
R
| false
| false
| 531
|
rd
|
\name{lmImpute}
\alias{lmImpute}
\title{Locally Weighted Linear Imputation}
\usage{
lmImpute(x, ...)
}
\arguments{
\item{x}{a data frame or matrix where each row represents
a different record}
\item{...}{additional parameters passed to locfit}
}
\description{
Fill missing values in a column by running a locally
weighted least squares regression against the row number.
Well suited to large data sets (i.e. a large number of records).
}
\examples{
x = matrix(rnorm(100),10,10)
x.missing = x > 1
x[x.missing] = NA
lmImpute(x)
}
|
setMethod("[", signature(x="CCProfile", i="index"),
    function(x, i)
    {
        ## Subset a CCProfile by sequence name or by numeric index, keeping
        ## the per-sequence predictions and discriminant values aligned
        ## with the selected profiles.
        if (is.character(i))
        {
            ## Name-based subsetting requires complete, non-NA names
            seqNames <- names(x@sequences)

            if (length(seqNames) < 1 || any(is.na(seqNames)))
                stop("missing names for subsetting\n")

            matched <- which(seqNames %in% i)

            if (length(matched) != length(i))
                stop("invalid names specified\n")

            i <- matched
        }
        else
        {
            ## Numeric subsetting: indices must be all negative (exclusion)
            ## or all positive and within range
            if (all(i < 0))
                i <- (1:nrow(x@profiles))[i]
            else if (min(i) < 1)
                stop("subset indices must be all positive or",
                     " all negative\n")

            if (min(i) < 1 || max(i) > nrow(x@profiles))
                stop("column subset must be between 1 and number",
                     " of sequences\n")
        }

        ## Delegate profile subsetting to the parent class, then carry the
        ## prediction/discriminant slots along
        out <- as(as(x, "PredictionProfile")[i], "CCProfile")
        out@pred <- x@pred[i]
        out@disc <- x@disc[i]
        out
    }
)
|
/R/access-methods.R
|
no_license
|
UBod/procoil
|
R
| false
| false
| 1,106
|
r
|
## Subset operator for CCProfile objects: accepts sequence names or numeric
## indices and keeps the pred/disc slots aligned with the selected profiles.
setMethod("[", signature(x="CCProfile", i="index"),
function(x, i)
{
if (is.character(i))
{
## name-based subsetting requires complete, non-NA sequence names
if (length(names(x@sequences)) < 1 ||
any(is.na(names(x@sequences))))
stop("missing names for subsetting\n")
else
i1 <- which(names(x@sequences) %in% i)
if (length(i) != length(i1))
stop("invalid names specified\n")
i <- i1
}
else
{
## convert negative subset
if (all(i < 0))
i <- (1:nrow(x@profiles))[i]
else
{
## mixing positive and negative indices is rejected
if (min(i) < 1)
stop("subset indices must be all positive or",
" all negative\n")
}
if (min(i) < 1 || max(i) > nrow(x@profiles))
stop("column subset must be between 1 and number",
" of sequences\n")
}
## delegate profile subsetting to the parent class, then carry the
## prediction/discriminant slots along
out <- as(as(x, "PredictionProfile")[i], "CCProfile")
out@pred <- x@pred[i]
out@disc <- x@disc[i]
out
}
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.