blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
167cb2b092467d43f7e052544e6f2b464b4d54cb
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/fms_list_discovered_resources.Rd
|
a022878ebd2cb7a70232199abb7768c80fc2d462
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,580
|
rd
|
fms_list_discovered_resources.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fms_operations.R
\name{fms_list_discovered_resources}
\alias{fms_list_discovered_resources}
\title{Returns an array of resources in the organization's accounts that are
available to be associated with a resource set}
\usage{
fms_list_discovered_resources(
MemberAccountIds,
ResourceType,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{MemberAccountIds}{[required] The Amazon Web Services account IDs to discover resources in. Only one
account is supported per request. The account must be a member of your
organization.}
\item{ResourceType}{[required] The type of resources to discover.}
\item{MaxResults}{The maximum number of objects that you want Firewall Manager to return
for this request. If more objects are available, in the response,
Firewall Manager provides a \code{NextToken} value that you can use in a
subsequent call to get the next batch of objects.}
\item{NextToken}{When you request a list of objects with a \code{MaxResults} setting, if the
number of objects that are still available for retrieval exceeds the
maximum you requested, Firewall Manager returns a \code{NextToken} value in
the response. To retrieve the next batch of objects, use the token
returned from the prior request in your next request.}
}
\description{
Returns an array of resources in the organization's accounts that are available to be associated with a resource set.
See \url{https://www.paws-r-sdk.com/docs/fms_list_discovered_resources/} for full documentation.
}
\keyword{internal}
|
8a24b5f15cd2af7a6de2fd6a369edd6930626e27
|
d28e1990f31441eb2e776f4b468bc2af65316164
|
/load_distribution.R
|
4430df47990f9400e9f6ee043a8ee1e2e57dc24b
|
[] |
no_license
|
balasubramaniansa/Representative-load-curves
|
af0215c8b052a4daa33e4e251172e3751cdb7a57
|
56910445cebca7df0143eee41abc595f20f81554
|
refs/heads/main
| 2023-07-15T18:38:40.150919
| 2021-09-01T16:34:19
| 2021-09-01T16:34:19
| 402,124,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,816
|
r
|
load_distribution.R
|
# Jan-Feb DATA --------------------------------------------------------------
# Simulates one row of hourly load per day for January/February.
# Each day is split into five intra-day blocks; the block's total load is a
# Monte-Carlo mean from a fitted distribution (scaled by the block's hour
# count) and is spread across the block's hours using fixed weights.
start_day <- 1
number_of_days <- 1

# Per-block definition: hour span (1-based, inclusive), sampler for the
# block's hourly load distribution, and the per-hour share of the block
# total (each weight vector sums to ~1).
jan_feb_blocks <- list(
  list(hours = c(1, 7),
       sampler = function() rlnorm(1000, 8.937236326, 0.051609621),
       weights = c(0.143374884193962, 0.138450220919929, 0.1372890624046,
                   0.140429333238312, 0.143133416546381, 0.145498210325547,
                   0.151824872371269)),
  list(hours = c(8, 13),
       sampler = function() rlogis(1000, 8495.74059, 205.09864),
       weights = c(0.171589347767214, 0.17115794270613, 0.169322755565148,
                   0.166764944572984, 0.162485367011891, 0.158679642376633)),
  list(hours = c(14, 18),
       sampler = function() rlogis(1000, 7432.84711, 293.89481),
       weights = c(0.206742703307131, 0.204614301656572, 0.201546574295495,
                   0.197065859469591, 0.190030561271211)),
  list(hours = c(19, 21),
       sampler = function() rnorm(1000, 8121.90251, 419.86198),
       weights = c(0.339429532508897, 0.336830902124829, 0.323739565366274)),
  list(hours = c(22, 24),
       sampler = function() rlnorm(1000, 8.938002023, 0.052501351),
       weights = c(0.333260391404824, 0.3394397478411, 0.327299860754076))
)

# Empty output frame: one row per day, columns Day + Hour1..Hour24.
jan_feb_data <- as.data.frame(seq(from = start_day, by = 1, length = number_of_days))
hour_names <- paste0("Hour", 1:24)
jan_feb_data[, hour_names] <- NA
names(jan_feb_data) <- c("Day", hour_names)

# Fill one block's hour columns for every day. RNG draw order matches the
# original script (all days of block 1, then all days of block 2, ...), so
# output is reproducible for a given seed.
fill_block <- function(frame, block, n_days) {
  n_hours <- block$hours[2] - block$hours[1] + 1
  for (day in seq_len(n_days)) {
    block_total <- mean(block$sampler()) * n_hours
    # +1 offsets past the leading "Day" column.
    frame[day, (block$hours[1] + 1):(block$hours[2] + 1)] <- block$weights * block_total
  }
  frame
}

for (blk in jan_feb_blocks) {
  jan_feb_data <- fill_block(jan_feb_data, blk, number_of_days)
}

write.csv(jan_feb_data, file='C:\\Users\\bala\\Desktop\\jan_feb_data.csv')
|
4ad824482270b07330c5bd32508ec0531bec88f2
|
c916bd4e5574beceade8b369291955c9b8bf1a3c
|
/rankall.R
|
7a3f51e4ea5689fb9df74195d7bff681f0265f00
|
[] |
no_license
|
vsunjeev93/R-programming-Assignment-3
|
d4de253c5fa8773e10aced47559a399b9709a4ae
|
3090d6ac17d37874ab13fec64ea14feda153f862
|
refs/heads/master
| 2020-03-30T22:50:55.898229
| 2018-10-05T06:40:38
| 2018-10-05T06:40:38
| 151,680,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,982
|
r
|
rankall.R
|
# Rank hospitals within every state for a given outcome.
#
# Args:
#   out: one of 'heart attack', 'heart failure', 'pneumonia'.
#   num: 'best', 'worst', or a numeric rank (1 = lowest mortality).
# Returns:
#   A named list (one entry per state) of hospital names; NA where the
#   requested rank exceeds the number of ranked hospitals in a state.
rankall <- function(out, num='best') {
  # Rates are read as character and coerced when ordering; non-numeric
  # entries ("Not Available") become NA with a coercion warning, as before.
  outcome <- read.csv('outcome-of-care-measures.csv', colClasses='character')
  sf <- split(outcome, outcome$State)
  # Map the outcome name to its mortality-rate column; the three previously
  # duplicated order_* helpers collapse into one parameterized closure.
  rate_column <- switch(out,
    'heart attack'  = 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack',
    'heart failure' = 'Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure',
    'pneumonia'     = 'Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia',
    stop("invalid outcome: ", out))
  # Order one state's hospitals by rate (ties broken by name), then drop
  # rows whose rate is not numeric.
  order_by_rate <- function(s) {
    f <- as.data.frame(s)
    ord <- f[order(as.numeric(f[[rate_column]]), f$Hospital.Name), ]
    ord[!is.na(as.numeric(ord[[rate_column]])), ]
  }
  ordered_frame <- lapply(sf, order_by_rate)
  # Pick the requested rank from an ordered state frame.
  # BUG FIX: the original tested class(num)=='numeric', which is FALSE for
  # integer input (class "integer") and left the result undefined.
  pick <- function(ordf) {
    if (identical(num, 'best')) {
      ordf$Hospital.Name[1]
    } else if (identical(num, 'worst')) {
      tail(ordf$Hospital.Name, 1)
    } else if (is.numeric(num)) {
      ordf$Hospital.Name[num]
    } else {
      stop("num must be 'best', 'worst', or a number")
    }
  }
  return(lapply(ordered_frame, pick))
}
|
c4a8a7b3bfe9dc8ad44f210016dcfd834ff3a5db
|
a6590ff24de52a38946bd66a8a03e8212dc79fc1
|
/book.r
|
d0bb894931f749f9639607eb37cdb4c158880f29
|
[] |
no_license
|
therao096/recommendation-
|
a4b4ce8d5aa0677d8e0cb9f7000744c54abd344c
|
eb76cc6940587e36bec48e9264437e06a944b3ff
|
refs/heads/master
| 2022-11-06T07:11:50.015599
| 2020-06-20T12:11:29
| 2020-06-20T12:11:29
| 273,701,083
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 719
|
r
|
book.r
|
## Book recommendation demo: explores the ratings table, then builds a
## popularity-based recommender and a user-based collaborative-filtering
## recommender with recommenderlab.
setwd("F:\\EXCEL R\\ASSIGNMENTS\\RECCOMENDATION SYSTEM")
library(recommenderlab)
library(caTools)
library(Matrix)

# Load and eyeball the raw ratings.
ratings_raw <- read.csv("book.csv")
View(ratings_raw)
class(ratings_raw)
str(ratings_raw)
table(ratings_raw$Book.Title)
hist(ratings_raw$Book.Rating)

# Coerce the ratings into recommenderlab's sparse rating matrix.
ratings_matrix <- as(ratings_raw, 'realRatingMatrix')
View(ratings_matrix)

### popularity based
popular_model <- Recommender(ratings_matrix, method='POPULAR')
popular_top5 <- predict(popular_model, ratings_matrix[415], n=5)
as(popular_top5, "list")

# User-based collaborative filtering; predictions for users 410-414.
ubcf_model <- Recommender(ratings_matrix, method="UBCF")
ubcf_top5 <- predict(ubcf_model, ratings_matrix[410:414], n=5)
as(ubcf_top5, "list")
|
bd6b1f395a81dfdb2819c67e936d07c1fbfbf15c
|
6690e529b8ce8823febfac75e1696dfdb2d960a2
|
/sev_score_fun.R
|
10940746e3923b071c881a04a6a4b7d8089a933d
|
[] |
no_license
|
jjwill2/sfcw_sed_exposure_analysis
|
a54285c2d30839bdd93238f00bdfe3ff55ec57c9
|
98aea7c89f1fd0e91a555c379c6dbf6791b23308
|
refs/heads/main
| 2023-01-23T23:41:06.992465
| 2020-11-24T00:42:21
| 2020-11-24T00:42:21
| 315,470,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,036
|
r
|
sev_score_fun.R
|
# function to run checks on data ----------------------------------------------------------------------
# write separate functions for each check, then combine into one master check function
# check1 - dataframe column names
# Check 1: verify the data frame's column names contain siteid, date and
# ssc, in that relative order. Returns the verdict string (caller prints).
check1 <- function(dataframe) {
  df_colnames <- colnames(dataframe)
  # BUG FIX: the original used colnames(data.frame(c("siteid","date","ssc"))),
  # which yields ONE auto-mangled column name (the character vector becomes a
  # single column), so the check could never succeed.
  required_colnames <- c("siteid", "date", "ssc")
  # "siteid-.*date-.*ssc" matched against the dash-joined actual names lets
  # extra columns appear between the required ones.
  ifelse(grepl(paste(required_colnames, collapse = "-.*"), paste(df_colnames, collapse = "-")),
         "Check 1 (field names): requirements met",
         "Check 1 (field names): warning! column names siteid, date, and ssc are required")
}
# check 2 - data frame column format
# check 2 - data frame column format
# Prints a per-column verdict comparing each column's class against the
# expected classes for (siteid, date, ssc); returns the verdict vector.
check2 <- function(dataframe) {
  # Expected class for each required field, in column order.
  expected_class <- c(siteid = "character", date = "Date", ssc = "numeric")
  observed_class <- sapply(dataframe, class)
  print("Check 2: field formats")
  # Positional, elementwise comparison: column i vs expected class i.
  verdict <- ifelse(observed_class == expected_class,
                    "field format ok",
                    "check field format")
  print(verdict)
}
# check 3 - missing dates
# check 3 - missing dates
# Warns when any calendar day between the first and last observation has
# no row; gaps break consecutive-day runs and distort z scores downstream.
check3 <- function(dataframe) {
  # Every calendar day spanned by the data.
  expected_days <- seq.Date(from = min(dataframe$date), to = max(dataframe$date), by = 1)
  # Days in that span with no observation.
  absent <- expected_days[!expected_days %in% dataframe$date]
  print(ifelse(length(absent) != 0,
               "Check 3 (dates): warning! there are missing days, which may cause inaccurate z scores",
               "Check 3 (dates): requirments met; no missing days"))
  if (length(absent) != 0) {
    print(absent)
  }
}
# check 4 - multiple ssc results for a date
# Check 4: flag (date, siteid) pairs with more than one ssc result; the
# downstream z-score calculation expects one result per day.
check4 <- function(dataframe) {
  multiples <-
    dataframe %>%
    group_by(date, siteid) %>%
    summarise(count = n()) %>%
    filter(count > 1)
  # BUG FIX: the original tested max(multiples$count) > 1, which on an empty
  # result evaluates max(integer(0)) -> -Inf and emits a spurious warning.
  has_multiples <- nrow(multiples) > 0
  print(ifelse(has_multiples,
               "Check 4: warning! some days have multiple results, the daily maximum will be used to calculte z",
               "Check 4: requirements met; 1 result per day"))
  if (has_multiples) {print(multiples)}
}
# combine all into master format check function
# combine all into master format check function
# Runs checks 1-4 on `dataframe` in order. check1 returns its verdict
# string (printed here); checks 2-4 print their own output as they run.
check_format <-function(dataframe) {
print(check1(dataframe))
check2(dataframe)
check3(dataframe)
check4(dataframe)
}
# function to calculate max daily z score for specified site-------------------------------------------
library(data.table) # for group by & mutate
# Compute the maximum daily z (severity) score for one site.
#
# For every integer ssc threshold between the site's min and max ssc, runs of
# consecutive days above the threshold are found (data.table::rleid) and a z
# score is computed from run duration (hours) and threshold magnitude. The
# result keeps, per date, the row with the largest z (ties broken by the
# smallest threshold).
#
# Args:
#   dataframe: long-format data with siteid, date, ssc columns (see checks).
#   site: siteid value to analyze.
# Returns: a tibble/data frame with one row per date including maxz_value.
max_daily_z <-function(dataframe, site) {
# filter for site
filtered <-
dataframe %>%
filter(siteid == site)
# min and max ssc in dataset
min_ssc <-ifelse(floor(min(filtered$ssc)) == 0, 1, floor(min(filtered$ssc))) # min possible = 1
max_ssc <-floor(max(filtered$ssc))
# for each ssc value in range, create df with # consec days > threshold
dflist = list()
for(i in min_ssc:max_ssc) {
df <-
filtered %>%
arrange(date) %>%
# rleid() gives each run of consecutive (ssc > i) / (ssc <= i) rows its
# own group id, so row_number() inside a group counts consecutive days.
group_by(ID = data.table::rleid(ssc > i)) %>%
mutate(threshold_value = i,
consec_days = if_else(ssc > i, row_number(), 0L)) %>%
# Severity model: z from exposure duration (days * 24 h) and threshold.
mutate(z = 1.0642 + (0.6068 * log(consec_days * 24)) + (0.7384 * log(i)))
df$z[df$z == -Inf] <-0 # if consec_days = 0, set z to 0
# NOTE: indexing dflist by i leaves NULL slots below min_ssc; do.call(rbind)
# silently drops them, so the combine below still works.
dflist[[i]] <- df # add df name to list
}
# combine all dfs together
combined <-do.call(rbind, dflist)
# get max z by date
maxz <-
combined %>%
group_by(date) %>%
mutate(maxz_value = max(z)) %>%
ungroup() %>%
filter(z == maxz_value) %>%
arrange(date) %>%
group_by(date) %>%
mutate(min_threshold = min(threshold_value)) %>%
ungroup() %>%
filter(threshold_value == min_threshold) %>% # keep only one row when consec_days = 0
arrange(date) %>%
select(-min_threshold)
}
# function to plot max daily z time series----------------------------------------------------------------
# requires df created by max_daily_z as input
# Plot the max daily z time series for one site.
# Args: df - output of max_daily_z() (needs date, maxz_value, siteid).
# Returns: a ggplot object. Dashed lines at z = 4 and z = 9 mark severity
# bands; y axis is clipped to [0, 14].
# NOTE(review): theme_few() comes from the ggthemes package — confirm it is
# loaded by the calling script.
plot_max_daily_z <-function(df) {
zplot <-
df %>%
ggplot(aes(x = date, y = maxz_value)) +
geom_line() +
theme_few() +
geom_hline(yintercept = 4, linetype = "dashed") +
geom_hline(yintercept = 9, linetype = "dashed") +
ylim(0, 14) +
labs(y = "maximum daily z score") +
# ggtitle(df$siteid) uses the column; ggplot titles with the first value.
ggtitle(df$siteid)
zplot
}
# function to plot time series for all (threshold, duration, max z)----------------------------------------
library(reshape2) # for melt
# Faceted time series of ssc, threshold, duration and max z for one site.
# Args: df - output of max_daily_z(). Melts to long format (reshape2) and
# draws one free-y facet per parameter. Returns a ggplot object.
plotall <-function(df) {
allplot <-
df %>%
select(date, ssc, threshold_value, consec_days, maxz_value) %>%
# Human-readable facet labels.
rename(`duration (days)` = consec_days, `ssc threshold (mg/l)` = threshold_value,
`max z score` = maxz_value, `ssc (mg/l)` = ssc) %>%
melt(id.vars = "date", variable.name = "parameter", value.name = "value") %>%
ggplot(aes(x = date, y = value)) +
geom_line() +
facet_wrap(~parameter, scales = "free_y", ncol = 1) +
theme_few() +
ggtitle(df$siteid)
allplot
}
|
3b129f75b17ea9291c6d55e16a277d18a64b2293
|
1267e61a042649f0640e773c6d588976ae69fc25
|
/R/lasagna.R
|
de51765eeb35bb3a6b7d8ac0da4ab7dccbe829c6
|
[] |
no_license
|
rshiny/lasagnar
|
168d98e176b275b0a12b353595c6741150bf0529
|
32ae55500fffcf17a1dd01a348ea19c204a168b4
|
refs/heads/master
| 2021-05-31T09:09:42.345502
| 2016-02-03T18:57:39
| 2016-02-03T18:57:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,899
|
r
|
lasagna.R
|
#' @name lasagna
#' @title Lasagna Function
#' @description Generic Lasagna Function
#' @param X a matrix: each row a subject, each column a time/location across subjects; preferably rownames are the ids.
#' @param col colors. See "Escaping RGB Land" and the vignette for the colorspace package
#' for followable advice on colors.
#' @param image.axes (logical) TRUE invokes the default axes from image().
#' FALSE invokes best guess, and better control using \code{xaxis/yaxis}.
#' @param xaxis Use best guess x-axis if \code{image.axes = FALSE}
#' @param yaxis Use best guess y-axis if \code{image.axes = FALSE}
#' @param main as in plot(), the text of the title. Defaults to "(A) Initial Lasagna Plot"
#' @param main.adj the value for `adj` in title(). Defaults to 0, which is left-aligned.
#' @param xlab Label for x-axis
#' @param ylab Label for y-axis
#' @param cex.axis the cex.axis value if `axes` is FALSE.
#' @param gridlines (logical) Add gridlines to the plot. Useful if image is not
#' too big.
#' @param legend (logical) defaults to FALSE. TRUE invokes image.plot() from fields package instead of image() and thus displays a legend.
#' @param ... Additional stuff to be passed to \code{image}
#' @importFrom fields image.plot
#' @export
# NOTE(review): the default for `col` calls rainbow_hcl(), which lives in the
# colorspace package but is not covered by @importFrom above — confirm the
# package NAMESPACE imports it, otherwise the default errors at call time.
lasagna <- function(X, col=rainbow_hcl(length(unique(c(X)))),
image.axes = FALSE,
xaxis = TRUE,
yaxis = TRUE,
main="(A) Initial Lasagna Plot", main.adj=0,
xlab = NULL,
ylab = NULL,
cex.axis=1.75,
gridlines=FALSE,
legend=FALSE, ...){
if (!legend) {
## THE crucial piece: everything else is just window dressing
# t(X)[, nrow(X):1] flips so subjects read top-to-bottom as in X.
image(t(X)[,(nrow(X):1)], col=col, axes=image.axes,
useRaster = TRUE, ... )
} else{
# image.plot() draws the same raster plus a color legend strip.
image.plot(t(X)[,(nrow(X):1)], col=col, axes=image.axes,
useRaster = TRUE, ... )
}
## box border
box()
title(main, adj = main.adj, xlab = xlab, ylab = ylab)
# Hand-rolled axes: image() uses [0,1] coordinates, so ticks are spaced at
# 1/(n-1) and labeled with column indices / reversed row names.
if(!image.axes){
if (xaxis){
axis(side = 1, at = seq(0,1,1/(ncol(X)-1)),
labels = 1:ncol(X),
cex.axis=cex.axis,
tck = 0, mgp=c(0,.50,0))
}
if (yaxis){
axis(side = 2, at = seq(0,1,1/(nrow(X)-1)),
labels = rev(rownames(X)),
las=1,
cex.axis=cex.axis,
tck=0, mgp=c(0,.2,0))
}
}
if(gridlines){
## next two axis() calls add grid-lines:
# tck=1 stretches each tick across the full plot, drawing cell borders.
axis(1,
seq( 1/(ncol(X)-1)*0.5, 1 - 1/(ncol(X)-1)*0.5,
length=(ncol(X)-1)),##c(1/10,3/10,5/10,7/10,9/10),
labels=NA,
tck=1,
lty=1,
col="black")
axis(2,
seq( 1/(nrow(X)-1)*0.5, 1 - 1/(nrow(X)-1)*0.5,
length=(nrow(X)-1)),##c(1/6,3/6,5/6),
labels = NA,
tck=1,
lty=1,
col="black")
}
}
|
16c934ea1c4027a8e98b0816cbb264ddf8e5102d
|
09dce3d5ee039ab4e33a38724101a54db6ce3ad8
|
/plot3.R
|
1f57c5ffcf800e311665cc089a9933d90dd3d678
|
[] |
no_license
|
dekaufman/ExData_Plotting1
|
0c8ca5d88c6da0ba6ebdeff8c812f0f4d3e43e6b
|
34db1cc3b50d65684f3aeaf074b0916263737ce5
|
refs/heads/master
| 2021-01-16T20:41:05.666823
| 2015-02-08T23:55:10
| 2015-02-08T23:55:10
| 30,505,541
| 0
| 0
| null | 2015-02-08T20:59:00
| 2015-02-08T20:58:59
| null |
UTF-8
|
R
| false
| false
| 720
|
r
|
plot3.R
|
# Plot 3 of the Exploratory Data Analysis assignment: energy sub-metering
# for two days of the household power consumption dataset, saved as PNG.
# Semicolon-separated; "?" marks missing values; short column aliases.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", col.names = c("dt", "tm", "active", "reactive", "vltge", "amps", "sub1Wh", "sub2Wh", "sub3wh"), na.strings = c("?"), nrows = 2100000)
# Keep only 2007-02-01 and 2007-02-02 (dates stored as d/m/yyyy strings).
dataTwoday <- subset(data, data$dt == "1/2/2007" | data$dt == "2/2/2007")
png(filename = "plot3.png", width = 480, height = 480)
# type = "n" sets up the frame; the three lines() calls overlay the series.
with(dataTwoday, plot(sub1Wh, type = "n", ylab = "Energy sub metering"))
with(dataTwoday, lines(sub1Wh, pch = "."))
with(dataTwoday, lines(sub2Wh, pch = ".", col = "Red"))
with(dataTwoday, lines(sub3wh, pch = ".", col = "Blue"))
# NOTE(review): pch only uses the first character of "-------", so the legend
# shows "-" markers; lty = 1 would draw proper line samples — confirm intent.
legend("topright", col=c("black", "red", "blue"), pch="-------", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
598b08f86aa70a486516fc70ac3f4afc55b23e39
|
fc0d01525a10db22d9d8b2076bb3fb4a6d556df2
|
/oldcode.R
|
b24d7cdad16d89b4d4907d4c5832a0e7eddf2bcc
|
[] |
no_license
|
adderan/multitask
|
11f9c9278c4fb787e17da4a39ccfc5c67c274692
|
12cbe5afdce2089eaa4fe59f3af3f5faa379330b
|
refs/heads/master
| 2020-04-29T02:26:40.179606
| 2015-01-20T01:01:09
| 2015-01-20T01:01:09
| 15,994,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,857
|
r
|
oldcode.R
|
#no longer used
# Run the Ando-style joint minimization over auxiliary prediction problems.
#
# Args:
#   X.all: p x n feature matrix; column i is auxiliary problem i's input.
#   y.egfr: list/vector of auxiliary targets, one per problem.
#   y.sens: labeled sensitivity targets. NOTE(review): unused in this body.
# Returns: Theta.hat, the shared structure matrix from joint.min().
run.ando <- function(X.all, y.egfr, y.sens) {
m <- length(y.egfr)
nproblems <- dim(X.all)[[2]] #number of prediction problems to use. The i-th prediction problem is the i-th sample in X.all
p <- dim(X.all)[[1]]
print(m)
X.list <- list()
y.list <- list()
#build list of X.all repeated m times
for(i in 1:nproblems) {
# NOTE(review): x.all.i is computed but never used; the manual element-wise
# copy below produces the same p x 1 column matrix.
x.all.i <- matrix(unlist(X.all[,i]))
x.all.i.manual <- matrix(0, p, 1)
for(k in 1:p) {
x.all.i.manual[k,1] <- X.all[k,i]
}
X.list[[i]] <- x.all.i.manual
y.list[[i]] <- y.egfr[[i]]
#print(dim(x.all.i))
}
# Pulls joint.min() and ando.test() into scope from sibling scripts.
source("multitask.R")
source("ando.R")
min.out <- joint.min(X.list, y.list, 5, 2)
W.hat <- min.out[["W.hat"]]
V.hat <- min.out[["V.hat"]]
cat("Dimension of W.hat: ", dim(W.hat), "\n")
cat("Dimention of V.hat: ", dim(V.hat), "\n")
Theta.hat <- min.out[["Theta.hat"]]
cat("Dimension of Theta.hat: ", dim(Theta.hat), "\n")
cat("Length of X.list: ", length(X.list), "\n")
cat("Length of y.list: ", length(y.list), "\n");
cat("Dimention of X.list[[1]]: ", dim(X.list[[1]]), "\n")
cat("Dimension of X.list[[2]]: ", dim(X.list[[2]]), "\n")
# ando.test() expects n x f matrices, hence the transposes.
t.X.list <- lapply(X.list, t)
#t.y.list <- lapply(y.list, t)
cat("Dimension of t.X.list[[1]]: ", dim(t.X.list[[1]]), "\n")
mydata <- list(X.list = t.X.list, y.list = y.list) #ando test code uses NxF data matrices
ando.test(mydata, W.hat, V.hat, Theta.hat)
#make new weight vector for labeled data
#list(W.hat = W.hat, V.hat = V.hat, Theta.hat = Theta.hat)
Theta.hat
}
# Compare Ando semi-supervised learning (with/without unlabeled data)
# against ridge regression on Erlotinib sensitivity.
# NOTE: this definition is shadowed by the second gray.analysis() defined
# immediately below in this file; only the later one survives sourcing.
#
# Args: filename - .RData file expected to provide X, Y and Xu.
gray.analysis <- function(filename) {
source("multitask.R")
source("ando.R")
load(filename)
max.features <- 500
y.sens <- Y[,"Erlotinib"]
na.removed <- remove.missing.data(X, y.sens) #remove data that is missing from y list
X <- na.removed$X
y.sens <- na.removed$y
labeled.samples <- length(y.sens)
#test data
X.test <- X[1:max.features, 21:42]
X.unlabeled <- Xu[1:max.features,]
y.test <- y.sens[21:42]
#training data
X.train <- X[1:max.features, 1:21]
y.train <- y.sens[1:21]
#data for ando algorithm
X.unlabeled <- X.unlabeled[, 1:100]
X.all <- cbind(X.train, X.unlabeled)
# Auxiliary task: predict EGFR expression (row named "EGFR") from the rest.
y.egfr <- X.all["EGFR",]
# Row 195 holds the target feature; drop it from Ando inputs so the
# auxiliary problem is not trivially solvable.
X.all <- X.all[-195,]
X.train <- X.train[-195,] #remove erlotinib from training data for Ando, but not ridge regression.
X.test <- X.test[-195,]
#Theta.hat <- run.ando(X.all, y.egfr, y.train) #ando joint predictor
#cat("Dimension of Theta.hat: ", dim(Theta.hat), "\n")
#cat("Dimension of X: ", dim(X.train.ando), "\n")
cat("dimension of X.all: ", dim(X.all), "\n")
cat("length of y.egfr", length(y.egfr), "\n")
cat("dimension of x.train: ", dim(X.train), "\n");
cat("dimension of y.train: ", length(y.train), "\n");
# Task 1 = auxiliary EGFR problem (with unlabeled data), task 2 = labeled.
ando.X <- list(X.all, X.train);
ando.Y <- list(y.egfr, y.train);
joint.min.out <- joint.min(ando.X, ando.Y, 10, 3)
Theta.hat <- joint.min.out[["Theta.hat"]]
W.hat <- joint.min.out$W.hat
V.hat <- joint.min.out$V.hat
#data <- list(X.list = ando.X, y.list = ando.Y)
#ando.test(t(ando.X), ando.Y, W.hat, V.hat, Theta.hat)
#labeled.predictor <- optimize.labeled(X.train, y.train, Theta.hat, 1)
#W.hat <- labeled.predictor[["W.hat"]]
#V.hat <- labeled.predictor[["V.hat"]]
# Column 2 of W.hat/V.hat corresponds to the labeled task.
cat("Ando objective with unlabeled data: ", ando.objective(X.test, y.test, W.hat[,2], V.hat[,2], Theta.hat), "\n")
#ando without unlabeled data
# NOTE(review): y.egfr here was computed from X.all (incl. unlabeled cols)
# but is paired with X.train — lengths may not match; confirm intent.
ando.X <- list(X.train, X.train)
ando.Y <- list(y.egfr, y.train)
joint.min.out <- joint.min(ando.X, ando.Y, 10, 3)
Theta.hat <- joint.min.out$Theta.hat
W.hat <- joint.min.out$W.hat
V.hat <- joint.min.out$V.hat
cat("Ando objective without unlabeled data: ", ando.objective(X.test, y.test, W.hat[,2], V.hat[,2], Theta.hat), "\n")
# Ridge regression baseline on the same train/test split.
beta <- ridge.regression(X.train, y.train, 1)
cat("Training Ridge Objective: ", ridge.objective(X.train, y.train, beta), "\n")
cat("Ridge objective: ", ridge.objective(X.test, y.test, beta), "\n")
#print(dim(ando_predictions))
}
# Sweep Ando semi-supervised runs across drug/gene auxiliary pairings and
# increasing numbers of unlabeled samples, printing the held-out objective
# for each configuration.
# NOTE: this definition overrides the earlier gray.analysis() above; R keeps
# only the last definition when the file is sourced.
#
# Args: filename - .RData file expected to provide X, Y and Xu.
gray.analysis <- function(filename) {
  source("multitask.R")
  source("ando.R")
  load(filename)
  # Baselines: labeled task only (the single-task list passed to joint.min).
  data <- create.ando.data(X, Y, Xu, 0, 500, 10, "Erlotinib", 195)
  joint.min.out <- joint.min(list(data$ando.X[[2]]), list(data$ando.Y[[2]]), 10, 3)
  W.hat <- joint.min.out$W.hat
  V.hat <- joint.min.out$V.hat
  Theta.hat <- joint.min.out$Theta.hat
  cat("Erlotinib, No unlabeled data: ", ando.objective(data$test$X.test, data$test$y.test, W.hat[,1], V.hat[,1], Theta.hat), "\n")
  data <- create.ando.data(X, Y, Xu, 0, 500, 10, "Lapatinib", 195)
  joint.min.out <- joint.min(list(data$ando.X[[2]]), list(data$ando.Y[[2]]), 10, 3)
  cat("Lapatinib, No unlabeled data: " , ando.objective(data$test$X.test, data$test$y.test, joint.min.out$W.hat[,1], joint.min.out$V.hat[,1], joint.min.out$Theta.hat), "\n")
  # Sweep: `points` steps of `stepsize` additional unlabeled samples each.
  points <- 2
  stepsize <- 50
  for(i in 1:points) {
    erlotinibegfr <- create.ando.data(X, Y, Xu, 0, 500, stepsize*i, "Erlotinib", 195)
    lapatinibegfr <- create.ando.data(X, Y, Xu, 0, 500, stepsize*i, "Lapatinib", 195)
    lapatiniberbb2 <- create.ando.data(X, Y, Xu, 2500, 3000, stepsize*i, "Lapatinib", 2722)
    erlot.control <- create.ando.data(X, Y, Xu, 0, 500, stepsize*i, "Erlotinib", 100)
    # BUG FIX: the original argument list read `0. 500` — a syntax error
    # (numeric literal `0.` followed by `500`) that prevented this whole
    # file from parsing. Matches the sibling calls above.
    lapat.control <- create.ando.data(X, Y, Xu, 0, 500, stepsize*i, "Lapatinib", 100)
    joint.min.erlotinibegfr <- joint.min(erlotinibegfr$ando.X, erlotinibegfr$ando.Y, 10, 3)
    joint.min.lapatinibegfr <- joint.min(lapatinibegfr$ando.X, lapatinibegfr$ando.Y, 10, 3)
    joint.min.lapatiniberbb2 <- joint.min(lapatiniberbb2$ando.X, lapatiniberbb2$ando.Y, 10, 3)
    joint.min.erlot.control <- joint.min(erlot.control$ando.X, erlot.control$ando.Y, 10, 3)
    joint.min.lapat.control <- joint.min(lapat.control$ando.X, lapat.control$ando.Y, 10, 3)
    # Column 2 of W.hat/V.hat corresponds to the labeled (second) task.
    cat("Erlotinib/EGFR with ", i*stepsize, " unlabeled samples: ", ando.objective(erlotinibegfr$test$X.test, erlotinibegfr$test$y.test, joint.min.erlotinibegfr$W.hat[,2], joint.min.erlotinibegfr$V.hat[,2], joint.min.erlotinibegfr$Theta.hat), "\n")
    cat("Lapatinib/EGFR with ", i*stepsize, " unlabeled samples: ", ando.objective(lapatinibegfr$test$X.test, lapatinibegfr$test$y.test, joint.min.lapatinibegfr$W.hat[,2], joint.min.lapatinibegfr$V.hat[,2], joint.min.lapatinibegfr$Theta.hat), "\n")
    cat("Lapatinib/ERBB2 with ", i*stepsize, " unlabeled samples: ", ando.objective(lapatiniberbb2$test$X.test, lapatiniberbb2$test$y.test, joint.min.lapatiniberbb2$W.hat[,2], joint.min.lapatiniberbb2$V.hat[,2], joint.min.lapatiniberbb2$Theta.hat), "\n")
    cat("Erlotinib/ABCC12 (control) with ", i*stepsize, " unlabeled samples: ", ando.objective(erlot.control$test$X.test, erlot.control$test$y.test, joint.min.erlot.control$W.hat[,2], joint.min.erlot.control$V.hat[,2], joint.min.erlot.control$Theta.hat), "\n")
    cat("Lapatinib/ABCC12 (control) with ", i*stepsize, " unlabeled samples: ", ando.objective(lapat.control$test$X.test, lapat.control$test$y.test, joint.min.lapat.control$W.hat[,2], joint.min.lapat.control$V.hat[,2], joint.min.lapat.control$Theta.hat), "\n")
    cat("\n")
  }
}
#generate some test data.
# Args (the original trailing comment mislabeled these):
#   f: total number of features per vector.
#   g: number of active (true-predictor) features / prediction problems.
#   n: number of feature vectors (columns).
#   epsilon: sd of additive gaussian noise on the generated targets.
# Returns: list(x, y) with x an f x n matrix and y a g x n matrix whose
# first g/2 rows are noisy linear functions of x.
generate.data <- function(f, g, n, epsilon) { #f is total # of features in data, g is number of prediction problems, g is number of true predictors, n is number of feature vectors
x <- matrix(runif(f*n), f, n)
# Unit-norm weight vector: first g entries active, rest zero.
w <- norm(c(rep(1, times=g), rep(0, times=(f-g))))
y <- matrix(runif(g*n), g, n)
# BUG FIX: the original called normalize_matrix(), which is undefined;
# the helper defined below is normalize.matrix().
x <- normalize.matrix(x)
y <- normalize.matrix(y)
# Overwrite the first half of the problems with true linear targets.
for(k in 1:(g/2)) {
for(i in 1:n) {
y[k, i] <- w%*%(x[, i]) + rnorm(1, mean = 0, sd = epsilon)
}
}
return(list(x, y))
}
#normalizes each feature vector in a matrix of feature vectors. Only useful for normalizing the test data.
# Scales every column of `matr` to unit L2 norm via norm() (defined below).
normalize.matrix <- function(matr) {
# seq_len() instead of 1:n: a zero-column matrix becomes a no-op rather
# than an error (1:0 would iterate over c(1, 0)).
for(i in seq_len(dim(matr)[[2]])) {
matr[ ,i] <- norm(matr[ ,i])
}
return(matr)
}
#normalizes a vector to one.
# Returns x scaled to unit L2 norm. NOTE: shadows base::norm() for the
# rest of this file, matching the original code's intent.
norm <- function(x) {
  # Sum of squares, then scale by the reciprocal square root.
  squared_length <- sum(x * x)
  x * (1 / sqrt(squared_length))
}
# Multi-start numeric minimization over w: run `restarts` independent local
# searches (w.find.local.min.numeric) and keep the candidate with the
# lowest objective value seen. Returns the zero vector if nothing beats
# the initial sentinel objective.
w.gradient.descent <- function(x, y, v, theta, restarts, step, iters) {
  n_features <- dim(x)[[1]]
  best_w <- c(rep(0, n_features))
  # Sentinel: any real candidate objective below this replaces best_w.
  best_obj <- 10000000
  for (attempt in 1:restarts) {
    candidate <- w.find.local.min.numeric(x, y, v, theta, step, iters)
    candidate_obj <- f.obj1(t(x), y, candidate, v, theta)
    if (candidate_obj < best_obj) {
      best_w <- candidate
      best_obj <- candidate_obj
    }
  }
  return(best_w)
}
# One local search over w from a random start: repeatedly step along the
# negative numeric gradient, halving `step` whenever the objective rises.
# Only improving steps count toward `iters`.
# NOTE(review): `step` doubles as both the finite-difference width passed
# to dgdw.numeric() and the backtracking knob — confirm that coupling is
# intentional; also the gradient is applied unscaled (no learning rate).
w.find.local.min.numeric <- function(x, y, v, theta, step, iters) {
f <- dim(x)[[1]]
# Random start in [-1, 1]^f.
w <- c(runif(f, -1, 1))
gmin <- f.obj1(t(x), y, w, v, theta)
iter <- 0
while(iter < iters) {
wstep <- w - dgdw.numeric(x, y, w, v, theta, step)
gnew <- f.obj1(t(x), y, wstep, v, theta)
if(gnew > gmin) {
# Objective got worse: shrink the step and retry (iter not advanced).
step <- step /2;
}
else {
w <- wstep;
gmin <- gnew;
iter <- iter + 1;
}
}
return(w)
}
# Forward-difference numeric gradient of the objective f.obj1 with respect
# to w: perturb one coordinate at a time by `step` and difference.
dgdw.numeric <- function(x, y, w, v, theta, step) {
  n_features <- dim(x)[[1]]
  gradient <- c(rep(0, n_features))
  for (coord in 1:n_features) {
    perturbed <- w
    perturbed[coord] <- perturbed[coord] + step
    # Baseline and perturbed objective values for this coordinate.
    base_value <- f.obj1(t(x), y, w, v, theta)
    bumped_value <- f.obj1(t(x), y, perturbed, v, theta)
    gradient[coord] <- (bumped_value - base_value) / step
  }
  return(gradient)
}
# Produce a (partially random) W.hat/V.hat/Theta.hat triple for testing the
# Ando pipeline: theta is random (the QR orthonormalization is commented
# out), u starts at zero, W.hat comes from one w.min.matrix() pass.
# Args: data - list with X.list (p x n matrices) and y.list; h - shared
# structure dimension.
ando.test.output <- function(data, h) {
#X <- data$X.list
#y <- data$y.list
m <- length(data$X.list)
p <- dim(data$X.list[[1]])[[1]]
print(m)
print(p)
u <- matrix(0, p, m)
# NOTE(review): `mat` and `lambda` are only used by the commented-out QR /
# theta_min paths below; with those disabled they are dead locals.
mat <- array(runif(h*p), dim=c(h,p))
#theta <- qr.Q(qr(mat))
#print(t(theta) %*% theta)
#print(dim(theta))
lambda <- c(rep(1, times=m))
theta <- matrix(runif(h*p), h, p)
# u is all zeros here, so V.hat is a zero matrix on the first pass.
V.hat <- theta %*% u
W.hat <- w.min.matrix(data$X.list, data$y.list, u, theta)
u <- W.hat + t(theta) %*% V.hat
#theta <- theta_min(u, p, m, h, lambda)
#V.hat <- theta %*% u
#W.hat <- w_min_matrix(data$X.list, data$y.list, u, theta)
return(list(W.hat = W.hat, V.hat = V.hat, Theta.hat = theta))
}
#calculate derivative of vT*theta*x for v minimization
# d/dv[q] of (v^T theta x) = (theta x)[q], i.e. the dot product of x with
# row q of theta. Vectorized replacement for the original scalar loop;
# theta[q, seq_along(x)] matches the loop's k in 1:length(x) indexing.
dhdVq <- function(x, q, theta) {
d <- sum(x * theta[q, seq_along(x)])
return(d)
}
#compute minimum v given w and theta. Used to minimize v for target problem.
# Builds an h x h linear system (coefficients) and an h-vector (values) and
# solves for v. NOTE(review): two suspected defects in the accumulations —
# confirm against the derivation before relying on this function:
#   1) inside the k loop, `coefficient.qji <- coefficient.qj + ...` reads
#      the OUTER accumulator instead of coefficient.qji, so the inner sum
#      is discarded on every k but the last;
#   2) in the values loop, `value.q <- (...)` overwrites instead of summing
#      over i, so only the last sample contributes.
v.min <- function(x, y, w, theta) {
p <- dim(x)[[1]]
n <- dim(x)[[2]]
h <- dim(theta)[[1]]
cat("p = ", p, "\n")
print(x)
coefficients <- matrix(0, h, h)
for(q in 1:h) {
#coefficients.q <- c(length=h)
for(j in 1:h) {
coefficient.qj <- 0
for(i in 1:n) {
coefficient.qji <- 0
for(k in 1:p) {
# See NOTE(review) item 1 above: reads coefficient.qj, not .qji.
coefficient.qji <- coefficient.qj + theta[j,k]*x[k,i]
}
coefficient.qji <- coefficient.qj * dhdVq(x[,i], q, theta)
coefficient.qj <- coefficient.qj + coefficient.qji
}
if(q==j) {
# Ridge-like diagonal boost.
coefficient.qj <- coefficient.qj + 2
}
coefficients[q,j] <- coefficient.qj
}
#sum <- c(length=p)
#for(i in 1:n) {
# sum <- sum + x[,i]*dhdVq(x[,i], q, theta)
#}
#coefficients[q,] <- theta %*% sum
#coefficients[q,q] <- coefficients[q,q] + 2
}
values <- c()
for(q in 1:h) {
value.q <- 0
for(i in 1:n) {
# See NOTE(review) item 2 above: assignment overwrites each iteration.
value.q <- (y[i] - t(w)%*%x[,i])*dhdVq(x[,i], q, theta)
}
values[q] <- value.q
}
# Solve the h x h system for v (returned invisibly as the last value).
v <- solve(coefficients, values)
}
# Multi-start gradient descent over v with per-restart backtracking:
# from each random start, step along the negative numeric gradient,
# halving stepsize whenever the objective worsens. Keeps the best v
# across restarts and prints progress.
v.gradient.descent <- function (x, y, w, theta, iters, restarts) {
n <- dim(x)[[2]]
p <- dim(x)[[1]]
h <- dim(theta)[[1]]
vmin <- c()
# Sentinel objective; any real result below this wins.
objmin <- 1000000
for(r in 1:restarts) {
# Random start in [-1, 1]^h.
v <- c(runif(h, -1, 1))
vobj <- f.obj1(t(x), y, w, v, theta)
stepsize <- 0.1
iter <- 0
while(iter < iters) {
# stepsize is also the finite-difference width for the gradient.
vtest <- v - v.prime.numeric(x, y, w, v, theta, stepsize)
vtestobj <- f.obj1(t(x), y, w, vtest, theta)
if(vtestobj > vobj) stepsize <- stepsize/2
else {
v <- vtest
vobj <- f.obj1(t(x), y, w, v, theta)
iter <- iter + 1
}
#print(v.prime.numeric(x, y, w, v, theta, 0.000001))
print(vtestobj)
}
finalobj <- f.obj1(t(x), y, w, v, theta)
print(finalobj)
if(finalobj < objmin) {
vmin <- v
objmin <- finalobj
}
}
cat("Final ando training objective: ", objmin, "\n")
vmin
}
# Forward-difference numerical gradient of f.obj1 with respect to v.
# Args:
#   x, y  - p x n feature matrix and length-n response
#   w     - fixed predictor weight vector
#   v     - point at which the gradient is evaluated (length h)
#   theta - h x p shared-structure matrix
#   step  - finite-difference step size
# Returns: length-h gradient estimate.
v.prime.numeric <- function(x, y, w, v, theta, step) {
  source("ando.R")  # loads f.obj1 (side effect kept from the original)
  h <- dim(theta)[[1]]
  vprime <- numeric(h)
  # Hoisted out of the loop: the base objective does not depend on j.
  vobj <- f.obj1(t(x), y, w, v, theta)
  for (j in 1:h) {
    vstep <- v
    vstep[j] <- vstep[j] + step
    vstepobj <- f.obj1(t(x), y, w, vstep, theta)
    # BUG FIX: a forward difference is (f(v + step*e_j) - f(v)) / step.
    # The original divided by h (the dimension count), which is not a
    # derivative estimate and scales the gradient by step/h.
    vprime[j] <- (vstepobj - vobj) / step
  }
  return(vprime)
}
# Analytic gradient of the squared-error objective with respect to v:
# dJ/dv[q] = sum_i (w'x_i + v' theta x_i - y_i) * d(v' theta x_i)/dv[q],
# where the inner derivative is supplied by the sibling helper dhdVq.
# Args:
#   x, y  - p x n feature matrix and length-n response
#   w, v  - current weight vectors
#   theta - h x p shared-structure matrix
# Returns: numeric gradient vector of length(v).
v.prime <- function(x, y, w, v, theta) {
  # BUG FIX: the original initialised with c(length = length(v)), which
  # builds a length-1 vector whose single element is NAMED "length" and
  # holds the value length(v); the returned gradient then carried a stray
  # name on its first component. numeric() preallocates correctly.
  vprime <- numeric(length(v))
  n <- dim(x)[[2]]
  for (q in 1:length(v)) {
    vprime.q <- 0
    for (i in 1:n) {
      vprime.q <- vprime.q + (t(w) %*% x[,i] + t(v) %*% theta %*% x[,i] - y[[i]])*dhdVq(x[,i],q,theta)
    }
    vprime[q] <- vprime.q
  }
  vprime
}
# Alternating minimisation on the labeled data: repeatedly solve for w in
# closed form (w.min, defined elsewhere), then refine v by random-restart
# gradient descent. Initial v and w are random in [-1, 1]; the RNG call
# order (v first, then w) is part of the reproducible behavior.
# Args:
#   x     - f x n feature matrix
#   y     - length-n response
#   theta - h x p shared-structure matrix
#   iters - number of alternating (w, v) rounds
# Returns: list(W.hat, V.hat) holding the final iterates.
# Side effects: prints h and the dimensions of theta/x for debugging.
optimize.labeled <- function(x, y, theta, iters) {
f <- dim(x)[[1]]
n <- dim(x)[[2]]
# n is computed but unused below (kept for byte-identity).
h <- dim(theta)[[1]]
print(h)
v <- c(runif(h, -1, 1))
w <- c(runif(f, -1, 1))
cat("Dimension of theta: ", dim(theta), "\n")
cat("Dimension of x: ", dim(x), "\n")
cat("Length of v:", h, "\n")
for(i in 1:iters) {
w <- w.min(x, y, v, theta)
# Inner loop: 100 accepted steps per restart, 20 restarts (hard-coded).
v <- v.gradient.descent(x, y, w, theta, 100, 20)
}
list(W.hat = w, V.hat = v)
}
|
b67778bcd4cc7796f349651bb6f602db26d23f25
|
71c0c61fad07794e9d12c603115323b72942382d
|
/plot2.R
|
a0088fba8be5c77c3bbd05256ff5a64c3471f4b0
|
[] |
no_license
|
PaddyPadmanaban/ExData_Plotting1
|
1639b28cd97d0a1ab62b0603f1b50934dbf1288d
|
1b5ca3e01b8243c6ebeecc494e61d42cf1c311d1
|
refs/heads/master
| 2021-01-16T23:12:56.981480
| 2015-02-08T00:45:08
| 2015-02-08T00:45:08
| 30,226,967
| 1
| 0
| null | 2015-02-03T05:33:18
| 2015-02-03T05:33:18
| null |
UTF-8
|
R
| false
| false
| 744
|
r
|
plot2.R
|
## Plot 2 Project 1 Exp Data Analysis
## Read the required data in (Only the 2 dates of interest)
# NOTE(review): read.csv.sql() comes from the sqldf package, which must be
# loaded before this script runs (no library() call is visible here).
# The absolute Windows path makes the script machine-specific.
sel1 <- read.csv.sql("C:/Users/Padma/Documents/Coursera/ExpDataAnalysis/household_power_consumption.txt", sql = "select * from file where (Date = '1/2/2007' or date = '2/2/2007') ", sep = ";")
## Set up the plot
# type='n' draws an empty frame; Date+Time strings are parsed as d/m/Y H:M:S
plot(as.POSIXct(paste(sel1$Date, sel1$Time), format="%d/%m/%Y %H:%M:%S"),sel1$Global_active_power,type='n',xlab="", ylab="")
## Add labels
title(ylab="Global Active Power (Kilowatts)", xlab = "")
## Add the line
lines(as.POSIXct(paste(sel1$Date, sel1$Time),format="%d/%m/%Y %H:%M:%S"),sel1$Global_active_power )
## Create the png version
# dev.copy snapshots the current screen device into a PNG (default 480x480)
dev.copy(png,file="C:/Users/Padma/Documents/Coursera/ExpDataAnalysis/plot2.png")
dev.off()
|
46e4e256e1332952d0e476bbce7419a40d51a1d6
|
79ff729ee201e0f283c4ff5c16f23ebedc8e07e1
|
/plot4.R
|
8b16aa95b1710a2a4752766263fa73edd9e4d6d6
|
[] |
no_license
|
karthik-anba/Exploratory_Data_Analysis
|
46719de1f926fa5859e51e38c33bedacf0998e47
|
8c35588b0c5097daf2c4d876768f438ce1bf87b3
|
refs/heads/master
| 2021-05-28T01:16:21.030124
| 2014-11-23T21:15:08
| 2014-11-23T21:15:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
plot4.R
|
## Plot 4: 2x2 panel of household power consumption measurements for
## 2007-02-01 and 2007-02-02 (Electric Power Consumption dataset).
## Loading the data ("?" marks missing values in this dataset)
data <- read.table(file.choose(), header = TRUE, sep = ";", na.strings = "?")
str(data)
## FIX: the original attach()ed 'data' and then 'y' -- and never detached
## 'y', leaving it on the search path after the script and risking name
## masking. Explicit data$... / y$... references are used instead; the
## filtering result and every plot are unchanged.
y <- data[(data$Date == "1/2/2007" | data$Date == "2/2/2007"), ]
str(y)
## Build a proper datetime from the Date and Time string columns
z <- paste(y$Date, y$Time)
y$DateTime <- strptime(z, "%d/%m/%Y %H:%M:%S")
head(y)
y$Date <- as.Date(y$Date, format = "%d/%m/%Y")
str(y)
##Plot4
png(filename = "plot4.png",width = 480, height = 480,units = "px", bg = "transparent")
par(mfrow = c(2, 2))
#P1-LeftTop
plot(y$DateTime, y$Global_active_power,xlab = "", ylab = "Global Active Power", type = "l")
#P2-RightTop
plot(y$DateTime, y$Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
#P3-LeftBottom
plot(y$DateTime, y$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l", col = "black" )
lines(y$DateTime, y$Sub_metering_2, type = "l", col = "red")
lines(y$DateTime, y$Sub_metering_3, type = "l", col = "blue")
#P4-RightBottom
plot(y$DateTime, y$Global_reactive_power, col = "black", xlab = "datetime", ylab = "Global_reactive_power", type = "l")
dev.off()
|
aa6c7402a7ce8bc225bbf652837ee8823ffe112c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gap/examples/qqunif.Rd.R
|
d0b12b800e5515891ec9b15dbf4c86045b066e5e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
qqunif.Rd.R
|
# Auto-extracted example from the gap package's qqunif help page.
# The example body sits inside a "Not run" guard (##D lines), so only the
# library() call actually executes when this file is sourced.
library(gap)
### Name: qqunif
### Title: Q-Q plot for uniformly distributed random variable
### Aliases: qqunif
### Keywords: hplot distribution univar
### ** Examples
## Not run:
##D # Q-Q Plot for 1000 U(0,1) r.v., marking those <= 1e-5
##D u_obs <- runif(1000)
##D r <- qqunif(u_obs,pch=21,bg="blue",bty="n")
##D u_exp <- r$y
##D hits <- u_exp >= 2.30103
##D points(r$x[hits],u_exp[hits],pch=21,bg="green")
## End(Not run)
|
f034d96d844cb9fd6772c296d888081e0d6cb909
|
9c8962826b6125045ec4f93cab879bc26c9f9a59
|
/mse/11b-rebuilding-recovery-target-2022.R
|
0d1acfd16994fe637506f1b6ac74972b718d1565
|
[] |
no_license
|
pbs-assess/yelloweye-inside
|
c00c61e3764b48e17af3c10b7ddf7f5b5ff58d16
|
8ea4a374f3cee21e515a82627d87d44e67228814
|
refs/heads/master
| 2022-08-19T05:15:03.352305
| 2022-08-15T23:51:36
| 2022-08-15T23:51:36
| 219,026,971
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,306
|
r
|
11b-rebuilding-recovery-target-2022.R
|
# Toggle for French-language figure output; read throughout this script.
FRENCH <- FALSE
library(tidyverse)
library(rosettafish)
library(gfdlm)
if(FRENCH) {
options(french = TRUE)
}
# Operating-model scenario table; usage below implies columns scenario,
# scenario_human and scenario_type (assumption from usage -- confirm).
sc <- readRDS("mse/om/ye-scenarios2.rds")
# French display names for the six scenarios (runtime strings; keep as-is).
sc_french <- c(
"(1) Base",
"(2) Faibles prises",
"(3) Recrutement\népisodique",
"(4) Estimation de la\nsélectivité du RPFD",
"(A) Faible M ",
"(B) CV élevé\ndu RPFD")
# Swap in the French labels when producing French figures.
if (FRENCH) sc$scenario_human <- sc_french
# Pull one column from the file-level scenario table `sc`, restricted to
# rows whose scenario_type equals `type`. The returned vector is named by
# its own values (set_names with no extra arguments), which makes it
# convenient for purrr::map and ggplot faceting downstream.
get_filtered_scenario <- function(type, column) {
  matching <- filter(sc, scenario_type == type)
  values <- pull(matching, !!column)
  set_names(values)
}
# Self-named vectors of scenario ids and labels, plus the Reference /
# Robustness subsets used to group figures.
scenarios <- sc$scenario %>% set_names()
scenario_human <- sc$scenario_human %>% set_names()
scenarios_ref <- get_filtered_scenario("Reference", "scenario")
scenarios_ref_human <- get_filtered_scenario("Reference", "scenario_human")
scenarios_rob <- get_filtered_scenario("Robustness", "scenario")
scenarios_rob_human <- get_filtered_scenario("Robustness", "scenario_human")
# Read MSE objects --------------------------------------------------------------------
# targ_proj: for one MSE results object, compute the annual probability
# that B/BMSY meets or exceeds each candidate rebuilding target.
# Args:
#   x          - MSE object; uses slots @MPs, @B_BMSY (sim x MP x year)
#                and @OM$CurrentYr (per the accesses below).
#   y          - human-readable operating-model label stored in the output.
#   BMSYtarget - candidate targets as fractions of BMSY.
#   MP         - name of the management procedure to extract.
#   ymax       - number of projection years to summarise.
# Returns: long data frame with MP, Year, value (probability), OM,
#   target_num, and two factor encodings of the target for plotting
#   (`target` is a plotmath string parsed later by `parser`).
targ_proj <- function(x, y, BMSYtarget = seq(0.4, 1.6, 0.2), MP, ymax = 100) {
MPind <- match(MP, x@MPs)
lapply(BMSYtarget, function(xx) {
# The function(x) in apply() below shadows the MSE object `x`; its
# argument there is the vector of B/BMSY values across simulations.
x@B_BMSY[, MPind, 1:ymax, drop = FALSE] %>%
apply(c(2, 3), function(x) mean(x >= xx)) %>%
structure(dimnames = list(MP = x@MPs[MPind], Year = x@OM$CurrentYr[1] + 1:ymax)) %>%
reshape2::melt() %>%
mutate(OM = y,
target_num = xx,
target = factor(paste0(100 * xx, "~\"%\"~B[", en2fr("MSY", FRENCH), "]"),
levels = paste0(100 * BMSYtarget, "~\"%\"~B[", en2fr("MSY", FRENCH), "]")),
target2 = factor(paste0(100 * xx, "%\n", en2fr("BMSY", FRENCH)),
levels = paste0(100 * BMSYtarget, "%\n", en2fr("BMSY", FRENCH))))
}) %>% bind_rows()
}
# parser: turns a plotmath string into an expression; passed to ggplot
# scales as `labels = parser` so legend labels render as math.
parser <- function(x) parse(text = x)
# Load the per-scenario MSE result objects (FMSY projections and default).
mse_fmsyproj <- map(scenarios, ~ readRDS(paste0("mse/om/MSE_", .x, "_FMSYproj.rds")))
mse <- map(scenarios, ~ readRDS(paste0("mse/om/MSE_", .x, ".rds")))
# Rebuilding target --------------------------------------------------------------------
# Wrapper around ggplot2::ggsave() that routes output into the English or
# French figure directory depending on the file-level FRENCH flag.
# Extra arguments (width, height, ...) are forwarded to ggsave().
.ggsave <- function(filename, plot, ...) {
  prefix <- if (FRENCH) "figs_2022/fr/" else "figs_2022/"
  ggsave(paste0(prefix, filename), plot, ...)
}
# Annual probability of being above X% MSY with no fishing
pNFref <- Map(targ_proj, x = mse, y = scenario_human, MoreArgs = list(MP = "NFref")) %>% bind_rows()
# Annual probability of being above X% MSY with 15 t TAC
p15t <- Map(targ_proj, x = mse, y = scenario_human, MoreArgs = list(MP = "CC_15t")) %>% bind_rows()
# Annual probability of being above X% MSY with FMSY fishing
pFMSY <- Map(targ_proj, x = mse_fmsyproj, y = scenario_human, MoreArgs = list(MP = "MP_FMSY")) %>% bind_rows()
# Summary table of probabilities after 1.5 generations
# (2019 + 56 years; consistent with MGT = 38 used in cosewic_proj below)
# Display names for the three management procedures, EN or FR.
MPout <- c("NFref" = en2fr("No fishing", FRENCH, custom_terms = data.frame(english = "No fishing", french = "Aucun pêche")),
"CC_15t" = en2fr("CC_15t", FRENCH, custom_terms = data.frame(english = "CC_15t", french = "PC_15t")),
"MP_FMSY" = en2fr("FMSY", FRENCH))
# Faceted "tigure" (probability table) across all operating models.
tig <- local({
tt <- rbind(pNFref, p15t, pFMSY) %>% filter(Year == 2019 + 56) %>%
mutate(MP = MPout[match(MP, names(MPout))]) %>%
select(!"target" & !"Year")
OM_names <- unique(tt$OM)
tt_list <- lapply(OM_names, function(x) {
dplyr::filter(tt, OM == x) %>% reshape2::dcast(MP ~ target2)
}) %>% structure(names = OM_names)
g <- plot_tigure_facet(tt_list, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8))
g$facet$params$ncol <- 2
g
})
.ggsave("rebuilding_table.png", tig, height = 5, width = 6)
# Same table averaged over the four Reference operating models only.
tt_avg <- local({
tt <- rbind(pNFref, p15t, pFMSY) %>% filter(Year == 2019 + 56) %>%
mutate(MP = MPout[match(MP, names(MPout))]) %>%
select(!"target" & !"Year")
OM_names <- unique(tt$OM)[1:4]
tt_avg <- tt %>% filter(OM %in% OM_names) %>% group_by(MP, target2) %>% summarise(value = mean(value)) %>%
reshape2::dcast(MP ~ target2)
g <- plot_tigure(tt_avg, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8))
g
})
.ggsave("rebuilding_table_avg.png", tt_avg, height = 2, width = 4)
# Figures
y_trans <- en2fr("Probability above target", FRENCH,
custom_terms = data.frame(english = "Probability above target",
french = "Probabilité en haut de cible"))
col_trans <- en2fr("Candidate\nrebuilding targets", FRENCH,
custom_terms = data.frame(english = "Candidate\nrebuilding targets",
french = "Cibles de\nreconstruction\npotentielles"))
g <- ggplot(pNFref, aes(Year, value, colour = target)) +
geom_path() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) + coord_cartesian(xlim = 2019 + c(1, 56)) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = col_trans) +
scale_color_viridis_d(labels = parser)
.ggsave("projection_rebuilding_target_NFref.png", g, height = 4, width = 8)
g <- ggplot(p15t, aes(Year, value, colour = target)) +
geom_path() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) + coord_cartesian(xlim = 2019 + c(1, 56)) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = col_trans) +
scale_color_viridis_d(labels = parser)
# NOTE(review): the leading "/" below yields "figs_2022//projection..." --
# harmless on most filesystems but inconsistent with the other calls.
.ggsave("/projection_rebuilding_target_15t.png", g, height = 4, width = 8)
g <- ggplot(pFMSY, aes(Year, value, colour = target)) +
geom_path() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) + coord_cartesian(xlim = 2019 + c(1, 56)) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = col_trans) +
scale_color_viridis_d(labels = parser)
.ggsave("projection_rebuilding_target_FMSY.png", g, height = 4, width = 8)
# Recovery target --------------------------------------------------------------------
# Historical spawning biomass for every scenario, stacked into a 3-D array
# (sim x year x scenario) by reading each operating model's @SSB slot.
Hist_SSB <- sapply(sc$scenario, function(x) readRDS(paste0("mse/om/", x, ".rds"))@SSB, simplify = "array")
# cosewic_proj: for scenario i, the annual probability that SSB has
# declined by more than each threshold over (up to) 3 mean generation
# times (3 * MGT years), combining history and projection -- the COSEWIC
# A-criterion style decline metric.
# Args:
#   i         - scenario index into x / y / Hist
#   x         - list of MSE objects (uses @MPs, @SSB, @OM$CurrentYr)
#   y         - vector of operating-model labels
#   threshold - decline thresholds to evaluate
#   MP        - management procedure name
#   MGT       - mean generation time in years (38 for this stock)
#   ymax      - number of projection years
#   Hist      - historical SSB array from Hist_SSB above (102 history years
#               per the hard-coded 1:102 slice -- confirm if reused)
# Returns: long data frame with threshold, Year, tvalue (probability), OM, MP.
cosewic_proj <- function(i, x, y, threshold = c(0.3, 0.5, 0.7), MP, MGT = 38, ymax = 100, Hist) {
MPind <- match(MP, x[[i]]@MPs)
B_hist <- Hist[, 1:102, i]
B_proj <- x[[i]]@SSB[, MPind, ]
p <- sapply(1:ymax, function(yy) {
# Window: compare year yend against 3*MGT years earlier (or year 1).
arr <- cbind(B_hist, B_proj[, 1:yy, drop = FALSE])
yend <- ncol(arr)
ystart <- max(1, ncol(arr) - 3 * MGT + 1)
metric <- 1 - arr[, yend]/arr[, ystart]
sapply(threshold, function(Ref) sum(metric > Ref)/length(metric))
}) %>% structure(dimnames = list(threshold = threshold, Year = x[[i]]@OM$CurrentYr[1] + 1:ymax)) %>%
reshape2::melt(value.name = "tvalue") %>% mutate(OM = y[i], MP = x[[i]]@MPs[MPind])
return(p)
}
# Probability of decline during projection
# (one data frame per management procedure, all scenarios stacked)
rNFref <- lapply(1:length(mse), cosewic_proj, x = mse, y = scenario_human, MP = "NFref", Hist = Hist_SSB) %>% bind_rows()
r15t <- lapply(1:length(mse), cosewic_proj, x = mse, y = scenario_human, MP = "CC_15t", Hist = Hist_SSB) %>% bind_rows()
rFMSY <- lapply(1:length(mse_fmsyproj), cosewic_proj, x = mse_fmsyproj,
y = scenario_human, MP = "MP_FMSY", Hist = Hist_SSB) %>% bind_rows()
# Translated axis/legend labels reused by the three figures below.
col_trans <- en2fr("COSEWIC\nthreshold", FRENCH,
custom_terms = data.frame(english = "COSEWIC\nthreshold",
french = "Seuil du\nCOSEPAC"))
y_trans <- en2fr("Probability of X % decline", FRENCH,
custom_terms = data.frame(english = "Probability of X % decline",
french = "Probabilité de déclin (X%)"))
# Dotted guides mark probability 0.5 and the 1.5-generation year (2019+56).
g <- ggplot(rNFref, aes(Year, tvalue, colour = paste0(100 * threshold, "%"))) +
geom_path() +
#geom_point() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) +
geom_hline(yintercept = 0.5, linetype = 3) + geom_vline(xintercept = 2019 + 56, linetype = 3) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = paste(col_trans, "(X%)"))
.ggsave("projection_recovery_target_NFref.png", g, height = 4, width = 8)
g <- ggplot(r15t, aes(Year, tvalue, colour = paste0(100 * threshold, "%"))) +
geom_path() +
#geom_point() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) +
geom_hline(yintercept = 0.5, linetype = 3) + geom_vline(xintercept = 2019 + 56, linetype = 3) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = paste(col_trans, "(X%)"))
.ggsave("projection_recovery_target_15t.png", g, height = 4, width = 8)
g <- ggplot(rFMSY, aes(Year, tvalue, colour = paste0(100 * threshold, "%"))) +
geom_path() +
#geom_point() +
gfdlm::theme_pbs() +
facet_wrap(~ OM) +
expand_limits(y = 0) +
geom_hline(yintercept = 0.5, linetype = 3) + geom_vline(xintercept = 2019 + 56, linetype = 3) +
labs(x = en2fr("Year", FRENCH), y = y_trans, colour = paste(col_trans, "(X%)"))
.ggsave("projection_recovery_target_FMSY.png", g, height = 4, width = 8)
# Summary table of probabilities after 1.5 generations
# Faceted probability table of decline > threshold at year 2019 + 56.
tig <- local({
tt <- rbind(rNFref, r15t, rFMSY) %>% filter(Year == 2019 + 56) %>%
mutate(MP = MPout[match(MP, names(MPout))],
threshold = paste0(threshold * 100, "%\n", en2fr("decline", FRENCH, case = "lower"))) %>%
select(!"Year")
OM_names <- unique(tt$OM)
tt_list <- lapply(OM_names, function(x) {
dplyr::filter(tt, OM == x) %>% reshape2::dcast(MP ~ threshold, value.var = "tvalue")
}) %>% structure(names = OM_names)
tt_list
g <- plot_tigure_facet(tt_list, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8)) +
scale_fill_viridis_c(limits = c(0, 1), begin = 0.15, end = 1, alpha = 0.6, option = "D", direction = -1)
g
})
.ggsave("recovery_table.png", tig, height = 3.5, width = 6)
# Same table averaged over the four Reference operating models.
tt_avg <- local({
tt <- rbind(rNFref, r15t, rFMSY) %>% filter(Year == 2019 + 56) %>%
mutate(MP = MPout[match(MP, names(MPout))],
threshold = paste0(threshold * 100, "%\n", en2fr("decline", FRENCH, case = "lower"))) %>%
select(!"Year")
OM_names <- unique(tt$OM)[1:4]
tt_avg <- tt %>% filter(OM %in% OM_names) %>% group_by(MP, threshold) %>% summarise(tvalue = mean(tvalue)) %>%
reshape2::dcast(MP ~ threshold, value.var = "tvalue")
g <- plot_tigure(tt_avg, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8)) +
scale_fill_viridis_c(limits = c(0, 1), begin = 0.15, end = 1, alpha = 0.6, option = "D", direction = -1)
g
})
.ggsave("recovery_table_avg.png", tt_avg, height = 2, width = 3)
# Recovery table after 100 years
# (same construction at the final projection year instead of 2019 + 56)
tig_100 <- local({
tt <- rbind(rNFref, r15t, rFMSY) %>% filter(Year == max(Year)) %>%
mutate(MP = MPout[match(MP, names(MPout))],
threshold = paste0(threshold * 100, "%\n", en2fr("decline", FRENCH, case = "lower"))) %>%
select(!"Year")
OM_names <- unique(tt$OM)
tt_list <- lapply(OM_names, function(x) {
dplyr::filter(tt, OM == x) %>% reshape2::dcast(MP ~ threshold, value.var = "tvalue")
}) %>% structure(names = OM_names)
tt_list
g <- plot_tigure_facet(tt_list, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8))
g
})
.ggsave("recovery_table_100.png", tig_100, height = 3.5, width = 6)
tt_avg_100 <- local({
tt <- rbind(rNFref, r15t, rFMSY) %>% filter(Year == max(Year)) %>%
mutate(MP = MPout[match(MP, names(MPout))],
threshold = paste0(threshold * 100, "%\n", en2fr("decline", FRENCH, case = "lower"))) %>%
select(!"Year")
OM_names <- unique(tt$OM)[1:4]
tt_avg <- tt %>% filter(OM %in% OM_names) %>% group_by(MP, threshold) %>% summarise(tvalue = mean(tvalue)) %>%
reshape2::dcast(MP ~ threshold, value.var = "tvalue")
g <- plot_tigure(tt_avg, mp_order = MPout %>% rev()) +
theme(axis.text.x = element_text(size = 8))
g
})
.ggsave("recovery_table_avg_100.png", tt_avg_100, height = 2, width = 3)
# Probability of decline vs. biomass
# Join the rebuilding-target probabilities (value) with the decline
# probabilities (tvalue) per Year/OM/MP so both can be plotted together.
cNFref <- left_join(pNFref, rNFref, by = c("Year", "OM", "MP")) %>%
mutate(target3 = paste0("Y = ", 100 * target_num, "% BMSY"), threshold3 = paste0(100 * threshold, "%"))
c15t <- left_join(p15t, r15t, by = c("Year", "OM", "MP")) %>%
mutate(target3 = paste0("Y = ", 100 * target_num, "% BMSY"), threshold3 = paste0(100 * threshold, "%"))
cFMSY <- left_join(pFMSY, rFMSY, by = c("Year", "OM", "MP")) %>%
mutate(target3 = paste0("Y = ", 100 * target_num, "% BMSY"), threshold3 = paste0(100 * threshold, "%"))
# Retired phase-plot figures, kept commented out for reference.
#dat_end <- cNFref %>% filter(Year %in% c(range(Year), 2019 + 56))
#g <- ggplot(cNFref, aes(tvalue, value, colour = threshold3, group = threshold3)) +
# facet_grid(OM ~ target3, scales = "free_y") +
# geom_vline(xintercept = 0.5, linetype = 3) +
# geom_hline(yintercept = 0.5, linetype = 3) +
# geom_path() +
# geom_point(data = dat_end, aes(shape = as.factor(Year))) +
# labs(y = "Probability above Y % BMSY", x = "Probability of X % decline",
# shape = "Year", colour = "COSEWIC\nthreshold (X%)") +
# theme_bw() +
# scale_shape_manual(values = c(16, 8, 17)) +
# theme(panel.spacing = unit(0, "lines"), axis.text.x = element_text(angle = 45, vjust = 0.5)) +
# expand_limits(y = 0.4) +
# ggtitle(expression("Projections with no fishing"))
#ggsave("figs_2022/projection_recovery_phase_NFref.png", g, height = 7.25, width = 7.5)
#
#
#dat_end <- c15t %>% filter(Year %in% c(range(Year), 2019 + 56))
#g <- ggplot(c15t, aes(tvalue, value, colour = threshold3, group = threshold3)) +
# facet_grid(OM ~ target3, scales = "free_y") +
# geom_vline(xintercept = 0.5, linetype = 3) +
# geom_hline(yintercept = 0.5, linetype = 3) +
# geom_path() +
# geom_point(data = dat_end, aes(shape = as.factor(Year))) +
# labs(y = "Probability above Y % BMSY", x = "Probability of X % decline",
# shape = "Year", colour = "COSEWIC\nthreshold (X%)") +
# theme_bw() +
# scale_shape_manual(values = c(16, 8, 17)) +
# theme(panel.spacing = unit(0, "lines"), axis.text.x = element_text(angle = 45, vjust = 0.5)) +
# expand_limits(y = 0.4) +
# ggtitle(expression("Projections with 15 t catch"))
#ggsave("figs_2022/projection_recovery_phase_15t.png", g, height = 7.25, width = 7.5)
#
#dat_end <- cFMSY %>% filter(Year %in% c(range(Year), 2019 + 56))
#g <- ggplot(cFMSY, aes(tvalue, value, colour = threshold3, group = threshold3)) +
# facet_grid(OM ~ target3, scales = "free_y") +
# geom_vline(xintercept = 0.5, linetype = 3) +
# geom_hline(yintercept = 0.5, linetype = 3) +
# geom_path() +
# geom_point(data = dat_end, aes(shape = as.factor(Year))) +
# labs(y = "Probability above Y % BMSY", x = "Probability of X % decline",
# shape = "Year", colour = "COSEWIC\nthreshold (X%)") +
# theme_bw() +
# scale_shape_manual(values = c(16, 8, 17)) +
# theme(panel.spacing = unit(0, "lines"), axis.text.x = element_text(angle = 45, vjust = 0.5)) +
# expand_limits(y = 0.4) +
# ggtitle(expression("Projections with F ="~F[MSY]))
#ggsave("figs_2022/projection_recovery_phase_FMSY.png", g, height = 7.25, width = 7.5)
# Scatter plot
# Decline probability vs. rebuilding probability at 1.5 generations,
# restricted to the 15 t constant-catch procedure.
scat_plot <- rbind(cNFref, c15t, cFMSY) %>% filter(Year == 2019 + 56, MP == "CC_15t") %>%
mutate(target = paste0(target_num * 100, "% B", en2fr("MSY", FRENCH)) %>%
factor(levels = paste0(seq(40, 160, 20), "% B", en2fr("MSY", FRENCH))))
col_trans <- en2fr("Candidate\ntargets", FRENCH,
custom_terms = data.frame(english = "Candidate\ntargets",
french = "Cibles\npotentielles"))
shape_trans <- en2fr("COSEWIC\nthreshold", FRENCH,
custom_terms = data.frame(english = "COSEWIC\nthreshold",
french = "Seuil du\nCOSEPAC"))
x_trans <- en2fr("Probability of decline", FRENCH,
custom_terms = data.frame(english = "Probability of decline",
french = "Probabilité de déclin"))
y_trans <- en2fr("Probability above target", FRENCH,
custom_terms = data.frame(english = "Probability above target",
french = "Probabilité en haut de cible"))
g <- ggplot(scat_plot, aes(tvalue, value, group = target, colour = target, shape = threshold3)) +
geom_path() +
geom_point() +
theme_bw() + facet_wrap(~ OM) +
labs(colour = col_trans,
shape = shape_trans,
x = x_trans,
y = y_trans) +
coord_cartesian(ylim = c(0, 1), xlim = c(0, 1))
.ggsave("projection_recovery_scatter.png", g, height = 4, width = 7.5)
|
19f41dd733c27abca4e37f2f57087ec7d55a6044
|
ec44cf2c3f4316610e890bee2665cdb15cb0d986
|
/Simulation/PracticasVectores_ii.R
|
8d20529cf318dbea0c895176bd63361b7a7d1852
|
[] |
no_license
|
jr-98/R
|
c92959c244f3fa51a98f7859a275451675c076ce
|
8164b6ccfdba4c9c5a58ba81bb76482d73066f55
|
refs/heads/master
| 2020-07-03T16:30:23.561059
| 2019-08-23T09:36:03
| 2019-08-23T09:36:03
| 201,968,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,062
|
r
|
PracticasVectores_ii.R
|
# Practice script: vector summaries, logical subsetting, basic plots,
# Fibonacci, and a stubbed congruential generator. (Original comments were
# in Spanish; translated to English below. Code lines are unchanged.)
x<-c(1,2,3,4,5)
min(x) # returns the smallest value of the vector
mean(x)# returns the arithmetic mean
median(x)# returns the median
var(x)# returns the variance
prod(x)# the product of all the values of the vector
cumprod(x)# returns a vector containing the cumulative product of its terms
quantile(x)# returns the quartiles and their values
y<-c(5,6,7,8,9)
cov(x,y) # returns the covariance of two vectors
cor(x,y) # returns the correlation between the given vectors
# conditional (comparison) operations on vectors
# NOTE(review): `<=` below is a COMPARISON, so this line computes and
# discards a logical vector; the assignment `x<-c(...)` was likely intended.
x<=c(15,20,14,7,8,2,15,2,15,15,2,16,28)
x=11:20
x
x==15
x<15
x>15
x<=15
x>=15
x!=15
sum(x==15)
sum(x==15)
sum(x>15)
sum (x!=15)
# Compute the mean of a vector and the number of values below the mean and the median
x<-c(1,5,7,9,3,5,6,1,4,7,5,6,9,8,6,2,6,1,3)
mean(x)
median(x)
sum(x< mean(x))
sum(x<median(x))
length(x)
x==median(x)
z=1:5
z[c(TRUE,FALSE,TRUE,FALSE,TRUE)]
# NOTE(review): z=.7 makes z a single value 0.7, so the logical index below
# recycles past its length and yields NAs; `z=1:7` was probably intended.
z=.7
z[c(TRUE,FALSE,TRUE,FALSE,TRUE)]
x=runif(10)
x
x[x<0.2]
x[x<0.4]
# counting
x[(x<0.2)|(x>0.8)]# plotted, this would show the two tails of the
# distribution; the selection keeps the values in both tails
sum((x<0.2)|(x>0.8))
# summing
sum(x[(x<0.2)|(x>0.8)])
# which ===> returns the numeric positions of TRUE elements
which((x>0.2)&(x<0.6))
##### Bar chart ###########
x=c(1,1,1,1,2,2,3,3,3,5,6,6,7,7,7)
# table ===> tabulates the data (frequency counts)
table(x)
barplot(table(x))
## histogram
x=runif(100)
hist(x)
# scatterplots (dispersion diagrams) require 2 variables with data
#
##QQ-PLOT
# random numbers following a normal distribution
x=rnorm(1000)
y=rnorm(1000)
qqplot(x,y,main = "X e y con la mimsa distrivubion")
# uniform distribution comparison
a = rnorm(1000, mean =3, sd=2)
qqplot(x,a,main="xN(0,1), b(t)")
## Generate a Fibonacci series into a vector initialised to 0
n<-1:12
# NOTE(review): vector("numeric", n) is passed the whole vector 1:12 as the
# length argument; modern R rejects a length > 1 here -- `length(n)` was
# probably intended.
x<-vector("numeric",n)
f1 <- 1
f2 <- 0
fn<- 0
for (i in n) {
f1 <- f1 + f2
f2 <- fn
fn<- f1
x[i]<- f1
}
print(x)
# multiplicative congruential generator (exercise stub)
#Xn= 171 X(n-1)(mod 30269)
#UN =Xn/30269
#Seed = 27218
ramdom.namber<- vector("numeric",50)
# NOTE(review): loop body was left empty -- the generator above is
# unimplemented.
for(j in 1:50){
}
|
17311ff268cfb5bb39700e1200ca2f52c0864955
|
64341466a2f01a718abece011226197d0585b393
|
/tests/testthat/test-crosstalk.R
|
7489ff72c0c4d6c24a0895b07865fb6756ac570c
|
[] |
no_license
|
ggobi/ggally
|
2aa29f340250005c084e1a913a94c69af685ccaa
|
d79b4efae51ae78bc3fc2d4c94504327169e1b42
|
refs/heads/master
| 2023-09-03T21:00:58.614094
| 2023-07-03T02:07:11
| 2023-07-03T02:07:11
| 1,384,794
| 558
| 124
| null | 2022-11-04T16:15:43
| 2011-02-19T00:18:46
|
R
|
UTF-8
|
R
| false
| false
| 1,090
|
r
|
test-crosstalk.R
|
context("crosstalk")
# GGally + crosstalk integration: ggpairs()/ggduo() must accept a
# crosstalk::SharedData object with default column selections, and must
# error when an explicit selection would include the internal crosstalk
# key column (by index or by name).
test_that("crosstalk works with ggduo and ggpairs", {
skip_if_not_installed("crosstalk")
# SharedData construction can fail in some environments; skip rather
# than fail when it does.
sd <- try(crosstalk::SharedData$new(iris[1:4]), silent = TRUE)
if (inherits(sd, "try-error")) {
skip("crosstalk data can not be initialized")
}
expect_silent({
pm <- ggpairs(sd)
})
# Column 5 is the hidden crosstalk key: numeric selection touching it errors.
expect_error({
pm <- ggpairs(sd, 3:5)
},
"Make sure your numeric"
)
# Selecting the key by name (crosstalk_key()) must also error.
expect_error({
pm <- ggpairs(sd, c("Petal.Length", "Petal.Width", crosstalk_key()))
},
"Columns in 'columns' not"
)
expect_silent({
pm <- ggduo(sd)
})
expect_error({
pm <- ggduo(sd, c(1:2, 5), 3:5)
},
"Make sure your numeric 'columnsX'"
)
expect_error({
pm <- ggduo(
sd,
c("Sepal.Length", "Sepal.Width", crosstalk_key()),
c("Petal.Length", "Petal.Width")
)
},
"Columns in 'columnsX' not"
)
expect_error({
pm <- ggduo(
sd,
c("Sepal.Length", "Sepal.Width"),
c("Petal.Length", "Petal.Width", crosstalk_key())
)
},
"Columns in 'columnsY' not"
)
})
|
e464d5ec96a09e650cabad91d77085c63f24b6e0
|
f77d4ae139d960f6138e29f4e5e9e39fcba528fb
|
/R_CODES/Previous/final R codes/5_1 compare ABC posterior to likelihood posterior.R
|
390bbe19148efced57a670be7feade13e24f3336
|
[] |
no_license
|
zenabu-suboi/masters_project
|
fc80eb077af8e92bf81cda94952d4dec196bb263
|
d865eb68e66d35c52229023d7aa83b78fd7518f4
|
refs/heads/master
| 2022-04-22T15:01:15.063667
| 2020-04-28T10:50:17
| 2020-04-28T10:50:17
| 179,095,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,971
|
r
|
5_1 compare ABC posterior to likelihood posterior.R
|
### COMPARE ABC POSTERIOR TO LIKELIHOOD POSTERIOR
# For each ABC flavour (rejection, linear-regression adjustment, neural-net
# adjustment) and each acceptance tolerance (0.1, 0.01, 0.001), report:
#  - sum((ras - lik)^2): sum of squared differences between the binned ABC
#    posterior (ras.* matrices) and the likelihood-based posterior (lik.*);
#  - a "percentage overlap": 1 minus (total absolute bin discrepancy plus
#    twice the accepted draws missing from the ABC bins) over twice the
#    number of accepted draws (nreprej * tolerance).
# NOTE(review): the *.mat matrices and nreprej are created by earlier
# scripts in this pipeline and are not defined here -- this file cannot be
# run stand-alone.
################ REJECTION ABC
### abc0.1
sum((ras.abc0.1.mat-lik.abc0.1.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.1.mat-lik.abc0.1.mat))+(2*(nreprej*0.1-sum(ras.abc0.1.mat))))/ (2*(nreprej*0.1)))
### abc0.01
sum((ras.abc0.01.mat-lik.abc0.01.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.01.mat-lik.abc0.01.mat))+(2*(nreprej*0.01-sum(ras.abc0.01.mat))))/ (2*(nreprej*0.01)))
### abc0.001
sum((ras.abc0.001.mat-lik.abc0.001.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.001.mat-lik.abc0.001.mat))+(2*(nreprej*0.001-sum(ras.abc0.001.mat))))/ (2*(nreprej*0.001)))
################# REGRESSION LINEAR
### abc0.1
sum((ras.abc0.1lin.mat-lik.abc0.1lin.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.1lin.mat-lik.abc0.1lin.mat))+(2*(nreprej*0.1-sum(ras.abc0.1lin.mat))))/ (2*(nreprej*0.1)))
### abc0.01
sum((ras.abc0.01lin.mat-lik.abc0.01lin.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.01lin.mat-lik.abc0.01lin.mat))+(2*(nreprej*0.01-sum(ras.abc0.01lin.mat))))/ (2*(nreprej*0.01)))
### abc0.001
sum((ras.abc0.001lin.mat-lik.abc0.001lin.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.001lin.mat-lik.abc0.001lin.mat))+(2*(nreprej*0.001-sum(ras.abc0.001lin.mat))))/ (2*(nreprej*0.001)))
################## REGRESSION NEURALNET
### abc0.1
sum((ras.abc0.1nnet.mat-lik.abc0.1nnet.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.1nnet.mat-lik.abc0.1nnet.mat))+(2*(nreprej*0.1-sum(ras.abc0.1nnet.mat))))/ (2*(nreprej*0.1)))
### abc0.01
sum((ras.abc0.01nnet.mat-lik.abc0.01nnet.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.01nnet.mat-lik.abc0.01nnet.mat))+(2*(nreprej*0.01-sum(ras.abc0.01nnet.mat))))/ (2*(nreprej*0.01)))
### abc0.001
sum((ras.abc0.001nnet.mat-lik.abc0.001nnet.mat)^2)
### percentage overlap:
1-((sum(abs(ras.abc0.001nnet.mat-lik.abc0.001nnet.mat))+(2*(nreprej*0.001-sum(ras.abc0.001nnet.mat))))/ (2*(nreprej*0.001)))
|
87cf8d2128c5c1c7119d394b395411c70c73ecb5
|
4f98c8660757b8d163312336992cc68604f15df7
|
/run_analysis.R
|
00e10ec381147d32c0a7cfca9237b0d9fc070e05
|
[] |
no_license
|
rajithnair/Getting_Cleaning_Data
|
fb3741a9e092e1d66b3e42079d813795ab9d473e
|
26deb16467b43ad702d93db0b1fb5000be06f345
|
refs/heads/master
| 2021-01-22T14:32:42.522422
| 2014-06-22T18:09:10
| 2014-06-22T18:09:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,258
|
r
|
run_analysis.R
|
run_analysis<-function(){
  # Downloads (if needed) the UCI HAR dataset, merges the training and test
  # sets, extracts the mean/std measurements, relabels activities with
  # descriptive names, tidies the column names, and writes the per-subject,
  # per-activity averages to "Tidy_Data.csv" in the working directory.
  # No arguments; called for its side effects.

  ### OBTAINING THE DATA FROM THE SOURCE
  fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  zipname <- "./data/getdata-projectfiles-UCI HAR Dataset.zip"
  # Creating the data folder
  if (!file.exists("data")) {
    dir.create("data")
  }
  # Downloading and unzipping the file (skipped when already present)
  if (!file.exists(zipname)){
    download.file(fileurl, destfile=zipname, mode="wb")
    unzip(zipname, exdir="./data")
  }

  ### MERGING THE TRAINING AND TEST SETS TO CREATE ONE DATASET
  # 561 feature names, and the activity code -> label lookup table
  features <- read.table("./data/UCI HAR Dataset/features.txt", stringsAsFactors = F)
  activity_label <- read.table("./data/UCI HAR Dataset/activity_labels.txt", stringsAsFactors = F)
  # Subject ids, stacked test-first then train (matching the other files)
  testing_subjects <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
  training_subjects <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
  tidy_dataset <- rbind(testing_subjects, training_subjects)
  colnames(tidy_dataset)[1] <- "Subjects"

  # Activity codes for each observation (same test-then-train order)
  test_activity <- read.table("./data/UCI HAR Dataset/test/Y_test.txt")
  train_activity <- read.table("./data/UCI HAR Dataset/train/Y_train.txt")
  subject_activity <- rbind(test_activity, train_activity)
  # FIX: the original looped over every row calling which() against the
  # label table (O(rows * labels)); a single vectorized match() lookup
  # produces the identical column.
  tidy_dataset$Activity <- activity_label[match(subject_activity[, 1], activity_label[, 1]), 2]

  # 561 feature readings per observation, test-then-train
  test_feature_readings <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
  train_feature_readings <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
  feature_readings <- rbind(test_feature_readings, train_feature_readings)
  # FIX: the original grew tidy_dataset one column at a time with cbind()
  # inside a loop (quadratic copying over 561 columns). Bind all columns at
  # once, then assign names directly -- names()<- preserves the duplicate
  # feature names in features.txt exactly as the original loop did.
  tidy_dataset <- cbind(tidy_dataset, feature_readings)
  names(tidy_dataset) <- c("Subjects", "Activity", features[, 2])

  ### EXTRACTING ONLY THE MEASUREMENTS ON THE MEAN AND STANDARD DEVIATION
  extract <- grep("(.*)(mean|std)[Freq]?(.*)[/(/)]$|(.*)(mean|std)(.*)()-[X|Y|Z]$", colnames(tidy_dataset), value = T)
  tidy_dataset <- tidy_dataset[, c("Subjects", "Activity", extract)]

  ### REPLACING THE ACTIVITY LABELS WITH DESCRIPTIVE NAMES
  # Order matters: WALKING_UPSTAIRS/DOWNSTAIRS must be rewritten before WALKING.
  tidy_dataset$Activity <- gsub("WALKING_UPSTAIRS", "Walking Up", tidy_dataset$Activity)
  tidy_dataset$Activity <- gsub("WALKING_DOWNSTAIRS", "Walking Down", tidy_dataset$Activity)
  tidy_dataset$Activity <- gsub("WALKING", "Walking", tidy_dataset$Activity)
  tidy_dataset$Activity <- gsub("SITTING", "Sitting", tidy_dataset$Activity)
  tidy_dataset$Activity <- gsub("STANDING", "Standing", tidy_dataset$Activity)
  tidy_dataset$Activity <- gsub("LAYING", "Laying", tidy_dataset$Activity)

  ### REPLACING THE COLUMN NAMES WITH DESCRIPTIVE NAMES
  ## Stripping the parentheses from the column names
  colnames(tidy_dataset) <- gsub("[/(/)]", "", colnames(tidy_dataset))
  ## Stripping the hyphens from the column names and replacing them with underscores
  colnames(tidy_dataset) <- gsub("-", "_", colnames(tidy_dataset))

  ### AVERAGING THE VARIABLES ACROSS EACH ACTIVITY AND SUBJECT
  # FIX: the original if/else called library(data.table) in both branches;
  # install only when missing, then load once.
  if (!("data.table" %in% rownames(installed.packages()))) {
    install.packages("data.table")
  }
  library(data.table)
  tidy_dataset = data.table(tidy_dataset)
  ## Group by subject and activity, then take the mean of every other column
  tidy_dataset <- tidy_dataset[, lapply(.SD, mean), by = 'Subjects,Activity']
  tidy_dataset <- tidy_dataset[order(tidy_dataset$Subjects), ]
  write.csv(tidy_dataset, "Tidy_Data.csv", row.names = F)
  cat("\n### Please find the tidy data in the Tidy_Data.csv file saved in your working directory ###")
}
|
5ecf329a5af926d79ff96d87ce0404d7767c77b7
|
bb17abdd113af9108aec487935cc86a0906d902e
|
/Shiny Datasets.R
|
53a2a1e7db85882e29e9be0fcdc1d183ebf13c21
|
[] |
no_license
|
jn131/Spatial-Visualization-For-Public-Health-Intelligence
|
3e12f02702565b052342800062fc0b6f64aa2036
|
7e9af770bdaeac4bdd85661b46619b1226d2bb14
|
refs/heads/master
| 2021-07-12T20:32:56.641813
| 2017-10-12T18:32:29
| 2017-10-12T18:32:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,152
|
r
|
Shiny Datasets.R
|
#.......................
# Shiny Datasets
# Jessica Needleman
#.......................
## df: per-death records keyed by census block group (GEOID10).
# NOTE(review): assumes `deaths_county` is loaded by an earlier script — confirm.
df <- deaths_county %>% dplyr::rename(GEOID10 = GeoIDBlkGr)
# race_shiny: collapse detailed race codes into display categories using a
# positional lookup table (race_original[i] -> race_new[i]); unmatched -> NA.
race_original <- c("African American","Chinese","Filipino","Hawaiian","Japanese","Native American","Other","Other Asian","Unknown","White")
race_new <- c("African American","Asian","Asian","Native Hawaiian","Asian","Native American","Other","Asian","Other","White")
df$race_shiny <- race_new[match(df$RACE, race_original)]
# age_shiny: bin single years of age 0-112 into 5-year groups (85+ open-ended).
age_original <- c(0:112)
age_new <- c(rep("0-4", 5), rep("5-9", 5), rep("10-14", 5), rep("15-19", 5), rep("20-24", 5), rep("25-29", 5), rep("30-34", 5),
             rep("35-39", 5), rep("40-44", 5), rep("45-49", 5), rep("50-54", 5), rep("55-59", 5), rep("60-64", 5), rep("65-69", 5),
             rep("70-74", 5), rep("75-79", 5), rep("80-84", 5), rep("85 and Over", 113-85))
df$age_shiny <- age_new[match(df$AGE, age_original)]
# sex_shiny: expand single-letter codes to full labels.
sex_original <- c("M","F")
sex_new <- c("Male","Female")
df$sex_shiny <- sex_new[match(df$SEX, sex_original)]
# causeofdeath_shiny: the five cancer subtypes all map to "Cancer".
cause_original <- c("Cancer","Breast Cancer","Colon, Rectum and Anus Cancer","Prostate Cancer","Trachea, Bronchus and Lung Cancer","HIV Disease","Diabetes Mellitus","Heart Disease")
cause_new <- c(rep("Cancer", 5),"HIV","Diabetes","Heart Disease")
df$causeofdeath_shiny <- cause_new[match(df$CauseOfDea, cause_original)]
# ment_shiny: classify the five contributing-cause ("mention") ICD-10 code
# columns MENT1..MENT5 into disease groups by code prefix.
# Cancer: any ICD-10 code beginning with "C".
cancer.f <- function(x) {x[which(substr(x, 1, 1) == "C")]}
cancer_icd10 <- unique(c(cancer.f(df$MENT1), cancer.f(df$MENT2), cancer.f(df$MENT3), cancer.f(df$MENT4), cancer.f(df$MENT5)))
cancer_new <- rep("Cancer", length(cancer_icd10))
# Diabetes: codes E08-E14.
diabetes.f <- function(x) {x[which(substr(x, 1, 3) %in% c("E08","E09","E10","E11","E12","E13","E14"))]}
diabetes_icd10 <- unique(c(diabetes.f(df$MENT1), diabetes.f(df$MENT2), diabetes.f(df$MENT3), diabetes.f(df$MENT4), diabetes.f(df$MENT5)))
diabetes_new <- rep("Diabetes", length(diabetes_icd10))
# Heart disease: codes I05-I09 and I20-I52.
heart.f <- function(x) {x[which(substr(x, 1, 3) %in% c(paste("I","0",c(5:9), sep = ""), paste("I",c(20:52), sep = "")))]}
heartdisease_icd10 <- unique(c(heart.f(df$MENT1), heart.f(df$MENT2), heart.f(df$MENT3), heart.f(df$MENT4), heart.f(df$MENT5)))
heartdisease_new <- rep("Heart Disease", length(heartdisease_icd10))
# HIV: codes B20-B24.
hiv.f <- function(x) {x[which(substr(x, 1, 3) %in% c(paste("B",c(20:24), sep = "")))]}
hiv_icd10 <- unique(c(hiv.f(df$MENT1), hiv.f(df$MENT2), hiv.f(df$MENT3), hiv.f(df$MENT4), hiv.f(df$MENT5)))
hiv_new <- rep("HIV", length(hiv_icd10))
# Lookup tables pairing each observed ICD-10 code with its group label;
# codes outside these four groups become NA in the *_shiny columns.
ment_original <- c(cancer_icd10, diabetes_icd10, heartdisease_icd10, hiv_icd10)
ment_new <- c(cancer_new, diabetes_new, heartdisease_new, hiv_new)
df$ment1_shiny <- ment_new[match(df$MENT1, ment_original)]
df$ment2_shiny <- ment_new[match(df$MENT2, ment_original)]
df$ment3_shiny <- ment_new[match(df$MENT3, ment_original)]
df$ment4_shiny <- ment_new[match(df$MENT4, ment_original)]
df$ment5_shiny <- ment_new[match(df$MENT5, ment_original)]
## Rate Datasets: population denominator tables aligned to a common layout.
# Shared column labels; two zero-filled padding columns are appended so all
# three tables have the same 10-column shape.
race_colnames <- c("GEOID10","Total","White","African American","Native American","Asian","Native Hawaiian","Other")
# Row-wise sum of paired population columns (e.g. non-Hispanic + Hispanic).
# rowSums() replaces apply(..., 1, sum): identical result for numeric columns,
# vectorized, and without the intermediate matrix coercion.
# NOTE(review): assumes the selected population_county columns are numeric.
pop.f <- function(x, y) {rowSums(population_county[, c(x, y)])}
# pop_shiny_total: columns 1 and 18 plus five summed column pairs and one
# row-wise aggregate of columns 5:9 + 13:17.
pop_shiny_total <- cbind(population_county[,c(1,18)], pop.f(3,11), pop.f(4,12), pop.f(5,13), pop.f(6,14), pop.f(7,15), pop.f(5:9, 13:17), rep(0, length(population_county$GeoIDBlkGr)), rep(0, length(population_county$Total)))
setnames(pop_shiny_total, old = colnames(pop_shiny_total), new = c(race_colnames, "Zero1","Zero2"))
# pop_shiny_nonhispanic: columns 1:7 plus a row-wise aggregate of columns 5:9.
pop_shiny_nonhispanic <- cbind(population_county[,c(1:7)], rowSums(population_county[, 5:9]), rep(0, length(population_county$GeoIDBlkGr)), rep(0, length(population_county$Total)))
setnames(pop_shiny_nonhispanic, old = colnames(pop_shiny_nonhispanic), new = c(race_colnames,"Zero1","Zero2"))
# pop_shiny_hispanic: columns 1, 10:15 plus a row-wise aggregate of 16:17.
pop_shiny_hispanic <- cbind(population_county[,c(1,10:15)], rowSums(population_county[, 16:17]), rep(0, length(population_county$GeoIDBlkGr)), rep(0, length(population_county$Total)))
setnames(pop_shiny_hispanic, old = colnames(pop_shiny_hispanic), new = c(race_colnames,"Zero1","Zero2"))
|
af1294c12d2ffc129b92080bc505903c38865eef
|
1686456163a1cf0d04a4c8e22cae63e1566f13cf
|
/tree.R
|
487e4717071a6b22af454e1f5e055c4e233b16ca
|
[] |
no_license
|
yaomisun/R-Modelling
|
66beddd5004ec847a89b8d45ba4df28bff31411b
|
b6a38c9dbde658367f7386f46b7edebf3779c8ce
|
refs/heads/main
| 2023-01-08T17:08:35.823558
| 2020-11-10T17:58:38
| 2020-11-10T17:58:38
| 311,426,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,946
|
r
|
tree.R
|
# --- Regression tree and random forest on the US crime data ------------------
# Read data (expects uscrime.txt in the working directory).
# Fix: use `<-` for assignment instead of `=` (tidyverse/R style).
myData <- read.table("uscrime.txt", stringsAsFactors=FALSE, header=TRUE)
#myData
library(tree)
# Fit a regression tree for Crime on all other predictors.
Ctree<-tree(Crime~.,data=myData)
summary(Ctree)
# Visualize the fitted tree.
plot(Ctree)
text(Ctree)
Ctree$frame   # per-node split/deviance details
Ctree$where   # leaf assignment of each observation
# In-sample predictions.
PredTree<-predict(Ctree)
PredTree
plot(PredTree,myData$Crime)
# Root mean squared error between predictions and observed values
# (computed on the training data, so it is an optimistic estimate).
rmse_cTree<-sqrt(mean((PredTree-myData$Crime)^2))
rmse_cTree
# Cost-complexity pruning sequence: subtree sizes and deviances.
prune.tree(Ctree)$size
prune.tree(Ctree)$dev
set.seed(42)  # reproducible cross-validation
cv.tree(Ctree)
cv.tree(Ctree)$dev
# Prune the tree to 4 leaves.
Ctree_P<-prune.tree(Ctree, best=4)
Ctree_P
plot(Ctree_P)
text(Ctree_P)
# --- Random forest -----------------------------------------------------------
library(randomForest)
set.seed(42)
# Predictors sampled per split: rules of thumb are 1+log(n) or n/3; 5 chosen.
Pred_No<-5
C_RF<-randomForest(Crime~.,data=myData, mtry=Pred_No,importance=TRUE,ntree=500)
C_RF
importance(C_RF)
# In-sample predictions (predicting back on the training data).
Pred_RF<-predict(C_RF, myData)
Pred_RF
rmse_RF<-sqrt(mean((Pred_RF-myData$Crime)^2))
rmse_RF
#10.3 --- Logistic regression on the German credit data ----------------------
# Fix: use `<-` for assignment instead of `=`.
myData <- read.table("germancredit.txt", sep=" ")
head(myData)
#myData
# Recode the response from {1,2} to binary {0,1}. Order matters: mapping the
# 1s to 0 first leaves the 2s untouched for the second statement.
myData$V21[myData$V21==1] <-0
myData$V21[myData$V21==2] <-1
head(myData)
# Train/test split: first 800 rows train, last 200 test.
Credit_Train<-myData[1:800,]
Credit_Test<-myData[801:1000,]
Credit_logit<-glm(V21~.,family=binomial(link="logit"),data=Credit_Train)
summary(Credit_logit)
# Predicted probabilities on the held-out rows.
Pred_Credit<-predict(Credit_logit, Credit_Test, type = "response")
Pred_Credit
# ROC/AUC of the rounded (0.5-threshold) predictions.
library(pROC)
roc(Credit_Test$V21, round(Pred_Credit))
# Confusion matrix at a fixed threshold of 0.8.
TH<-0.8
run_TH<-as.integer(Pred_Credit>TH)
Conf_mtx<-as.matrix(table(run_TH,Credit_Test$V21))
Conf_mtx
# False positives: predicted 1 (row 2), actual 0 (column 1).
Conf_mtx[2,1]
# --- Threshold search: minimize false positives ------------------------------
# Scan classification thresholds 0.01..0.95 and keep the first one achieving
# the lowest false-positive count (predicted 1, actual 0).
#
# pred:   numeric vector of predicted probabilities
# actual: 0/1 vector of true labels, same length as pred
# Returns a list with `fp` (minimum FP count) and `th` (threshold reaching it).
find_min_fp_threshold <- function(pred, actual) {
  finalFP <- Inf
  final_TH <- 0
  for (ov in 1:95) {
    TH <- ov * 0.01
    run_TH <- as.integer(pred > TH)
    # Count false positives directly. The previous table()[2,1] lookup raised
    # "subscript out of bounds" whenever no prediction exceeded the threshold
    # (the table then has only one row).
    fp <- sum(run_TH == 1 & actual == 0)
    if (fp < finalFP) {
      finalFP <- fp
      final_TH <- TH
    }
  }
  list(fp = finalFP, th = final_TH)
}
best <- find_min_fp_threshold(Pred_Credit, Credit_Test$V21)
finalFP <- best$fp
final_TH <- best$th
cat("lowest False Positive ", finalFP, " get when threshold is ", final_TH)
|
9881a18f313bb159166c92e294136e6c32cd638a
|
2ce2a707b6805971283454659d014e74a545ab60
|
/R/surg2rec.R
|
f0babd0ddfc491f12145a7f644e82a717831f952
|
[] |
no_license
|
UCCC/McLeod
|
6843ae4f6d0b0264ced9de21a82438fc0c362d2d
|
5368d34d5a8efc9360f7b955558a1f09c85fe735
|
refs/heads/master
| 2020-03-11T16:17:04.119880
| 2018-07-19T20:59:12
| 2018-07-19T20:59:12
| 130,112,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
surg2rec.R
|
#' surg2rec: Generate the tables requested by Lisa McLeod
#'
#' The surg2rec package contains the functions, .Rmd files, and data files
#' necessary for generating tables from CHORDS data for the Surgery to
#' Recovery project.
#'
#' @docType package
#' @name surg2rec
NULL
|
842d0b7c9ea5bda5c6f39ee4d18fdfd39e7727dc
|
2ed70e68a904ddb39e489b79b7569a847919a8ae
|
/run_maxent_one_fold.R
|
3d933abba0c2746ac652f4f7b35cb2a0e96a561d
|
[] |
no_license
|
abbylute/Rock_glacier_modeling
|
90fea79dfd9de11243ccfe251bbad06bfde4430e
|
316aa762e17fb5d5f47d9b1fc7f837b08bee99ed
|
refs/heads/master
| 2023-06-02T03:49:17.588779
| 2021-06-25T22:07:46
| 2021-06-25T22:07:46
| 308,749,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
run_maxent_one_fold.R
|
# Assemble training data from fold 2; fold 1 is held out for evaluation below.
s1 <- s2 %>% filter(fold == 2)
bg.coords1 <- bg.coords[bgfold == 2, ]
bgfold1 <- bgfold[bgfold == 2]
occ1 <- occ[s2$fold == 2, ]
# Presence points stacked on top of background points.
bg11 <- rbind(occ1, bg.coords1)
# Response vector: 1 for presences, 0 for background.
# Fixes: `<-` instead of `=` for assignment; seq_len() is safe when occ1 has
# zero rows (1:nrow(occ1) would produce c(1, 0) and index backwards).
p <- rep(0, nrow(bg11))
p[seq_len(nrow(occ1))] <- 1
# Predictor columns start at column 3 (columns 1-2 are coordinates upstream).
bg111 <- bg11[, 3:ncol(bg11)]
# Fit MaxEnt with 'lith' treated as a categorical predictor.
sb1 <- dismo::maxent(bg111, p, factors = 'lith')
saveRDS(sb1, file = paste0(outdir, 'ENMeval_object.RData'))
# Evaluate on the held-out fold against the full background set.
testval <- occ[s2$fold == 1, ]
myauc <- dismo::evaluate(testval, bg.coords, sb1)@auc
|
e8e7cf2798c9029ef8f6dde9ea4e72c5ddcda1d8
|
dc65db19ba8bcd45f1954599b8ff526b9d44573c
|
/ScripsCassava.R
|
01b157772cb15374d8711fecef8c0af86971980e
|
[] |
no_license
|
caosmax/gfsf_project
|
dd345be3e63b73eeee9edc8b9365f65e4f83fc22
|
bbd770aa0411cdb45e43a009853368b7f4ed9b71
|
refs/heads/master
| 2021-01-19T10:06:38.217105
| 2017-04-07T18:11:12
| 2017-04-07T18:11:12
| 87,828,521
| 2
| 0
| null | 2017-04-10T15:40:46
| 2017-04-10T15:40:46
| null |
ISO-8859-10
|
R
| false
| false
| 45,217
|
r
|
ScripsCassava.R
|
### preparacion de los datos para presentacion de yuca.
## Autor: CArlos Eduardo Gonzalez
## prep
#librerias------
library(XML)
library(treemap)
library(migest)
library(circlize)
library(rworldmap)
library(maptree) #maps of
library(ggplot2) #plot
library(dplyr) # reshape
library(raster) # procesar Raster
library(plyr)
library(grid)
library(gridExtra)
library(xtable)
library(dplyr)
library(tidyr)
library(lattice)
library(latticeExtra)
library(rgdal)
library(sp)
library(maptools)
library(maps)
library(tiff)
library(rasterVis)
library(dismo)
library(ncdf4)
# Working directories ----
# NOTE(review): setwd() with an absolute user path makes the script
# non-portable; consider project-relative paths.
setwd("C:/Users/CEGONZALEZ/Documents/cassava") # set the working directory
# Output directory for graphs
pic <-("C:/Users/CEGONZALEZ/Documents/cassava/graph/")
# Limit printed decimals and disable scientific notation ----
options(digits=3)
options(scipen=999)
# Import data ----
# Production, area and yield data
fao_oferta<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAO_Prod_Yield_Area.csv", header = TRUE)
fao_land_area<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOLandArea.csv", header = TRUE)
# Producer price data (levels, annual index, monthly index)
fao_price<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAO_PriceProducer.csv", header = TRUE)
fao_pricePro_index<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOProducerPriceIndicesAnnual.csv", header = TRUE)
fao_pricePro_monthly<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOPriceProducerMonthly.csv", header = TRUE)
# Trade data: exports and imports
fao_trade<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOTrade.csv", header = TRUE)
# Trade-relationship matrices
fao_matrix_trade<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOMatrixTrade.csv", header = TRUE)
# Agricultural value-added data
fao_valueAgg<-read.csv("C:/Users/CEGONZALEZ/Documents/cassava/FAOSTAT/FAOValueAgProd.csv", header = TRUE)
# UN COMTRADE data (Harmonized System codes), 1991-2015 in five-year files
comtrade_1<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/COMTRADE/TradeCassava2015-2011.csv", header = TRUE)
comtrade_2<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/COMTRADE/TradeCassava2010-2006.csv", header = TRUE)
comtrade_3<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/COMTRADE/TradeCassava2005-2001.csv", header = TRUE)
comtrade_4<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/COMTRADE/TradeCassava2000-1996.csv", header = TRUE)
comtrade_5<- read.csv("C:/Users/CEGONZALEZ/Documents/cassava/COMTRADE/TradeCassava1995-1991.csv", header = TRUE)
# Import spatial data ----
# SPAM (Spatial Production Allocation Model) rasters
spam_prod<-raster("C:/Users/CEGONZALEZ/Documents/cassava/SPATIAL/spam2005v2r0_production_cassava_total.tiff")
spam_yield<-raster("C:/Users/CEGONZALEZ/Documents/cassava/SPATIAL/spam2005v2r0_yield_cassava_rainfed.tiff")
levelplot(spam_prod) # visualize the spatial data
levelplot(spam_yield)
# Roots, Tubers and Bananas (RTB, CGIAR) NetCDF: area / yield / production
rtb_prod_yield_area<-("C:/Users/CEGONZALEZ/Documents/cassava/SPATIAL/cassava_HarvAreaYield_NetCDF/cassava_AreaYieldProduction")
rtb_pya<- paste(rtb_prod_yield_area,".nc", sep = "")
map_rtb<- nc_open(rtb_pya)
print(map_rtb)
map_rtb <- brick(rtb_pya, lvar=4) # 4 dimensions: one raster layer per variable
# NOTE(review): the layer indices below assume a fixed variable order inside
# the NetCDF file — confirm against the print(map_rtb) output above.
yield <- map_rtb[[2]]
yield_q <- map_rtb[[4]]
harvest<- map_rtb[[1]]
harvest_h <- map_rtb[[5]]
production <- map_rtb[[6]]
# Yield-gap NetCDF
rtb_yieldgap<-("C:/Users/CEGONZALEZ/Documents/cassava/SPATIAL/cassava_yieldgap_netcdf/YieldGap_cassava_2000_BaseGDD_8_MaxYieldPct_95_ContourFilteredClimateSpace_10x10_prec_YieldGap")
rtb_gap<- paste(rtb_yieldgap,".nc", sep = "")
map_gap <- raster::stack(rtb_gap) # :: qualifies the call to the raster package
map_gap <- raster::brick(rtb_gap)
# NOTE(review): each of these three calls overwrites map_gap; only the last
# (a single-layer raster) survives — confirm that is intended.
map_gap <- raster::raster(rtb_gap)
# World shapefiles at three administrative levels
World_shp <- "C:/Users/CEGONZALEZ/Documents/cassava/SPATIAL/gisdata/"
World_1 <- shapefile(paste0(World_shp,"G2014_2013_0.shp"))
World_2 <- shapefile(paste0(World_shp,"G2014_2013_1.shp"))
World_3 <- shapefile(paste0(World_shp, "G2014_2013_2.shp"))
plot(World_1)
# Data processing ----
fao_oferta # production, area and yield data
# Drop bookkeeping columns and rename the key columns.
fao_oferta$Domain.Code<- NULL
fao_oferta$Flag<- NULL
fao_oferta$FlagD<- NULL
fao_oferta$ItemCode<- NULL
fao_oferta$ElementCode<- NULL
# Drop the two trailing FAOSTAT footer rows.
# NOTE(review): hard-coded row indices assume a fixed file size — verify.
fao_oferta<- fao_oferta[-(17031:17032),]
names(fao_oferta)[names(fao_oferta) == 'ElementName'] <- 'variable'
names(fao_oferta)[names(fao_oferta) == 'AreaName'] <- 'region'
fao_land_area # land-use data
fao_land_area$Domain.Code<- NULL
fao_land_area$Flag<- NULL
fao_land_area$FlagD<- NULL
fao_land_area$ItemCode<- NULL
fao_land_area$ElementCode<- NULL
fao_land_area$ElementName<-NULL
# Drop the two trailing footer rows (hard-coded indices — see note above).
fao_land_area<- fao_land_area[-(51923:51924),]
names(fao_land_area)[names(fao_land_area) == 'ItemName'] <- 'variable'
names(fao_land_area)[names(fao_land_area) == 'AreaName'] <- 'region'
fao_price # producer price data
fao_price$Domain.Code<- NULL
fao_price$Flag<- NULL
fao_price$FlagD<- NULL
fao_price$ItemCode<- NULL
fao_price$ElementCode<- NULL
fao_price$ItemName<-NULL
fao_price$Domain<-NULL
# Drop the two trailing footer rows (hard-coded indices).
fao_price<- fao_price[-(3366:3367),]
names(fao_price)[names(fao_price) == 'ElementName'] <- 'variable'
names(fao_price)[names(fao_price) == 'AreaName'] <- 'region'
fao_pricePro_index # annual producer price index data
# Drop bookkeeping columns.
fao_pricePro_index$Domain.Code<- NULL
fao_pricePro_index$Flag<- NULL
fao_pricePro_index$FlagD<- NULL
fao_pricePro_index$ItemCode<- NULL
fao_pricePro_index$ElementCode<- NULL
fao_pricePro_index$Domain<-NULL
# BUG FIX: this line previously subset fao_oferta (the production data),
# clobbering the price-index dataset entirely. Drop the two FAOSTAT footer
# rows from the price-index data itself.
# NOTE(review): confirm 1317:1318 are the footer rows of this file.
fao_pricePro_index <- fao_pricePro_index[-(1317:1318),]
names(fao_pricePro_index)[names(fao_pricePro_index) == 'ElementName'] <- 'variable'
names(fao_pricePro_index)[names(fao_pricePro_index) == 'AreaName'] <- 'region'
fao_pricePro_monthly # monthly producer price index data
# Drop bookkeeping columns.
fao_pricePro_monthly$Domain.Code<- NULL
fao_pricePro_monthly$Flag<- NULL
fao_pricePro_monthly$FlagD<- NULL
fao_pricePro_monthly$ItemCode<- NULL
fao_pricePro_monthly$ElementCode<- NULL
fao_pricePro_monthly$Domain<-NULL
# Drop the two trailing FAOSTAT footer rows (hard-coded indices — verify).
fao_pricePro_monthly<- fao_pricePro_monthly[-(867:868),]
# NOTE(review): 'Moth' looks like a typo for 'Month'; left unchanged because
# later code may reference this column name.
names(fao_pricePro_monthly)[names(fao_pricePro_monthly) == 'ElementName'] <- 'Moth'
names(fao_pricePro_monthly)[names(fao_pricePro_monthly) == 'AreaName'] <- 'region'
fao_trade # trade data
# Drop bookkeeping columns.
fao_trade$Domain.Code<- NULL
fao_trade$Flag<- NULL
fao_trade$FlagD<- NULL
fao_trade$ItemCode<- NULL
fao_trade$ElementCode<- NULL
fao_trade$Domain<-NULL
# NOTE(review): 37749:377502 is very likely a typo for 37749:37750 (the two
# FAOSTAT footer rows, as in every other dataset above). As written it
# appears to drop every row from 37749 onward if the file is larger —
# verify the intended range before relying on this dataset.
fao_trade<- fao_trade[-(37749:377502),]
names(fao_trade)[names(fao_trade) == 'ElementName'] <- 'variable'
names(fao_trade)[names(fao_trade) == 'AreaName'] <- 'region'
fao_matrix_trade # trade data in matrix (reporter x partner) form
# Drop bookkeeping columns.
fao_matrix_trade$Domain.Code<- NULL
fao_matrix_trade$Flag<- NULL
fao_matrix_trade$FlagD<- NULL
fao_matrix_trade$ItemCode<- NULL
fao_matrix_trade$Element.Code<- NULL
fao_matrix_trade$Domain<-NULL
fao_matrix_trade$Item.Code<-NULL
fao_matrix_trade$Year.Code<-NULL
fao_matrix_trade$NoRecords<-NULL
fao_matrix_trade$Flag.Description<-NULL
# Drop the single trailing footer row (hard-coded index — verify).
fao_matrix_trade<- fao_matrix_trade[-(28389),]
names(fao_matrix_trade)[names(fao_matrix_trade) == 'Element'] <- 'variable'
names(fao_matrix_trade)[names(fao_matrix_trade) == 'AreaName'] <- 'region'
# COMTRADE trade data ----
# Collect the five period files into a list so they can be cleaned uniformly.
ListComtrade <- list(comtrade_1, comtrade_2, comtrade_3, comtrade_4, comtrade_5 )
lapply(ListComtrade, dim)
# Columns not needed for the analysis.
drop_cols <- c("Aggregate.Level",
               "Is.Leaf.Code",
               "X2nd.Partner.Code",
               "X2nd.Partner",
               "X2nd.Partner.ISO",
               "Customs.Proc..Code",
               "Mode.of.Transport.Code",
               "Mode.of.Transport",
               "Customs",
               "Flag",
               "FOB.Trade.Value..US..",
               "CIF.Trade.Value..US..",
               "Period", "Period.Desc.",
               "Partner.ISO",
               "Alt.Qty.Unit.Code",
               "Qty",
               "Alt.Qty",
               "Gross.weight..kg.",
               "Partner.Code" )
# Drop those columns from every file.
# Fixes: seq_along() instead of 1:length(); a logical keep-mask instead of
# `-which(...)`, which would have selected ZERO columns if no name matched
# (negative of integer(0) is integer(0)).
for(i in seq_along(ListComtrade)) {
  cat('Delete columns in file:',i,'\n')
  ListComtrade[[i]] <- ListComtrade[[i]][ , !(names(ListComtrade[[i]]) %in% drop_cols)]
}
# Confirm the columns were removed.
lapply(ListComtrade, dim)
c_1 <- ListComtrade[[1]]
c_2 <- ListComtrade[[2]]
c_3 <- ListComtrade[[3]]
c_4 <- ListComtrade[[4]]
c_5 <- ListComtrade[[5]]
# Stack the five periods into a single data frame.
TradeCom <- rbind(c_1, c_2, c_3, c_4, c_5) # append datasets one on top of the other
# Convert yields from hectograms to kilograms (1 hg = 0.1 kg) ----
# Vectorized replacement for the previous row-by-row loop (which tested
# `i %in% r` on every row): index the Yield rows once and scale them in a
# single operation. Same result, O(n) instead of O(n^2).
r<- which(fao_oferta$variable=="Yield")
fao_oferta$Value[r] <- fao_oferta$Value[r] * 0.1
# Generate plots ----
# Plot inputs: cross-country summary statistics per variable/item/year.
# Central 90% band: 5th and 95th percentiles around median and mean.
# NOTE(review): testcarlos and testcarlos_median both hold the median here —
# the duplication looks unintentional.
testcarlos<- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), median(Value, na.rm=TRUE)))
testcarlos_median <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), median(Value, na.rm=TRUE)))
testcarlos_mean <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), mean(Value, na.rm = TRUE)))
colnames(testcarlos_mean)[4] <- 'Value'
colnames(testcarlos_median)[4] <- 'Value'
colnames(testcarlos)[4] <- 'Value'
# 5th percentile per group, attached as column p0_05.
quantileFun <- function(x){z <- stats::quantile(x, probs=0.05, na.rm=TRUE); return(z)}
aux <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), quantileFun(Value)))
testcarlos$p0_05 <- as.numeric(aux[,ncol(aux)])
testcarlos_mean$p0_05<- as.numeric(aux[,ncol(aux)])
testcarlos_median$p0_05<- as.numeric(aux[,ncol(aux)])
# 95th percentile per group, attached as column p0_95.
quantileFun <- function(x){z <- stats::quantile(x, probs=0.95, na.rm=TRUE); return(z)}
aux <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), quantileFun(Value)))
testcarlos$p0_95 <- as.numeric(aux[,ncol(aux)])
testcarlos_mean$p0_95 <- as.numeric(aux[,ncol(aux)])
testcarlos_median$p0_95 <- as.numeric(aux[,ncol(aux)])
# testcarlos$gap <- to be designed
# 80% band: 10th and 80th percentiles.
# BUG FIX: this block previously reassigned `testcarlos`, wiping out the
# p0_05/p0_95 columns that the ribbon plots below depend on and making the
# gab computation fail (those columns no longer existed). The 10th/80th
# percentile stats now live in their own data frame, testcarlos90.
testcarlos90 <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), median(Value, na.rm=TRUE)))
colnames(testcarlos90)[4] <- 'Value'
quantileFun <- function(x){z <- stats::quantile(x, probs=0.1, na.rm=TRUE); return(z)}
aux <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), quantileFun(Value)))
testcarlos90$pmin_10 <- as.numeric(aux[,ncol(aux)])
quantileFun <- function(x){z <- stats::quantile(x, probs=0.8, na.rm=TRUE); return(z)}
aux <- as.data.frame(dplyr::summarise(group_by(fao_oferta,variable,ItemName,Year), quantileFun(Value)))
testcarlos90$pmax_80 <- as.numeric(aux[,ncol(aux)])
# Width of the central 90% band (name `gab` kept for later references).
testcarlos$gab <- testcarlos$p0_95-testcarlos$p0_05
# Filters: row indices for cassava by variable, and per-country yield rows.
yield_cassava <- which(fao_oferta$variable=="Yield" & fao_oferta$ItemName=="Cassava")
produ_cassava<- which(fao_oferta$variable=="Production" & fao_oferta$ItemName=="Cassava")
area_cassava<- which(fao_oferta$variable=="Area harvested" & fao_oferta$ItemName=="Cassava")
fao_oferta$region <- as.character(fao_oferta$region)
# grep vectorized over patterns; intersected with the cassava-yield rows.
# NOTE(review): grep2 is redefined identically three times — one definition
# would suffice.
grep2 <- Vectorize(FUN=grep, vectorize.args='pattern')
thailandia = c("Thailand")
thailandia <- as.numeric(unlist(grep2(pattern=thailandia, x=as.character(fao_oferta$region))))
thailandia <- base::intersect(yield_cassava, thailandia)
grep2 <- Vectorize(FUN=grep, vectorize.args='pattern')
Nigeria = c("Nigeria")
Nigeria <- as.numeric(unlist(grep2(pattern=Nigeria, x=as.character(fao_oferta$region))))
Nigeria <- base::intersect(yield_cassava, Nigeria)
grep2 <- Vectorize(FUN=grep, vectorize.args='pattern')
brazil = c("Brazil")
brazil <- as.numeric(unlist(grep2(pattern=brazil, x=as.character(fao_oferta$region))))
brazil <- base::intersect(yield_cassava, brazil)
# # Base plot -------
# gg: all countries' cassava yield as spaghetti lines, colored by region.
gg<- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value, color=region), alpha=0.2)
gg<- gg + geom_line(aes(group = region, alpha=0.1))
gg<- gg + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg<- gg + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg<- gg + ggtitle("World Cassava Yield \n (1960-2014)")
gg<- gg + theme(legend.position="none")
gg<- gg + ylab('Yield Kg/Ha') + xlab('Years') + theme()
gg<- gg + theme(axis.text.x=element_text(size=14, angle=45))
gg<- gg + theme(axis.text.y=element_text(size=14, angle=360))
gg<- gg + theme(plot.title=element_text(size=24, face = 'bold'))
gg # render
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldAllCountries.png", gg, width=15, height=10.5, units='in') #saves g
# gg1: spaghetti lines plus the 5th-95th percentile ribbon.
gg1<- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value))
gg1<- gg1 + geom_line(aes(group = region), alpha=0.1)
gg1<- gg1 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='red', alpha=0.4)
gg1<- gg1 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg1<- gg1 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg1<- gg1 + ggtitle("World Cassava Yield \n (1960-2014)")
gg1<- gg1 + theme(legend.position="none")
gg1<- gg1 + ylab('Yield Kg/Ha') + xlab('Years') + theme()
gg1<- gg1 + theme(axis.text.x=element_text(size=14, angle=45))
gg1<- gg1 + theme(axis.text.y=element_text(size=14, angle=360))
gg1<- gg1 + theme(plot.title=element_text(size=24, face = 'bold'))
gg1<- gg1 + theme()
gg1
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldAllCountries1.png", gg1, width=15, height=10.5, units='in') #saves g
# gg2: ribbon only (country lines fully transparent via alpha=0).
gg2<- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value))
gg2<- gg2 + geom_line(aes(group = region), alpha=0)
gg2<- gg2 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='red', alpha=0.4)
gg2<- gg2 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg2<- gg2 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg2<- gg2 + ggtitle("World Cassava Yield \n (1960-2014)")
gg2<- gg2 + theme(legend.position="none")
gg2<- gg2 + ylab('Yield Kg/Ha') + xlab('Years') + theme()
gg2<- gg2 + theme(axis.text.x=element_text(size=14, angle=45))
gg2<- gg2 + theme(axis.text.y=element_text(size=14, angle=360))
gg2<- gg2 + theme(plot.title=element_text(size=24, face = 'bold'))
gg2<- gg2 + theme()
gg2
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldAllCountries2.png", gg2, width=15, height=10.5, units='in') #saves g
# gg3: ribbon plus world median (black) and mean (blue) lines.
gg3<- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value))
gg3<- gg3 + geom_line(aes(group = region), alpha=0)
gg3<- gg3 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='red', alpha=0.4)
gg3<- gg3 + geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='black', alpha=1)
gg3<- gg3 + geom_line(data=testcarlos_mean[which(testcarlos_mean$variable=='Yield' & testcarlos_mean$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='blue', alpha=1)
gg3<- gg3 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg3<- gg3 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg3<- gg3 + ggtitle("World Cassava Yield \n (1960-2014)")
gg3<- gg3 + theme(legend.position="none")
gg3<- gg3 + ylab('Yield Kg/Ha') + xlab('Years') + theme()
gg3<- gg3 + theme(axis.text.x=element_text(size=14, angle=45))
gg3<- gg3 + theme(axis.text.y=element_text(size=14, angle=360))
gg3<- gg3 + theme(plot.title=element_text(size=24, face = 'bold'))
gg3<- gg3 + theme()
gg3<- gg3 + annotate(geom="text", x=2008, y=9000, label= "Median", size=15, color="black", fontface="italic")
gg3<- gg3 + annotate(geom="text", x=2009, y=13000, label= "Mean", size=15, color="blue", fontface="italic")
gg3
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldAllCountries3.png", gg3, width=15, height=10.5, units='in') #saves g
# gg4 (gap bar-chart draft) — left commented out as work in progress.
# gg4<- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value))
# gg4<- gg4 + geom_line(aes(group = region), alpha=0)
# gg4<- gg4 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='red', alpha=0.4)
# gg4<- gg4 + geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='black', alpha=0.5)
# gg4<- gg4 + geom_line(data=testcarlos_mean[which(testcarlos_mean$variable=='Yield' & testcarlos_mean$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='blue', alpha=0.5)
# gg4<- gg4 + geom_bardata=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=gab),size=1.5, colour='red', alpha=1)
#
# geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=gab),size=1.5, colour='red', alpha=1)
# gg4<- gg4 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
# gg4<- gg4 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
# gg4<- gg4 + ggtitle("World Cassava Yield \n (1960-2014)")
# gg4<- gg4 + theme(legend.position="none")
# gg4<- gg4 + ylab('Yield Kg/Ha') + xlab('Years') + theme()
# gg4<- gg4 + theme(axis.text.x=element_text(size=14, angle=45))
# gg4<- gg4 + theme(axis.text.y=element_text(size=14, angle=360))
# gg4<- gg4 + theme(plot.title=element_text(size=24, face = 'bold'))
# gg4<- gg4 + theme()
# gg4<- gg4 + annotate(geom="text", x=2008, y=9000, label= "Median", size=15, color="black", fontface="italic")
# gg4<- gg4 + annotate(geom="text", x=2009, y=13000, label= "Mean", size=15, color="blue", fontface="italic")
# gg4<- gg4 + annotate(geom="text", x=2008, y=22000, label= "Gap", size=15, color="red", fontface="italic")
# gg4
# BUG FIX: gg4 is never created (all of its construction above is commented
# out), so this ggsave() raised "object 'gg4' not found" at runtime. The save
# call is disabled until the gg4 draft is restored.
# ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldAllCountries4.png", gg4, width=15, height=10.5, units='in') #saves g
# Extend the earlier `gg` object: overlay the world median plus the
# Nigeria / Thailand / Brazil country series.
gg<- gg + geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='orange', alpha=1)
gg<- gg + geom_line(data=fao_oferta[Nigeria,], aes(x=Year, y=Value, group = region), size=1.2, colour='red')
gg<- gg + geom_line(data=fao_oferta[thailandia,], aes(x=Year, y=Value, group = region), size=1.2, colour='green')
gg<- gg + geom_line(data=fao_oferta[brazil,], aes(x=Year, y=Value, group = region), size=1.2, colour='pink')
#gg<- gg + theme_bw(20) + theme(legend.position="none")
gg<- gg + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg<- gg + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg<- gg + ggtitle("Cassava Yield of World \n & Confidence Intervals (1960-2014)")
gg<- gg + ylab('Yield Kg/Ha') + xlab('Years')
gg<- gg + theme(axis.text.x=element_text(size=14, angle=45))
gg<- gg + theme(axis.text.y=element_text(size=14, angle=360))
gg<- gg + theme(plot.title=element_text(size=24, face = 'bold'))
gg<- gg + annotate(geom="text", x=2008, y=9000, label= "Median", size=15, color="black", fontface="italic")
gg<- gg + annotate(geom="text", x=2008, y=12000, label= "Nigeria", size=15, color="black", fontface="italic")
gg<- gg + annotate(geom="text", x=2008, y=20000, label= "Thailandia", size=15, color="black", fontface="italic")
gg<- gg + annotate(geom="text", x=2008, y=15000, label= "Brazil", size=15, color="black", fontface="italic")
gg
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldCassava.png", gg, width=15, height=10.5, units='in') #saves g
# gg1 (redefined): confidence band with the three highlighted countries.
gg1 <- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value)) # original data
gg1 <- gg1 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='blue', alpha=0.7) # summary data
#gg1 <- gg1 + geom_line(aes(group = region),alpha=0.2)
gg1 <- gg1 + geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='orange', alpha=1)
gg1<- gg1 + geom_line(data=fao_oferta[Nigeria,], aes(x=Year, y=Value, group = region), size=1.2, colour='red')
gg1<- gg1 + geom_line(data=fao_oferta[thailandia,], aes(x=Year, y=Value, group = region), size=1.2, colour='green')
gg1<- gg1 + geom_line(data=fao_oferta[brazil,], aes(x=Year, y=Value, group = region), size=1.2, colour='pink')
gg1<- gg1 + theme_bw(20) + theme(legend.position="none")
gg1<- gg1 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg1<- gg1 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg1<- gg1 + ggtitle("Cassava Yield of World \n & Confidence Intervals (1960-2014)")
gg1<- gg1 + ylab('Yield Kg/Ha') + xlab('Years')
gg1<- gg1 + theme(axis.text.x=element_text(size=14, angle=45))
gg1<- gg1 + theme(axis.text.y=element_text(size=14, angle=360))
gg1<- gg1 + theme(plot.title=element_text(size=24, face = 'bold'))
gg1<- gg1 + annotate(geom="text", x=2008, y=9000, label= "Median", size=15, color="black", fontface="italic")
gg1<- gg1 + annotate(geom="text", x=2008, y=12000, label= "Nigeria", size=15, color="black", fontface="italic")
gg1<- gg1 + annotate(geom="text", x=2008, y=20000, label= "Thailandia", size=15, color="black", fontface="italic")
gg1<- gg1 + annotate(geom="text", x=2008, y=15000, label= "Brazil", size=15, color="black", fontface="italic")
gg1
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/CIYields.png", gg1, width=15, height=10.5, units='in') #saves g
# Interval only: confidence band and median, no country lines.
gg2 <- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value)) # original data
gg2 <- gg2 + geom_ribbon(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, ymin=p0_05, ymax=p0_95, linetype=NA), fill='blue', alpha=0.7) # summary data
#gg1 <- gg1 + geom_line(aes(group = region),alpha=0.2)
gg2 <- gg2 + geom_line(data=testcarlos[which(testcarlos$variable=='Yield' & testcarlos$ItemName=='Cassava'),], aes(x=Year, y=Value),size=1.5, colour='orange', alpha=1)
#gg1<- gg1 + geom_line(data=fao_oferta[Nigeria,], aes(x=Year, y=Value, group = region), size=1.2, colour='red')
#gg1<- gg1 + geom_line(data=fao_oferta[thailandia,], aes(x=Year, y=Value, group = region), size=1.2, colour='green')
#gg1<- gg1 + geom_line(data=fao_oferta[brazil,], aes(x=Year, y=Value, group = region), size=1.2, colour='blue')
gg2<- gg2 + theme_bw(20) + theme(legend.position="none")
gg2<- gg2 + scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2),1))
gg2<- gg2 + scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1))
gg2<- gg2 + ggtitle("Cassava Yield of World \n & Confidence Intervals (1960-2014)")
gg2<- gg2 + ylab('Yield Kg/Ha') + xlab('Years')
gg2<- gg2 + theme(axis.text.x=element_text(size=14, angle=45))
gg2<- gg2 + theme(axis.text.y=element_text(size=14, angle=360))
gg2<- gg2 + theme(plot.title=element_text(size=24, face = 'bold'))
gg2<- gg2 + annotate(geom="text", x=2008, y=9000, label= "Median", size=15, color="black", fontface="italic")
#gg1<- gg1 + annotate(geom="text", x=2008, y=12000, label= "Nigeria", size=15, color="black", fontface="italic")
#gg1<- gg1 + annotate(geom="text", x=2008, y=20000, label= "Thailandia", size=15, color="black", fontface="italic")
#gg1<- gg1 + annotate(geom="text", x=2008, y=15000, label= "Brazil", size=15, color="black", fontface="italic")
gg2
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/onlyCIYields.png", gg2, width=15, height=10.5, units='in') #saves g
# graph Yield ----
# graphs by countries ----
# Thailand highlighted in red over every country's cassava-yield trajectory,
# with a light-blue band marking the 2,500-20,000 reference corridor.
yth <- ggplot(data = fao_oferta[yield_cassava, ], aes(x = Year, y = Value)) +
  geom_ribbon(aes(ymin = 2500, ymax = 20000), fill = "lightblue", alpha = 0.7) +
  geom_line(aes(y = 20000), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(y = 2500), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(group = region), alpha = 0.2) +
  geom_line(data = fao_oferta[thailandia, ],
            aes(x = Year, y = Value, group = region), size = 2, colour = 'red') +
  theme_gray(20) + theme(legend.position = "none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 4), 1)) +
  scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000), 1)) +
  ylab('Yield Kg') + xlab('Years') +
  theme(axis.text.x = element_text(size = 14, angle = 360)) +
  theme(axis.text.y = element_text(size = 14, angle = 360)) +
  theme(plot.title = element_text(size = 24, face = 'bold')) +
  annotate(geom = "text", x = 1990, y = 10000, label = "Thailand",
           size = 30, color = "black", fontface = "italic")
yth
ggsave(file = "C:/Users/CEGONZALEZ/Documents/cassava/graph/Thailand.png",
       yth, width = 10, height = 10.5, units = 'in')
# Brazil ----
# Rebuild the cassava-yield row indices, then narrow them down to Brazil.
yield_cassava <- which(fao_oferta$variable == "Yield" & fao_oferta$ItemName == "Cassava")
fao_oferta$region <- as.character(fao_oferta$region)
grep2 <- Vectorize(FUN = grep, vectorize.args = 'pattern')
brazil_rows <- as.numeric(unlist(grep2(pattern = c("Brazil"), x = as.character(fao_oferta$region))))
brazil <- base::intersect(yield_cassava, brazil_rows)
# Highlight Brazil in red over the grey all-country yield lines.
yb <- ggplot(data = fao_oferta[yield_cassava, ], aes(x = Year, y = Value)) +
  geom_ribbon(aes(ymin = 2500, ymax = 20000), fill = "lightblue", alpha = 0.7) +
  geom_line(aes(y = 20000), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(y = 2500), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(group = region), alpha = 0.2) +
  geom_line(data = fao_oferta[brazil, ],
            aes(x = Year, y = Value, group = region), size = 2, colour = 'red') +
  theme_gray(20) + theme(legend.position = "none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 4), 1)) +
  scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000), 1)) +
  ylab('Yield Kg') + xlab('Years') +
  theme(axis.text.x = element_text(size = 14, angle = 360)) +
  theme(axis.text.y = element_text(size = 14, angle = 360)) +
  theme(plot.title = element_text(size = 24, face = 'bold')) +
  annotate(geom = "text", x = 1990, y = 10000, label = "Brazil",
           size = 30, color = "black", fontface = "italic")
yb
ggsave(file = "C:/Users/CEGONZALEZ/Documents/cassava/graph/Brazil.png",
       yb, width = 10, height = 10.5, units = 'in')
# Nigeria ----
# Rebuild the cassava-yield row indices, then narrow them down to Nigeria.
yield_cassava <- which(fao_oferta$variable == "Yield" & fao_oferta$ItemName == "Cassava")
fao_oferta$region <- as.character(fao_oferta$region)
grep2 <- Vectorize(FUN = grep, vectorize.args = 'pattern')
nigeria_rows <- as.numeric(unlist(grep2(pattern = c("Nigeria"), x = as.character(fao_oferta$region))))
Nigeria <- base::intersect(yield_cassava, nigeria_rows)
# Highlight Nigeria in red over the grey all-country yield lines.
yn <- ggplot(data = fao_oferta[yield_cassava, ], aes(x = Year, y = Value)) +
  geom_ribbon(aes(ymin = 2500, ymax = 20000), fill = "lightblue", alpha = 0.7) +
  geom_line(aes(y = 20000), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(y = 2500), colour = 'black', size = 0.5, linetype = "dashed") +
  geom_line(aes(group = region), alpha = 0.2) +
  geom_line(data = fao_oferta[Nigeria, ],
            aes(x = Year, y = Value, group = region), size = 2, colour = 'red') +
  theme_gray(20) + theme(legend.position = "none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 4), 1)) +
  scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000), 1)) +
  ylab('Yield Kg') + xlab('Years') +
  theme(axis.text.x = element_text(size = 14, angle = 360)) +
  theme(axis.text.y = element_text(size = 14, angle = 360)) +
  theme(plot.title = element_text(size = 24, face = 'bold')) +
  annotate(geom = "text", x = 1990, y = 10000, label = "Nigeria",
           size = 30, color = "black", fontface = "italic")
yn
ggsave(file = "C:/Users/CEGONZALEZ/Documents/cassava/graph/Nigeria.png",
       yn, width = 10, height = 10.5, units = 'in')
# Thailand, Nigeria and Brazil together ----
# Overlay the three highlighted producers on every country's cassava-yield
# series (Nigeria blue, Thailand green, Brazil purple).
yield_cassava <- which(fao_oferta$variable=="Yield" & fao_oferta$ItemName=="Cassava")
fao_oferta$region <- as.character(fao_oferta$region)
grep2 <- Vectorize(FUN=grep, vectorize.args='pattern')
yall <- ggplot(data = fao_oferta[yield_cassava,], aes(x=Year, y=Value)) +
  geom_ribbon(aes(ymin=2500, ymax=20000), fill="lightblue", alpha=0.7) +
  geom_line(aes(y= 20000), colour = 'black', size = 0.5, linetype="dashed") +
  geom_line(aes(y= 2500), colour = 'black', size = 0.5, linetype="dashed") +
  geom_line(aes(group = region), alpha=0.2) +
  geom_line(data=fao_oferta[Nigeria,], aes(x=Year, y=Value, group = region), size=1.5, colour='blue') +
  geom_line(data=fao_oferta[thailandia,], aes(x=Year, y=Value, group = region), size=1.5, colour='green') +
  geom_line(data=fao_oferta[brazil,], aes(x=Year, y=Value, group = region), size=1.5, colour='purple') +
  theme_gray(20) + theme(legend.position="none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 4),1)) +
  scale_y_continuous(breaks = round(seq(min(0), max(40000), by = 5000),1)) +
  ylab('Yield Kg') + xlab('Years') +
  theme(axis.text.x=element_text(size=14, angle=360)) +
  theme(axis.text.y=element_text(size=14, angle=360)) +
  theme(plot.title=element_text(size=24, face = 'bold'))
# Possible direct labels for the highlighted lines (kept from the original):
#yall <- yall + geom_dl(aes(label = Nigeria), method = list(dl.combine("first.points", "last.points"), cex = 0.8))
#yall <- yall + annotate(geom="text", x=1990, y=10000, label= "Nigeria", size=30, color="black", fontface="italic")
yall
# BUG FIX: the original call saved `yf2`, an object that is never defined in
# this script; save the plot actually built above (`yall`) instead.
ggsave(file="C:/Users/CEGONZALEZ/Documents/cassava/graph/YieldCassava.png", yall, width=10, height=10.5, units='in')
# graph production -------
# World cassava production with the pmin_10..pmax_80 band from `testcarlos`.
# NOTE(review): the title says "Intervals 90%" while the ribbon uses the
# pmin_10/pmax_80 columns -- confirm what those columns actually represent.
prod_band <- which(testcarlos$variable == 'Production' & testcarlos$ItemName == 'Cassava')
ggq <- ggplot(data = fao_oferta[produ_cassava, ], aes(x = Year, y = Value)) +
  geom_ribbon(data = testcarlos[prod_band, ],
              aes(x = Year, ymin = pmin_10, ymax = pmax_80, linetype = NA),
              fill = 'red', alpha = 0.6) +
  theme_bw(20) + theme(legend.position = "none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2), 1)) +
  scale_y_continuous(breaks = round(seq(min(100000), max(14000000), by = 500000), 1)) +
  ggtitle("Cassava Production of World, \n & Intervals 90% (1960-2014)") +
  ylab('Tonnes') + xlab('Years') +
  theme(axis.text.x = element_text(size = 14, angle = 45)) +
  theme(axis.text.y = element_text(size = 14, angle = 360)) +
  theme(plot.title = element_text(size = 24, face = 'bold'))
ggq
ggsave(file = "C:/Users/CEGONZALEZ/Documents/cassava/graph/production90.png",
       ggq, width = 15, height = 10.5, units = 'in')
# graph Area ------
# World cassava harvested area with the pmin_10..pmax_80 band from `testcarlos`.
area_band <- which(testcarlos$variable == 'Area harvested' & testcarlos$ItemName == 'Cassava')
gga <- ggplot(data = fao_oferta[area_cassava, ], aes(x = Year, y = Value)) +
  geom_ribbon(data = testcarlos[area_band, ],
              aes(x = Year, ymin = pmin_10, ymax = pmax_80, linetype = NA),
              fill = 'green', alpha = 0.6) +
  theme_bw(20) + theme(legend.position = "none") +
  scale_x_continuous(breaks = round(seq(min(1960), max(2014), by = 2), 1)) +
  scale_y_continuous(breaks = round(seq(min(20000), max(300000), by = 20000), 1)) +
  ggtitle("Cassava Area of World, \n & Intervals 90% (1960-2014)") +
  ylab('Ha') + xlab('Years') +
  theme(axis.text.x = element_text(size = 14, angle = 45)) +
  theme(axis.text.y = element_text(size = 14, angle = 360)) +
  theme(plot.title = element_text(size = 24, face = 'bold'))
gga
ggsave(file = "C:/Users/CEGONZALEZ/Documents/cassava/graph/Area90.png",
       gga, width = 15, height = 10.5, units = 'in')
# graph Spatial information -----
# SPAM: Spatial Production Allocation Model raster of cassava yield.
spam_yield # raster (loaded earlier in the script -- TODO confirm source)
plot(World_1) # shapefile of country boundaries
class(World_1)
getinfo.shape(World_1)
head(World_1@data) # '@' exposes the attribute table stored inside the shapefile
View(World_1)
# Remove Antarctica and Greenland from the world shapefile
World_1<- World_1[World_1@data$ADM0_NAME!="Antarctica",]
World_1<- World_1[World_1@data$ADM0_NAME!="Greenland",]
# Plot the SPAM raster with country borders overlaid
plot(spam_yield)
plot(World_1, bg="transparent", add=TRUE)
# Google-Earth-style satellite basemaps (mapta is fetched but never plotted)
mymap <- gmap("Colombia", type = "satellite")
mapta <- gmap("thailand", type = "satellite")
mapssa <- gmap("SSA Africa", type = "satellite")
plot(mymap)
plot(mapssa)
# Raster visualisation via levelplot
levelplot(spam_yield)
# NOTE(review): this title says tons/ha while the later maps say kg/ha --
# confirm the units of the SPAM raster.
p <- levelplot(spam_yield, layers=1,par.settings = RdBuTheme, margin = FALSE, main = "Cassava yield in tons/ha")
p<- p + layer(sp.lines(World_1, lwd=0.7, col="darkgray"))
# `pic` is an output-directory path defined elsewhere in the script -- TODO confirm
tiff(filename=paste(pic,"spamyield.tif",sep=""),
     width = 12, height = 8, units = 'in', res = 300)
p
dev.off()
# Keep only cells where a yield is reported (zero cells become NA)
spam_yield[spam_yield[]==0]<- NA # spam_yield[] addresses every raster cell value
p1 <- levelplot(spam_yield, layers=1,par.settings = RdBuTheme, margin = FALSE, main = "Cassava Yield in kg/ha\nSPAM")
p1<- p1 + layer(sp.lines(World_1, lwd=0.7, col="darkgray"))
tiff(filename=paste(pic,"spamyield1.tif",sep=""),
     width = 12, height = 8, units = 'in', res = 300)
p1
dev.off()
# RTB yield, production, area and yield gaps
yield <- map_rtb[[2]] # second layer of the RTB stack -- presumably yield; verify
hist(yield)
levelplot(yield)
# Yields: mask zero cells before mapping
yield[yield[]==0]<- NA # yield[] addresses every raster cell value
prtb <- levelplot(yield, layers=1,par.settings = RdBuTheme, margin = FALSE, main = "Cassava Yield in kg/ha\nRTB")
prtb<- prtb + layer(sp.lines(World_1, lwd=0.7, col="darkgray"))
tiff(filename=paste(pic,"RTByield.tif",sep=""),
     width = 12, height = 8, units = 'in', res = 300)
prtb
dev.off()
# Yield gap map
map_gap
sum(map_gap)
histogram(map_gap)
map_gap[map_gap[]==0]<- NA
gap_yield <- levelplot(map_gap, layers=1,par.settings = RdBuTheme, margin = FALSE, main = "Cassava Gap Yield in kg/ha\nRTB")
gap_yield<- gap_yield + layer(sp.lines(World_1, lwd=0.7, col="darkgray"))
tiff(filename=paste(pic,"RTBGapYield.tif",sep=""),
     width = 12, height = 8, units = 'in', res = 300)
gap_yield
dev.off()
# global trends ----
# heatmap chart (interactive exploration of the trade data)
names(TradeCom)
View(fao_oferta)
# Trade network ----
# `fao_matrix_trade` holds the bilateral FAO trade flows used below.
fao_matrix_trade
names(fao_matrix_trade)
# Cassava dried 1986 -----
# Build the 1986 bilateral export-flow table for dried cassava and draw chord
# diagrams of the trade network with circlize.
flow_x_casDried_1986<- fao_matrix_trade[which(fao_matrix_trade$variable=="Export Quantity" &
                                                fao_matrix_trade$Item=="Cassava dried" & fao_matrix_trade$Year==1986),]
# Drop bookkeeping columns, keeping reporter, partner and the flow value.
flow_x_casDried_1986$RecordOrder<-NULL
flow_x_casDried_1986$Reporter.Country.Code<-NULL
flow_x_casDried_1986$Partner.Country.Code<-NULL
flow_x_casDried_1986$Item<-NULL
flow_x_casDried_1986$Unit<-NULL
flow_x_casDried_1986$Year<- NULL
flow_x_casDried_1986$variable<- NULL
rownames(flow_x_casDried_1986) <- 1:nrow(flow_x_casDried_1986)
# length(unique(flow_x_casDried_1986$Reporter.Countries))
# length(unique(flow_x_casDried_1986$Partner.Countries))
#
# dim(expand.grid(unique(df0$orig_reg), unique(df0$dest_reg)))
# dim(df0)
# dim(expand.grid(unique(flow_x_casDried_1986$Reporter.Countries), unique(flow_x_casDried_1986$Partner.Countries)))
# dim(flow_x_casDried_1986)
# Complete the flow matrix: enumerate every reporter/partner pair and add the
# missing pairs as zero flows, using an sqldf anti-join (rows of `aux` that do
# not appear in `aux2`).
aux <- expand.grid(unique(flow_x_casDried_1986$Reporter.Countries), unique(flow_x_casDried_1986$Partner.Countries))
colnames(aux) <- colnames(flow_x_casDried_1986[,1:2])
aux2 <- flow_x_casDried_1986[,1:2]
require(sqldf)
aux2 <- sqldf("select * from aux except select * from aux2")
dim(aux2)
aux2$Value <- 0
flow_x_casDried_1986 <- rbind(flow_x_casDried_1986, aux2)
dim(flow_x_casDried_1986)
#flow_x_casDried_1986 <- as.matrix(flow_x_casDried_1986)
#flow_x_casDried_1986 <- as.data.frame(flow_x_casDried_1986)
flow_x_casDried_1986$Reporter.Countries <- as.character(flow_x_casDried_1986$Reporter.Countries)
flow_x_casDried_1986$Partner.Countries <- as.character(flow_x_casDried_1986$Partner.Countries)
flow_x_casDried_1986$Value <- as.numeric(flow_x_casDried_1986$Value)
# Natural-log scale for link widths: zeros are bumped to 1 (log 1 = 0), and
# any resulting 0 is replaced by 0.5 so the link is still drawn.
# NOTE(review): this also remaps flows that were exactly 1 in the data's units.
flow_x_casDried_1986$Value[which(flow_x_casDried_1986$Value==0)] <- 1
flow_x_casDried_1986$Value <- log(flow_x_casDried_1986$Value, base=exp(1))
flow_x_casDried_1986$Value[which(flow_x_casDried_1986$Value==0)] <- 0.5
# Quick test renders of the chord diagram at different log-value cut-offs.
png('circos_test.png', width=8, height=8, units='in', res=300)
chordDiagram(x = flow_x_casDried_1986[flow_x_casDried_1986$Value>.6,])
dev.off()
png('circos_testyyy.png', width=8, height=8, units='in', res=300)
circos.par(start.degree = 90, gap.degree = 4, track.margin = c(-0.1, 0.1), points.overflow.warning = FALSE)
par(mar = rep(0, 4))
chordDiagram(x = flow_x_casDried_1986[flow_x_casDried_1986$Value>9,], transparency = 0.25,
             directional = 1,
             direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
             # annotationTrack = "name", annotationTrackHeight = c(0.05, 0.05),
             link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE)
dev.off()
## Plot parameters (circlize keeps global state; reset before re-configuring).
circos.clear()
circos.par(start.degree = 90, gap.degree = 4, track.margin = c(-0.1, 0.1), points.overflow.warning = FALSE)
par(mar = rep(0, 4))
# Exports 1986, cassava dried: final figure with custom sector labels.
png('X_cassavaDried1986.png', width=8, height=8, units='in', res=300)
chordDiagram(x = flow_x_casDried_1986[flow_x_casDried_1986$Value>0.5,], transparency = 0.25,
             directional = 1,
             direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
             annotationTrack = "grids", preAllocateTracks = list(track.height = 0.1),
             link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE)
# Draw each sector label; narrow sectors (< 20 degrees) get radial "clockwise"
# text so the country names stay readable.
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
  xlim = get.cell.meta.data("xlim")
  xplot = get.cell.meta.data("xplot")
  ylim = get.cell.meta.data("ylim")
  sector.name = get.cell.meta.data("sector.index")
  if(abs(xplot[2] - xplot[1]) < 20) {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise",
                niceFacing = TRUE, adj = c(0, 0.5))
  } else {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "inside",
                niceFacing = TRUE, adj = c(0.5, 0))
  }
}, bg.border = NA)
dev.off()
# Cassava dried 2000 ----
# Same pipeline as 1986: subset the 2000 dried-cassava export flows, complete
# the reporter/partner grid with zero flows, log-scale and draw the chord diagram.
flow_x_casDried_2000<- fao_matrix_trade[which(fao_matrix_trade$variable=="Export Quantity" &
                                                fao_matrix_trade$Item=="Cassava dried" & fao_matrix_trade$Year==2000),]
# Drop bookkeeping columns, keeping reporter, partner and the flow value.
flow_x_casDried_2000$RecordOrder<-NULL
flow_x_casDried_2000$Reporter.Country.Code<-NULL
flow_x_casDried_2000$Partner.Country.Code<-NULL
flow_x_casDried_2000$Item<-NULL
flow_x_casDried_2000$Unit<-NULL
flow_x_casDried_2000$Year<- NULL
flow_x_casDried_2000$variable<- NULL
rownames(flow_x_casDried_2000) <- 1:nrow(flow_x_casDried_2000)
# length(unique(flow_x_casDried_2000$Reporter.Countries))
# length(unique(flow_x_casDried_2000$Partner.Countries))
# dim(expand.grid(unique(flow_x_casDried_2000$Reporter.Countries), unique(flow_x_casDried_2000$Partner.Countries)))
# dim(flow_x_casDried_2000)
# Anti-join via sqldf: pairs in the full grid that have no observed flow.
auxCarlos <- expand.grid(unique(flow_x_casDried_2000$Reporter.Countries), unique(flow_x_casDried_2000$Partner.Countries))
colnames(auxCarlos) <- colnames(flow_x_casDried_2000[,1:2])
auxCarlos2 <- flow_x_casDried_2000[,1:2]
require(sqldf)
auxCarlos2 <- sqldf("select * from auxCarlos except select * from auxCarlos2")
dim(auxCarlos2)
auxCarlos2$Value <- 0
flow_x_casDried_2000 <- rbind(flow_x_casDried_2000, auxCarlos2) # stack the zero-flow pairs onto the observed flows
dim(flow_x_casDried_2000)
flow_x_casDried_2000$Reporter.Countries <- as.character(flow_x_casDried_2000$Reporter.Countries)
flow_x_casDried_2000$Partner.Countries <- as.character(flow_x_casDried_2000$Partner.Countries)
flow_x_casDried_2000$Value <- as.numeric(flow_x_casDried_2000$Value)
# Log scale as for 1986: zeros -> 1 -> log 0 -> 0.5 so links are still drawn.
flow_x_casDried_2000$Value[which(flow_x_casDried_2000$Value==0)] <- 1
flow_x_casDried_2000$Value <- log(flow_x_casDried_2000$Value, base=exp(1))
flow_x_casDried_2000$Value[which(flow_x_casDried_2000$Value==0)] <- 0.5
## Plot parameters (reset circlize's global state first).
circos.clear()
circos.par(start.degree = 90, gap.degree = 4, track.margin = c(-0.1, 0.1), points.overflow.warning = FALSE)
par(mar = rep(0, 4))
# Exports 2000, cassava dried. NOTE(review): the cut-off here is 1.5 (vs 0.5
# for 1986/2013) -- confirm this difference is intentional.
png('X_cassavaDried2000.png', width=8, height=8, units='in', res=300)
chordDiagram(x = flow_x_casDried_2000[flow_x_casDried_2000$Value>1.5,], transparency = 0.25,
             directional = 1,
             direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
             annotationTrack = "grids", preAllocateTracks = list(track.height = 0.1),
             link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE)
# Sector labels: radial text for narrow sectors, inside-facing otherwise.
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
  xlim = get.cell.meta.data("xlim")
  xplot = get.cell.meta.data("xplot")
  ylim = get.cell.meta.data("ylim")
  sector.name = get.cell.meta.data("sector.index")
  if(abs(xplot[2] - xplot[1]) < 20) {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise",
                niceFacing = TRUE, adj = c(0, 0.5))
  } else {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "inside",
                niceFacing = TRUE, adj = c(0.5, 0))
  }
}, bg.border = NA)
dev.off()
# Cassava dried 2013 ----
# Same pipeline for 2013. NOTE(review): Year is compared as a string here
# ("2013") while 1986/2000 used numerics -- confirm Year's type in
# fao_matrix_trade; the comparison works either way only via coercion.
flow_x_casDried_2013<- fao_matrix_trade[which(fao_matrix_trade$variable=="Export Quantity" &
                                                fao_matrix_trade$Item=="Cassava dried" & fao_matrix_trade$Year=="2013"),]
# Drop bookkeeping columns, keeping reporter, partner and the flow value.
flow_x_casDried_2013$RecordOrder<-NULL
flow_x_casDried_2013$Reporter.Country.Code<-NULL
flow_x_casDried_2013$Partner.Country.Code<-NULL
flow_x_casDried_2013$Item<-NULL
flow_x_casDried_2013$Unit<-NULL
flow_x_casDried_2013$Year<- NULL
flow_x_casDried_2013$variable<- NULL
rownames(flow_x_casDried_2013) <- 1:nrow(flow_x_casDried_2013)
# length(unique(flow_x_casDried_2013$Reporter.Countries))
# length(unique(flow_x_casDried_2013$Partner.Countries))
# dim(expand.grid(unique(flow_x_casDried_2013$Reporter.Countries), unique(flow_x_casDried_2013$Partner.Countries)))
# dim(flow_x_casDried_2013)
# Anti-join via sqldf: pairs in the full grid that have no observed flow.
ac <- expand.grid(unique(flow_x_casDried_2013$Reporter.Countries), unique(flow_x_casDried_2013$Partner.Countries))
colnames(ac) <- colnames(flow_x_casDried_2013[,1:2])
ac2 <- flow_x_casDried_2013[,1:2]
require(sqldf)
ac2 <- sqldf("select * from ac except select * from ac2")
dim(ac2)
ac2$Value <- 0
flow_x_casDried_2013 <- rbind(flow_x_casDried_2013, ac2) # stack the zero-flow pairs onto the observed flows
dim(flow_x_casDried_2013)
flow_x_casDried_2013$Reporter.Countries <- as.character(flow_x_casDried_2013$Reporter.Countries)
flow_x_casDried_2013$Partner.Countries <- as.character(flow_x_casDried_2013$Partner.Countries)
flow_x_casDried_2013$Value <- as.numeric(flow_x_casDried_2013$Value)
# Log scale as for 1986/2000: zeros -> 1 -> log 0 -> 0.5 so links still draw.
flow_x_casDried_2013$Value[which(flow_x_casDried_2013$Value==0)] <- 1
flow_x_casDried_2013$Value <- log(flow_x_casDried_2013$Value, base=exp(1))
flow_x_casDried_2013$Value[which(flow_x_casDried_2013$Value==0)] <- 0.5
# Reset circlize state; note gap.degree = 2 here (4 in the earlier figures).
circos.clear()
circos.par(start.degree = 90, gap.degree = 2, track.margin = c(-0.1, 0.1), points.overflow.warning = FALSE)
par(mar = rep(0, 4))
png('X_cassavaDried2013.png', width=8, height=8, units='in', res=300)
chordDiagram(x = flow_x_casDried_2013[flow_x_casDried_2013$Value>0.5,], transparency = 0.25,
             directional = 1,
             direction.type = c("arrows", "diffHeight"), diffHeight = -0.04,
             annotationTrack = "grids", preAllocateTracks = list(track.height = 0.1),
             link.arr.type = "big.arrow", link.sort = TRUE, link.largest.ontop = TRUE)
# Sector labels: radial text for narrow sectors, inside-facing otherwise.
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
  xlim = get.cell.meta.data("xlim")
  xplot = get.cell.meta.data("xplot")
  ylim = get.cell.meta.data("ylim")
  sector.name = get.cell.meta.data("sector.index")
  if(abs(xplot[2] - xplot[1]) < 20) {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "clockwise",
                niceFacing = TRUE, adj = c(0, 0.5))
  } else {
    circos.text(mean(xlim), ylim[1], sector.name, facing = "inside",
                niceFacing = TRUE, adj = c(0.5, 0))
  }
}, bg.border = NA)
dev.off()
|
46cdc7ba3161979bc417a8930aebff497bfe2a4b
|
61999842a6809627d96b554267b51b8b2e563816
|
/rankall.R
|
25ded797a7dd948bfebc1f5524f0500f4ea03500
|
[] |
no_license
|
odonovdo/ProgrammingAssignment3
|
3c433b06dfd5f1e3f7be6a47aac275e3714264b1
|
216f6fb63e6c0c779962e60ddceef8565c3b7e01
|
refs/heads/master
| 2020-04-13T03:32:19.779077
| 2015-03-25T09:11:41
| 2015-03-25T09:11:41
| 32,801,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
rankall.R
|
# NOTE(review): setwd() inside a script hurts portability; prefer running the
# script from the project directory or parameterising the data path.
setwd("C:/Users/odonovad/datasciencecoursera/ProgrammingAssignment3")

# rankall: for every state, return the hospital holding rank `num` ("best",
# "worst", or a numeric rank) for 30-day mortality of the given `outcome`.
# Returns a data frame with columns `hospital` and `state`; a state with fewer
# hospitals than the requested rank yields NA (via out-of-range row indexing).
rankall <- function(outcome,num= "best") {
    # Read every value as character; the outcome column is converted later.
    File.in <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    # Replace dots in column names with spaces and lower-case them, so the
    # regex below can match "hospital ... mortality ... <outcome>".
    colnames(File.in) <- casefold(gsub("[.]"," ",colnames(File.in)))
    # Build a regular expression that identifies the mortality column for the
    # requested outcome.
    pattern <- paste0("^[hH]ospital","(.*)","mortality","(.*)",outcome,"$")
    if(sum(grepl(pattern,colnames(File.in)))==0){
        stop("invalid outcome")
    }else{
        # `num` must be "best", "worst" or a numeric rank.
        if(!(grepl("[Bb]est",num)|grepl("[Ww]orst",num)|is.numeric(num))){
            stop("invalid Choice")
        }else{
            Column.ID <- which(grepl(pattern,colnames(File.in),ignore.case = TRUE))
            # Keep only rows whose outcome value is numeric ("Not Available"
            # becomes NA under as.numeric, hence suppressWarnings).
            filter <- suppressWarnings(!is.na(as.numeric(File.in[,Column.ID])))
            File.values <- File.in[filter,]
            File.values[,Column.ID] <- as.numeric(File.values[,Column.ID])
            # "best" is simply rank 1.
            if(grepl("[Bb]est",num))num <-1
            # Identify the hospital-name column.
            Hos.col <- which("hospital name"==colnames(File.values))
            # For each state, order by outcome then hospital name and pick the
            # requested rank. NOTE(review): column 7 is assumed to be the state
            # column -- confirm against the CSV layout.
            out.put <- lapply(split(File.values,File.values$state),function(x){
                Hospitals <- as.data.frame(x)
                # "worst" maps to the last rank within this state.
                if(grepl("[Ww]orst",num))num <- nrow(Hospitals)
                x[order(x[,Column.ID],x[,Hos.col]),c(Hos.col,7)] [num,]
            })
            Names.states <- as.data.frame(do.call(rbind,out.put) )
            colnames(Names.states) <-c("hospital","state")
            Names.states
}}}
|
064263e942446ac3f0ba69f04a9c8e2c25f576ce
|
fe3f2623101dac6ecf2bab6885d80fff02d31d9f
|
/assignment(Introduction_to_R_Programming)-shrutigupta.R
|
a976aeb7ee093ddedbd3d1079a353c8aa2404b39
|
[] |
no_license
|
shrutinikki/Data-Science
|
e4660db250b6b2ea2cd5b2246c5196217ef54de5
|
99e05e7e1a3877c8d0df1c05a9ea2213906bc03c
|
refs/heads/master
| 2021-06-24T12:27:48.395402
| 2021-01-26T12:18:06
| 2021-01-26T12:18:06
| 199,778,432
| 0
| 0
| null | 2021-01-26T12:18:07
| 2019-07-31T04:27:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,803
|
r
|
assignment(Introduction_to_R_Programming)-shrutigupta.R
|
# Author: Shruti Nikhila Gupta
# Assignment 1: Introduction to R Programming
# Objective: hands-on practice on the diamonds dataset using dplyr.
#
# Questions
# Q1. Which color has maximum price/carat
# Q2. Which clarity has minimum price/carat
# Q3. Which cut has more missing values
# Q4. Which color has minimum median price
# Q5. What conclusion can you draw from the variables color, cut-price, and carat
# Data set: BigDiamonds.csv
library("dplyr")
# Set and confirm the working directory that holds the CSV.
setwd ("C:\\Users\\HP\\Documents\\Courses\\digitalvidya\\datascienceusingr\\class\\week2")
getwd()
data_set <- read.csv("BigDiamonds.csv", header = TRUE, stringsAsFactors = FALSE, na.strings = "NA")

# Price per carat is the quantity Q1 and Q2 ask about; compute it once.
# (BUG FIX: the original used setdiff() between a character vector of colors
# and a numeric scalar, which never answers either question; it also used
# max(..., na.rm = FALSE), which returns NA whenever the column has NAs.)
price_per_carat <- data_set$price / data_set$carat

# Q1. Which color has maximum price/carat
# which.max skips NA ratios (rows with missing price or carat).
color_max_ppc <- data_set$color[which.max(price_per_carat)]
color_max_ppc

# Q2. Which clarity has minimum price/carat
clarity_min_ppc <- data_set$clarity[which.min(price_per_carat)]
clarity_min_ppc

# Q3. Which cut has more missing values
# Count NA cells per row across all columns, then total them per cut.
# (BUG FIX: the original defined an unused `napresent()` helper and called
# summarise_all/select_all in ways that error; replaced with a direct count.)
missing_by_cut <- tapply(rowSums(is.na(data_set)), data_set$cut, sum)
missing_by_cut
cut_most_missing <- names(which.max(missing_by_cut))
cut_most_missing

# Q4. Which color has minimum median price
median_price_by_color <- data_set %>%
  group_by(color) %>%
  summarise(median_price = median(price, na.rm = TRUE))
median_price_by_color
color_min_median <- median_price_by_color$color[which.min(median_price_by_color$median_price)]
color_min_median

# Q5. What conclusion can you draw from the variables color, cut-price, and carat
# (Interpretation question: compare the summaries above, e.g. median price
# across colors and missingness across cuts, to draw conclusions.)
|
e964415af31ae605899a1508366179a7dc688b5c
|
d769c2c28cdf8d1a8b1a825f18424c4ecd6e5ff1
|
/man/summary.equivttest.Rd
|
757839b7f4a7a403384e09a67aa9d0cd027be8e0
|
[
"MIT"
] |
permissive
|
jwbowers/equivtest
|
8a025ef7b00b9e83283dae42f30a4d155ce9a7f0
|
5f881c57b559ec38792fa6c03842dcbfeabbf43a
|
refs/heads/master
| 2022-01-08T14:57:16.591079
| 2019-07-03T02:01:57
| 2019-07-03T02:01:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 664
|
rd
|
summary.equivttest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_equivttest.R
\name{summary.equivttest}
\alias{summary.equivttest}
\title{Equivalence t-Test Summary Function}
\usage{
\method{summary}{equivttest}(res)
}
\description{
\code{summary.equivttest} is a summary function for objects of the class \code{equivttest}. This summary function should be paired with the \code{equiv.t.test} function.
}
\examples{
# Wellek p 124
x=c(10.3,11.3,2,-6.1,6.2,6.8,3.7,-3.3,-3.6,-3.5,13.7,12.6)
y=c(3.3,17.7,6.7,11.1,-5.8,6.9,5.8,3,6,3.5,18.7,9.6)
eps=c(.5,1)
res=equiv.t.test(x,y,eps_sub=eps)
summary(res)
}
\seealso{
\code{\link{equiv.t.test}}
}
|
e745749e5db6af7ee7db3e5115c464ba8061a41e
|
de7b5af9c415426ad5496ccf0b87f91b663cee29
|
/man/checkNotDigitInDataframe.Rd
|
57b8e0f90970c7640e6d4970ca002c5a92cf1164
|
[
"MIT"
] |
permissive
|
rgriffier/statsBordeaux
|
bb465057eab0b41e6d1515f02cfd13adb8179ac8
|
ded4292fb43e2a959058267a1f707d25505c0b7d
|
refs/heads/master
| 2021-09-06T15:02:07.420504
| 2021-08-03T13:56:13
| 2021-08-03T13:56:13
| 178,895,755
| 2
| 0
| null | 2020-09-28T14:14:45
| 2019-04-01T15:45:33
|
R
|
UTF-8
|
R
| false
| true
| 1,053
|
rd
|
checkNotDigitInDataframe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statistic_functions.R
\name{checkNotDigitInDataframe}
\alias{checkNotDigitInDataframe}
\title{Check if data are only digits}
\usage{
checkNotDigitInDataframe(data, returnError = FALSE, excelFormat = TRUE)
}
\arguments{
\item{data}{a data.frame containing the data to check}
\item{returnError}{boolean. If TRUE, return a data.frame with the coordinate of the cell}
\item{excelFormat}{a boolean vector of length one. If TRUE, data.frame containing position of none
digit cell is in excel format. Default to TRUE.}
}
\value{
TRUE if the data.frame only contains digits, otherwise a data.frame
with the coordinates of the cells containing wrongly formatted data, in Excel style. FALSE by default.
}
\description{
Check if the input data.frame is only composed of digits
}
\examples{
data(mtcars)
checkNotDigitInDataframe(data = mtcars, returnError = FALSE)
mtcars$NOT_DIGIT <- rep("A", nrow(mtcars))
errorPosition <- checkNotDigitInDataframe(data = mtcars, returnError = TRUE)
}
|
8547e65b1ed8b2dfe0307280e752c39a55b72095
|
1c591b580a42e90ba318675e3cebeabdfc06534a
|
/R/func__geoTempAnalyser__calcAllelicDiversityPerCountry.R
|
eb5b6a3d91ed1bfd4f44a250ae9e11a1b7506ad8
|
[
"Apache-2.0"
] |
permissive
|
wanyuac/GeneMates
|
974d9a883d43ccd7602167204d8b3ff5bba6b74c
|
e808430b2cdd920f1b9abd8b6b59993fde8754a7
|
refs/heads/master
| 2022-08-31T13:00:42.583172
| 2022-08-08T10:01:02
| 2022-08-08T10:01:02
| 138,949,733
| 25
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,347
|
r
|
func__geoTempAnalyser__calcAllelicDiversityPerCountry.R
|
#' @title Allelic diversity per country or any kind of regions
#'
#' @description Calculate Simpson's or Shannon's diversity index for alleles per
#' country or any other kind of geographic regions. Each strain must belong to
#' only a single country.
#'
#' @param sam A data frame whose first and second columns are strain and country
#' names, respectively.
#' @param pam A binary allelic presence-absence matrix, where row names are
#' strain names and column names are allele names.
#' @param alleles A character vector specifying alleles to be analysed. Keep NULL
#' to include all alleles in pam.
#' @param countries A character vector specifying countries to be considered. Keep
#' NULL to include all countries in pam.
#' @param method A character argument of either "simpson" or "shannon".
#' @param shannon_base The base for Shannon's diversity index. It is not used for
#' Simpson's diversity index. Default: exp(1).
#'
#' @return A list of four elements:
#' d: a named numeric vector of diversity indices, one per country;
#' c: a matrix of n(countries) by n(alleles) for allele counts per country;
#' p: the final allelic presence-absence matrix on which the diversity indices
#' are computed.
#' s: isolation information of the final set of strains.
#'
#' @author Yu Wan (\email{wanyuac@@126.com})
#' @export
#'
# Copyright 2018 Yu Wan <wanyuac@126.com>
# Licensed under the Apache License, Version 2.0
# First and the latest edition: 31 Oct 2018
calcAllelicDiveristyPerCountry <- function(sam, pam, alleles = NULL, countries = NULL,
                                           method = "simpson", shannon_base = exp(1)) {
    require(vegan)  # provides diversity()

    # Preparation: keep strains that have a known (and, optionally, requested) country.
    sam <- sam[, c(1, 2)]
    names(sam) <- c("Strain", "Country")
    sam <- subset(sam, !is.na(Country))
    if (!is.null(countries)) {
        sam <- subset(sam, Country %in% countries)  # to reduce the volume of data
    }

    # The final set of strains: some may be absent from pam or from sam.
    strains <- intersect(x = rownames(pam), y = sam$Strain)
    # BUG FIX: drop = FALSE keeps pam a matrix when only one strain or one
    # allele remains; without it, pam collapses to a vector and the colSums()
    # calls below fail.
    pam <- pam[strains, , drop = FALSE]
    pam <- pam[, as.logical(colSums(pam)), drop = FALSE]  # remove empty columns after excluding some strains
    sam <- subset(sam, Strain %in% strains)
    countries <- unique(sam$Country)  # the final set of countries to be analysed
    if (!is.null(alleles)) {
        pam <- pam[, intersect(x = alleles, y = colnames(pam)), drop = FALSE]
    }
    alleles <- colnames(pam)  # the final set of alleles to be analysed

    # Count allele occurrences per country. With drop = FALSE the per-country
    # submatrix always stays a matrix, so colSums() handles the single-strain
    # case too (no special branch needed).
    m <- matrix(0, nrow = length(countries), ncol = length(alleles),
                dimnames = list(countries, alleles))
    for (k in countries) {
        strains_k <- sam$Strain[sam$Country == k]
        pam_k <- pam[strains_k, , drop = FALSE]
        m[k, ] <- as.numeric(colSums(pam_k))
    }

    # Any non-"simpson" method falls through to Shannon, as in the original.
    if (method != "simpson") {
        d <- diversity(x = m, index = "shannon", MARGIN = 1, base = shannon_base)
    } else {
        d <- diversity(x = m, index = "simpson", MARGIN = 1)
    }

    return(list(d = d, c = m, p = pam, s = sam))
}
|
3ca2481c82930cb54a2c64a935f293c2f6ee52e0
|
2e85b739e89e8be13c2325049016f63079f4db75
|
/man/env.two.Rd
|
5e15581c9ca90c92952a74603b2ca778e6f0ac56
|
[] |
no_license
|
cran/gma
|
8c5028cea049bdfbdb5bb91dab63cc32ad0f2335
|
eb424ae18c87f5d60ebf0cd8abe23b5ccd429cc2
|
refs/heads/master
| 2021-06-30T00:10:55.739931
| 2017-09-19T09:07:35
| 2017-09-19T09:07:35
| 104,059,236
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,351
|
rd
|
env.two.Rd
|
\name{env.two}
\alias{env.two}
\docType{data}
\title{ Simulated two-level dataset
}
\description{ "env.two" is an R environment containing a data list generated from 50 subjects, and the parameter settings used to generate the data.
}
\usage{data("env.two")}
\format{ An R environment.
\describe{
\item{\code{data2}}{a list of length 50, each contains a data frame with 3 variables.}
\item{\code{error2}}{a list of length 50, each contains a data frame with 2 columns.}
\item{\code{theta}}{a 3 by 1 vector, which is the population level coefficients \code{(A,B,C)} of the model.}
\item{\code{Sigma}}{a 2 by 2 matrix, which is the covariance matrix of the two Gaussian white noise processes.}
\item{\code{p}}{the order of the vector autoregressive (VAR) model.}
\item{\code{W}}{a 2\code{p} by 2 matrix, which is the transition matrix of the VAR(\code{p}) model.}
\item{\code{Delta}}{a 2 by 2 matrix, which is the covariance matrix of the initial condition of the Gaussian white noise processes.}
\item{\code{n}}{a 50 by 1 matrix, is the number of time points for each subject.}
\item{\code{Lambda}}{the covariance matrix of the model errors in the coefficient regression model.}
\item{\code{A}}{a vector of length 50, is the \code{A} value in the single-level for each subject.}
\item{\code{B}}{a vector of length 50, is the \code{B} value in the single-level for each subject.}
\item{\code{C}}{a vector of length 50, is the \code{C} value in the single-level for each subject.}
}
}
\details{ The true parameters are set as follows. The number of subjects is \eqn{N = 50}. For each subject, the number of time points is a random draw from a Poisson distribution with mean 100. The population level coefficients are set to be \eqn{A = 0.5}, \eqn{C = 0.5} and \eqn{B = -1}, and the variances of the Gaussian white noise process are assumed to be the same across participants with \eqn{\sigma_{1_{i}}^2 = 1}, \eqn{\sigma_{2_{i}}^2 = 4} and the correlation is \eqn{\delta = 0.5}. For the VAR model, we consider the case \eqn{p = 1}, and the parameter settings satisfy the stationarity condition.
}
\references{Zhao, Y., & Luo, X. (2017). \emph{Granger Mediation Analysis of Multiple Time Series with an Application to fMRI.} arXiv preprint arXiv:1709.05328.
}
\examples{
data(env.two)
dt<-get("data2",env.two)
}
\keyword{datasets}
|
9df0f5da9aeb7e4d0d84ab0a5ede084b4db12de6
|
a2891ad5a64ec595f04b6b6dbc182e9b4604ad68
|
/R/odd_even.R
|
b36d6152f440f0b82a56751c4a1d48d874bbae80
|
[] |
no_license
|
wyliehampson/lasagnashark
|
38a0861dcfd490500452f23cb31b5b2af4988112
|
822b688eaece25c9e4e775e5576bb03b3a33ba40
|
refs/heads/main
| 2023-07-13T07:31:35.494289
| 2021-08-20T16:32:23
| 2021-08-20T16:32:23
| 395,761,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
odd_even.R
|
#' Odd_Even
#'
#' Adds two numbers together and prints whether the sum is odd or even.
#'
#' @param num1 The first number.
#' @param num2 The second number.
#'
#' @return (Invisibly) the string that was printed: "Odd" if the sum is
#'   odd, "Even" if it is even.
#' @export
#'
#' @examples odd_even(10, 53)
odd_even <- function(num1, num2) {
  # Use `total`, not `sum`, to avoid shadowing base::sum.
  total <- num1 + num2
  # In R, x %% 2 is always 0 or 1 (non-negative remainder), so this is
  # correct for negative sums as well.
  if ((total %% 2) == 1) {
    print("Odd")
  } else {
    print("Even")
  }
}
|
f498ae3a1be5195862dad3330b2fca2c8f97c5b7
|
318d4910b23bd2388d51f42201ac849e48de7581
|
/cachematrix.R
|
692990a45214663d384a99076e2ebec21ddfc9c4
|
[] |
no_license
|
BenMacrae/ProgrammingAssignment2
|
b760e22b27853858d75adc09428e4b8556590484
|
f9e6446154a2a39cca750517acb08bd61f8d8dad
|
refs/heads/master
| 2021-04-29T14:07:36.567050
| 2018-02-16T16:30:49
| 2018-02-16T16:30:49
| 121,767,001
| 0
| 0
| null | 2018-02-16T15:34:26
| 2018-02-16T15:34:25
| null |
UTF-8
|
R
| false
| false
| 1,429
|
r
|
cachematrix.R
|
## Here we define two functions which will allow a matrix inverse to be calculated
## and stored, such that subsequent calls to calculate the inverse will load the
## result from previous computation instead. This is a slightly modified version
## of the similar script taking means provided in the exercise text.
## Creates a special "matrix" object: a list of accessor functions that
## close over the matrix `x` and a cached copy of its inverse. The cache
## starts empty (NULL) and is wiped whenever a new matrix is stored via
## set(), so a stale inverse can never be returned.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setinverse = function(I) cached <<- I,
    getinverse = function() cached
  )
}
## Returns the inverse of the special "matrix" produced by
## makeCacheMatrix. If the inverse has already been computed it is
## returned straight from the cache (with a message); otherwise it is
## computed with solve(), stored for next time, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
51431674666ec246f80f8849ca1a0c48491dbea2
|
0e8702748e6f94e40243ac5af83056e423cb5df5
|
/Metadimensional tools/HepatocellularCarcinoma/SNF/HPC_SNFTool_Analysis_3Labels_UsingTop10Top5FilteredDataset.R
|
372ed72a75076bc165360933a4001fc144d12b0f
|
[] |
no_license
|
AtinaSat/Evaluation-of-integration-tools
|
373aeec1669384371c51115d5dd35b966d95f05b
|
2cdd6316bcb8e899ba57976c0ff5e040bc94cc68
|
refs/heads/master
| 2020-04-19T03:36:08.812575
| 2019-08-07T12:14:41
| 2019-08-07T12:14:41
| 167,938,938
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,657
|
r
|
HPC_SNFTool_Analysis_3Labels_UsingTop10Top5FilteredDataset.R
|
## HPC 3 labels SNFTool analysis using Top10Top5filtered
## Fuses three omics layers with Similarity Network Fusion, spectrally
## clusters the fused network into 3 groups, and scores the clustering
## (accuracy, per-class precision/recall/F1) against labels encoded in
## the sample names.
library(SNFtool)
library(tictoc)
# NOTE(review): assumes the .Rdata file supplies `mrna`, `mirna`, `me`
# (samples x features) and `data.labels` with a `Samples` column --
# confirm against how the file was written.
load("HPC_Top10Top5Filtered_data.Rdata")
tic("SNF-HPC3Labels")
# Calculating distance matrices
dist1 <- as.matrix(dist(mrna))
dist2 <- as.matrix(dist(mirna))
dist3 <- as.matrix(dist(me))
# Calculating affinity matrices (affinityMatrix defaults for K / sigma)
W1 <- affinityMatrix(dist1)
W2 <- affinityMatrix(dist2)
W3 <- affinityMatrix(dist3)
# Combining the clusters
W = SNF(list(W1,W2, W3))
# Spectral clustering
clusters_label <- data.frame(spectralClustering(W,K = 3))
toc()
# SNF-HPC3Labels: 0.744 sec elapsed
clusters_label$Samples <- data.labels$Samples
clusters_label$Samples <- as.character(clusters_label$Samples)
clusters_label$Truth <- clusters_label$Samples
clusters_label$Truth1 <- clusters_label$Samples
# Truth1: collapse each sample ID to its tissue letter (N/T/P).
clusters_label$Truth1 <- gsub("N.{1,4}", "N", clusters_label$Truth1)
clusters_label$Truth1 <- gsub("T.{1,4}", "T", clusters_label$Truth1)
clusters_label$Truth1 <- gsub("P.{1,4}", "P", clusters_label$Truth1)
confusion.mat = table(true = clusters_label$Truth1, Pred = clusters_label$spectralClustering.W..K...3.)
confusion.mat
# Pred
# true 1 2 3
# N 0 20 0
# P 9 1 10
# T 10 1 9
# Cluster 1: Tumour; Cluster 2: Normal; Cluster 3: PVTT
# Truth: recode tissue letters to the cluster numbers inferred above so
# the confusion-matrix diagonal lines up with correct assignments.
clusters_label$Truth <- clusters_label$Samples
clusters_label$Truth <- gsub("N.{1,4}", "2", clusters_label$Truth)
clusters_label$Truth <- gsub("T.{1,4}", "1", clusters_label$Truth)
clusters_label$Truth <- gsub("P.{1,4}", "3", clusters_label$Truth)
confusion.mat = table(true = clusters_label$Truth, Pred = clusters_label$spectralClustering.W..K...3.)
confusion.mat
# Pred
# true 1 2 3
# 1 10 1 9
# 2 0 20 0
# 3 9 1 10
# Performance analysis
SNF_Accuracy = sum(clusters_label$Truth == clusters_label$spectralClustering.W..K...3.)/length(clusters_label$spectralClustering.W..K...3.)
round(SNF_Accuracy,9)
# 0.6666667
# Tumor
Pg1 <- round(confusion.mat[1,1]/sum(confusion.mat[,1]),2) # Precision
Rg1 <- round(confusion.mat[1,1]/sum(confusion.mat[1,]),2) # Recall
Fg1 <- round(2*((Pg1*Rg1)/(Pg1+Rg1)),2) # F1-score
Fg1[is.nan(Fg1)] <- 0 # guard against 0/0 when a class is never predicted
# Normal
Pg2 <- round(confusion.mat[2,2]/sum(confusion.mat[,2]),2) # Precision
Rg2 <- round(confusion.mat[2,2]/sum(confusion.mat[2,]),2) # Recall
Fg2 <- round(2*((Pg2*Rg2)/(Pg2+Rg2)),2) # F1-score
Fg2[is.nan(Fg2)] <- 0
# PVTT
Pg3 <- round(confusion.mat[3,3]/sum(confusion.mat[,3]),2) # Precision
Rg3 <- round(confusion.mat[3,3]/sum(confusion.mat[3,]),2) # Recall
Fg3 <- round(2*((Pg3*Rg3)/(Pg3+Rg3)),2) # F1-score
Fg3[is.nan(Fg3)] <- 0
# Macro-averaged F1 over the three classes.
AvgF <- round((Fg1+Fg2+Fg3)/3,2)
save.image(file = "HPC_SNFTool_Results_3labels_UsingTop10Top5FilteredDataset.RData")
|
4e515a32b3000c5d897681170405359c26f5ece1
|
4d3b6b633f8f5be05f320cd9a6ad5b94e39dfe16
|
/javacode/target/classes/rscripts/generateModel.R
|
dc1cd9a2a20c0b6d61265d6d01309ea4277f7280
|
[
"MIT"
] |
permissive
|
sbelete/ReFa
|
651bcd7bd3ba593363c5fa7499727fe716641039
|
3c271051e40f6fc4c5920436ebc12aeb9abc87d7
|
refs/heads/main
| 2021-07-15T14:17:38.749739
| 2017-10-20T18:24:04
| 2017-10-20T18:24:04
| 91,976,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
generateModel.R
|
# Train a random-forest classifier (via caret) on rows pulled from an
# SQLite database, then serialize the fitted model to "model.rds".
library(caret)
library(RSQLite)
library(DBI)
sqlite <- dbDriver("SQLite")
# NOTE(review): `wd` is assumed to hold the path to the SQLite database
# file and must be defined before this script is sourced -- confirm.
exampledb <- dbConnect(sqlite, wd)
dbListTables(exampledb)
dbGetQuery(exampledb, "SELECT * FROM main")
results <- dbSendQuery(exampledb, "SELECT * FROM main")
data <- dbFetch(results, -1)  # n = -1 fetches all remaining rows
dbClearResult(results)        # release the result set (was leaked before)
dbDisconnect(exampledb)       # close the connection once data is in memory
# Keep predictor columns 2:8 and 13:15; `real` is the response.
data <- data[, c(2:8, 13:15)]
data$real <- as.factor(data$real)
# make.names() ensures class labels are syntactically valid R names,
# which caret requires when classProbs = TRUE.
data$real <- make.names(data$real)
data$c1l1 <- as.factor(data$c1l1)
data$wordpress <- as.factor(data$wordpress)
# 10-fold cross-validation, selecting the model by ROC AUC.
control <- trainControl(method="cv", number=10, classProbs=TRUE, summaryFunction=twoClassSummary)
model <- train(real ~ ., data=data, method = 'rf', metric="ROC", trControl=control, na.action = na.pass)
saveRDS(model, "model.rds")
model
|
3ad2fe6ff170c53a23762ae523a43d4fb12c0a17
|
1d5f8ab25866b9cb4898be799bb700d260ef5b62
|
/R/kernel_model.R
|
d18398fce519e2d93ec787212643b39ea27696b8
|
[] |
no_license
|
CollinErickson/GauPro
|
20537d576a5a47308840ecbe080dabb2c244b96c
|
c12cfa14b5ac4e1506daec1baec27f75a2253f53
|
refs/heads/master
| 2023-04-16T08:42:18.177784
| 2023-04-12T00:59:27
| 2023-04-12T00:59:27
| 64,254,165
| 14
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147,791
|
r
|
kernel_model.R
|
#' Gaussian process model with kernel
#'
#' @description
#' Class providing object with methods for fitting a GP model.
#' Allows for different kernel and trend functions to be used.
#' The object is an R6 object with many methods that can be called.
#'
#' `gpkm()` is equivalent to `GauPro_kernel_model$new()`, but is easier to type
#' and gives parameter autocomplete suggestions.
#'
#' @docType class
#' @importFrom R6 R6Class
#' @importFrom stats model.frame
#' @export
#' @useDynLib GauPro
#' @importFrom Rcpp evalCpp
#' @importFrom stats optim
# @keywords data, kriging, Gaussian process, regression
#' @return Object of \code{\link{R6Class}} with methods for fitting GP model.
#' @format \code{\link{R6Class}} object.
#' @examples
#' n <- 12
#' x <- matrix(seq(0,1,length.out = n), ncol=1)
#' y <- sin(2*pi*x) + rnorm(n,0,1e-1)
#' gp <- GauPro_kernel_model$new(X=x, Z=y, kernel="gauss")
#' gp$predict(.454)
#' gp$plot1D()
#' gp$cool1Dplot()
#'
#' n <- 200
#' d <- 7
#' x <- matrix(runif(n*d), ncol=d)
#' f <- function(x) {x[1]*x[2] + cos(x[3]) + x[4]^2}
#' y <- apply(x, 1, f)
#' gp <- GauPro_kernel_model$new(X=x, Z=y, kernel=Gaussian)
#' @field X Design matrix
#' @field Z Responses
#' @field N Number of data points
#' @field D Dimension of data
# @field corr Type of correlation function
#' @field nug.min Minimum value of nugget
#' @field nug.max Maximum value of the nugget.
#' @field nug.est Should the nugget be estimated?
#' @field nug Value of the nugget, is estimated unless told otherwise
# @field separable Are the dimensions separable?
#' @field param.est Should the kernel parameters be estimated?
#' @field verbose 0 means nothing printed, 1 prints some, 2 prints most.
#' @field useGrad Should grad be used?
#' @field useC Should C code be used?
#' @field parallel Should the code be run in parallel?
#' @field parallel_cores How many cores are there? By default it detects.
#' @field kernel The kernel to determine the correlations.
#' @field trend The trend.
#' @field mu_hatX Predicted trend value for each point in X.
#' @field s2_hat Variance parameter estimate
#' @field K Covariance matrix
#' @field Kchol Cholesky factorization of K
#' @field Kinv Inverse of K
#' @field Kinv_Z_minus_mu_hatX K inverse times Z minus the predicted
#' trend at X.
#' @field restarts Number of optimization restarts to do when updating.
#' @field normalize Should the inputs be normalized?
#' @field normalize_mean If using normalize, the mean of each column.
#' @field normalize_sd If using normalize, the standard
#' deviation of each column.
#' @field optimizer What algorithm should be used to optimize the
#' parameters.
#' @field track_optim Should it track the parameters evaluated
#' while optimizing?
#' @field track_optim_inputs If track_optim is TRUE,
#' this will keep a list of parameters evaluated.
#' View them with plot_track_optim.
#' @field track_optim_dev If track_optim is TRUE,
#' this will keep a vector of the deviance values calculated
#' while optimizing parameters.
#' View them with plot_track_optim.
#' @field formula Formula
#' @field convert_formula_data List for storing data to convert data
#' using the formula
#' @section Methods:
#' \describe{
#' \item{\code{new(X, Z, corr="Gauss", verbose=0, separable=T, useC=F,
#' useGrad=T,
#' parallel=T, nug.est=T, ...)}}{
#' This method is used to create object of this
#' class with \code{X} and \code{Z} as the data.}
#'
#' \item{\code{update(Xnew=NULL, Znew=NULL, Xall=NULL, Zall=NULL,
#' restarts = 0,
#' param_update = T, nug.update = self$nug.est)}}{This method updates the
#' model, adding new data if given, then running optimization again.}
#' }
# GauPro_kernel_model ----
GauPro_kernel_model <- R6::R6Class(
classname = "GauPro",
public = list(
X = NULL,
Z = NULL,
N = NULL,
D = NULL,
kernel = NULL,
trend = NULL,
nug = NULL,
nug.min = NULL,
nug.max = NULL,
nug.est = NULL,
param.est = NULL,
# Whether parameters besides nugget (theta) should be updated
# mu_hat = NULL,
mu_hatX = NULL,
s2_hat = NULL,
K = NULL,
Kchol = NULL,
Kinv = NULL,
Kinv_Z_minus_mu_hatX = NULL,
verbose = 0,
useC = TRUE,
useGrad = FALSE,
parallel = NULL,
parallel_cores = NULL,
restarts = NULL,
normalize = NULL,
# Should the Z values be normalized for internal computations?
normalize_mean = NULL,
normalize_sd = NULL,
optimizer = NULL, # L-BFGS-B, BFGS
track_optim = NULL,
track_optim_inputs = list(),
track_optim_dev = numeric(0),
formula = NULL,
convert_formula_data = NULL,
#deviance_out = NULL, #(theta, nug)
#deviance_grad_out = NULL, #(theta, nug, overwhat)
#deviance_fngr_out = NULL,
#' @description Create kernel_model object
#' @param X Matrix whose rows are the input points
#' @param Z Output points corresponding to X
#' @param kernel The kernel to use. E.g., Gaussian$new().
#' @param trend Trend to use. E.g., trend_constant$new().
#' @param verbose Amount of stuff to print. 0 is little, 2 is a lot.
#' @param useC Should C code be used when possible? Should be faster.
#' @param useGrad Should the gradient be used?
    #' @param parallel Should code be run in parallel? Makes optimization
    #' faster but uses more computer resources.
#' @param parallel_cores When using parallel, how many cores should
#' be used?
#' @param nug Value for the nugget. The starting value if estimating it.
#' @param nug.min Minimum allowable value for the nugget.
#' @param nug.max Maximum allowable value for the nugget.
#' @param nug.est Should the nugget be estimated?
#' @param param.est Should the kernel parameters be estimated?
#' @param restarts How many optimization restarts should be used when
#' estimating parameters?
#' @param normalize Should the data be normalized?
#' @param optimizer What algorithm should be used to optimize the
#' parameters.
#' @param track_optim Should it track the parameters evaluated
#' while optimizing?
#' @param formula Formula for the data if giving in a data frame.
#' @param data Data frame of data. Use in conjunction with formula.
#' @param ... Not used
    initialize = function(X, Z,
                          kernel, trend,
                          verbose=0, useC=TRUE, useGrad=TRUE,
                          parallel=FALSE, parallel_cores="detect",
                          nug=1e-6, nug.min=1e-8, nug.max=1e2, nug.est=TRUE,
                          param.est = TRUE, restarts = 0,
                          normalize = FALSE, optimizer="L-BFGS-B",
                          track_optim=FALSE,
                          formula, data,
                          ...) {
      # Builds the GP model: resolves the formula/data-frame interface,
      # validates X/Z, selects and validates the kernel and trend, stores
      # the fitting options, then fits the model.
      # If formula is given, use it to get X and Z.
      # The formula may arrive in X, in Z, or in `formula`; the matching
      # data frame may arrive in X, Z, or `data` (or be found in the
      # calling environment).
      if ((!missing(X) && is.formula(X)) ||
          (!missing(Z) && is.formula(Z)) ||
          (!missing(formula) && is.formula(formula))) {
        if (!missing(X) && is.formula(X)) {
          formula <- X
          if (!missing(Z) && is.data.frame(Z)) {
            data <- Z
          } else if (!missing(data) && is.data.frame(data)) {
            # data <- data
          } else if (!missing(Z)) {
            warning("Z given in but not being used")
            data <- NULL
          } else if (!missing(data)) {
            warning("data given in but not being used")
            data <- NULL
          } else {
            data <- NULL
          }
        }
        if (!missing(Z) && is.formula(Z)) {
          formula <- Z
          # Don't need data given in, can be global variables
          if (!missing(X) && is.data.frame(X)) {
            data <- X
          } else if (!missing(data) && is.data.frame(data)) {
            # data <- data
          } else if (!missing(X)) {
            warning("X given in but not being used")
            data <- NULL
          } else if (!missing(data)) {
            warning("data given in but not being used")
            data <- NULL
          } else {
            data <- NULL
          }
        }
        if (!missing(formula) && is.formula(formula)) {
          # formula <- formula
          # Find data now
          if (!missing(X) && is.data.frame(X)) {
            data <- X
          } else if (!missing(Z) && is.data.frame(Z)) {
            data <- Z
          } else if (!missing(data) && is.data.frame(data)) {
            # data <- data
          } else {
            # stop("formula given in but not data")
            # Data can be in global, don't give error for this.
          }
        } else if (!missing(formula) && !is.null(formula)) {
          message("formula given in but not used")
        }
        # Get data
        modfr <- model.frame(formula = formula, data = data)
        Z <- modfr[,1]
        Xdf <- modfr[,2:ncol(modfr), drop=FALSE]
        # Record how factor/character columns were encoded so predict()
        # can apply the same conversion to new data.
        convert_formula_data <- list(factors=list(),
                                     chars=list())
        # Convert factor columns to integer
        for (i in 1:ncol(Xdf)) {
          if (is.factor(Xdf[, i])) {
            convert_formula_data$factors[[
              length(convert_formula_data$factors)+1
            ]] <- list(index=i,
                       levels=levels(Xdf[[i]]),
                       ordered=is.ordered(Xdf[, i]))
            Xdf[[i]] <- as.integer(Xdf[[i]])
          }
        }
        # Convert char columns to integer
        for (i in 1:ncol(Xdf)) {
          if (is.character(Xdf[, i])) {
            convert_formula_data$chars[[
              length(convert_formula_data$chars)+1
            ]] <- list(index=i,
                       vals=sort(unique(Xdf[[i]])))
            Xdf[[i]] <- sapply(Xdf[[i]],
                               function(x) {
                                 which(x==convert_formula_data$chars[[
                                   length(convert_formula_data$chars)
                                 ]]$vals)
                               })
          }
        }
        # Using formula won't convert z ~ . into z ~ a + b + ...,
        # but using terms from modfr will
        # self$formula <- formula
        self$formula <- attr(modfr, "terms")
        # Don't allow formulas with interaction terms. Everything interacts.
        if (any(grepl(":", attr(self$formula, "term.labels"), fixed=TRUE)) ||
            any(grepl("*", attr(self$formula, "term.labels"), fixed=TRUE))) {
          stop(paste0("Don't use a formula with * or :. ",
                      "Interactions are all included."))
        }
        # self$data <- data
        self$convert_formula_data <- convert_formula_data
        X <- as.matrix(Xdf)
      } # End formula was given in
      if (missing(X) || is.null(X)) {
        stop("You must give X to GauPro_kernel_model")
      }
      if (missing(Z) || is.null(Z)) {
        stop("You must give Z to GauPro_kernel_model")
      }
      # X is always a matrix. If data comes from data frame, it gets converted
      # to numeric so it can be stored in a matrix.
      if (is.data.frame(X)) {
        X <- as.matrix(X)
      }
      # Make sure numeric, no NA/NaN
      stopifnot(is.numeric(X), !any(is.na(X)),
                !any(is.nan(X)), all(is.finite(X)))
      stopifnot(is.numeric(Z), !any(is.na(Z)),
                !any(is.nan(Z)), all(is.finite(Z)))
      self$X <- X
      self$Z <- matrix(Z, ncol=1)
      self$normalize <- normalize
      # If normalizing, Z is stored standardized; predictions un-normalize.
      if (self$normalize) {
        self$normalize_mean <- mean(self$Z)
        self$normalize_sd <- sd(self$Z)
        self$Z <- (self$Z - self$normalize_mean) / self$normalize_sd
      }
      self$verbose <- verbose
      if (!is.matrix(self$X)) {
        if (length(self$X) == length(self$Z)) {
          self$X <- matrix(X, ncol=1)
        } else {
          stop("X and Z don't match")
        }
      }
      self$N <- nrow(self$X)
      self$D <- ncol(self$X)
      # Expected run time
      # NOTE(review): empirical cost model in N and D -- presumably fit
      # offline by the package author; confirm if it needs updating.
      expruntime <- (.0581 + .00394*self$N + .0230*self$D) ^ 3
      if (expruntime > 5 && self$verbose >= 0) {
        cat("Expected run time:", round(expruntime), "seconds\n")
      }
      # Set kernel
      if (missing(kernel)) {
        # # Stop and give message
        # stop(paste0(
        #   "Argument 'kernel' is missing. ",
        #   "Try using 'gauss' or 'matern52'.",
        #   " See documentation for more details."
        # ))
        # Set to matern52 by default
        kernel <- "matern52"
        message(paste0(
          "Argument 'kernel' is missing. ",
          "It has been set to 'matern52'.",
          " See documentation for more details."
        ))
      }
      if ("R6ClassGenerator" %in% class(kernel)) {
        # Let generator be given so D can be set auto
        self$kernel <- kernel$new(D=self$D)
      } else if ("GauPro_kernel" %in% class(kernel)) {
        # Otherwise it should already be a kernel
        if (!is.na(kernel$D) && kernel$D != self$D) {
          warning(paste0("Dimensions of data and kernel don't match,",
                         " this seems like an error"))
        }
        self$kernel <- kernel
      } else if(is.character(kernel) && length(kernel)==1) {
        kernel <- tolower(kernel)
        # Dcts: number of continuous (non-factor, non-character) columns.
        Dcts <- self$D - length(self$convert_formula_data$factors) -
          length(self$convert_formula_data$chars)
        # NOTE(review): when every column is a factor/char, kernel is set
        # to the constant 1 and factor kernels are multiplied on below.
        if (Dcts < .5) {
          kernel <- 1
        } else if (kernel %in% c("gaussian", "gauss")) {
          kernel <- Gaussian$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("matern32", "m32", "matern3/2",
                                 "matern3_2")) {
          kernel <- Matern32$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("matern52", "m52", "matern5/2",
                                 "matern5_2")) {
          kernel <- Matern52$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("exp", "exponential",
                                 "m12", "matern12",
                                 "matern1/2", "matern1_2")) {
          kernel <- Exponential$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("ratquad", "rationalquadratic", "rq")) {
          kernel <- RatQuad$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("powerexponential", "powexp", "pe",
                                 "powerexp")) {
          kernel <- PowerExp$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("triangle", "tri")) {
          kernel <- Triangle$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("periodic", "period", "per")) {
          kernel <- Periodic$new(D=Dcts, useC=useC)
        } else if (kernel %in% c("cubic", "cube", "cub")) {
          kernel <- Cubic$new(D=Dcts, useC=useC)
        } else {
          stop(paste0("Kernel given to GauPro_kernel_model (",
                      kernel, ") is not valid. ",
                      'Consider using "Gaussian" or "Matern52".'))
        }
        # Add factor kernels for factor/char dimensions
        if (self$D - Dcts > .5) {
          # kernel over cts needs to ignore these dims
          if (Dcts > .5) {
            igninds <- c(
              unlist(sapply(self$convert_formula_data$factors,
                            function(x) {x$index})),
              unlist(sapply(self$convert_formula_data$chars,
                            function(x) {x$index}))
            )
            kernel <- IgnoreIndsKernel$new(k=kernel,
                                           ignoreinds=igninds)
          }
          # Ordered factors get an ordered kernel; unordered factors and
          # characters get a latent-embedding kernel (2-D latent space
          # when there are 3+ levels, else 1-D).
          for (i in seq_along(self$convert_formula_data$factors)) {
            nlevels_i <- length(self$convert_formula_data$factors[[i]]$levels)
            if (self$convert_formula_data$factors[[i]]$ordered) {
              kernel_i <- OrderedFactorKernel$new(
                D=1,
                xindex=self$convert_formula_data$factors[[i]]$index,
                nlevels=nlevels_i, useC=useC
              )
            } else {
              kernel_i <- LatentFactorKernel$new(
                D=1,
                xindex=self$convert_formula_data$factors[[i]]$index,
                nlevels=nlevels_i,
                latentdim= if (nlevels_i>=3) {2} else {1},
                useC=useC
              )
            }
            kernel <- kernel * kernel_i
          }
          for (i in seq_along(self$convert_formula_data$chars)) {
            nlevels_i <- length(self$convert_formula_data$chars[[i]]$vals)
            kernel_i <- LatentFactorKernel$new(
              D=1,
              xindex=self$convert_formula_data$chars[[i]]$index,
              nlevels=nlevels_i,
              latentdim= if (nlevels_i>=3) {2} else {1},
              useC=useC
            )
            kernel <- kernel * kernel_i
          }
        }
        self$kernel <- kernel
      } else {
        stop(paste0("Kernel given to GauPro_kernel_model is not valid. ",
                    'Consider using "Gaussian" or "Matern52".'))
      }
      # Check that kernel is valid
      # Each input index may appear in at most one factor kernel and
      # never in both a continuous and a factor kernel.
      ctsinds <- find_kernel_cts_dims(self$kernel)
      facinds <- find_kernel_factor_dims(self$kernel)
      if (length(facinds) > .5) {
        facinds <- facinds[seq(1, length(facinds), 2)]
      }
      cts_and_fac <- intersect(ctsinds, facinds)
      if (length(cts_and_fac) > .5) {
        stop(paste0(c("Invalid kernel: index", cts_and_fac,
                      " appear in both continuous and factor kernels"),
                    collapse = ' '))
      }
      if (anyDuplicated(facinds) > .5) {
        stop(paste0(c("Invalid kernel: index", facinds[anyDuplicated(facinds)],
                      " appears in multiple factor kernels"),
                    collapse = ' '))
      }
      # Set trend
      if (missing(trend)) {
        self$trend <- trend_c$new()
      } else if ("GauPro_trend" %in% class(trend)) {
        self$trend <- trend
      } else if ("R6ClassGenerator" %in% class(trend)) {
        self$trend <- trend$new(D=self$D)
      }
      # Clamp the starting nugget into [nug.min, nug.max].
      stopifnot(nug.min <= nug.max)
      self$nug <- min(max(nug, nug.min), nug.max)
      self$nug.min <- nug.min
      self$nug.max <- nug.max
      self$nug.est <- nug.est
      # if (nug.est) {stop("Can't estimate nugget now")}
      self$param.est <- param.est
      self$useC <- useC
      self$useGrad <- useGrad
      self$parallel <- parallel
      if (self$parallel) {
        if (parallel_cores == "detect") {
          self$parallel_cores <- parallel::detectCores()
        } else {
          self$parallel_cores <- parallel_cores
        }
      } else {self$parallel_cores <- 1}
      self$restarts <- restarts
      if (optimizer %in% c("L-BFGS-B", "BFGS", "lbfgs", "genoud")) {
        self$optimizer <- optimizer
      } else {
        stop(paste0('optimizer must be one of c("L-BFGS-B", "BFGS",',
                    ' "lbfgs, "genoud")'))
      }
      stopifnot(length(track_optim) == 1, is.logical(track_optim))
      self$track_optim <- track_optim
      self$update_K_and_estimates() # Need to get mu_hat before starting
      self$fit()
      invisible(self)
    },
# initialize_GauPr = function() {
# },
#' @description Fit model
#' @param X Inputs
#' @param Z Outputs
    fit = function(X, Z) {
      # NOTE(review): the X and Z arguments are accepted but never used
      # here; refitting works off the data already stored on the object.
      # Confirm callers do not expect this call to add new data (that is
      # what $update() with Xnew/Znew is for).
      self$update()
    },
#' @description Update covariance matrix and estimates
update_K_and_estimates = function () {
# Update K, Kinv, mu_hat, and s2_hat, maybe nugget too
self$K <- self$kernel$k(self$X) + diag(self$kernel$s2 * self$nug,
self$N)
while(T) {
try.chol <- try(self$Kchol <- chol(self$K), silent = T)
if (!inherits(try.chol, "try-error")) {break}
warning("Can't Cholesky, increasing nugget #7819553")
oldnug <- self$nug
self$nug <- max(1e-8, 2 * self$nug)
self$K <- self$K + diag(self$kernel$s2 * (self$nug - oldnug),
self$N)
cat("Increasing nugget to get invertibility from ", oldnug, ' to ',
self$nug, "\n")
}
self$Kinv <- chol2inv(self$Kchol)
# self$mu_hat <- sum(self$Kinv %*% self$Z) / sum(self$Kinv)
self$mu_hatX <- self$trend$Z(X=self$X)
self$Kinv_Z_minus_mu_hatX <- c(self$Kinv %*% (self$Z - self$mu_hatX))
# self$s2_hat <- c(t(self$Z - self$mu_hat) %*% self$Kinv %*%
# (self$Z - self$mu_hat) / self$N)
self$s2_hat <- self$kernel$s2
},
#' @description Predict for a matrix of points
#' @param XX points to predict at
#' @param se.fit Should standard error be returned?
#' @param covmat Should covariance matrix be returned?
#' @param split_speed Should the matrix be split for faster predictions?
#' @param mean_dist Should the error be for the distribution of the mean?
#' @param return_df When returning se.fit, should it be returned in
#' a data frame? Otherwise it will be a list, which is faster.
    predict = function(XX, se.fit=F, covmat=F, split_speed=F, mean_dist=FALSE,
                       return_df=TRUE) {
      # Alias for $pred(); all arguments are forwarded unchanged.
      self$pred(XX=XX, se.fit=se.fit, covmat=covmat,
                split_speed=split_speed, mean_dist=mean_dist,
                return_df=return_df)
    },
#' @description Predict for a matrix of points
#' @param XX points to predict at
#' @param se.fit Should standard error be returned?
    #' @param covmat Should covariance matrix be returned?
    #' @param split_speed Should the matrix be split for faster predictions?
    #' @param mean_dist Should the error be for the distribution of the mean?
    #' @param return_df When returning se.fit, should it be returned in
    #' a data frame? Otherwise it will be a list, which is faster.
    pred = function(XX, se.fit=F, covmat=F, split_speed=F, mean_dist=FALSE,
                    return_df=TRUE) {
      # Normalize the input: accept a data.frame (optionally run through
      # the model formula), a matrix, or a numeric vector, and coerce it
      # to an N x D numeric matrix before predicting.
      if (!is.null(self$formula) && is.data.frame(XX)) {
        XX <- convert_X_with_formula(XX, self$convert_formula_data,
                                     self$formula)
      }
      if (is.data.frame(XX)) {
        XX <- as.matrix(XX)
      }
      if (is.matrix(XX)) {
        stopifnot(is.numeric(XX))
      } else {
        if (is.numeric(XX)) {
          # A bare numeric vector is a column of inputs when D==1,
          # or a single D-dimensional point when its length is D.
          if (self$D == 1) XX <- matrix(XX, ncol=1)
          else if (length(XX) == self$D) XX <- matrix(XX, nrow=1)
          else stop(paste0('Predict input should be matrix with ', self$D,
                           ' columns or vector of length ', self$D))
        } else {
          stop(paste("Bad type of XX given to pred"))
        }
      }
      stopifnot(ncol(XX) == self$D)
      N <- nrow(XX)
      # Split speed makes predictions for groups of rows separately.
      # Fastest is for about 40.
      # Batching is only used for larger inputs (N >= 200) and never when
      # the full covariance matrix is requested, since covmat needs all
      # rows at once.
      if (split_speed & N >= 200 & !covmat) {#print('In split speed')
        mn <- numeric(N)
        if (se.fit) {
          s2 <- numeric(N)
          se <- numeric(N)
          #se <- rep(0, length(mn)) # NEG VARS will be 0 for se,
          # NOT SURE I WANT THIS
        }
        ni <- 40 # batch size
        Nni <- ceiling(N/ni)-1 # index of the last batch (0-based)
        for (j in 0:Nni) {
          # Batch j covers rows (j*ni+1) .. min((j+1)*ni, N).
          XXj <- XX[(j*ni+1):(min((j+1)*ni,N)), , drop=FALSE]
          # kxxj <- self$corr_func(XXj)
          # kx.xxj <- self$corr_func(self$X, XXj)
          predj <- self$pred_one_matrix(XX=XXj, se.fit=se.fit,
                                        covmat=covmat, mean_dist=mean_dist,
                                        return_df=return_df)
          #mn[(j*ni+1):(min((j+1)*ni,N))] <- pred_meanC(XXj, kx.xxj,
          #                           self$mu_hat, self$Kinv, self$Z)
          if (!se.fit) { # if no se.fit, just set vector
            mn[(j*ni+1):(min((j+1)*ni,N))] <- predj
          } else { # otherwise set all three from data.frame
            mn[(j*ni+1):(min((j+1)*ni,N))] <- predj$mean
            #s2j <- pred_var(XXj, kxxj, kx.xxj, self$s2_hat, self$Kinv,
            #                self$Z)
            #s2[(j*ni+1):(min((j+1)*ni,N))] <- s2j
            s2[(j*ni+1):(min((j+1)*ni,N))] <- predj$s2
            se[(j*ni+1):(min((j+1)*ni,N))] <- predj$se
          }
        }
        #se[s2>=0] <- sqrt(s2[s2>=0])
        # # Unnormalize if needed
        # if (self$normalize) {
        #   mn <- mn * self$normalize_sd + self$normalize_mean
        #   if (se.fit) {
        #     se <- se * self$normalize_sd
        #     s2 <- s2 * self$normalize_sd^2
        #   }
        # }
        # Unnormalization is handled inside pred_one_matrix, so results
        # can be returned directly here.
        if (!se.fit) {# covmat is always FALSE for split_speed } &
          # !covmat) {
          return(mn)
        } else {
          return(data.frame(mean=mn, s2=s2, se=se))
        }
      } else { # Not splitting, just do it all at once
        pred1 <- self$pred_one_matrix(XX=XX, se.fit=se.fit,
                                      covmat=covmat,
                                      mean_dist=mean_dist,
                                      return_df=return_df)
        return(pred1)
      }
    },
    #' @description Predict for a matrix of points. Workhorse for `pred`;
    #' assumes `XX` is already a numeric matrix with D columns.
    #' @param XX points to predict at
    #' @param se.fit Should standard error be returned?
    #' @param covmat Should covariance matrix be returned?
    #' @param return_df When returning se.fit, should it be returned in
    #' a data frame? Otherwise it will be a list, which is faster.
    #' @param mean_dist Should the error be for the distribution of the mean?
    pred_one_matrix = function(XX, se.fit=F, covmat=F, return_df=FALSE,
                               mean_dist=FALSE) {
      # input should already be checked for matrix
      # kxx <- self$kernel$k(XX) + diag(self$nug * self$s2_hat, nrow(XX))
      # Cross-covariance between the design points and prediction points.
      kx.xx <- self$kernel$k(self$X, XX)
      # mn <- pred_meanC(XX, kx.xx, self$mu_hat, self$Kinv, self$Z)
      # Changing to use trend, mu_hat is matrix
      # mu_hat_matX <- self$trend$Z(self$X)
      mu_hat_matXX <- self$trend$Z(XX)
      # mn <- pred_meanC_mumat(XX, kx.xx, self$mu_hatX, mu_hat_matXX,
      #                        self$Kinv, self$Z)
      # New way using _fast is O(n^2)
      mn <- pred_meanC_mumat_fast(XX, kx.xx, self$Kinv_Z_minus_mu_hatX,
                                  mu_hat_matXX)
      # It's supposed to return a vector, but it's a matrix
      mn <- mn[, 1]
      # Undo the Z normalization for the returned mean.
      if (self$normalize) {
        mn <- mn * self$normalize_sd + self$normalize_mean
      }
      if (!se.fit & !covmat) {
        return(mn)
      }
      if (covmat) {
        # new for kernel
        # kxx <- self$kernel$k(XX) + diag(self$nug * self$s2_hat, nrow(XX))
        kxx <- self$kernel$k(XX)
        # The mean doesn't get the nugget added
        if (!mean_dist) {
          kxx <- kxx + diag(self$nug * self$s2_hat, nrow(XX))
        }
        # Posterior covariance of the predictions.
        covmatdat <- kxx - t(kx.xx) %*% self$Kinv %*% kx.xx
        if (self$normalize) {
          covmatdat <- covmatdat * self$normalize_sd ^ 2
        }
        # #covmatdat <- self$pred_var(XX, kxx=kxx, kx.xx=kx.xx, covmat=T)
        # covmatdat <- pred_cov(XX, kxx, kx.xx, self$s2_hat, self$Kinv,
        #                       self$Z)
        s2 <- diag(covmatdat)
        # se <- rep(1e-8, length(mn)) # NEG VARS will be 0 for se,
        # # NOT SURE I WANT THIS
        # se[s2>=0] <- sqrt(s2[s2>=0])
        # Numerical error can make variances slightly negative; clamp
        # them to a small positive floor before taking sqrt.
        if (any(s2 < 0)) {
          if (mean_dist) { # mean can have zero s2
            min_s2 <- 0
          } else { # pred var should always be at least this big
            # NOTE(review): s2/covmatdat are scaled by normalize_sd^2
            # above, but this floor multiplies by normalize_sd unsquared
            # -- confirm whether ^2 was intended.
            min_s2 <- max(.Machine$double.eps, self$s2_hat * self$nug *
                            if (self$normalize) {self$normalize_sd} else {1})
          }
          warning(paste0("Negative s2 predictions are being set to ",
                         min_s2, " (", sum(s2<0)," values, min=", min(s2),").",
                         " covmat is not being altered."))
          s2 <- pmax(s2, min_s2)
        }
        se <- sqrt(s2)
        return(list(mean=mn, s2=s2, se=se, cov=covmatdat))
      }
      # new for kernel
      # covmatdat <- kxx - t(kx.xx) %*% self$Kinv %*% kx.xx
      # s2 <- diag(covmatdat)
      # Better way doesn't do full matmul twice, 2x speed for 50 rows,
      # 20x speedup for 1000 rows
      # This method is bad since only diag of k(XX) is needed
      # kxx <- self$kernel$k(XX) + diag(self$nug * self$s2_hat, nrow(XX))
      # s2 <- diag(kxx) - colSums( (kx.xx) * (self$Kinv %*% kx.xx))
      # This is bad since apply is actually really slow for a
      # simple function like this
      # diag.kxx <- self$nug * self$s2_hat + apply(XX, 1,
      #            function(xrow) {self$kernel$k(xrow)})
      # s2 <- diag.kxx - colSums( (kx.xx) * (self$Kinv %*% kx.xx))
      # This method is fastest, assumes that correlation of point
      # with itself is 1, which is true for basic kernels.
      # diag.kxx <- self$nug * self$s2_hat + rep(self$s2_hat, nrow(XX))
      diag.kxx <- rep(self$s2_hat, nrow(XX))
      if (!mean_dist) {
        diag.kxx <- diag.kxx + self$nug * self$s2_hat
      }
      s2 <- diag.kxx - colSums( (kx.xx) * (self$Kinv %*% kx.xx))
      if (self$normalize) {
        s2 <- s2 * self$normalize_sd ^ 2
      }
      # # s2 <- pred_var(XX, kxx, kx.xx, self$s2_hat, self$Kinv, self$Z)
      # se <- rep(0, length(mn)) # NEG VARS will be 0 for se,
      # # NOT SURE I WANT THIS
      # se[s2>=0] <- sqrt(s2[s2>=0])
      # Same negative-variance clamping as in the covmat branch above.
      if (any(s2 < 0)) {
        if (mean_dist) { # mean can have zero s2
          min_s2 <- 0
        } else { # pred var should always be at least this big
          # NOTE(review): see note above about normalize_sd vs
          # normalize_sd^2 for this floor.
          min_s2 <- max(.Machine$double.eps, self$s2_hat * self$nug *
                          if (self$normalize) {self$normalize_sd} else {1})
        }
        warning(paste0("Negative s2 predictions are being set to ",
                       min_s2, " (", sum(s2<0)," values, min=", min(s2),")"))
        s2 <- pmax(s2, min_s2)
      }
      se <- sqrt(s2)
      # se.fit but not covmat
      if (return_df) {
        # data.frame is really slow compared to cbind or list
        data.frame(mean=mn, s2=s2, se=se)
      } else {
        list(mean=mn, s2=s2, se=se)
      }
    },
#' @description Predict mean
#' @param XX points to predict at
#' @param kx.xx Covariance of X with XX
pred_mean = function(XX, kx.xx) { # 2-8x faster to use pred_meanC
# c(self$mu_hat + t(kx.xx) %*% self$Kinv %*% (self$Z - self$mu_hat))
# mu_hat_matX <- self$trend$Z(self$X)
mu_hat_matXX <- self$trend$Z(XX)
c(mu_hat_matXX + t(kx.xx) %*% self$Kinv %*% (self$Z - self$mu_hatX))
},
#' @description Predict mean using C
#' @param XX points to predict at
#' @param kx.xx Covariance of X with XX
pred_meanC = function(XX, kx.xx) { # Don't use if R uses pass by copy(?)
# pred_meanC(XX, kx.xx, self$mu_hat, self$Kinv, self$Z)
# mu_hat_matX <- self$trend$Z(self$X)
mu_hat_matXX <- self$trend$Z(XX)
# This way is O(n^2)
# pred_meanC_mumat(XX, kx.xx, self$mu_hatX, mu_hat_matXX,
# self$Kinv, self$Z)
# New way is O(n), but not faster in R
# mu_hat_matXX +
# colSums(sweep(kx.xx, 1, self$Kinv_Z_minus_mu_hatX, `*`))
# Rcpp code is slightly fast for small n, 2x for bigger n,
pred_meanC_mumat_fast(XX, kx.xx, self$Kinv_Z_minus_mu_hatX,
mu_hat_matXX)
},
#' @description Predict variance
#' @param XX points to predict at
#' @param kxx Covariance of XX with itself
#' @param kx.xx Covariance of X with XX
#' @param covmat Should the covariance matrix be returned?
pred_var = function(XX, kxx, kx.xx, covmat=F) {
# 2-4x faster to use C functions pred_var and pred_cov
self$s2_hat * diag(kxx - t(kx.xx) %*% self$Kinv %*% kx.xx)
},
    #' @description leave one out predictions
    #' @param se.fit Should standard errors be included?
    pred_LOO = function(se.fit=FALSE) {
      # Predict LOO (leave-one-out) on data used to fit model
      # See vignette for explanation of equations
      # If se.fit==T, then calculate the LOO se and the
      # corresponding t score
      Z_LOO <- numeric(self$N)
      if (se.fit) {Z_LOO_s2 <- numeric(self$N)}
      Z_trend <- self$trend$Z(self$X)
      for (i in 1:self$N) {
        # Build the inverse of K with row/column i removed from the
        # pieces of the full Kinv (avoids refactorizing K each time).
        E <- self$Kinv[-i, -i] # Kinv without i
        b <- self$K[ i, -i] # K between i and rest
        g <- self$Kinv[ i, -i] # Kinv between i and rest
        # Kinv for K if i wasn't in K
        Ainv <- E + E %*% b %*% g / (1-sum(g*b))
        # Kriging mean at X[i,] using all points except i.
        Zi_LOO <- Z_trend[i] + c(b %*% Ainv %*% (self$Z[-i] - Z_trend[-i]))
        Z_LOO[i] <- Zi_LOO
        if (se.fit) {
          Zi_LOO_s2 <- self$K[i,i] - c(b %*% Ainv %*% b)
          # Have trouble when s2 < 0, set to small number
          Zi_LOO_s2 <- max(Zi_LOO_s2, 1e-16)
          Z_LOO_s2[i] <- Zi_LOO_s2
        }
      }
      # Undo Z normalization so results are on the original scale.
      if (self$normalize) {
        Z_LOO <- Z_LOO * self$normalize_sd + self$normalize_mean
        if (se.fit) {
          Z_LOO_s2 <- Z_LOO_s2 * self$normalize_sd ^ 2
        }
      }
      if (se.fit) { # Return df with se and t if se.fit
        Z_LOO_se <- sqrt(Z_LOO_s2)
        # Compare against the observations on the original scale.
        Zref <- if (self$normalize) {
          self$Z * self$normalize_sd + self$normalize_mean
        } else {self$Z}
        t_LOO <- (Zref - Z_LOO) / Z_LOO_se
        data.frame(fit=Z_LOO, se.fit=Z_LOO_se, t=t_LOO)
      } else { # Else just mean LOO
        Z_LOO
      }
    },
    #' @description Predict variance at `pred_points` as it would be if
    #' all of `add_points` were added to the design at once.
    #' @param add_points Points to add
    #' @param pred_points Points to predict at
    pred_var_after_adding_points = function(add_points, pred_points) {
      # Calculate pred_var at pred_points after add_points
      # have been added to the design self$X
      # S is add points
      # G <- solve(self$pred(add_points, covmat = TRUE)$cov)
      # FF <- -self$Kinv %*% 1
      # Coerce a single point given as a vector into a 1-row matrix.
      if (!is.matrix(add_points)) {
        if (length(add_points) != self$D) {
          stop("add_points must be matrix or of length D")
        }
        else {add_points <- matrix(add_points, nrow=1)}
      } else if (ncol(add_points) != self$D) {
        stop("add_points must have dimension D")
      }
      # Blocks of the augmented covariance matrix [X; S] and the pieces
      # of its inverse (E, FF, G), built from the existing Kinv so the
      # augmented matrix never has to be factorized directly.
      C_S <- self$kernel$k(add_points)
      C_S <- C_S + self$s2_hat * diag(self$nug, nrow(C_S)) # Add nugget
      C_XS <- self$kernel$k(self$X, add_points)
      C_X_inv_C_XS <- self$Kinv %*% C_XS
      G <- solve(C_S - t(C_XS) %*% C_X_inv_C_XS)
      FF <- - C_X_inv_C_XS %*% G
      E <- self$Kinv - C_X_inv_C_XS %*% t(FF)
      # Speed this up a lot by avoiding apply and doing all at once
      # Assume single point cov is s2(1+nug)
      C_a <- self$s2_hat * (1 + self$nug)
      C_Xa <- self$kernel$k(self$X, pred_points) # length n vec, not matrix
      C_Sa <- self$kernel$k(add_points, pred_points)
      # Quadratic form with the augmented inverse, vectorized over all
      # prediction points via colSums.
      C_a - (colSums(C_Xa * (E %*% C_Xa)) +
               2 * colSums(C_Xa * (FF %*% C_Sa)) +
               colSums(C_Sa * (G %*% C_Sa)))
    },
    #' @description Predict variance reductions after adding each point separately
    #' @param add_points Points to add
    #' @param pred_points Points to predict at
    pred_var_after_adding_points_sep = function(add_points, pred_points) {
      # Calculate pred_var at pred_points after each add_points
      # has individually (separately) been added to the design self$X
      # A vectorized version of pred_var_after_adding_points_sep
      # S is add points, a is pred_points in variables below
      # Output is matrix of size nrow(pred_points) by nrow(add_points)
      # where (i,j) element is predictive variance at pred_points[i,]
      # after add_points[j,] has been added to current design
      # Equations below, esp with sweep and colSums are confusing
      # but work out. Make it fast and vectorized.
      # Check against pred_var_after_adding_points.
      # Coerce a single point given as a vector into a 1-row matrix.
      if (!is.matrix(add_points)) {
        if (length(add_points) != self$D) {
          stop("add_points must be matrix or of length D")
        }
        else {add_points <- matrix(add_points, nrow=1)}
      } else if (ncol(add_points) != self$D) {
        stop("add_points must have dimension D")
      }
      # Self-covariance of each added point; assumes k(x,x)=1 so the
      # covariance is s2*(1+nug).
      C_S <- self$s2_hat * (1+self$nug)
      C_XS <- self$kernel$k(self$X, add_points)
      C_X_inv_C_XS <- self$Kinv %*% C_XS
      # G[j] is the scalar Schur complement for add_points[j,] alone.
      G <- 1 / (C_S - colSums(C_XS * C_X_inv_C_XS))
      # Speed this up a lot by avoiding apply and doing all at once
      # Assume single point cov is s2(1+nug)
      C_a <- self$s2_hat * (1 + self$nug)
      C_Xa <- self$kernel$k(self$X, pred_points) # matrix
      C_Sa <- self$kernel$k(add_points, pred_points) # matrix
      # t1/t2/t3 are the three quadratic-form terms, each vectorized
      # over all (pred point, add point) pairs.
      t1a <- colSums(C_Xa * (self$Kinv %*% C_Xa))
      t1 <- sweep(sweep((t(C_Xa) %*% C_X_inv_C_XS)^2, 2, G, `*`),
                  1, t1a, `+`)
      t2 <- -2*sweep((t(C_X_inv_C_XS) %*% C_Xa) * C_Sa, 1, G, `*`)
      t3 <- sweep((C_Sa)^2, 1, G, `*`)
      return(C_a - (t1 + t(t2 + t3)))
    },
#' @description Predict variance reduction for a single point
#' @param add_point Point to add
#' @param pred_points Points to predict at
pred_var_reduction = function(add_point, pred_points) {
# Calculate pred_var at pred_points after add_point
# have been added to the design self$X
# S is add point
if (!is.vector(add_point) || length(add_point)!=self$D) {
stop("add_point must be vector of length D")
}
C_S <- self$s2_hat * (1 + self$nug) # Assumes correlation structure
C_XS <- self$kernel$k(self$X, add_point)
C_X_inv_C_XS <- as.vector(self$Kinv %*% C_XS)
G <- 1 / c(C_S - t(C_XS) %*% C_X_inv_C_XS)
# Assume single point cov is s2(1+nug)
# C_a <- self$s2_hat * (1 + self$nug)
# pred_var_a_func <- function(a) {
# C_Xa <- self$kernel$k(self$X, a) # length n vector, not matrix
# C_Sa <- self$kernel$k(add_point, a)
# (sum(C_Xa * C_X_inv_C_XS) - C_Sa) ^ 2 * G
# }
# if (is.matrix(pred_points)) {
# if (method1) { # Slow way
# prds <- apply(pred_points, 1, pred_var_a_func)
# } else {
# Speeding it up by getting all at once instead of by row
C_aX <- self$kernel$k(pred_points, self$X) # len n vec, not mat
C_aS <- self$kernel$k(pred_points, add_point)
# (sum(C_Xa * C_X_inv_C_XS) - C_Sa) ^ 2 * G
prds <- (c(C_aX %*% C_X_inv_C_XS) - C_aS) ^ 2 * G
# }
# }
# else {prds <- pred_var_a_func(pred_points)}
prds
},
#' @description Predict variance reductions
#' @param add_points Points to add
#' @param pred_points Points to predict at
pred_var_reductions = function(add_points, pred_points) {
# Calculate pred_var at pred_points after each of add_points
# has been added to the design self$X separately.
# This is a vectorized version of pred_var_reduction,
# to consider all of add_points added together
# use pred_var_after_adding_points
# S is add points, a is pred points
if (!is.matrix(add_points) || ncol(add_points) != self$D) {
stop("add_points must be a matrix with D columns")
}
# C_S <- self$s2_hat * diag(1 + self$nug, nrow(add_points))
# Assumes correlation structure
C_XS <- self$kernel$k(self$X, add_points)
C_X_inv_C_XS <- self$Kinv %*% C_XS
# G <- 1 / c(C_S - t(C_XS) %*% C_X_inv_C_XS)
G <- 1 / (self$s2_hat*(1+self$nug) - colSums(C_XS * C_X_inv_C_XS))
C_aX <- self$kernel$k(pred_points, self$X) # now a matrix
C_aS <- self$kernel$k(pred_points, add_points)
# (sum(C_Xa * C_X_inv_C_XS) - C_Sa) ^ 2 * G
prds <- sweep(((C_aX %*% C_X_inv_C_XS) - C_aS) ^ 2, 2, G, `*`)
prds
},
#' @description Plot the object
#' @param ... Parameters passed to cool1Dplot(), plot2D(), or plotmarginal()
plot = function(...) {
if (self$D == 1) {
self$cool1Dplot(...)
} else if (self$D == 2) {
self$plot2D(...)
} else {
# stop("No plot method for higher than 2 dimension")
self$plotmarginalrandom(...)
}
},
#' @description Make cool 1D plot
#' @param n2 Number of things to plot
#' @param nn Number of things to plot
#' @param col2 color
#' @param ylab y label
#' @param xlab x label
#' @param xmin xmin
#' @param xmax xmax
#' @param ymax ymax
#' @param ymin ymin
#' @param gg Should ggplot2 be used to make plot?
cool1Dplot = function (n2=20, nn=201, col2="green",
xlab='x', ylab='y',
xmin=NULL, xmax=NULL,
ymin=NULL, ymax=NULL,
gg=TRUE
) {
if (self$D != 1) stop('Must be 1D')
if (length(find_kernel_factor_dims(self$kernel)) > 0) {
message("cool1Dplot doesn't work for factor input, using plot1D instead")
return(self$plot1D())
}
# Letting user pass in minx and maxx
if (is.null(xmin)) {
minx <- min(self$X)
} else {
minx <- xmin
}
if (is.null(xmax)) {
maxx <- max(self$X)
} else {
maxx <- xmax
}
# minx <- min(self$X)
# maxx <- max(self$X)
x1 <- minx - .1 * (maxx - minx)
x2 <- maxx + .1 * (maxx - minx)
# nn <- 201
x <- seq(x1, x2, length.out = nn)
px <- self$pred(x, covmat = T, mean_dist=TRUE)
# px$cov <- self$kernel$k(matrix(x,ncol=1))
# n2 <- 20
Sigma.try <- try(newy <- MASS::mvrnorm(n=n2, mu=px$mean,
Sigma=px$cov),
silent = TRUE)
nug_Sig <- 1e-8 # self$nug, now use 1e-8 since self$nug is excluded in pred.
haderror <- FALSE
while (inherits(Sigma.try, "try-error")) {
haderror <- TRUE
# message(paste0("Adding nugget to cool1Dplot: ", nug_Sig))
Sigma.try <- try(
newy <- MASS::mvrnorm(n=n2, mu=px$mean,
Sigma=px$cov + diag(nug_Sig, nrow(px$cov))),
silent = TRUE)
# if (inherits(Sigma.try2, "try-error")) {
# stop("Can't do cool1Dplot")
# }
nug_Sig <- 2*nug_Sig
}
if (haderror) {
message(paste0("Adding variance to cool1Dplot: ", nug_Sig))
}
if (n2==1) { # Avoid error when n2=1
newy <- matrix(newy, nrow=1)
}
# plot(x,px$me, type='l', lwd=4, ylim=c(min(newy),max(newy)),
# xlab=xlab, ylab=ylab)
# sapply(1:n2, function(i) points(x, newy[i,], type='l', col=col2))
# points(self$X, self$Z, pch=19, col=1, cex=2)
# Setting ylim, giving user option
if (is.null(ymin)) {
miny <- min(newy)
} else {
miny <- ymin
}
if (is.null(ymax)) {
maxy <- max(newy)
} else {
maxy <- ymax
}
if (gg) {
xdf <- as.data.frame(cbind(x=x, newy=t(newy)))
xdf2 <- tidyr::pivot_longer(xdf, 1 + 1:n2)
# xdf2 %>% str
ggplot2::ggplot() +
ggplot2::geom_line(data=xdf2,
ggplot2::aes(x, value, group=name),
alpha=1, color=col2) +
ggplot2::geom_line(ggplot2::aes(x, px$mean), linewidth=2) +
ggplot2::geom_point(ggplot2::aes(self$X, if (self$normalize) {
self$Z * self$normalize_sd + self$normalize_mean
} else {self$Z}),
size=4, pch=21, color='white', fill='black', stroke=1) +
ggplot2::xlab(NULL) +
ggplot2::ylab(NULL)
} else {
# Redo to put gray lines on bottom
for (i in 1:n2) {
if (i == 1) {
plot(x, newy[i,], type='l', col=col2,
# ylim=c(min(newy),max(newy)),
ylim=c(miny,maxy),
xlab=xlab, ylab=ylab)
} else {
points(x, newy[i,], type='l', col=col2)
}
}
points(x,px$me, type='l', lwd=4)
points(self$X,
if (self$normalize) {
self$Z * self$normalize_sd + self$normalize_mean
} else {self$Z},
pch=19, col=1, cex=2)
}
},
#' @description Make 1D plot
#' @param n2 Number of things to plot
#' @param nn Number of things to plot
#' @param col2 Color of the prediction interval
#' @param col3 Color of the interval for the mean
#' @param ylab y label
#' @param xlab x label
#' @param xmin xmin
#' @param xmax xmax
#' @param ymax ymax
#' @param ymin ymin
#' @param gg Should ggplot2 be used to make plot?
plot1D = function(n2=20, nn=201, col2=2, col3=3, #"gray",
xlab='x', ylab='y',
xmin=NULL, xmax=NULL,
ymin=NULL, ymax=NULL,
gg=TRUE) {
if (self$D != 1) stop('Must be 1D')
if (length(find_kernel_factor_dims(self$kernel)) > 0) {
# Factor input
fd <- find_kernel_factor_dims(self$kernel)
df <- data.frame(x=1:fd[2])
pred <- self$pred(df$x, se=T)
predmean <- self$pred(df$x, se=T, mean_dist = T)
df2 <- data.frame(
x=df$x,
pred=pred$mean,
predse=pred$se,
meanpred=predmean$mean,
meanpredse=predmean$se
)
df2
ggplot2::ggplot(df2, ggplot2::aes(x=x, xend=x)) +
ggplot2::geom_segment(ggplot2::aes(y=pred+2*predse,
yend=pred-2*predse),
color="red", linewidth=4) +
ggplot2::geom_segment(ggplot2::aes(y=meanpred+2*meanpredse,
yend=meanpred-2*meanpredse),
color="green", linewidth=6) +
ggplot2::geom_jitter(ggplot2::aes(x,y),
data=data.frame(x=c(self$X), y=c(self$Z)),
width=.1, height=0, size=2) +
ggplot2::ylab(NULL)
} else { # Cts input
# Letting user pass in minx and maxx
if (is.null(xmin)) {
minx <- min(self$X)
} else {
minx <- xmin
}
if (is.null(xmax)) {
maxx <- max(self$X)
} else {
maxx <- xmax
}
# minx <- min(self$X)
# maxx <- max(self$X)
x1 <- minx - .1 * (maxx - minx)
x2 <- maxx + .1 * (maxx - minx)
# nn <- 201
x <- seq(x1, x2, length.out = nn)
px <- self$pred(x, se=T)
pxmean <- self$pred(x, se=T, mean_dist=T)
# n2 <- 20
# Setting ylim, giving user option
if (is.null(ymin)) {
miny <- min(px$mean - 2*px$se)
} else {
miny <- ymin
}
if (is.null(ymax)) {
maxy <- max(px$mean + 2*px$se)
} else {
maxy <- ymax
}
if (gg) {
ggplot2::ggplot(px, ggplot2::aes(x, mean)) +
ggplot2::geom_line(data=pxmean, ggplot2::aes(y=mean+2*se),
color="green", linewidth=2) +
ggplot2::geom_line(data=pxmean, ggplot2::aes(y=mean-2*se),
color="green", linewidth=2) +
ggplot2::geom_line(ggplot2::aes(y=mean+2*se),
color="red", linewidth=2) +
ggplot2::geom_line(ggplot2::aes(y=mean-2*se),
color="red", linewidth=2) +
ggplot2::geom_line(linewidth=2) +
ggplot2::geom_point(data=data.frame(
x=unname(self$X),
y=if (self$normalize) {
self$Z * self$normalize_sd + self$normalize_mean
} else {self$Z}),
ggplot2::aes(x,y),
size=4,
# Make points have a border
color="gray", fill="black", pch=21
) +
ggplot2::ylab(NULL) +
ggplot2::xlab(if (is.null(colnames(self$X))) {"X"} else {
colnames(self$X)})
} else {
plot(x, px$mean+2*px$se, type='l', col=col2, lwd=2,
# ylim=c(min(newy),max(newy)),
ylim=c(miny,maxy),
xlab=xlab, ylab=ylab,
# main=paste0("Predicted output (95% interval for mean is green,",
# " 95% interval for sample is red)")
)
legend(x='topleft',
legend=c('95% prediction','95% mean'),
fill=2:3)
# Mean interval
points(x, pxmean$mean+2*pxmean$se, type='l', col=col3, lwd=2)
points(x, pxmean$mean-2*pxmean$se, type='l', col=col3, lwd=2)
# Prediction interval
points(x, px$mean+2*px$se, type='l', col=col2, lwd=2)
points(x, px$mean-2*px$se, type='l', col=col2, lwd=2)
# Mean line
points(x,px$me, type='l', lwd=4)
# Data points
points(self$X,
if (self$normalize) {
self$Z * self$normalize_sd + self$normalize_mean
} else {self$Z},
pch=19, col=1, cex=2)
}
}
},
#' @description Make 2D plot
#' @param mean Should the mean be plotted?
#' @param se Should the standard error of prediction be plotted?
#' @param horizontal If plotting mean and se, should they be next to each
#' other?
#' @param n Number of points along each dimension
plot2D = function(se=FALSE, mean=TRUE, horizontal=TRUE, n=50) {
if (self$D != 2) {stop("plot2D only works in 2D")}
stopifnot(is.logical(se), length(se)==1)
stopifnot(is.logical(mean), length(mean)==1)
stopifnot(is.logical(horizontal), length(horizontal)==1)
stopifnot(mean || se)
mins <- apply(self$X, 2, min)
maxs <- apply(self$X, 2, max)
xmin <- mins[1] - .03 * (maxs[1] - mins[1])
xmax <- maxs[1] + .03 * (maxs[1] - mins[1])
ymin <- mins[2] - .03 * (maxs[2] - mins[2])
ymax <- maxs[2] + .03 * (maxs[2] - mins[2])
if (mean) {
plotmean <- ContourFunctions::cf_func(self$predict, batchmax=Inf,
xlim=c(xmin, xmax),
ylim=c(ymin, ymax),
pts=self$X,
n=n,
gg=TRUE)
}
if (se) {
plotse <- ContourFunctions::cf_func(
function(X) {self$predict(X, se.fit=T)$se}, batchmax=Inf,
xlim=c(xmin, xmax),
ylim=c(ymin, ymax),
pts=self$X,
n=n,
gg=TRUE)
}
if (mean && se) {
gridExtra::grid.arrange(plotmean, plotse,
nrow=if (horizontal) {1} else{2})
} else if (mean) {
plotmean
} else if (se) {
plotse
} else {
stop("Impossible #819571924")
}
},
#' @description Plot marginal. For each input, hold all others at a constant
#' value and adjust it along it's range to see how the prediction changes.
#' @param npt Number of lines to make. Each line represents changing a
#' single variable while holding the others at the same values.
#' @param ncol Number of columnsfor the plot
plotmarginal = function(npt=5, ncol=NULL) {
# pt <- colMeans(self$X)
# pt
pt <- lhs::maximinLHS(n=npt, k=self$D)
pt <- sweep(pt, 2, apply(self$X, 2, max) - apply(self$X, 2, min), "*")
pt <- sweep(pt, 2, apply(self$X, 2, min), "+")
factorinfo <- find_kernel_factor_dims(self$kernel)
if (length(factorinfo > 0)) {
factorindexes <- factorinfo[2*(1:(length(factorinfo)/2))-1]
factornlevels <- factorinfo[2*(1:(length(factorinfo)/2))]
for (i in 1:length(factorindexes)) {
if (!(pt[factorindexes[i]] %in% 1:factornlevels[i])) {
pt[, factorindexes[i]] <- sample(1:factornlevels[i], npt, replace=T)
}
}
} else {
factorindexes <- c()
}
icolnames <- if (is.null(colnames(self$X))) {
paste0("X", 1:ncol(self$X))
} else {
colnames(self$X)
}
pts <- NULL
for (j in 1:npt) {
for (i in 1:ncol(self$X)) {
if (i %in% factorindexes) {
ind_i <- which(factorindexes == i)
xseq <- 1:(factorinfo[2*ind_i])
} else {
xseq <- seq(min(self$X[,i]), max(self$X[,i]), l=51)
}
Xmat <- matrix(pt[j,], byrow=T, ncol=ncol(pt), nrow=length(xseq))
Xmat[, i] <- xseq
pX <- suppressWarnings(self$pred(Xmat, se.fit = T))
pXm <- suppressWarnings(self$pred(Xmat, se.fit = T, mean_dist=T))
pts <- rbind(pts,
data.frame(pred=pX$mean, predse=pX$se, predmeanse=pXm$se,
xi=xseq, i=i, j=j, icolname=icolnames[i]))
}
}
pts2 <- as.data.frame(pts)
# pts2 %>%
# mutate(predupper=pred+2*predse,
# predlower=pred-2*predse)
pts2$predupper <- pts2$pred + 2*pts2$predse
pts2$predlower <- pts2$pred - 2*pts2$predse
pts2$predmeanupper <- pts2$pred + 2*pts2$predmeanse
pts2$predmeanlower <- pts2$pred - 2*pts2$predmeanse
if (length(factorindexes) < .5) {
ggplot2::ggplot(data=pts2, ggplot2::aes(xi, pred, group=j)) +
ggplot2::facet_wrap(.~icolname, scales = "free_x") +
ggplot2::geom_line(ggplot2::aes(y=predmeanupper), color="orange") +
ggplot2::geom_line(ggplot2::aes(y=predmeanlower), color="orange") +
ggplot2::geom_line(ggplot2::aes(y=predupper), color="green") +
ggplot2::geom_line(ggplot2::aes(y=predlower), color="green") +
ggplot2::geom_line(linewidth=1) +
ggplot2::ylab("Predicted Z (95% interval)") +
ggplot2::xlab("x along dimension i")
} else {
# Has at least one factor.
# Convert factor/char back from int
plots <- list()
# ncol <- floor(sqrt(self$D))
# Pick ncol based on plot size/shape and num dims
if (is.null(ncol)) {
ncol <- min(self$D,
max(1,
round(sqrt(self$D)*dev.size()[1]/dev.size()[2])))
}
stopifnot(is.numeric(ncol), length(ncol)==1, ncol>=1, ncol<=self$D)
ylim <- c(min(pts2$predlower), max(pts2$predupper))
for (iii in 1:self$D) {
pts2_iii <- dplyr::filter(pts2, i==iii)
if (iii %in% factorindexes && !is.null(self$convert_formula_data)) {
for (jjj in seq_along(self$convert_formula_data$factors)) {
if (iii == self$convert_formula_data$factors[[jjj]]$index) {
pts2_iii$xi <-
self$convert_formula_data$factors[[jjj]]$levels[pts2_iii$xi]
}
}
for (jjj in seq_along(self$convert_formula_data$chars)) {
if (iii == self$convert_formula_data$chars[[jjj]]$index) {
pts2_iii$xi <-
self$convert_formula_data$chars[[jjj]]$vals[pts2_iii$xi]
}
}
}
stopifnot(is.data.frame(pts2_iii))
plt <- ggplot2::ggplot(data=pts2_iii,
mapping=ggplot2::aes(xi, pred, group=j)) +
ggplot2::facet_wrap(.~icolname, scales = "free_x") +
ggplot2::geom_line(ggplot2::aes(y=predmeanupper), color="orange") +
ggplot2::geom_line(ggplot2::aes(y=predmeanlower), color="orange") +
ggplot2::geom_line(ggplot2::aes(y=predupper), color="green") +
ggplot2::geom_line(ggplot2::aes(y=predlower), color="green") +
ggplot2::geom_line(linewidth=1) +
ggplot2::ylab(NULL) +
ggplot2::xlab(NULL) +
ggplot2::coord_cartesian(ylim=ylim)
if (iii%%ncol != 1) {
plt <- plt +
ggplot2::theme(axis.title.y=ggplot2::element_blank(),
axis.text.y=ggplot2::element_blank(),
axis.ticks.y=ggplot2::element_blank())
}
plots[[iii]] <- plt
}
gridExtra::grid.arrange(grobs=plots,
left="Predicted Z (95% interval)",
bottom='x along dimension i', ncol=ncol)
}
},
#' @description Plot marginal prediction for random sample of inputs
#' @param npt Number of random points to evaluate
#' @param ncol Number of columns in the plot
plotmarginalrandom = function(npt=100, ncol=NULL) {
pt <- lhs::maximinLHS(n=npt, k=self$D)
pt <- sweep(pt, 2, apply(self$X, 2, max) - apply(self$X, 2, min), "*")
pt <- sweep(pt, 2, apply(self$X, 2, min), "+")
factorinfo <- GauPro:::find_kernel_factor_dims(self$kernel)
if (length(factorinfo > 0)) {
factorindexes <- factorinfo[2*(1:(length(factorinfo)/2))-1]
factornlevels <- factorinfo[2*(1:(length(factorinfo)/2))]
for (i in 1:length(factorindexes)) {
if (!(pt[factorindexes[i]] %in% 1:factornlevels[i])) {
pt[, factorindexes[i]] <- sample(1:factornlevels[i], npt, replace=T)
}
}
} else {
factorindexes <- c()
}
icolnames <- if (is.null(colnames(self$X))) {
paste0("X", 1:ncol(self$X))
} else {
colnames(self$X)
}
# browser()
pts <- as.data.frame(pt)
colnames(pts) <- icolnames
pred_pts <- suppressWarnings(self$predict(pt, se.fit=T))
predmean_pts <- suppressWarnings(self$predict(pt, se.fit=T, mean_dist=T))
# browser()
pts$pred <- pred_pts$mean
pts$predse <- pred_pts$se
pts$predmeanse <- predmean_pts$se
pts2 <- as.data.frame(pts)
# pts2 %>%
# mutate(predupper=pred+2*predse,
# predlower=pred-2*predse)
pts2$predupper <- pts2$pred + 2*pts2$predse
pts2$predlower <- pts2$pred - 2*pts2$predse
pts2$predmeanupper <- pts2$pred + 2*pts2$predmeanse
pts2$predmeanlower <- pts2$pred - 2*pts2$predmeanse
if (length(factorinfo) < .5) {
tidyr::pivot_longer(pts2, 1:self$D) %>%
ggplot2::ggplot(ggplot2::aes(value, pred)) +
ggplot2::geom_segment(ggplot2::aes(xend=value,
y=predlower, yend=predupper),
color="green", linewidth=2) +
ggplot2::geom_point() +
ggplot2::facet_wrap(.~name, scales='free_x') +
ggplot2::ylab("Predicted Z (95% interval)") +
ggplot2::xlab(NULL)
} else {
# browser()
# Has at least one factor.
# Convert factor/char back from int
plots <- list()
# ncol <- floor(sqrt(self$D))
# Pick ncol based on plot size/shape and num dims
if (is.null(ncol)) {
ncol <- min(self$D,
max(1,
round(sqrt(self$D)*dev.size()[1]/dev.size()[2])))
}
stopifnot(is.numeric(ncol), length(ncol)==1, ncol>=1, ncol<=self$D)
ylim <- c(min(pts2$predlower), max(pts2$predupper))
# Do each separately since some may be converted to char/factor,
# they can't be in same column as numeric
for (iii in 1:self$D) {
pts2_iii_inds <- c(iii, setdiff(1:ncol(pts2), 1:self$D))
pts2_iii <- pts2[, pts2_iii_inds]
colnames(pts2_iii)[1] <- "xi"
pts2_iii$icolname <- icolnames[iii]
#dplyr::filter(pts2, i==iii)
if (iii %in% factorindexes && !is.null(self$convert_formula_data)) {
for (jjj in seq_along(self$convert_formula_data$factors)) {
if (iii == self$convert_formula_data$factors[[jjj]]$index) {
pts2_iii$xi <-
self$convert_formula_data$factors[[jjj]]$levels[pts2_iii$xi]
}
}
for (jjj in seq_along(self$convert_formula_data$chars)) {
if (iii == self$convert_formula_data$chars[[jjj]]$index) {
pts2_iii$xi <-
self$convert_formula_data$chars[[jjj]]$vals[pts2_iii$xi]
}
}
}
stopifnot(is.data.frame(pts2_iii))
plt <- ggplot2::ggplot(data=pts2_iii,
mapping=ggplot2::aes(xi, pred)) +
ggplot2::facet_wrap(.~icolname, scales = "free_x") +
# ggplot2::geom_line(ggplot2::aes(y=predmeanupper), color="orange") +
# ggplot2::geom_line(ggplot2::aes(y=predmeanlower), color="orange") +
# ggplot2::geom_line(ggplot2::aes(y=predupper), color="green") +
# ggplot2::geom_line(ggplot2::aes(y=predlower), color="green") +
# ggplot2::geom_line(linewidth=1) +
ggplot2::geom_segment(ggplot2::aes(xend=xi,
y=predlower, yend=predupper),
color="green", linewidth=2) +
ggplot2::geom_point() +
ggplot2::ylab(NULL) +
ggplot2::xlab(NULL) +
ggplot2::coord_cartesian(ylim=ylim)
if (iii%%ncol != 1) {
plt <- plt +
ggplot2::theme(axis.title.y=ggplot2::element_blank(),
axis.text.y=ggplot2::element_blank(),
axis.ticks.y=ggplot2::element_blank())
}
plots[[iii]] <- plt
}
gridExtra::grid.arrange(grobs=plots,
left="Predicted Z (95% interval)",
bottom='x along dimension i', ncol=ncol)
}
},
    #' @description Plot the kernel by delegating to the kernel object's
    #' own `plot` method.
    #' @param X Matrix of input points at which to evaluate/plot the
    #' kernel. Defaults to the design matrix `self$X`.
    plotkernel = function(X=self$X) {
      self$kernel$plot(X=X)
    },
#' @description Plot leave one out predictions for design points
# @importFrom ggplot2 ggplot aes stat_smooth geom_abline geom_segment
# @importFrom ggplot2 geom_point geom_text xlab ylab ggtitle
plotLOO = function() {
ploo <- self$pred_LOO(se.fit = T)
loodf <- cbind(ploo, Z=self$Z)
loodf
loodf$upper <- loodf$fit + 1.96 * loodf$se.fit
loodf$lower <- loodf$fit - 1.96 * loodf$se.fit
# Add text with coverage, R-sq
coveragevec <- with(loodf, upper >= Z & lower <= Z)
coverage <- mean(coveragevec)
coverage
rsq <- with(loodf, 1 - (sum((fit-Z)^2)) / (sum((mean(Z)-Z)^2)))
rsq
ggplot2::ggplot(loodf, ggplot2::aes(fit, Z)) +
ggplot2::stat_smooth(method="loess", formula="y~x") +
ggplot2::geom_abline(slope=1, intercept=0, color="red") +
ggplot2::geom_segment(ggplot2::aes(x=lower, xend=upper, yend=Z),
color="green") +
ggplot2::geom_point() +
# geom_text(x=min(loodf$fit), y=max(loodf$Z), label="abc") +
ggplot2::geom_text(x=-Inf, y=Inf,
label=paste("Coverage (95%):", signif(coverage,5)),
hjust=0, vjust=1) +
ggplot2::geom_text(x=-Inf, y=Inf,
label=paste("R-sq: ", signif(rsq,5)),
hjust=0, vjust=2.2) +
# geom_text(x=Inf, y=-Inf, label="def", hjust=1, vjust=0)
ggplot2::xlab("Predicted values (fit)") +
ggplot2::ylab("Actual values (Z)") +
ggplot2::ggtitle("Calibration of leave-one-out (LOO) predictions")
},
#' @description If track_optim, this will plot the parameters
#' in the order they were evaluated.
#' @param minindex Minimum index to plot.
plot_track_optim = function(minindex=NULL) {
if (length(self$track_optim_inputs) < 0.5) {
stop("Can't plot_track_optim if track_optim was FALSE")
}
toi <- as.data.frame(matrix(unlist(self$track_optim_inputs), byrow=T,
ncol=length(self$track_optim_inputs[[1]])))
toi$deviance <- self$track_optim_dev
toi$index <- 1:nrow(toi)
if (!missing(minindex) && !is.null(minindex)) {
stopifnot(is.numeric(minindex) && length(minindex) == 1)
toi <- toi[toi$index >= minindex, ]
}
toi2 <- tidyr::pivot_longer(toi, cols = 1:(ncol(toi)-1))
ggplot2::ggplot(toi2, ggplot2::aes(index, value)) +
ggplot2::geom_line() +
ggplot2::facet_wrap(.~name, scales='free_y')
},
#' @description Calculate loglikelihood of parameters
#' @param mu Mean parameters
#' @param s2 Variance parameter
loglikelihood = function(mu=self$mu_hatX, s2=self$s2_hat) {
# Last two terms are -2*deviance
-self$N/2*log(2*pi) +
-.5*as.numeric(determinant(self$K,logarithm=TRUE)$modulus) +
-.5*c(t(self$Z - self$mu_hatX)%*%self$Kinv%*%(self$Z - self$mu_hatX))
},
#' @description AIC (Akaike information criterion)
AIC = function() {
2 * length(self$param_optim_start(nug.update = self$nug.est, jitter=F)) -
2 * self$loglikelihood()
},
#' @description Get optimization functions
#' @param param_update Should parameters be updated?
#' @param nug.update Should nugget be updated?
get_optim_functions = function(param_update, nug.update) {
# self$kernel$get_optim_functions(param_update=param_update)
# if (nug.update) { # Nug will be last in vector of parameters
# list(
# fn=function(params) {
# l <- length(params)
# self$deviance(params=params[1:(l-1)], nuglog=params[l])
# },
# gr=function(params) {
# l <- length(params)
# self$deviance_grad(params=params[1:(l-1)], nuglog=params[l],
# nug.update=nug.update)
# },
# fngr=function(params) {
# l <- length(params)
# list(
# fn=function(params) {
# self$deviance(params=params[1:(l-1)], nuglog=params[l])
# },
# gr=function(params) {self$deviance_grad(
# params=params[1:(l-1)], nuglog=params[l],
# nug.update=nug.update)
# }
# )
# }
# )
# } else {
# list(
# fn=function(params) {self$deviance(params=params)},
# gr=function(params) {self$deviance_grad(params=params,
# nug.update=nug.update)},
# fngr=function(params) {
# list(
# fn=function(params) {self$deviance(params=params)},
# gr=function(params) {self$deviance_grad(params=params,
# nug.update=nug.update)}
# )
# }
# )
# }
# Need to get trend, kernel, and nug separated out into args
tl <- length(self$trend$param_optim_start())
kl <- length(self$kernel$param_optim_start())
nl <- as.integer(nug.update)
ti <- if (tl>0) {1:tl} else {c()}
ki <- if (kl>0) {tl + 1:kl} else {c()}
ni <- if (nl>0) {tl+kl+1:nl} else {c()}
list(
fn=function(params) {
if (self$track_optim) {
self$track_optim_inputs[[length(self$track_optim_inputs)+1]] <- params
}
tparams <- if (tl>0) {params[ti]} else {NULL}
kparams <- if (kl>0) {params[ki]} else {NULL}
nparams <- if (nl>0) {params[ni]} else {NULL}
dev <- self$deviance(params=kparams, nuglog=nparams,
trend_params=tparams)
if (self$track_optim) {
self$track_optim_dev[[length(self$track_optim_dev)+1]] <- dev
}
dev
},
gr=function(params) {
if (self$track_optim) {
self$track_optim_inputs[[length(self$track_optim_inputs)+1]] <- params
}
tparams <- if (tl>0) {params[ti]} else {NULL}
kparams <- if (kl>0) {params[ki]} else {NULL}
nparams <- if (nl>0) {params[ni]} else {NULL}
dev_grad <- self$deviance_grad(params=kparams, nuglog=nparams,
trend_params=tparams, nug.update=nug.update)
if (self$track_optim) { # Doesn't actually get value
self$track_optim_dev[[length(self$track_optim_dev)+1]] <- NA
}
dev_grad
},
fngr=function(params) {
if (self$track_optim) {
self$track_optim_inputs[[length(self$track_optim_inputs)+1]] <- params
}
tparams <- if (tl>0) {params[ti]} else {NULL}
kparams <- if (kl>0) {params[ki]} else {NULL}
nparams <- if (nl>0) {params[ni]} else {NULL}
dev_fngr <- self$deviance_fngr(params=kparams, nuglog=nparams,
trend_params=tparams, nug.update=nug.update)
if (self$track_optim) {
self$track_optim_dev[[length(self$track_optim_dev)+1]] <- dev_fngr$fn
}
dev_fngr
}
)
},
#' @description Lower bounds of parameters for optimization
#' @param nug.update Is the nugget being updated?
param_optim_lower = function(nug.update) {
if (nug.update) {
# c(self$kernel$param_optim_lower(), log(self$nug.min,10))
nug_lower <- log(self$nug.min, 10)
} else {
# self$kernel$param_optim_lower()
nug_lower <- c()
}
trend_lower <- self$trend$param_optim_lower()
kern_lower <- self$kernel$param_optim_lower()
c(trend_lower, kern_lower, nug_lower)
},
#' @description Upper bounds of parameters for optimization
#' @param nug.update Is the nugget being updated?
param_optim_upper = function(nug.update) {
if (nug.update) {
# c(self$kernel$param_optim_upper(), Inf)
nug_upper <- log(self$nug.max, 10)
} else {
# self$kernel$param_optim_upper()
nug_upper <- c()
}
trend_upper <- self$trend$param_optim_upper()
kern_upper <- self$kernel$param_optim_upper()
c(trend_upper, kern_upper, nug_upper)
},
#' @description Starting point for parameters for optimization
#' @param jitter Should there be a jitter?
#' @param nug.update Is nugget being updated?
param_optim_start = function(nug.update, jitter) {
# param_start <- self$kernel$param_optim_start(jitter=jitter)
if (nug.update) {
nug_start <- log(self$nug,10)
if (jitter) {
nug_start <- nug_start + rexp(1, 1)
nug_start <- min(max(log(self$nug.min,10),
nug_start),
log(self$nug.max,10))
}
# c(param_start, nug_start)
} else {
# param_start
nug_start <- c()
}
trend_start <- self$trend$param_optim_start(jitter=jitter)
kern_start <- self$kernel$param_optim_start(jitter=jitter)
# nug_start <- Inf
c(trend_start, kern_start, nug_start)
},
#' @description Starting point for parameters for optimization
#' @param jitter Should there be a jitter?
#' @param nug.update Is nugget being updated?
param_optim_start0 = function(nug.update, jitter) {
# param_start <- self$kernel$param_optim_start0(jitter=jitter)
if (nug.update) {
nug_start <- -4
if (jitter) {nug_start <- nug_start + rexp(1, 1)}
# Make sure nug_start is in nug range
nug_start <- min(max(log(self$nug.min,10), nug_start),
log(self$nug.max,10))
# c(param_start, nug_start)
} else {
# param_start
nug_start <- c()
}
trend_start <- self$trend$param_optim_start(jitter=jitter)
kern_start <- self$kernel$param_optim_start(jitter=jitter)
# nug_start <- Inf
c(trend_start, kern_start, nug_start)
},
#' @description Get matrix for starting points of optimization
#' @param restarts Number of restarts to use
#' @param nug.update Is nugget being updated?
#' @param l Not used
param_optim_start_mat = function(restarts, nug.update, l) {
s0 <- sample(c(T,F), size=restarts+1, replace=TRUE, prob = c(.33,.67))
s0[1] <- FALSE
sapply(1:(restarts+1), function(i) {
if (s0[i]) {
self$param_optim_start0(nug.update=nug.update, jitter=(i!=1))
} else {
self$param_optim_start(nug.update=nug.update, jitter=(i!=1))
}
})
# mat <- matrix(0, nrow=restarts, ncol=l)
# mat[1,] <- self$param_optim_start0(nug.update=nug.update)
},
#' @description Optimize parameters
#' @param restarts Number of restarts to do
#' @param n0 This many starting parameters are chosen and evaluated.
#' The best ones are used as the starting points for optimization.
#' @param param_update Should parameters be updated?
#' @param nug.update Should nugget be updated?
#' @param parallel Should restarts be done in parallel?
#' @param parallel_cores If running parallel, how many cores should be used?
optim = function (restarts = self$restarts, n0=5*self$D,
param_update = T,
nug.update = self$nug.est, parallel=self$parallel,
parallel_cores=self$parallel_cores) {
# Does parallel
# Joint MLE search with L-BFGS-B, with restarts
#if (param_update & nug.update) {
# optim.func <- function(xx) {self$deviance_log2(joint=xx)}
# grad.func <- function(xx) {self$deviance_log2_grad(joint=xx)}
# optim.fngr <- function(xx) {self$deviance_log2_fngr(joint=xx)}
#} else if (param_update & !nug.update) {
# optim.func <- function(xx) {self$deviance_log2(beta=xx)}
# grad.func <- function(xx) {self$deviance_log2_grad(beta=xx)}
# optim.fngr <- function(xx) {self$deviance_log2_fngr(beta=xx)}
#} else if (!param_update & nug.update) {
# optim.func <- function(xx) {self$deviance_log2(lognug=xx)}
# grad.func <- function(xx) {self$deviance_log2_grad(lognug=xx)}
# optim.fngr <- function(xx) {self$deviance_log2_fngr(lognug=xx)}
#} else {
# stop("Can't optimize over no variables")
#}
optim_functions <- self$get_optim_functions(
param_update=param_update,
nug.update=nug.update)
#optim.func <- self$get_optim_func(param_update=param_update,
# nug.update=nug.update)
# optim.grad <- self$get_optim_grad(param_update=param_update,
# nug.update=nug.update)
# optim.fngr <- self$get_optim_fngr(param_update=param_update,
# nug.update=nug.update)
optim.func <- optim_functions[[1]]
optim.grad <- optim_functions[[2]]
optim.fngr <- optim_functions[[3]]
# # Set starting parameters and bounds
# lower <- c()
# upper <- c()
# start.par <- c()
# start.par0 <- c() # Some default params
# if (param_update) {
# lower <- c(lower, self$param_optim_lower())
# #rep(-5, self$theta_length))
# upper <- c(upper, self$param_optim_upper())
# #rep(7, self$theta_length))
# start.par <- c(start.par, self$param_optim_start())
# #log(self$theta_short, 10))
# start.par0 <- c(start.par0, self$param_optim_start0())
# #rep(0, self$theta_length))
# }
# if (nug.update) {
# lower <- c(lower, log(self$nug.min,10))
# upper <- c(upper, Inf)
# start.par <- c(start.par, log(self$nug,10))
# start.par0 <- c(start.par0, -4)
# }
#
# Changing so all are gotten by self function
lower <- self$param_optim_lower(nug.update=nug.update)
upper <- self$param_optim_upper(nug.update=nug.update)
# start.par <- self$param_optim_start(nug.update=nug.update)
# start.par0 <- self$param_optim_start0(nug.update=nug.update)
#
n0 <- max(n0, restarts+1)
param_optim_start_mat <- self$param_optim_start_mat(restarts=n0-1, #restarts,
nug.update=nug.update,
l=length(lower))
if (!is.matrix(param_optim_start_mat)) {
# Is a vector, should be a matrix with one row since it applies
# over columns
param_optim_start_mat <- matrix(param_optim_start_mat, nrow=1)
}
# Below could just be run when condition is true,
# but it needs devs below anayways.
if (TRUE || n0 > restarts + 1.5) {
# Find best starting points
devs <- rep(NA, ncol(param_optim_start_mat))
for (i in 1:ncol(param_optim_start_mat)) {
try(devs[i] <- optim.func(param_optim_start_mat[,i]), silent=T)
}
# Find best to start with
best_start_inds <- order(order(devs))
param_optim_start_mat <- param_optim_start_mat[
, best_start_inds < restarts+1.5, drop=F]
}
# # This will make sure it at least can start
# # Run before it sets initial parameters
# # try.devlog <- try(devlog <- optim.func(start.par), silent = T)
# try.devlog <- try(devlog <- optim.func(param_optim_start_mat[,1]),
# silent = T)
# if (inherits(try.devlog, "try-error") || is.infinite(devlog)) {
# warning("Current nugget doesn't work, increasing it #31973")
# # This will increase the nugget until cholesky works
# self$update_K_and_estimates()
# # devlog <- optim.func(start.par)
# devlog <- optim.func(param_optim_start_mat[,1])
# }
devlog <- devs[best_start_inds[which.min(best_start_inds)]]
if (is.na(devlog) ||
is.nan(devlog)) {
warning("Current nugget doesn't work, increasing it #752983")
# This will increase the nugget until cholesky works
self$update_K_and_estimates()
# devlog <- optim.func(start.par)
devlog <- optim.func(param_optim_start_mat[,1])
}
# Find best params with optimization, start with current params in
# case all give error
# Current params
#best <- list(par=c(log(self$theta_short, 10), log(self$nug,10)),
# value = devlog)
# best <- list(par=start.par, value = devlog)
best <- list(par=param_optim_start_mat[,1], value = devlog)
if (self$verbose >= 2) {
cat("Optimizing\n");cat("\tInitial values:\n");print(best)
}
#details <- data.frame(start=paste(c(self$theta_short,self$nug),
# collapse=","),end=NA,value=best$value,func_evals=1,
# grad_evals=NA,convergence=NA, message=NA, stringsAsFactors=F)
details <- data.frame(
start=paste(param_optim_start_mat[,1],collapse=","),end=NA,
value=best$value,func_evals=1,grad_evals=NA,convergence=NA,
message=NA, stringsAsFactors=F
)
# runs them in parallel, first starts from current,
# rest are jittered or random
sys_name <- Sys.info()["sysname"]
if (!self$parallel) {
# Not parallel, just use lapply
restarts.out <- lapply(
1:(1+restarts),
function(i){
self$optimRestart(start.par=start.par,
start.par0=start.par0,
param_update=param_update,
nug.update=nug.update,
optim.func=optim.func,
optim.grad=optim.grad,
optim.fngr=optim.fngr,
lower=lower, upper=upper,
jit=(i!=1),
start.par.i=param_optim_start_mat[,i])})
} else if (sys_name == "Windows") {
# Parallel on Windows
# Not much speedup since it has to copy each time.
# Only maybe worth it on big problems.
parallel_cluster <- parallel::makeCluster(
spec = self$parallel_cores, type = "SOCK")
restarts.out <- parallel::clusterApplyLB(
cl=parallel_cluster,
1:(1+restarts),
function(i){
self$optimRestart(start.par=start.par,
start.par0=start.par0,
param_update=param_update,
nug.update=nug.update,
optim.func=optim.func,
optim.grad=optim.grad,
optim.fngr=optim.fngr,
lower=lower, upper=upper,
jit=(i!=1),
start.par.i=param_optim_start_mat[,i])})
parallel::stopCluster(parallel_cluster)
#, mc.cores = parallel_cores)
} else { # Mac/Unix
restarts.out <- parallel::mclapply(
1:(1+restarts),
function(i){
self$optimRestart(start.par=start.par,
start.par0=start.par0,
param_update=param_update,
nug.update=nug.update,
optim.func=optim.func,
optim.grad=optim.grad,
optim.fngr=optim.fngr,
lower=lower,
upper=upper,
jit=(i!=1))},
start.par.i=param_optim_start_mat[,i],
mc.cores = parallel_cores)
}
new.details <- t(sapply(restarts.out,function(dd){dd$deta}))
vals <- sapply(restarts.out,
function(ii){
if (inherits(ii$current,"try-error")){Inf}
else ii$current$val
}
)
bestparallel <- which.min(vals) #which.min(new.details$value)
if(inherits(
try(restarts.out[[bestparallel]]$current$val, silent = T),
"try-error")
) { # need this in case all are restart vals are Inf
message("All restarts had error, keeping initial")
} else if (restarts.out[[bestparallel]]$current$val < best$val) {
best <- restarts.out[[bestparallel]]$current
}
details <- rbind(details, new.details)
if (self$verbose >= 2) {print(details)}
# If new nug is below nug.min, optimize again with fixed nug
# Moved into update_params, since I don't want to set nugget here
if (nug.update) {
best$par[length(best$par)] <- 10 ^ (best$par[length(best$par)])
}
best
},
    #' @description Run a single optimization restart from a given start,
    #' dispatching to the optimizer selected by `self$optimizer`. Any
    #' optimizer failure is captured and recorded rather than propagated.
    #' @param start.par Starting parameters (legacy; not evaluated here)
    #' @param start.par0 Default starting parameters (legacy; not
    #' evaluated here)
    #' @param param_update Should parameters be updated?
    #' @param nug.update Should nugget be updated?
    #' @param optim.func Function to optimize.
    #' @param optim.grad Gradient of function to optimize.
    #' @param optim.fngr Function that returns the function value
    #' and its gradient.
    #' @param lower Lower bounds for optimization
    #' @param upper Upper bounds for optimization
    #' @param jit Is jitter being used?
    #' @param start.par.i Starting parameters for this restart
    optimRestart = function (start.par, start.par0, param_update,
                             nug.update, optim.func, optim.grad,
                             optim.fngr, lower, upper, jit=T,
                             start.par.i) {
      # (Older logic that chose/jittered the start in this function was
      # removed; the caller now supplies start.par.i directly.)
      if (self$verbose >= 2) {
        cat("\tRestart (parallel): starts pars =",start.par.i,"\n")
      }
      # Run one optimizer from start.par.i inside try() so a failure is
      # reported in the details data.frame instead of stopping the fit.
      current <- try(
        {
          if (self$useGrad) {
            if (is.null(optim.fngr)) {
              # Gradient available but no combined fn/gr closure.
              lbfgs::lbfgs(optim.func, optim.grad, start.par.i, invisible=1)
            } else {
              # Combined fn+gr available: dispatch on self$optimizer.
              if (self$optimizer == "L-BFGS-B") {
                # optim's L-BFGS-B supports box constraints (lower/upper).
                optim_share2(fngr=optim.fngr, par=start.par.i,
                             method='L-BFGS-B', upper=upper, lower=lower)
              } else if (self$optimizer == "lbfgs") {
                # lbfgs does not support bounds, so it is not the default.
                lbfgs_share(optim.fngr, start.par.i, invisible=1)
              } else if (self$optimizer == "genoud") {
                # Genetic optimizer; capture.output suppresses its
                # console printing.
                capture.output(suppressWarnings({
                  tmp <- rgenoud::genoud(fn=optim.func,
                                         nvars=length(start.par.i),
                                         starting.values=start.par.i,
                                         Domains=cbind(lower, upper),
                                         gr=optim.grad,
                                         boundary.enforcement = 2,
                                         pop.size=1e2, max.generations=10)
                }))
                tmp
              } else{
                stop("Optimizer not recognized")
              }
            }
          } else {
            # No analytic gradient: L-BFGS-B with numerical derivatives.
            optim(start.par.i, optim.func, method="L-BFGS-B",
                  lower=lower, upper=upper, hessian=F)
          }
        }, silent=TRUE
      )
      if (!inherits(current, "try-error")) {
        # Normalize fields that some optimizers don't return.
        if (is.null(current$counts)) {current$counts <- c(NA,NA)}
        if(is.null(current$message)) {current$message=NA}
        # One summary row for this restart.
        details.new <- data.frame(
          start=paste(signif(start.par.i,3),collapse=","),
          end=paste(signif(current$par,3),collapse=","),
          value=current$value,func_evals=current$counts[1],
          grad_evals=current$counts[2],
          convergence=if (is.null(current$convergence)) {NA}
          else {current$convergence},
          message=current$message, row.names = NULL, stringsAsFactors=F
        )
      } else{
        # Optimizer failed: record the error message as this row.
        details.new <- data.frame(
          start=paste(signif(start.par.i,3),collapse=","),
          end="try-error",value=NA,func_evals=NA,grad_evals=NA,
          convergence=NA, message=current[1], stringsAsFactors=F)
      }
      list(current=current, details=details.new)
    },
#' @description Update the model. Should only give in
#' (Xnew and Znew) or (Xall and Zall).
#' @param Xnew New X values to add.
#' @param Znew New Z values to add.
#' @param Xall All X values to be used. Will replace existing X.
#' @param Zall All Z values to be used. Will replace existing Z.
#' @param nug.update Is the nugget being updated?
#' @param restarts Number of optimization restarts.
#' @param param_update Are the parameters being updated?
#' @param no_update Are no parameters being updated?
update = function (Xnew=NULL, Znew=NULL, Xall=NULL, Zall=NULL,
restarts = self$restarts,
param_update = self$param.est,
nug.update = self$nug.est, no_update=FALSE) {
# Doesn't update Kinv, etc
self$update_data(Xnew=Xnew, Znew=Znew, Xall=Xall, Zall=Zall)
if (!no_update && (param_update || nug.update)) {
# This option lets it skip parameter optimization entirely
self$update_params(restarts=restarts,
param_update=param_update,
nug.update=nug.update)
}
self$update_K_and_estimates()
invisible(self)
},
    #' @description Fast update when adding new data: updates X/Z, K,
    #' and Kinv without re-optimizing parameters, using the block-matrix
    #' inverse formula. Should be O(n^2) instead of O(n^3), though in
    #' practice not much faster.
    #' @param Xnew New X values to add (must be a matrix).
    #' @param Znew New Z values to add.
    update_fast = function (Xnew=NULL, Znew=NULL) {
      stopifnot(is.matrix(Xnew))
      N1 <- nrow(self$X)
      N2 <- nrow(Xnew)
      inds2 <- (N1+1):(N1+N2) # indices for new col/row, shorter than inds1
      # New diagonal block: kernel at new points plus nugget.
      K2 <- self$kernel$k(Xnew) + diag(self$kernel$s2 * self$nug, N2)
      K12 <- self$kernel$k(self$X, Xnew) # Need this before update_data
      self$update_data(Xnew=Xnew, Znew=Znew) # Doesn't update Kinv, etc
      # Assemble the enlarged covariance matrix [K K12; K12' K2].
      K3 <- matrix(0, nrow=self$N, ncol=self$N)
      K3[-inds2, -inds2] <- self$K
      K3[-inds2, inds2] <- K12
      K3[inds2, -inds2] <- t(K12)
      K3[inds2, inds2] <- K2
      self$K <- K3
      # Update the inverse using the block inverse formula, with
      # E/F1/G the blocks of the enlarged inverse.
      K1inv_K12 <- self$Kinv %*% K12
      G <- solve(K2 - t(K12) %*% K1inv_K12) # inverse of Schur complement
      F1 <- -K1inv_K12 %*% G
      E <- self$Kinv - K1inv_K12 %*% t(F1)
      K3inv <- matrix(0, nrow=self$N, ncol=self$N)
      K3inv[-inds2, -inds2] <- E
      K3inv[-inds2, inds2] <- F1
      K3inv[inds2, -inds2] <- t(F1)
      K3inv[inds2, inds2] <- G
      self$Kinv <- K3inv
      # Append trend values at the new points instead of recomputing all.
      self$mu_hatX <- rbind(self$mu_hatX,self$trend$Z(X=Xnew))
      invisible(self)
    },
#' @description Update the parameters.
#' @param nug.update Is the nugget being updated?
#' @param ... Passed to optim.
update_params = function(..., nug.update) {
# Find lengths of params to optimize for each part
tl <- length(self$trend$param_optim_start())
kl <- length(self$kernel$param_optim_start())
nl <- as.integer(nug.update)
ti <- if (tl>0) {1:tl} else {c()}
ki <- if (kl>0) {tl + 1:kl} else {c()}
ni <- if (nl>0) {tl+kl+1:nl} else {c()}
# If no params to optim, just return
if (tl+kl+nl == 0) {return()}
# Run optimization
optim_out <- self$optim(..., nug.update=nug.update)
# Split output into parts
if (nug.update) {
# self$nug <- optim_out$par[lpar] # optim already does 10^
# self$kernel$set_params_from_optim(optim_out$par[1:(lpar-1)])
self$nug <- optim_out$par[ni] # optim already does 10^
# Give message if it's at a boundary
if (self$nug <= self$nug.min && self$verbose>=0) {
message(paste0("nug is at minimum value after optimizing. ",
"Check the fit to see it this caused a bad fit. ",
"Consider changing nug.min. ",
"This is probably fine for noiseless data."))
}
if (self$nug >= self$nug.max && self$verbose>=0) {
message(paste0("nug is at maximum value after optimizing. ",
"Check the fit to see it this caused a bad fit. ",
"Consider changing nug.max or checking for ",
"other problems with the data/model."))
}
} else {
# self$kernel$set_params_from_optim(optim_out$par)
}
self$kernel$set_params_from_optim(optim_out$par[ki])
self$trend$set_params_from_optim(optim_out$par[ti])
},
    #' @description Update the stored data. Should only give in
    #' (Xnew and Znew) or (Xall and Zall). Does NOT refresh K/Kinv or
    #' derived estimates; callers are responsible for that.
    #' @param Xnew New X values to add (rows appended to current X).
    #' @param Znew New Z values to add (appended to current Z).
    #' @param Xall All X values to be used. Will replace existing X.
    #' @param Zall All Z values to be used. Will replace existing Z.
    update_data = function(Xnew=NULL, Znew=NULL, Xall=NULL, Zall=NULL) {
      if (!is.null(Xall)) {
        self$X <- if (is.matrix(Xall)) {
          Xall
        } else if (is.data.frame(Xall)) {
          stop("Xall in update_data must be numeric, not data frame.")
        } else if (is.numeric(Xall)) {
          # NOTE(review): a numeric vector becomes a single row (one
          # input point), matching the Xnew branch below -- confirm this
          # is intended for 1-D models with multiple points.
          matrix(Xall,nrow=1)
        } else {
          stop("Bad Xall in update_data")
        }
        self$N <- nrow(self$X)
      } else if (!is.null(Xnew)) {
        Xnewformatted <- if (is.matrix(Xnew)) {
          Xnew
        } else if (is.data.frame(Xnew)) {
          stop("Xnew in update_data must be numeric, not data frame.")
        } else if (is.numeric(Xnew)) {
          # A numeric vector is treated as a single new input point.
          matrix(Xnew,nrow=1)
        } else {
          stop("Bad Xnew in update_data")
        }
        self$X <- rbind(self$X,
                        Xnewformatted)
        self$N <- nrow(self$X)
      }
      if (!is.null(Zall)) {
        self$Z <- if (is.matrix(Zall)) Zall else matrix(Zall,ncol=1)
        if (self$normalize) {
          # Replacing all of Z: recompute the normalization constants.
          self$normalize_mean <- mean(self$Z)
          self$normalize_sd <- sd(self$Z)
          self$Z <- (self$Z - self$normalize_mean) / self$normalize_sd
        }
      } else if (!is.null(Znew)) {
        Znewmat <- if (is.matrix(Znew)) Znew else matrix(Znew,ncol=1)
        if (self$normalize) {
          # Appending: apply the EXISTING normalization so new values
          # are on the same scale as the stored Z.
          Znewmat <- (Znewmat - self$normalize_mean) / self$normalize_sd
        }
        self$Z <- rbind(self$Z, Znewmat)
      }
      # K/Kinv are intentionally not updated here.
    },
#' @description Update correlation parameters. Not the nugget.
#' @param ... Passed to self$update()
update_corrparams = function (...) {
self$update(nug.update = F, ...=...)
},
#' @description Update nugget Not the correlation parameters.
#' @param ... Passed to self$update()
update_nugget = function (...) {
self$update(param_update = F, ...=...)
},
# deviance_searchnug = function() {
# optim(self$nug, function(nnug) {self$deviance(nug=nnug)},
# method="L-BFGS-B", lower=0, upper=Inf, hessian=F)$par
# },
# nugget_update = function () {
# nug <- self$deviance_searchnug()
# self$nug <- nug
# self$update_K_and_estimates()
# },
    #' @description Calculate the deviance (proportional to the negative
    #' log-likelihood with constants dropped):
    #' log|K| + (Z - Zhat)' Kinv (Z - Zhat).
    #' Returns Inf on any numerical failure so the optimizer treats the
    #' point as bad rather than crashing.
    #' @param params Kernel parameters
    #' @param nug Nugget
    #' @param nuglog Log10 of nugget. Only give in nug or nuglog.
    #' @param trend_params Parameters for the trend.
    deviance = function(params=NULL, nug=self$nug, nuglog, trend_params=NULL) {
      # nuglog (log10 scale) takes precedence over nug when supplied.
      if (!missing(nuglog) && !is.null(nuglog)) {
        nug <- 10^nuglog
      }
      # NaN parameters (e.g. from a failed optimizer step) -> Inf.
      if (any(is.nan(params), is.nan(nug))) {
        if (self$verbose >= 2) {
          print("In deviance, returning Inf #92387")
        }
        return(Inf)
      }
      # Covariance at the design points plus nugget on the diagonal.
      K <- self$kernel$k(x=self$X, params=params) +
        diag(nug, self$N) * self$kernel$s2_from_params(params=params)
      Z_hat <- self$trend$Z(X=self$X, params=trend_params)
      dev.try <- try(
        # determinant(logarithm=TRUE) avoids overflow of det(K).
        dev <- (as.numeric(determinant(K,logarithm=TRUE)$modulus) +
                  sum((self$Z - Z_hat) * solve(K, self$Z - Z_hat)))
      )
      if (inherits(dev.try, "try-error")) {
        if (self$verbose>=2) {
          print("Deviance error #87126, returning Inf")
        }
        return(Inf)
      }
      # Guard against +/-Inf results (e.g. singular K).
      if (is.infinite(abs(dev))) {
        if (self$verbose>=2) {
          print("Deviance infinite #2332, returning Inf")
        }
        return(Inf)
      }
      dev
    },
    #' @description Calculate the gradient of the deviance with respect
    #' to the trend parameters, kernel parameters, and (optionally) the
    #' log10 nugget, concatenated in that order.
    #' @param params Kernel parameters
    #' @param kernel_update Is the kernel being updated? If yes,
    #' its parameters' partials are included in the gradient.
    #' @param X Input matrix
    #' @param nug Nugget
    #' @param nug.update Is the nugget being updated? If yes,
    #' its partial is appended to the gradient.
    #' @param nuglog Log10 of the nugget.
    #' @param trend_params Trend parameters
    #' @param trend_update Is the trend being updated? If yes,
    #' its parameters' partials lead the gradient.
    deviance_grad = function(params=NULL, kernel_update=TRUE,
                             X=self$X,
                             nug=self$nug, nug.update, nuglog,
                             trend_params=NULL, trend_update=TRUE) {
      # nuglog (log10 scale) takes precedence over nug when supplied.
      if (!missing(nuglog) && !is.null(nuglog)) {
        nug <- 10^nuglog
      }
      # NaN inputs: return a NaN gradient of the expected length.
      if (any(is.nan(params), is.nan(nug))) {
        if (self$verbose>=2) {
          print("In deviance_grad, returning NaN #92387")
        };
        return(rep(NaN, length(params)+as.integer(isTRUE(nug.update))))
      }
      # Covariance without and with the nugget term.
      C_nonug <- self$kernel$k(x=X, params=params)
      s2_from_kernel <- self$kernel$s2_from_params(params=params)
      C <- C_nonug + s2_from_kernel * diag(nug, self$N)
      # Partial derivatives of C w.r.t. each kernel parameter.
      dC_dparams_out <- self$kernel$dC_dparams(params=params, X=X, C=C,
                                               C_nonug=C_nonug, nug=nug)
      dC_dparams <- dC_dparams_out#[[1]]
      Z_hat <- self$trend$Z(X=X, params=trend_params)
      dZ_dparams <- self$trend$dZ_dparams(X=X, params=trend_params)
      # Residual relative to the trend.
      yminusmu <- self$Z - Z_hat
      solve.try <- try(Cinv_yminusmu <- solve(C, yminusmu))
      if (inherits(solve.try, "try-error")) {
        if (self$verbose>=2) {
          print("Deviance grad error #63466, returning Inf")
        }
        # NOTE(review): this returns a scalar Inf rather than a vector
        # of the gradient's length -- confirm callers tolerate this.
        return(Inf)
      }
      out <- c()
      # Trend part: d/db of (Z-mu)'Cinv(Z-mu) = -2*(Z-mu)'Cinv dmu/db.
      if (length(dZ_dparams) > 0 && trend_update) {
        trend_gradfunc <- function(di) {
          -2 * t(yminusmu) %*% solve(C, di) # Siginv %*% du/db
        }
        trend_out <- apply(dZ_dparams, 2, trend_gradfunc)
        out <- trend_out
      } else {
        # No trend params to differentiate; out stays empty.
      }
      # Kernel/nugget part: tr(Cinv dC) - (Z-mu)'Cinv dC Cinv (Z-mu).
      gradfunc <- function(di) {
        t1 <- sum(diag(solve(C, di)))
        t2 <- sum(Cinv_yminusmu * (di %*% Cinv_yminusmu))
        t1 - t2
      }
      if (kernel_update) {
        kernel_out <- apply(dC_dparams, 1, gradfunc)
        out <- c(out, kernel_out)
      }
      if (nug.update) {
        # Chain rule for the log10 nugget: dC/d(log10 nug)
        # = s2 * nug * log(10) * I.
        nug_out <- gradfunc(diag(s2_from_kernel*nug*log(10), nrow(C)))
        out <- c(out, nug_out)
      }
      out
    },
    #' @description Calculate the deviance along with its gradient.
    #' Returns a list with elements \code{fn} (the deviance) and
    #' \code{gr} (its gradient), suitable for joint fn/gr optimizers.
    #' @param params Kernel parameters
    #' @param kernel_update Is the kernel being updated? If yes,
    #' it's part of the gradient.
    #' @param X Input matrix
    #' @param nug Nugget
    #' @param nug.update Is the nugget being updated? If yes,
    #' it's part of the gradient.
    #' @param nuglog Log of the nugget.
    #' @param trend_params Trend parameters
    #' @param trend_update Is the trend being updated? If yes,
    #' it's part of the gradient.
    deviance_fngr = function(params=NULL, kernel_update=TRUE,
                             X=self$X,
                             nug=self$nug, nug.update, nuglog,
                             trend_params=NULL, trend_update=TRUE) {
      if (self$verbose >= 20) {cat('in deviance_fngr', '\n')}
      # The nugget may be passed on the log10 scale; convert if so.
      if (!missing(nuglog) && !is.null(nuglog)) {
        nug <- 10^nuglog
      }
      # Bad parameter values: return NaN of the right length so the
      # optimizer can recover instead of crashing.
      if (any(is.nan(params), is.nan(nug))) {
        if (self$verbose>=2) {
          print("In deviance_grad, returning NaN #92387")
        };
        return(rep(NaN, length(params)+as.integer(isTRUE(nug.update))))
      }
      # C_nonug <- self$kernel$k(x=X, params=params)
      # C <- C_nonug + s2_from_kernel * diag(nug, self$N)
      # s2_from_kernel <- self$kernel$s2_from_params(params=params)
      # Get covariance matrix and its parameter derivatives in a single
      # kernel call; on failure fall back to separate fn/grad calls.
      C_dC_try <- try(
        C_dC_dparams_out <- self$kernel$C_dC_dparams(params=params,
                                                     X=X, nug=nug),
        #C=C, C_nonug=C_nonug)
        silent = TRUE
      )
      if (inherits(C_dC_try, 'try-error')) {
        return(list(fn=self$deviance(params=params, nug=nug),
                    gr=self$deviance_grad(params=params, X=X, nug=nug,
                                          nug.update=nug.update)))
      }
      if (length(C_dC_dparams_out) < 2) {stop("Error #532987")}
      C <- C_dC_dparams_out[[1]]
      # First of list should be list of dC_dparams
      dC_dparams <- C_dC_dparams_out[[2]]
      # Second should be s2 for nugget deriv
      # s2_from_kernel <- dC_dparams_out[[2]]
      # Trend mean and its parameter derivatives at the design points.
      Z_hat <- self$trend$Z(X=X, params=trend_params)
      dZ_dparams <- self$trend$dZ_dparams(X=X, params=trend_params)
      # yminusmu <- self$Z - self$mu_hat
      yminusmu <- self$Z - Z_hat
      s2_from_kernel <- self$kernel$s2_from_params(params=params)
      # I tried not doing chol2inv, but it's faster inside of gradfunc, so keep it
      # if (calc_inv_from_chol) {
      Cinv <- chol2inv(chol(C))
      # solve.try <- try(Cinv_yminusmu <- solve(C, yminusmu))
      solve.try <- try(Cinv_yminusmu <- Cinv %*% yminusmu)
      # } else {
      #   chol_C <- chol(C)
      #   solve.try <- try(Cinv_yminusmu <- solvewithchol(chol_C, yminusmu))
      # }
      if (inherits(solve.try, "try-error")) {
        if (self$verbose>=2) {
          print("Deviance grad error #63466, returning Inf")
        }
        return(Inf)
      }
      # Build the gradient in order: trend params, kernel params, nugget.
      gr <- c()
      if (length(dZ_dparams) > 0 && trend_update) {
        trend_gradfunc <- function(di) {
          # -2 * t(yminusmu) %*% solve(C, di) # Siginv %*% du/db
          # if (calc_inv_from_chol) {
          -2 * t(yminusmu) %*% (Cinv %*% di) # Siginv %*% du/db
          # } else {
          #   -2 * t(yminusmu) %*% solvewithchol(chol_C, di)
          # }
        }
        trend_gr <- apply(dZ_dparams, 2, trend_gradfunc)
        gr <- trend_gr
      } else {
        # trend_out <- c()
      }
      # Gradient of the deviance w.r.t. one covariance derivative slice:
      # tr(Cinv %*% di) - t(r) %*% Cinv %*% di %*% Cinv %*% r.
      gradfunc <- function(di) {
        # t1 <- sum(diag(solve(C, di))) # Waste to keep resolving
        # t1 <- sum(diag((Cinv %*% di))) # Don't need whole mat mul
        # This is the main reason to calculate Cinv. Getting this trace this way
        # is way faster than having to repeatedly do solves with chol_C and then
        # taking the trace.
        t1 <- sum(Cinv * t(di))
        t2 <- sum(Cinv_yminusmu * (di %*% Cinv_yminusmu))
        t1 - t2
      }
      # out <- c(sapply(dC_dparams[[1]],gradfunc), gradfunc(dC_dparams[[2]]))
      # gr <- sapply(dC_dparams,gradfunc)
      if (kernel_update) {
        # Using apply() is 5x faster than Cpp code I wrote to do same thing
        # Speed up by saving Cinv above to reduce number of solves
        # kernel_gr <- apply(dC_dparams, 1, gradfunc) # 6x faster below
        if (self$useC) {
          kernel_gr <- gradfuncarray(dC_dparams, Cinv, Cinv_yminusmu)
        } else {
          # Changing to R code so it works on my laptop
          kernel_gr <- gradfuncarrayR(dC_dparams, Cinv, Cinv_yminusmu)
        }
        gr <- c(gr, kernel_gr)
      }
      if (nug.update) {
        # Chain-rule factor nug*log(10): the nugget is optimized on the
        # log10 scale.
        gr <- c(gr, gradfunc(diag(s2_from_kernel*nug*log(10), nrow(C))))
        # out <- c(out, gradfunc(diag(s2_from_kernel*, nrow(C)))*nug*log(10))
      }
      # Calculate fn
      logdetC <- as.numeric(determinant(C,logarithm=TRUE)$modulus) #log(det(C))
      if (is.nan(logdetC)) {
        dev <- Inf #return(Inf)
      } else {
        # dev.try <- try(dev <- logdetC + sum((yminusmu) * solve(C, yminusmu)))
        dev.try <- try(dev <- logdetC + sum((yminusmu) * Cinv_yminusmu))
        if (inherits(dev.try, "try-error")) {
          if (self$verbose>=2) {
            print("Deviance error #87126, returning Inf")
          }
          dev <- Inf #return(Inf)
        }
        # print(c(params, nuglog, dev))
        if (is.infinite(abs(dev))) {
          if (self$verbose>=2) {
            # print("Deviance infinite #2333, returning Inf")
            message(paste0("Deviance infinite #2333, returning 1e100, ",
                           "this is a hack and gives noticeable worse ",
                           "results on this restart."))
          }
          dev <- 1e100 # .Machine$double.xmax # Inf
        }
      }
      # dev
      # print(c(params, nuglog, out))
      out <- list(fn=dev, gr=gr)
      # cat('finished fngr, dev=', dev, ' par was', params, '\n')
      out
    },
#' @description Calculate gradient
#' @param XX points to calculate at
#' @param X X points
#' @param Z output points
grad = function(XX, X=self$X, Z=self$Z) {
if (!is.matrix(XX)) {
if (length(XX) == self$D) {
XX <- matrix(XX, nrow=1)
} else {
stop("XX must have length D or be matrix with D columns")
}
}
dtrend_dx <- self$trend$dZ_dx(X=XX)
dC_dx <- self$kernel$dC_dx(XX=XX, X=X)
trendX <- self$trend$Z(X=X)
# Cinv_Z_minus_Zhat <- solve(self$K, Z - trendX)
# Speed up since already have Kinv
Cinv_Z_minus_Zhat <- self$Kinv %*% (Z - trendX)
# Faster multiplication with arma_mult_cube_vec by 10x for big
# and .5x for small
# t2 <- apply(dC_dx, 1, function(U) {U %*% Cinv_Z_minus_Zhat})
t2 <- arma_mult_cube_vec(dC_dx, Cinv_Z_minus_Zhat)
# if (ncol(dtrend_dx) > 1) {
dtrend_dx + t(t2)
# } else { # 1D needed transpose, not anymore
# dtrend_dx + t2 # No longer needed with arma_mult_cube_vec
# }
# dtrend_dx + dC_dx %*% solve(self$K, Z - trendX)
},
#' @description Calculate norm of gradient
#' @param XX points to calculate at
grad_norm = function (XX) {
grad1 <- self$grad(XX)
if (!is.matrix(grad1)) return(abs(grad1))
apply(grad1,1, function(xx) {sqrt(sum(xx^2))})
},
    #' @description Calculate distribution of gradient
    #' @param XX points to calculate at
    #' @return List with \code{mean} (n x D matrix) and \code{cov}
    #' (n x D x D array) for the gradient at each row of \code{XX}.
    grad_dist = function(XX) {
      # Calculates distribution of gradient at rows of XX
      if (!is.matrix(XX)) {
        if (self$D == 1) XX <- matrix(XX, ncol=1)
        else if (length(XX) == self$D) XX <- matrix(XX, nrow=1)
        else stop('grad_dist input should be matrix')
      } else {
        if (ncol(XX) != self$D) {stop("Wrong dimension input")}
      }
      nn <- nrow(XX)
      # Get mean from self$grad
      mn <- self$grad(XX=XX)
      # Calculate covariance here
      cv <- array(data = NA_real_, dim = c(nn, self$D, self$D))
      # New way calculates c1 and c2 outside loop
      c2 <- self$kernel$d2C_dudv_ueqvrows(XX=XX)
      c1 <- self$kernel$dC_dx(XX=XX, X=self$X)
      for (i in 1:nn) {
        # c2 <- self$kernel$d2C_dudv(XX=XX[i,,drop=F], X=XX[i,,drop=F])
        # c1 <- self$kernel$dC_dx(XX=XX[i,,drop=F], X=self$X)
        tc1i <- c1[i,,] # 1D gives problem, only need transpose if D>1
        if (!is.null(dim(tc1i))) {tc1i <- t(tc1i)}
        # Posterior covariance of gradient: prior second derivative
        # minus the part explained by the observed data.
        cv[i, , ] <- c2[i,,] - c1[i,,] %*% (self$Kinv %*% tc1i)
      }
      list(mean=mn, cov=cv)
    },
#' @description Sample gradient at points
#' @param XX points to calculate at
#' @param n Number of samples
grad_sample = function(XX, n) {
if (!is.matrix(XX)) {
if (length(XX) == self$D) {XX <- matrix(XX, nrow=1)}
else {stop("Wrong dimensions #12574")}
}
# if (nrow(XX) > 1) {return(apply(XX, 1, self$grad_sample))}
if (nrow(XX) > 1) {stop("Only can do 1 grad sample at a time")}
grad_dist <- self$grad_dist(XX=XX)
grad_samp <- MASS::mvrnorm(n=n, mu = grad_dist$mean[1,],
Sigma = grad_dist$cov[1,,])
grad_samp
# gs2 <- apply(gs, 1, . %>% sum((.)^2))
# c(mean(1/gs2), var(1/gs2))
},
#' @description Calculate mean of gradient norm squared
#' @param XX points to calculate at
grad_norm2_mean = function(XX) {
# Calculate mean of squared norm of gradient
# XX is matrix of points to calculate it at
# Twice as fast as use self$grad_norm2_dist(XX)$mean
grad_dist <- self$grad_dist(XX=XX)
sum_grad_dist_mean_2 <- rowSums(grad_dist$mean^2)
# Use sapply to get trace of cov matrix, return sum
sum_grad_dist_mean_2 + sapply(1:nrow(XX), function(i) {
if (ncol(XX)==1 ) {
grad_dist$cov[i,,]
} else {
sum(diag(grad_dist$cov[i,,]))
}
})
},
    #' @description Calculate distribution of gradient norm squared
    #' @param XX points to calculate at
    #' @return Data frame with columns \code{mean} and \code{var}, one
    #' row per row of \code{XX}.
    grad_norm2_dist = function(XX) {
      # Calculate mean and var for squared norm of gradient
      # grad_dist <- gp$grad_dist(XX=XX) # Too slow because it does all
      d <- ncol(XX)
      nn <- nrow(XX)
      means <- numeric(nn)
      vars <- numeric(nn)
      for (i in 1:nn) {
        grad_dist_i <- self$grad_dist(XX=XX[i, , drop=FALSE])
        mean_i <- grad_dist_i$mean[1,]
        Sigma_i <- grad_dist_i$cov[1,,]
        # Don't need to invert it, just solve with SigmaRoot
        # SigmaInv_i <- solve(Sigma_i)
        # Using my own sqrt function since it is faster.
        # # SigmaInvRoot_i <- expm::sqrtm(SigmaInv_i)
        # SigmaInvRoot_i <- sqrt_matrix(mat=SigmaInv_i, symmetric = TRUE)
        SigmaRoot_i <- sqrt_matrix(mat=Sigma_i, symmetric=TRUE)
        # Diagonalize Sigma so ||g||^2 becomes a weighted sum of
        # independent noncentral chi-square terms.
        eigen_i <- eigen(Sigma_i)
        P_i <- t(eigen_i$vectors)
        lambda_i <- eigen_i$values
        # testthat::expect_equal(t(P) %*% diag(eth$values) %*% (P), Sigma)
        # # Should be equal
        # b_i <- P_i %*% SigmaInvRoot_i %*% mean_i
        b_i <- P_i %*% solve(SigmaRoot_i, mean_i)
        # Mean/variance of the weighted noncentral chi-square sum.
        g2mean_i <- sum(lambda_i * (b_i^2 + 1))
        g2var_i <- sum(lambda_i^2 * (4*b_i^2+2))
        means[i] <- g2mean_i
        vars[i] <- g2var_i
      }
      data.frame(mean=means, var=vars)
    },
    #' @description Get samples of squared norm of gradient
    #' @param XX points to sample at
    #' @param n Number of samples
    #' @return Matrix with one row per row of \code{XX} and \code{n}
    #' columns of samples.
    grad_norm2_sample = function(XX, n) {
      # Get samples of squared norm of gradient, check with grad_norm2_dist
      d <- ncol(XX)
      nn <- nrow(XX)
      out_sample <- matrix(NA_real_, nn, n)
      for (i in 1:nn) {
        grad_dist_i <- self$grad_dist(XX=XX[i, , drop=FALSE])
        mean_i <- grad_dist_i$mean[1,]
        Sigma_i <- grad_dist_i$cov[1,,]
        SigmaInv_i <- solve(Sigma_i)
        # Using my own sqrt function since it is faster.
        # SigmaInvRoot_i <- expm::sqrtm(SigmaInv_i)
        SigmaInvRoot_i <- sqrt_matrix(mat=SigmaInv_i, symmetric = TRUE)
        # Diagonalize so each sample is a weighted sum of shifted
        # squared standard normals.
        eigen_i <- eigen(Sigma_i)
        P_i <- t(eigen_i$vectors)
        lambda_i <- eigen_i$values
        # testthat::expect_equal(t(P) %*% diag(eth$values) %*%
        #        (P), Sigma) # Should be equal
        b_i <- c(P_i %*% SigmaInvRoot_i %*% mean_i)
        out_sample[i, ] <- replicate(n,
                                     sum(lambda_i * (rnorm(d) + b_i) ^ 2))
      }
      out_sample
    },
#grad_num = function (XX) { # NUMERICAL GRAD IS OVER 10 TIMES SLOWER
# if (!is.matrix(XX)) {
# if (self$D == 1) XX <- matrix(XX, ncol=1)
# else if (length(XX) == self$D) XX <- matrix(XX, nrow=1)
# else stop('Predict input should be matrix')
# } else {
# if (ncol(XX) != self$D) {stop("Wrong dimension input")}
# }
# grad.func <- function(xx) self$pred(xx)$mean
# grad.apply.func <- function(xx) numDeriv::grad(grad.func, xx)
# grad1 <- apply(XX, 1, grad.apply.func)
# if (self$D == 1) return(grad1)
# t(grad1)
#},
#grad_num_norm = function (XX) {
# grad1 <- self$grad_num(XX)
# if (!is.matrix(grad1)) return(abs(grad1))
# apply(grad1,1, function(xx) {sqrt(sum(xx^2))})
#},
    #' @description Calculate Hessian of the predicted mean
    #' @param XX Points to calculate Hessian at
    #' @param as_array Should result be an array?
    #' @return For a single input point (and \code{as_array=FALSE}) a
    #' D x D matrix; otherwise an n x D x D array.
    hessian = function(XX, as_array=FALSE) {
      if (!is.matrix(XX)) {
        if (self$D == 1) XX <- matrix(XX, ncol=1)
        else if (length(XX) == self$D) XX <- matrix(XX, nrow=1)
        else stop('Predict input should be matrix')
      } else {
        if (ncol(XX) != self$D) {stop("Wrong dimension input")}
      }
      hess1 <- array(NA_real_, dim = c(nrow(XX), self$D, self$D))
      for (i in 1:nrow(XX)) { # 0 bc assume trend has zero hessian
        # Second derivative of the cross-covariance w.r.t. the
        # prediction point, one row at a time.
        d2 <- self$kernel$d2C_dx2(XX=XX[i,,drop=F], X=self$X)
        for (j in 1:self$D) {
          hess1[i, j, ] <- 0 + d2[1,j,,] %*% self$Kinv %*%
            (self$Z - self$mu_hatX)
        }
      }
      if (nrow(XX) == 1 && !as_array) { # Return matrix if only one value
        hess1[1,,]
      } else {
        hess1
      }
    },
#' @description Calculate gradient of the predictive variance
#' @param XX points to calculate at
gradpredvar = function(XX) {
if (!is.matrix(XX)) {
if (length(XX) == self$D) {
XX <- matrix(XX, nrow=1)
} else {
stop("XX must have length D or be matrix with D columns")
}
}
KX.XX <- self$kernel$k(self$X, XX)
dKX.XX <- self$kernel$dC_dx(X=self$X,XX=XX)
# -2 * dK %*% self$Kinv %*% KX.XX
t2 <- self$Kinv %*% KX.XX
ds2 <- matrix(NA, nrow(XX), ncol(XX))
for (i in 1:nrow(XX)) {
ds2[i, ] <- (-2 * dKX.XX[i, , ] %*% t2[, i])[,1]
}
ds2
},
#' @description Sample at rows of XX
#' @param XX Input matrix
#' @param n Number of samples
sample = function(XX, n=1) {
# Generates n samples at rows of XX
px <- self$pred(XX, covmat = T)
Sigma.try <- try(newy <- MASS::mvrnorm(n=n, mu=px$mean, Sigma=px$cov))
if (inherits(Sigma.try, "try-error")) {
message("Adding nugget to get sample")
Sigma.try2 <- try(newy <- MASS::mvrnorm(n=n, mu=px$mean,
Sigma=px$cov +
diag(self$nug, nrow(px$cov))))
if (inherits(Sigma.try2, "try-error")) {
stop("Can't do sample, can't factor Sigma")
}
}
newy # Not transposing matrix since it gives var a problem
},
    #' @description Optimize any function of the GP prediction over the
    #' valid input space.
    #' If there are inputs that should only be optimized over a discrete set
    #' of values, specify `mopar` for all parameters.
    #' Factor inputs will be handled automatically.
    #' @param fn Function to optimize
    #' @param lower Lower bounds to search within
    #' @param upper Upper bounds to search within
    #' @param n0 Number of points to evaluate in initial stage
    #' @param minimize Are you trying to minimize the output?
    #' @param fn_args Arguments to pass to the function fn.
    #' @param gr Gradient of function to optimize.
    #' @param fngr Function that returns list with names elements "fn" for the
    #' function value and "gr" for the gradient. Useful when it is slow to
    #' evaluate and fn/gr would duplicate calculations if done separately.
    #' @param mopar List of parameters using mixopt
    #' @param groupeval Can a matrix of points be evaluated? Otherwise just
    #' a single point at a time.
    #' @return List with \code{par} (best input found) and \code{value}
    #' (objective value at \code{par}, on the original min/max scale).
    optimize_fn = function(fn=NULL,
                           lower=apply(self$X, 2, min),
                           upper=apply(self$X, 2, max),
                           n0=100, minimize=FALSE,
                           fn_args=NULL,
                           gr=NULL, fngr=NULL,
                           mopar=NULL,
                           groupeval=FALSE) {
      stopifnot(all(lower < upper))
      stopifnot(length(n0)==1, is.numeric(n0), n0>=1)
      # If any inputs are factors but mopar is not given, create mopar
      if (is.null(mopar)) {
        # print('fixing maxEI with factors')
        # browser()
        # Inspect the kernel to find which dims are factors vs
        # continuous; build a mixopt parameter per dimension.
        fkfd <- GauPro:::find_kernel_factor_dims2(self$kernel)
        fkcd <- GauPro:::find_kernel_cts_dims(self$kernel)
        factorinds <- if (is.null(fkfd)) {
          c()
        } else {
          # fkfd comes in triples (dim index, nlevels, ordered flag).
          fkfd[seq(1, length(fkfd), 3)]
        }
        ctsinds <- setdiff(1:self$D, factorinds)
        mopar <- list()
        for (i in 1:self$D) {
          if (i %in% ctsinds) {
            mopar[[i]] <- mixopt::mopar_cts(lower=lower[i],
                                            upper=upper[i])
          } else {
            stopifnot(length(fkfd) > .5, i %in% factorinds)
            fkfdind <- which(fkfd[(which(seq_along(fkfd) %% 3 == 1))] == i)
            nlev <- fkfd[(fkfdind-1)*3 + 2]
            isordered <- fkfd[(fkfdind-1)*3 + 3] > .5
            if (isordered) {
              mopar[[i]] <- mixopt::mopar_ordered(values=1:nlev)
            } else {
              mopar[[i]] <- mixopt::mopar_unordered(values=1:nlev)
            }
          }
        }
        # Mark so the wrapper below knows inputs are on index scale.
        attr(mopar, "converted") <- TRUE
      }
      # Pass this in to EI so it doesn't recalculate it unnecessarily every time
      # selfXmeanpred <- self$pred(self$X, se.fit=F, mean_dist=T)
      # mixopt minimizes; flip sign when caller wants a maximum.
      minmult <- if (minimize) {1} else {-1}
      # Convert functions
      # Wraps a user fn/gr/fngr so it accepts mixopt's list-style input
      # and returns values on the minimization scale.
      convert_function_with_inputs <- function(fn, is_fngr=FALSE) {
        if (is.null(fn)) {
          return(NULL)
        }
        function(xx){
          if (is.null(self$formula) || !is.null(attr(mopar, "converted"))) {
            xx2 <- unlist(xx)
          } else {
            # Convert to data frame since it will convert to formula.
            # This way is probably slow.
            # Alternatively, convert to all numeric, no df/formula
            xx2 <- as.data.frame(xx)
            colnames(xx2) <- colnames(self$X)
          }
          # Eval fn
          if (is.null(fn_args)) {
            fnout <- fn(xx2)
          } else {
            stopifnot(is.list(fn_args))
            fnout <- do.call(fn, c(list(xx2), fn_args))
          }
          if (is_fngr) {
            fnout$fn <- fnout$fn * minmult
            fnout$gr <- fnout$gr * minmult
          } else {
            fnout <- fnout * minmult
          }
          fnout
        }
      }
      opt_fn <- convert_function_with_inputs(fn)
      opt_gr <- convert_function_with_inputs(gr)
      opt_fngr <- convert_function_with_inputs(fngr, is_fngr=TRUE)
      # if (!is.null(mopar)) {
      # Use mixopt, allows for factor/discrete/integer inputs
      stopifnot(self$D == length(mopar))
      moout <- mixopt::mixopt_multistart(
        par=mopar,
        n0=n0,
        fn=opt_fn, gr=opt_gr, fngr=opt_fngr,
        groupeval=groupeval
      ) # End mixopt
      # Convert output back to input scale
      if (is.null(self$formula)) {
        # Convert list to numeric
        moout_par <- unlist(moout$par)
      } else if (!is.null(attr(mopar, "converted"))) {
        # Convert numericback to named to data.frame
        moout_par <- GauPro:::convert_X_with_formula_back(self, moout$par)
        colnames(moout_par) <- colnames(self$X)
      } else {
        # Convert list to data frame
        moout_par <- as.data.frame(moout$par)
        colnames(moout_par) <- colnames(self$X)
      }
      return(list(
        par=moout_par,
        value=moout$val * minmult
      ))
    },
    #' @description Calculate expected improvement
    #' @param x Vector to calculate EI of, or matrix for whose rows it should
    #' be calculated
    #' @param minimize Are you trying to minimize the output?
    #' @param eps Exploration parameter
    #' @param return_grad Should the gradient be returned?
    #' @param ... Additional args (e.g. \code{selfXmeanpred}, predicted
    #' means at the design points, to avoid recomputation)
    EI = function(x, minimize=FALSE, eps=0, return_grad=FALSE, ...) {
      stopifnot(length(minimize)==1, is.logical(minimize))
      stopifnot(length(eps)==1, is.numeric(eps), eps >= 0)
      dots <- list(...)
      # Normalize x into a matrix (or a data frame when a formula is
      # in use; predict handles the conversion then).
      if (is.matrix(x)) {
        stopifnot(ncol(x) == ncol(self$X))
      } else if (is.vector(x) && self$D==1) {
        x <- matrix(x, ncol=1)
      } else if (is.vector(x)) {
        stopifnot(length(x) == ncol(self$X))
      } else if (is.data.frame(x) && !is.null(self$formula)) {
        # Fine here, will get converted in predict
      } else {
        stop(paste0("bad x in EI, class is: ", class(x)))
      }
      # stopifnot(is.vector(x), length(x) == ncol(self$X))
      # fxplus <- if (minimize) {min(self$Z)} else {max(self$Z)}
      # pred <- self$pred(x, se.fit=T)
      # Need to use prediction of mean
      xnew_meanpred <- self$pred(x, se.fit=T, mean_dist=T, return_df=F)
      if (is.null(dots$selfXmeanpred)) {
        selfXmeanpred <- self$pred(self$X, se.fit=F, mean_dist=T)
      } else {
        selfXmeanpred <- dots$selfXmeanpred
        stopifnot(is.numeric(selfXmeanpred),
                  length(selfXmeanpred) == length(self$Z))
      }
      # Use predicted mean at each point since it doesn't make sense not to
      # when there is noise. Or should fxplus be optimized over inputs?
      fxplus <- if (minimize) {min(selfXmeanpred)} else {
        max(selfXmeanpred)}
      if (minimize) {
        # Ztop <- fxplus - pred$mean - eps
        Ztop <- fxplus - xnew_meanpred$mean - eps
      } else {
        # Ztop <- pred$mean - fxplus - eps
        Ztop <- xnew_meanpred$mean - fxplus - eps
      }
      # Z <- Ztop / pred$se
      Z <- Ztop / xnew_meanpred$se
      # if (pred$se <= 0) {return(0)}
      # (Ztop) * pnorm(Z) + pred$se * dnorm(Z)
      # ifelse(pred$se <= 0, 0,
      #        (Ztop) * pnorm(Z) + pred$se * dnorm(Z))
      # Standard EI formula; zero where predictive se is zero.
      EI <- ifelse(xnew_meanpred$se <= 0, 0,
                   (Ztop) * pnorm(Z) + xnew_meanpred$se * dnorm(Z))
      if (return_grad) {
        # Analytic gradient of EI via chain rule through mean, se, z.
        minmult <- if (minimize) {1} else {-1}
        s <- xnew_meanpred$se
        s2 <- xnew_meanpred$s2
        y <- xnew_meanpred$mean
        f <- fxplus - eps * minmult
        z <- Z
        ds2_dx <- self$gradpredvar(x) # GOOD
        ds_dx <- .5/s * ds2_dx # GOOD
        # z <- (f - y) / s
        dy_dx <- self$grad(x) # GOOD
        dz_dx <- -dy_dx / s + (f - y) * (-1/s2) * ds_dx # GOOD
        dz_dx <- dz_dx * minmult
        ddnormz_dz <- -dnorm(z) * z # GOOD
        # daug_dx = .5*sigma_eps / (s2 + sigma_eps2)^1.5 * ds2_dx # GOOD
        dEI_dx = minmult * (-dy_dx*pnorm(z) + (f-y)*dnorm(z)*dz_dx) +
          ds_dx*dnorm(z) + s*ddnormz_dz*dz_dx #GOOD
        # numDeriv::grad(function(x) {pr <- self$pred(x,se=T);( EI(pr$mean,pr$se))}, x)
        # dAugEI_dx = EI * daug_dx + dEI_dx * Aug
        # numDeriv::grad(function(x) {pr <- self$pred(x,se=T);( EI(pr$mean,pr$se)*augterm(pr$s2))}, x)
        return(list(
          EI=EI,
          grad=dEI_dx
        ))
      }
      EI
    },
#' @description Find the point that maximizes the expected improvement.
#' If there are inputs that should only be optimized over a discrete set
#' of values, specify `mopar` for all parameters.
#' @param lower Lower bounds to search within
#' @param upper Upper bounds to search within
#' @param n0 Number of points to evaluate in initial stage
#' @param minimize Are you trying to minimize the output?
#' @param eps Exploration parameter
#' @param mopar List of parameters using mixopt
#' @param dontconvertback If data was given in with a formula, should
#' it converted back to the original scale?
#' @param EItype Type of EI to calculate. One of "EI", "Augmented",
#' or "Corrected"
#' @param usegrad Should the gradient be used when optimizing?
#' Can make it faster.
maxEI = function(lower=apply(self$X, 2, min),
upper=apply(self$X, 2, max),
n0=100, minimize=FALSE, eps=0,
dontconvertback=FALSE,
EItype="corrected",
mopar=NULL,
usegrad=FALSE) {
# Pass this in to EI so it doesn't recalculate it unnecessarily every time
selfXmeanpred <- self$pred(self$X, se.fit=F, mean_dist=T)
stopifnot(is.character(EItype), length(EItype)==1)
EItype <- tolower(EItype)
EIfunc <- if (EItype %in% c("ei")) {
self$EI
} else if (EItype %in% c("augmented", "aug", "augmentedei")) {
# Aug needs se
selfXmeanpred <- self$pred(self$X, se.fit=T, mean_dist=T)
self$AugmentedEI
} else if (EItype %in% c("corrected", "cor", "correctedei")) {
self$CorrectedEI
} else {
stop("Bad EItype given to maxEI")
}
# -EIfunc(xx2, minimize = minimize, eps=eps,
# selfXmeanpred=selfXmeanpred)
fn <- function(xx2) {
EIfunc(xx2, minimize = minimize, eps=eps,
selfXmeanpred=selfXmeanpred)
}
if (usegrad) {
fngr <- function(xx2) {
out <- EIfunc(xx2, minimize = minimize, eps=eps,
selfXmeanpred=selfXmeanpred, return_grad=TRUE)
names(out) <- c("fn", "gr")
out
}
} else {
fngr <- NULL
}
self$optimize_fn(fn, minimize=FALSE,
fngr=fngr,
lower=lower, upper=upper,
mopar=mopar, n0=n0)
},
    #' @description Find the multiple points that maximize the expected
    #' improvement. Currently only implements the constant liar method.
    #' @param npoints Number of points to add
    #' @param method Method to use for setting the output value for the points
    #' chosen as a placeholder.
    #' Can be one of: "CL" for constant liar,
    #' which uses the best value seen yet; or "pred", which uses the predicted
    #' value, also called the Believer method in literature.
    #' @param lower Lower bounds to search within
    #' @param upper Upper bounds to search within
    #' @param n0 Number of points to evaluate in initial stage
    #' @param minimize Are you trying to minimize the output?
    #' @param eps Exploration parameter
    #' @param mopar List of parameters using mixopt
    #' @param dontconvertback If data was given in with a formula, should
    #' it converted back to the original scale?
    #' @param EItype Type of EI to calculate. One of "EI", "Augmented",
    #' or "Corrected"
    #' @return List with \code{par} (matrix of selected points, one per
    #' row) and \code{value} (NA; kept for interface compatibility).
    maxqEI = function(npoints, method="pred",
                      lower=apply(self$X, 2, min),
                      upper=apply(self$X, 2, max),
                      n0=100, minimize=FALSE, eps=0,
                      EItype="corrected",
                      dontconvertback=FALSE,
                      mopar=NULL) {
      stopifnot(is.numeric(npoints), length(npoints)==1, npoints >= 1)
      if (npoints==1) {
        # For single point, use proper function
        return(self$maxEI(lower=lower, upper=upper, n0=n0,
                          minimize=minimize, eps=eps, EItype=EItype,
                          mopar=mopar,
                          dontconvertback=dontconvertback))
      }
      stopifnot(length(method)==1, method %in% c("CL", "pred"))
      # If factor dims in kernel, make sure mopar is given
      if (length(find_kernel_factor_dims(self$kernel)) > 0 && is.null(mopar)) {
        warning("maxqEI wasn't given mopar but kernel has factor dimensions")
      }
      # Clone object since we will add fake data
      gpclone <- self$clone(deep=TRUE)
      # Track points selected
      selectedX <- matrix(data=NA, nrow=npoints, ncol=ncol(self$X))
      # Xmeanpred <- self$pred(self$X, se.fit=T, mean_dist=T)
      Xmeanpred <- self$pred(self$X, se.fit=F, mean_dist=T)
      # Zimpute <- if (minimize) {min(self$Z)} else {max(self$Z)}
      # Constant liar value
      # Zimpute <- if (minimize) {min(Xmeanpred$mean)} else {max(Xmeanpred$mean)}
      Zimpute <- if (minimize) {min(Xmeanpred)} else {max(Xmeanpred)}
      # Greedy loop: pick the EI maximizer, impute a fake output for
      # it, refit the clone, repeat.
      for (i in 1:npoints) {
        # Find and store point that maximizes EI
        maxEI_i <- gpclone$maxEI(lower=lower, upper=upper,
                                 n0=n0, eps=eps,
                                 minimize=minimize, EItype=EItype,
                                 mopar=mopar,
                                 dontconvertback=TRUE)
        xi <- maxEI_i$par
        # mixopt could return data frame. Need to convert it to numeric since
        # it will be added to gpclone$X
        if (is.data.frame(xi)) {
          xi <- convert_X_with_formula(xi, self$convert_formula_data,
                                      self$formula)
        }
        stopifnot(is.numeric(xi))
        selectedX[i, ] <- xi
        if (method == "pred") {
          Zimpute <- self$predict(xi)
        }
        # Update clone with new data, don't update parameters since
        # it's fake data
        if (i < npoints) {
          gpclone$update(Xnew=xi, Znew=Zimpute, no_update=TRUE)
        }
      }
      # Return matrix of points
      # selectedX
      # done
      # Convert factor/char indexes back to level/value
      if (!is.null(self$formula) && !dontconvertback) {
        selectedX <- convert_X_with_formula_back(gpdf=self, x=selectedX)
      }
      # Return list, same format as DiceOptim::max_EI
      return(
        list(
          par=selectedX,
          value=NA
        )
      )
    },
#' @description Calculate Knowledge Gradient
#' @param x Point to calculate at
#' @param minimize Is the objective to minimize?
#' @param eps Exploration parameter
#' @param current_extreme Used for recursive solving
KG = function(x, minimize=FALSE, eps=0, current_extreme=NULL) {
# if (exists('kgbrow') && kgbrow) {browser()}
xkg <- x
if (!is.matrix(xkg)) {
stopifnot(length(xkg) == self$D)
xkg <- matrix(xkg, nrow=1)
}
if (missing(current_extreme) || is.null(current_extreme)) {
# Find current max/min
# Find current max
gpkgmax <- optim(par=self$X[which.max(self$Z)[1],],
fn=function(xx) {-self$pred(xx)},
method='Brent', lower=0, upper=1)
current_extreme <- -gpkgmax$value
} else {
stopifnot(is.numeric(current_extreme), length(current_extreme) == 1)
}
# Sample at xkg
xkgpred <- gpkg$pred(xkg, se.fit = T)
xkgpred
nsamps <- 5
xkgsamps <- qnorm(((1:nsamps)-.5)/nsamps, xkgpred$mean, xkgpred$se)
kgs <- rep(NA, nsamps)
gpkgclone <- gpkg$clone(deep=TRUE)
for (i in 1:nsamps) {
xkgsamp <- xkgsamps[i]
# xkgsamp <- rnorm(1, xkgpred$mean, xkgpred$se)
# Add samp to mod
# gpkgclone <- gpkg$clone(deep=TRUE)
# gpkgclone$update(Xnew=xkg, Znew=xkgsamp, no_update = TRUE)
gpkgclone$update(Xall=rbind(self$X, xkg),
Zall=rbind(self$Z, xkgsamp),
no_update = TRUE)
# gpkgclone$plot1D()
# Find clone max after adding sample
gpkgmaxclone <- optim(par=gpkgclone$X[which.max(gpkgclone$Z)[1],],
fn=function(xx) {-gpkgclone$pred(xx)},
method='Brent', lower=0, upper=1)
gpkgmaxclone
# gpkgmaxclone$value - gpkgmax$value
kgs[i] <- (-gpkgmaxclone$value) - current_extreme #gpkgmax$value
}
kgs
mean(kgs)
},
    #' @description Calculated Augmented EI (EI scaled by a factor that
    #' accounts for observation noise, Huang et al.'s augmented EI)
    #' @param x Vector to calculate EI of, or matrix for whose rows it should
    #' be calculated
    #' @param minimize Are you trying to minimize the output?
    #' @param eps Exploration parameter
    #' @param return_grad Should the gradient be returned?
    #' @param f The reference max, user shouldn't change this.
    #' @param ... Additional args
    AugmentedEI = function(x, minimize=FALSE, eps=0,
                           return_grad=F, ...) {
      stopifnot(length(minimize)==1, is.logical(minimize))
      stopifnot(length(eps)==1, is.numeric(eps), eps >= 0)
      dots <- list(...)
      # Normalize x into a matrix (or pass a data frame through when a
      # formula is in use; predict converts it).
      if (is.matrix(x)) {
        stopifnot(ncol(x) == ncol(self$X))
      } else if (is.vector(x) && self$D==1) {
        x <- matrix(x, ncol=1)
      } else if (is.vector(x)) {
        stopifnot(length(x) == ncol(self$X))
      } else if (is.data.frame(x) && !is.null(self$formula)) {
        # Fine here, will get converted in predict
      } else {
        stop(paste0("bad x in EI, class is: ", class(x)))
      }
      # Reference value f: effective best over design points using the
      # mean +/- se utility, unless supplied by the caller.
      if (is.null(dots$f)) {
        if (is.null(dots$selfXmeanpred)) {
          selfXmeanpred <- self$pred(self$X, se.fit=T, mean_dist=T)
        } else {
          selfXmeanpred <- dots$selfXmeanpred
          stopifnot(is.list(selfXmeanpred),
                    length(selfXmeanpred$mean) == length(self$Z))
        }
        # Get preds at existing points, calculate best
        # pred_X <- self$predict(self$X, se.fit = T)
        pred_X <- selfXmeanpred
        if (minimize) {
          u_X <- -pred_X$mean - pred_X$se
          star_star_index <- which.max(u_X)
        } else {
          # warning("AugEI must minimize for now")
          u_X <- +pred_X$mean + pred_X$se
          star_star_index <- which.max(u_X)
        }
        f <- pred_X$mean[star_star_index]
      } else {
        f <- dots$f
      }
      stopifnot(is.numeric(f), length(f) == 1)
      minmult <- if (minimize) {1} else {-1}
      # Adjust target by eps
      f <- f - minmult * eps
      predx <- self$pred(x, se=T)
      y <- predx$mean
      s <- predx$se
      s2 <- predx$s2
      # Standard EI at the prediction points.
      z <- (f - y) / s * minmult
      EI <- (f - y) * minmult * pnorm(z) + s * dnorm(z)
      # Calculate "augmented" term: downweights points whose predictive
      # variance is small relative to the noise.
      sigma_eps <- self$nug * self$s2_hat
      sigma_eps2 <- sigma_eps^2
      Aug <- 1 - sigma_eps / sqrt(s2 + sigma_eps2)
      AugEI <- Aug * EI
      if (return_grad) {
        # Analytic gradient via chain rule through mean, se, z, and
        # the augmentation term.
        # x <- .8
        ds2_dx <- self$gradpredvar(x) # GOOD
        ds_dx <- .5/s * ds2_dx # GOOD
        # z <- (f - y) / s
        dy_dx <- self$grad(x) # GOOD
        dz_dx <- -dy_dx / s + (f - y) * (-1/s2) * ds_dx # GOOD
        dz_dx <- dz_dx * minmult
        ddnormz_dz <- -dnorm(z) * z # GOOD
        daug_dx = .5*sigma_eps / (s2 + sigma_eps2)^1.5 * ds2_dx # GOOD
        dEI_dx = minmult * (-dy_dx*pnorm(z) + (f-y)*dnorm(z)*dz_dx) +
          ds_dx*dnorm(z) + s*ddnormz_dz*dz_dx #GOOD
        # numDeriv::grad(function(x) {pr <- self$pred(x,se=T);
        #       ( EI(pr$mean,pr$se))}, x)
        dAugEI_dx = EI * daug_dx + dEI_dx * Aug
        # numDeriv::grad(function(x) {pr <- self$pred(x,se=T);
        #     ( EI(pr$mean,pr$se)*augterm(pr$s2))}, x)
        return(list(
          AugEI=AugEI,
          grad=dAugEI_dx
        ))
      }
      AugEI
    },
#' @description Calculated Augmented EI
#' @param x Vector to calculate EI of, or matrix for whose rows it should
#' be calculated
#' @param minimize Are you trying to minimize the output?
#' @param eps Exploration parameter
#' @param return_grad Should the gradient be returned?
#' @param ... Additional args
CorrectedEI = function(x, minimize=FALSE, eps=0,
return_grad=F, ...) {
stopifnot(length(minimize)==1, is.logical(minimize))
stopifnot(length(eps)==1, is.numeric(eps), eps >= 0)
dots <- list(...)
if (is.matrix(x)) {
stopifnot(ncol(x) == ncol(self$X))
} else if (is.vector(x) && self$D == 1) {
# stopifnot(length(x) == ncol(self$X))
x <- matrix(x, ncol=1)
} else if (is.vector(x)) {
stopifnot(length(x) == ncol(self$X))
x <- matrix(x, nrow=1)
} else if (is.data.frame(x) && !is.null(self$formula)) {
# Need to convert here
x <- convert_X_with_formula(x, self$convert_formula_data,
self$formula)
}
else if (is.data.frame(x)) {
x <- as.matrix(x)
} else {
stop(paste0("bad x in EI, class is: ", class(x)))
}
if (is.null(dots$f)) {
if (is.null(dots$selfXmeanpred)) {
selfXmeanpred <- self$pred(self$X, se.fit=F, mean_dist=T)
} else {
selfXmeanpred <- dots$selfXmeanpred
stopifnot(is.numeric(selfXmeanpred),
length(selfXmeanpred) == length(self$Z))
}
# Get preds at existing points, calculate best
# pred_X <- self$predict(self$X, se.fit = F)
pred_X <- selfXmeanpred
if (minimize) {
# u_X <- -pred_X$mean - pred_X$se
star_star_index <- which.min(pred_X)
} else {
# warning("AugEI must minimize for now")
# u_X <- +pred_X$mean + pred_X$se
star_star_index <- which.max(pred_X)
}
f <- pred_X[star_star_index]
} else {
f <- dots$f
}
stopifnot(is.numeric(f), length(f) == 1)
minmult <- if (minimize) {1} else {-1}
# Adjust target by eps
f <- f - minmult * eps
# predx <- self$pred(x, se=T)
# y <- predx$mean
# s <- predx$se
# s2 <- predx$s2
# u represents the point measured with noise
# a represents the point (same as u) but measured without noise (mean)
u <- x
X <- self$X
mu_u <- self$trend$Z(u)
Ku.X <- self$kernel$k(u, X)
mu_X <- self$trend$Z(X)
Ka <- self$kernel$k(u)
Ku <- Ka + self$nug * self$s2_hat
Ku_given_X <- Ku - Ku.X %*% self$Kinv %*% t(Ku.X)
# Need to fix negative variances that show up
Ku_given_X <- pmax(Ku_given_X, self$nug * self$s2_hat)
y <- c(mu_u + Ku.X %*% self$Kinv %*% (self$Z - mu_X))
s2 <- diag((Ku_given_X - self$nug*self$s2_hat) ^ 2 / (Ku_given_X))
s2 <- pmax(s2, 0)
# if (ncol(s2) > 1.5) {s2 <- diag(s2)}
s <- sqrt(s2)
# int from f to Inf: (x-f) p(x) dx
z <- (f - y) / s * minmult
CorEI <- (f - y) * minmult * pnorm(z) + s * dnorm(z)
if (F) {
tdf <- 3
CorEIt <- (f - y) * minmult * pt(z,tdf) + s * dt(z,tdf)
plot(x, CorEI)
plot(x, s, ylim=c(0,.3))
points(x, self$pred(x, se=T)$se,col=2)
points(x, self$pred(x, se=T, mean_dist = T)$se,col=3)
cbind(x, y, s, z, CorEI=CorEI, EIt=(f - y) * minmult * pt(z,3) + s * dt(z, 3))
legend(x='topright', legend=c(""), fill=1:3)
}
# # Calculate "augmented" term
# sigma_eps <- self$nug * self$s2_hat
# sigma_eps2 <- sigma_eps^2
# Aug <- 1 - sigma_eps / sqrt(s2 + sigma_eps2)
# AugEI <- Aug * EI
if (return_grad) {
# CorrectedEI grad looks good. Need to check for eps, minimize, tdf
# x <- .8
# ds2_dx <- self$gradpredvar(x) # GOOD
# ds2_dx <- -2 * Ku.X %*% self$Kinv %*% t(self$kernel$dC_dx(XX=u, X=self$X))
ds2_dx_t1 <- -2 * Ku.X %*% self$Kinv
dC_dx <- (self$kernel$dC_dx(XX=u, X=self$X))
ds2_dx <- u*NaN
for (i in 1:nrow(u)) {
# ds2_dx[i, ] <- ds2_dx_t1[i, ] %*% (dC_dx[i, , ])
ds2_dx[i, ] <- t(dC_dx[i, , ] %*% ds2_dx_t1[i, ] )
}
ds2_dx <- ds2_dx * (1-self$nug^2*self$s2_hat^2/diag(Ku_given_X)^2)
ds_dx <- .5/s * ds2_dx # GOOD
# z <- (f - y) / s
dy_dx <- self$grad(x) # GOOD
dz_dx <- -dy_dx / s + (f - y) * (-1/s2) * ds_dx # GOOD
dz_dx <- dz_dx * minmult
ddnormz_dz <- -dnorm(z) * z # GOOD
# daug_dx = .5*sigma_eps / (s2 + sigma_eps2)^1.5 * ds2_dx # GOOD
dEI_dx = minmult * (-dy_dx*pnorm(z) + (f-y)*dnorm(z)*dz_dx) +
ds_dx*dnorm(z) + s*ddnormz_dz*dz_dx #GOOD
# numDeriv::grad(function(x) {pr <- self$pred(x,se=T);( EI(pr$mean,pr$se))}, x)
# dAugEI_dx = EI * daug_dx + dEI_dx * Aug
# numDeriv::grad(function(x) {pr <- self$pred(x,se=T);( EI(pr$mean,pr$se)*augterm(pr$s2))}, x)
return(list(
EI=CorEI,
grad=dEI_dx
))
}
c(CorEI)
},
#' @description Feature importance
#' @param plot Should the plot be made?
#' @param print_bars Should the importances be printed as bars?
#' @references
#' https://scikit-learn.org/stable/modules/permutation_importance.html#id2
    importance = function(plot=TRUE, print_bars=TRUE) {
      # Permutation feature importance: repeatedly shuffle one input column
      # at a time and measure how much the model's predictive RMSE degrades.
      # Importance is normalized so 0 = no effect on predictions and
      # >= 1 = corrupting the feature is as bad as predicting the mean.
      # https://scikit-learn.org/stable/modules/permutation_importance.html#id2
      stopifnot(is.logical(plot), length(plot)==1)
      # Number of shuffle repetitions per feature (averaged for stability).
      nouter <- 10
      # Baseline RMSE when predicting the overall mean of Z (worst case).
      rmse0 <- sqrt(mean((mean(self$Z) - self$Z)^2))
      # RMSE of the fitted model on uncorrupted training inputs (best case).
      rmsemod <- sqrt(mean((predict(self, self$X) - self$Z)^2))
      # Accumulators, one slot per input dimension.
      rmses <- rep(0, self$D)
      rsqs <- rep(0, self$D)
      # Outer loop to repeat the random shuffling for stability.
      for (iouter in 1:nouter) {
        # Loop over input dimensions.
        for (i in 1:self$D) {
          Xshuffle <- self$X
          # Shuffle a single column to break its relationship with Z
          # while preserving its marginal distribution.
          Xshuffle[, i] <- sample(Xshuffle[, i], nrow(Xshuffle), replace=F)
          # Predict on the corrupted data and record RMSE / R-squared.
          predi <- self$pred(Xshuffle)
          rmse <- sqrt(mean((predi - self$Z)^2))
          rsq <- 1 - (sum((predi-self$Z)^2)) / (sum((mean(self$Z)-self$Z)^2))
          rmses[i] <- rmses[i] + rmse
          rsqs[i] <- rsqs[i] + rsq
        }
      }
      # Average over repetitions. (rsqs is computed but not used below.)
      rmses <- rmses / nouter
      rsqs <- rsqs / nouter
      # Label features with the training-data column names when available.
      if (!is.null(colnames(self$X))) {
        names(rmses) <- colnames(self$X)
      } else {
        names(rmses) <- paste0("X", seq_along(rmses))
      }
      # I'm defining importance as this ratio.
      # 0 means feature has no effect on the predictions.
      # 1 or higher means that corrupting that feature completely destroys
      # model, it's worse than just predicting mean.
      # imp <- rmses / rmse0
      # Avoid divide by zero issue. Happens with white kernel
      # (model no better than the mean, so rmse0 == rmsemod).
      if (abs(rmse0 - rmsemod) < 1e-64) {
        imp <- 0 * rmses
      } else {
        imp <- (rmses - rmsemod) / (rmse0 - rmsemod)
      }
      imp <- round(imp, 4)
      # Optional horizontal-bar plot of importances (reference line at 1).
      if (plot) {
        ggp <- data.frame(name=factor(names(imp), levels = rev(names(imp))),
                          val=imp) %>%
          ggplot2::ggplot(ggplot2::aes(val, name)) +
          ggplot2::geom_vline(xintercept=1) +
          ggplot2::geom_bar(stat='identity', fill="blue") +
          ggplot2::xlab("Importance") +
          ggplot2::ylab("Variable")
        print(ggp)
      }
      # Optional ASCII bar chart printed to the console, scaled so the
      # largest importance fits in the available terminal width.
      if (print_bars) {
        impwidth <- 12
        namewidth <- max(10, max(nchar(names(imp))) + 4)
        # nchar1 <- 120
        # Number of characters until hitting where 1 is.
        nchar1 <- floor(
          (getOption("width") - 12 - impwidth - namewidth)/max(1, imp)
        )
        # Terminal too narrow to draw bars: return the numbers instead.
        if (nchar1 < 5) {
          return(imp)
        }
        s <- paste0(format("Input", width=namewidth),
                    format("Importance", width=impwidth),
                    "\n")
        # Closure that appends its arguments to the output string `s`
        # in the enclosing scope (hence the `<<-` assignment).
        catt <- function(...) {
          dots <- list(...)
          for (i in seq_along(dots)) {
            s <<- paste0(s, dots[[i]])
          }
        }
        for (i in seq_along(imp)) {
          # Name column, then the numeric importance padded to impwidth.
          catt(format(names(imp)[i], width=namewidth),
               # format(round(imp[i], 3), width=impwidth, justify = "left")
               paste0(c(round(imp[i], 3),
                        rep(" ", impwidth - nchar(round(imp[i], 3)))),
                      collapse='')
          )
          # Draw "=" up to the importance value; "|" marks importance == 1.
          j <- 1
          while (imp[i] >= j/nchar1) {
            if (j == nchar1) {
              catt("|")
            } else {
              catt("=")
            }
            j <- j + 1
          }
          # Pad with spaces so the "|" marker lines up across rows.
          while (j < nchar1) {
            # cat(".")
            catt(" ")
            j <- j + 1
          }
          if (j == nchar1) {
            catt("|")
          }
          catt("\n")
        }
        cat(s)
        # Return importances invisibly since the chart was already printed.
        invisible(imp)
      } else {
        # Return importances.
        imp
      }
    },
#' @description Print this object
print = function() {
cat("GauPro kernel model object\n")
if (!is.null(self$formula)) {
formchar <- as.character(self$formula)
stopifnot(length(formchar) == 3)
formchar2 <- paste(formchar[2], formchar[1], formchar[3])
cat("\tFormula:", formchar2, "\n")
}
cat(paste0("\tD = ", self$D, ", N = ", self$N,"\n"))
cat(paste0("\tNugget = ", signif(self$nug, 3), "\n"))
cat("\tRun $update() to add data and/or optimize again\n")
cat("\tUse $pred() to get predictions at new points\n")
cat("\tUse $plot() to visualize the model\n")
invisible(self)
},
#' @description Summary
#' @param ... Additional arguments
    summary = function(...) {
      # Builds a "summary.GauPro" list with model fit diagnostics based on
      # leave-one-out (LOO) predictions: residuals, interval coverage,
      # R-squared, AIC, feature importance, and a display formula.
      # Just return summary of Z
      # return(summary(self$Z[,1]))
      # Follow example of summary.lm
      ans <- list()
      class(ans) <- c("summary.GauPro")
      ans$D <- self$D
      ans$N <- self$N
      # AIC of the fitted model.
      ans$AIC <- self$AIC()
      # Use LOO predictions with standard errors.
      ploo <- self$pred_LOO(se.fit = T)
      loodf <- cbind(ploo, Z=self$Z)
      # 95% (1.96 SE) and 68% (1 SE) prediction intervals around the
      # LOO fitted values.
      loodf$upper <- loodf$fit + 1.96 * loodf$se.fit
      loodf$lower <- loodf$fit - 1.96 * loodf$se.fit
      loodf$upper68 <- loodf$fit + 1.00 * loodf$se.fit
      loodf$lower68 <- loodf$fit - 1.00 * loodf$se.fit
      # LOO residuals (fitted minus observed).
      ans$residualsLOO <- c(ploo$fit - self$Z)
      # Empirical coverage: fraction of observations inside each interval.
      coverage95vec <- with(loodf, upper >= Z & lower <= Z)
      coverage95 <- mean(coverage95vec)
      ans$coverage95LOO <- coverage95
      coverage68vec <- with(loodf, upper68 >= Z & lower68 <= Z)
      coverage68 <- mean(coverage68vec)
      ans$coverage68LOO <- coverage68
      # LOO R-squared: 1 - SSE / SST.
      rsq <- with(loodf, 1 - (sum((fit-Z)^2)) / (sum((mean(Z)-Z)^2)))
      ans$r.squaredLOO <- rsq
      # Adjusted R-squared, using the number of optimized hyperparameters
      # (length of param_optim_start) as the parameter count.
      ans$r.squared.adjLOO <- (
        1 - ((1-rsq)*(self$N-1) /
               (self$N-1-length(self$param_optim_start(nug.update=self$nug.est,
                                                       jitter=F))))
      )
      # Permutation feature importance (no plotting/printing here).
      ans$importance <- self$importance(plot=FALSE, print_bars=FALSE)
      # Build a human-readable formula string for display. Three cases:
      # an actual formula, column names on X, or generic X1..XD names.
      if (!is.null(self$formula)) {
        formchar <- as.character(self$formula)
        stopifnot(length(formchar) == 3)
        formchar2 <- paste(formchar[2], formchar[1], formchar[3])
        ans$formula <- formchar2
      } else if (!is.null(colnames(self$X))) {
        if (is.null(colnames(self$Z))) {
          ans$formula <- "Z ~ "
        } else {
          ans$formula <- paste(colnames(self$Z)[1], " ~ ")
        }
        for (i in 1:self$D) {
          if (i==1) {
            ans$formula <- paste0(ans$formula, colnames(self$X)[i])
          } else {
            ans$formula <- paste0(ans$formula, " + ", colnames(self$X)[i])
          }
        }
      } else {
        # No colnames or formula: fall back to generic variable names.
        ans$formula <- "Z ~ "
        for (i in 1:self$D) {
          if (i==1) {
            ans$formula <- paste0(ans$formula, " X", i)
          } else {
            ans$formula <- paste0(ans$formula, " + X", i)
          }
        }
      }
      ans
    }
),
private = list(
)
)
|
13defe2159ffe04150a3e90bb5dfa5e08e288988
|
e979752c4498c5edf47791d8b7eaafb2730524bf
|
/sim20032009/figs/points/fixed/anualSeries.R
|
f2f9e131094c3845b40649e9c027de8fe41396e9
|
[] |
no_license
|
ClaudiaGEscribano/aod_and_PV
|
0bf4d6c7398351aebdef7b9f9538246c5ee2bd49
|
77eaa4e454ce4ec4ec784795e2e89b8945bc1737
|
refs/heads/master
| 2021-03-27T15:41:09.158241
| 2018-12-10T14:58:39
| 2018-12-10T14:58:39
| 83,782,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,241
|
r
|
anualSeries.R
|
## anualSeries.R
## Plot annual-mean radiation time series at the point locations of the BSRN
## stations (Carpentras, Sede Boqer, Payerne), comparing two model products
## (CAER, CNO) against the satellite-derived series, and the relative
## difference of each model from the satellite.
library(zoo)
## BUG FIX: xyplot() is the lattice generic; library(zoo) alone only
## registers the zoo method and does not attach lattice, so the calls
## below would fail with "could not find function 'xyplot'".
library(lattice)
## MODELS: annual values at the BSRN points (fixed-tilt configuration)
load("../../../calc/points/fixed/bsrn_fixedY_caer.Rdata")
load("../../../calc/points/fixed/bsrn_fixedY_cno.Rdata")
## SATELLITE
load("../../../calc/points/fixed/bsrn_fixedY_sat.Rdata")
#########################################
## CARPENTRAS
########################################
load("../../../data/estaciones_data/carpentras20032009.Rdata")
## Column 14 holds the annual mean of the in-situ station data.
carpentrasYmean <- carpentras20032009[,14]
carpentrasBSRN <- carpentrasYmean  # kept for reference; not plotted below
## For the models and the satellite, Carpentras is row 3 of the point matrices.
carpentrasCAER <- bsrn_fixedY_caer[3,]
carpentrasCNO <- bsrn_fixedY_cno[3,]
carpentrasSAT <- bsrn_fixedY_sat[3,]
carpentras <- cbind(carpentrasSAT, carpentrasCAER, carpentrasCNO)
carpentras <- zoo(carpentras)
pdf('carpentraYfixed2.pdf')
xyplot(carpentras, lwd=3,superpose=TRUE, xlab='Years', ylab='kWh/kWp')
dev.off()
## Relative difference of each model (columns 2:3) from the satellite.
carpentrasDif <- carpentras - carpentrasSAT
carpentrasDif <- carpentrasDif[,2:3]
carpentrasDif <- carpentrasDif/carpentrasSAT
pdf('carpentraYreldifFixed.pdf')
xyplot(carpentrasDif, lwd=2, superpose=TRUE, xlab='Years', ylab='Rel. Dif')
dev.off()
############################################################
## SEDEBROKER
###########################################################
## Some years have missing months; the annual mean is computed over the
## available months only (na.rm=TRUE), so those years are less reliable.
load("../../../data/estaciones_data/sedebroker20032009.Rdata")
sedebroker20032009[sedebroker20032009[]==-999] <- NA  # -999 flags missing data
sedebroker20032009m <- sedebroker20032009[,2:13]      # monthly columns only
sedebrokerYmean <- rowMeans(sedebroker20032009m, na.rm=TRUE)
sedebrokerBSRN <- sedebrokerYmean  # kept for reference; not plotted below
## Sede Boqer is row 7 of the point matrices.
sedebrokerCAER <- bsrn_fixedY_caer[7,]
sedebrokerCNO <- bsrn_fixedY_cno[7,]
sedebrokerSAT <- bsrn_fixedY_sat[7,]
sedebroker <- cbind(sedebrokerSAT, sedebrokerCAER, sedebrokerCNO)
sedebroker <- zoo(sedebroker)
pdf('sedebrokerYfixed2.pdf')
xyplot(sedebroker, lwd=3, superpose=TRUE, xlab='Years', ylab='[kWh/kWP]')
dev.off()
sedebrokerDif <- sedebroker - sedebrokerSAT
sedebrokerDif <- sedebrokerDif[,2:3]
sedebrokerDif <- sedebrokerDif/sedebrokerSAT
pdf('sedebrokerYreldifFixed.pdf')
xyplot(sedebrokerDif, lwd=2, superpose=TRUE, xlab='Years', ylab='Rel. Dif')
dev.off()
############################################################
## PAYERNE
###########################################################
load("../../../data/estaciones_data/payerne20032009.Rdata")
payerneYmean <- payerne20032009[,14]
payerneBSRN <- payerneYmean  # kept for reference; not plotted below
## Payerne is row 6 of the point matrices.
payerneCAER <- bsrn_fixedY_caer[6,]
payerneCNO <- bsrn_fixedY_cno[6,]
payerneSAT <- bsrn_fixedY_sat[6,]
payerne <- cbind(payerneSAT, payerneCAER, payerneCNO)
payerne <- zoo(payerne)
names(payerne) <- c("PyreneSAT", "PyreneCAER", "PYRENEcno")
pdf('payerneYfixed2.pdf')
xyplot(payerne, lwd=3,superpose=TRUE, xlab='Years', ylab='[kWh/kWp]')
dev.off()
pyreneDif <- payerne - payerneSAT
pyreneDif <- pyreneDif[,2:3]
pyreneDif <- pyreneDif/payerneSAT
pdf('payerneYreldifFixed.pdf')
xyplot(pyreneDif, lwd=2, superpose=TRUE, xlab='Years', ylab='Rel. Dif')
dev.off()
|
7ae3499eebc4cc5207ea6f78ee515873334f8a74
|
0ca78ef5a8670fbdab55409eecda579cec2baf68
|
/globaltool/data_handle/collectdata/winddata/forex/getFxWind.R
|
fa98ff50788e8c7c2bb78843256fe5bba43550d2
|
[] |
no_license
|
zhurui1351/RSTOCK_TRAIL
|
ab83fdef790778a1e792d08a876522ef13a872e6
|
2396c512c8df81a931ea3ca0c925c151363a2652
|
refs/heads/master
| 2021-01-23T09:01:36.814253
| 2019-05-17T15:26:19
| 2019-05-17T15:26:19
| 23,482,375
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 281
|
r
|
getFxWind.R
|
# Download daily USD/CNY close prices from the Wind terminal and save to CSV.
#
# Fetches the close series for `code` from 1995-01-01 up to today via the
# WindR API (`w.wsd()`; a Wind session must already be connected) and writes
# the resulting data frame to `path`.
#
# Args:
#   path: Output CSV file path. Defaults to the original hard-coded
#         location, so existing zero-argument callers are unaffected.
#   code: Wind security code to download (default "USDCNY.EX").
# Returns: Invisibly, the data frame that was written.
getFxdata_wind <- function(path = "D:/data/collectdata/windata/forex/usdcny_d.csv",
                           code = "USDCNY.EX") {
  # BUG FIX: the original used lubridate::now(), which was never loaded, so
  # the function errored. Sys.Date() is base R; note it uses the local time
  # zone whereas as.Date(now()) converted through UTC.
  today <- as.character(Sys.Date())
  # "Fill=Previous" carries the last observation over non-trading days;
  # PriceAdj=F requests unadjusted prices.
  wind_result <- w.wsd(code, "close", "1995-01-01", today,
                       "Fill=Previous;PriceAdj=F")
  fx <- wind_result$Data
  write.csv(fx, path, row.names = FALSE, quote = FALSE)
  invisible(fx)
}
|
802c7d267673a057cc81bee5c0c40b3789567fc1
|
bb7eb91b3bb804dc4c479450fc6bef8143ef5619
|
/get_random_english_tweets.R
|
ec0aaba8a1cff706abb0ab255cad382634b72ddb
|
[] |
no_license
|
Octophi/covid_tweets
|
624765d757089b0cab0ef9973f39861e5b089bc4
|
b212d96cabe45f72cd34f58f543a7a4327f32d77
|
refs/heads/master
| 2023-02-19T04:22:24.142195
| 2021-01-09T00:30:42
| 2021-01-09T00:30:42
| 327,993,462
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,341
|
r
|
get_random_english_tweets.R
|
# Gets sample of 4000 random English tweets from a particular day.
# NOTE(review): this script calls str_length() (stringr), lookup_tweets()
# (rtweet) and tibble() (tibble) but loads none of them - presumably they
# are attached in the interactive session before sourcing; verify.
# List of Things you might have to change, in order of importance
# - the filepath in filepath_prefix on line 10
# - the filepath for where you want to save your stuff on line
# - the seed in set.seed, if you're not getting enough tweets on line 34
# Stores all the tweet ids
tweet_ids <- as.data.frame(NULL)
# Replace this with whatever file path you need.
filepath_prefix = "C:/Users/ddrsq/rscripts/covid_tweets/03-02/coronavirus-tweet-id-2020-03-02-"
# This loop will autocomplete the file path with 00, 01, ..., 23 to grab and concatenate all the hours
for(i in 0:23){
  # Pads your number to make it two digits
  two_digit <- toString(i)
  if(str_length(two_digit)==1){
    two_digit <- paste("0",two_digit, sep="")
  }
  # Final filepath you want to look up
  filepath <- paste(filepath_prefix, two_digit, ".txt", sep="")
  # Unpack the txt file (one tweet id per line, kept as character to avoid
  # losing precision on 64-bit ids)
  curr_tweet_ids <- read.delim(filepath, header = FALSE, sep = "\n", colClasses = "character")
  colnames(curr_tweet_ids) <- "ids"
  tweet_ids <- rbind(tweet_ids, curr_tweet_ids)
}
# We're going to grab 3400 random numbers in the range to get random tweets.
# Ultimately, we'll want 2000 of these to be English tweets
# NOTE(review): the comments above say 3400/2000 but the code below draws
# 4000 indices and stops at 4000 English tweets - confirm which is intended.
# Don't ever change this seed value, it ensures the random indices are reproducible
set.seed(5)
distribution <- runif(n= 4000, min= 0.5, max = length(tweet_ids$ids)+0.499)
distribution <- unique(round(distribution))
# NOTE(review): distribution2 is computed but never used below.
distribution2 <- runif(n= 8000, min= 0.5, max = length(tweet_ids$ids)+0.499)
distribution2 <- unique(round(distribution2))
# For whatever reason it turns our thing into a vector instead of a dataframe but whatever
# (single-column data.frame subsetting drops to a vector by default)
sampled_tweets <- tweet_ids[distribution,]
# Now filter out the ones that aren't English
english_tweets <- as.data.frame(NULL)
all_tweets <- as.data.frame(NULL)
# This is just to make the loop more efficient
english_tweet_tally <- 0
for(i in 1:length(sampled_tweets)){
  # Just to track your progress (prints every 100th tally value)
  if(english_tweet_tally%%100==0){
    print(english_tweet_tally)
    print(i)
  }
  # Grabs the whole tweet with its metadata via the Twitter API
  curr_tweet <- lookup_tweets(sampled_tweets[i], parse = TRUE, token = NULL)
  # This is the part of the tweet we actually care about
  curr_tweet_tib <- tibble(
    date = c(as.character(curr_tweet$created_at)),
    language = c(curr_tweet$lang),
    text = c(curr_tweet$text)
  )
  # Just if anything weird happens, so you can see what things you might've skipped over
  all_tweets <- rbind(all_tweets, curr_tweet_tib)
  # Check language (also skips ids the API returned nothing for)
  if(nrow(curr_tweet)==0 || curr_tweet$lang != "en"){
    next
  }
  # Adds English tweets to database
  english_tweets <- rbind(english_tweets, curr_tweet_tib)
  # Once we've got 2000 tweets just stop
  # NOTE(review): the break condition below is 4000, not 2000.
  english_tweet_tally <- english_tweet_tally + 1
  if(english_tweet_tally == 4000){
    break
  }
  # Deal with rate-limiting: pause 15 minutes every 800 lookups
  if(i%% 800 == 0){
    print("pausing...")
    Sys.sleep(15*60)
  }
}
View(english_tweets)
# NOTE(review): the objects below (tweets_0308_extended, tweets_0301, ...,
# tweets_0308_incomplete) are not created in this script - they must already
# exist in the workspace or these lines will error.
tweets_0308_extended <- unique(rbind(tweets_0308_extended, english_tweets))
save(tweets_0301, tweets_0303, tweets_0304, tweets_0306_incomplete, tweets_0308, file="C:/Users/ddrsq/rscripts/covid_tweets/english_tweets.Rdata")
save(tweets_0308_extended, file="C:/Users/ddrsq/rscripts/covid_tweets/0308_extended.Rdata")
View(tweets_0308_incomplete)
|
5f3fe800cbd45227710fe9cb1f01faaa44d23afd
|
e2f3fee3cb8f1abdee08724f0fe8a89b5756cfbe
|
/COSTdata/man/CEFAS2006cs.Rd
|
d6516cb0a359178711c23c922f168ca4ea00d12c
|
[] |
no_license
|
BackupTheBerlios/cost-project
|
1a88c928f4d99db583a95324b31d6a02d9bd20c9
|
4ab39d16c48f031ca46512545895cb17e5586139
|
refs/heads/master
| 2021-01-21T12:39:53.387734
| 2012-03-26T14:58:36
| 2012-03-26T14:58:36
| 40,071,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,800
|
rd
|
CEFAS2006cs.Rd
|
\name{CEFAS2006cs}
\alias{CEFAS2006cs}
\alias{CEFAS2006ce}
\alias{CEFAS2006cl}
\docType{data}
\title{CEFAS sampling, landings and effort data}
\description{
CEFAS commercial sampling, landings and effort data in the COST data exchange format.
\cr Mixed species data for 2006.
}
\usage{
data(CEFAS2006cs)
data(CEFAS2006ce)
data(CEFAS2006cl)
}
\format{
Formal class 'csData' [package "COSTcore"] with 6 slots: \code{desc}, \code{tr}, \code{hh}, \code{sl}, \code{hl} and \code{ca}.
Formal class 'ceData' [package "COSTcore"] with 2 slots: \code{desc} and \code{ce}.
Formal class 'clData' [package "COSTcore"] with 2 slots: \code{desc} and \code{cl}.
}
\details{
The tables below give details on the fields of the Cefas Biological Sampling System (BSS) and Fishing Activity Database (FAD) used in producing market sampling data for the FishFrame-COST data exchange format description 26Feb2008.
Note that:
data for some of the mandatory fields are not stored so may have to be assumed or imputed from other fields
species and gear are currently in local codes and landings value is in pounds sterling, we will look to convert these in a later extraction or within R.
David Maxwell and Peter Robinson, 2 April 2008.
}
\section{Warning }{
Cefas data provided for COST (EU FISH/2006/15 Lot 2)
The data are not official statistics. Please contact the Marine Fisheries Agency if official catch or landings statistics are required.
The data are not to be used outside the current project (EU FISH/2006/15 Lot 2) or stored after the project.
Defra must be acknowledged when the data are used.
Cefas must give permission for any publication involving the data.
}
\source{
CEFAS
}
%\references{
% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(CEFAS2006cs)
}
\keyword{datasets}
|
24dd0e12d96e0d597b17a3b7beff6e0f54301a4e
|
a5895f7ac0ad3f9e892faac18807278d41172bb5
|
/Rscripts/trials/PdPy_Div_Assembly_Bacq0_HNFq1_int.R
|
b67d2c25cd6385ffc05386ea0ff1d14c5c438d49
|
[] |
no_license
|
OscarFHC/PdPy_Div
|
9c149d9020446ff9e446321df9c4b246187dfe32
|
a373aa152ade2bcba957b564e5c2a58f3b3794c9
|
refs/heads/master
| 2021-12-15T07:31:23.671225
| 2021-12-02T10:20:27
| 2021-12-02T10:20:27
| 202,524,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,776
|
r
|
PdPy_Div_Assembly_Bacq0_HNFq1_int.R
|
###############################################################################################
##### Loading packages ########################################################################
###############################################################################################
# Install (if missing) and attach every package this analysis needs.
# Packages are attached in the same order as the original if(!require())
# chain so that function masking behaves identically.
# BUG FIX: the original parallel branch installed "vegan" instead of
# "parallel" (copy-paste error); the loop below installs the package it is
# actually checking for. (Note: iNEXT can alternatively be installed from
# github via devtools::install_github('JohnsonHsieh/iNEXT').)
pkgs <- c("rmarkdown", "knitr", "tidyverse", "vegan", "ape", "parallel",
          "SpadeR", "iNEXT", "picante", "geiger", "GUniFrac", "phytools",
          "ecodist", "ade4", "lavaan", "semTools", "psych", "plspm", "nlme",
          "piecewiseSEM", "brms", "blavaan", "rstanarm", "rstan", "loo",
          "lmodel2", "abind", "ggplot2", "viridis", "GGally", "cowplot")
for (pkg in pkgs) {
  # requireNamespace() checks availability without attaching; library()
  # then attaches, erroring loudly if installation failed.
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, dependencies = TRUE,
                     repos = "http://cran.us.r-project.org")
  }
  library(pkg, character.only = TRUE)
}
# Stan configuration: use all cores and cache compiled models.
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
###############################################################################################
##### Loading packages ########################################################################
###############################################################################################
###############################################################################################
##### Loading data ############################################################################
###############################################################################################
# Load community tables from GitHub. The CSVs are stored as taxa-by-site,
# so t() converts them to the site-by-taxa orientation used downstream.
# Bacterial (16S) community, relative abundances, and neighbor-joining tree.
Bac_comm <- as.data.frame(t(read.table(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/16s_seqXst.csv",
                                       sep = ",", header = TRUE, row.names = 1, stringsAsFactors = FALSE, fill = TRUE)))
Bac_ra_comm <- Bac_comm / rowSums(Bac_comm)  # row-normalize to relative abundance
Bac_phylo<- read.tree(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/treeNJ_16s.tree")
# Nanoflagellate (18S, PR2-annotated) community, relative abundances, tree.
NF_comm <- as.data.frame(t(read.table(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/prot_seqXst_PR2.csv",
                                      sep = ",", header = TRUE, row.names = 1, stringsAsFactors = FALSE, fill = TRUE)))
NF_ra_comm <- NF_comm / rowSums(NF_comm)
NF_phylo<- read.tree(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/prot_treeNJ_PR2.tree")
# Heterotrophic nanoflagellate (HNF) subset: community, relative abundances, tree.
HNF_comm <- as.data.frame(t(read.table(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/HNF_seqXst_PR2.csv",
                                       sep = ",", header = TRUE, row.names = 1, stringsAsFactors = FALSE, fill = TRUE)))
HNF_ra_comm <- HNF_comm / rowSums(HNF_comm)
HNF_phylo<- read.tree(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/HNF_treeNJ_PR2.tree")
# Per-sample environmental/biological variables (temperature, nutrients, ...).
Vars <- read.table(file = "https://raw.githubusercontent.com/OscarFHC/PdPy_Div/master/data/sECS_Vars.csv", sep = ",",
                   header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
###############################################################################################
##### Loading data ############################################################################
###############################################################################################
###############################################################################################
##### Loading nulls ###########################################################################
###############################################################################################
# Load pre-computed null distributions (from local disk) and convert the
# observed MNTD / Chao dissimilarity values into standardized effect sizes:
# SES = (observed - null mean) / null SD, with a normal-approximation p-value.
# Columns other than obs/Var1/Var2 hold the individual null replicates.
Bac_MNTD_null <- read.table(file = "D:/Research/PdPy_Div_Results/Bac_MNTD_null.csv", sep = ",",
                            header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
# Bacterial phylogenetic selection strength (two-sided p via -abs(SES)).
Bac_MNTD <- Bac_MNTD_null %>%
  select(c(obs, Var1, Var2)) %>%
  mutate(MNTD_null_mean = apply(Bac_MNTD_null[, !names(Bac_MNTD_null) %in% c("obs", "Var1", "Var2")], 1, mean),
         MNTD_null_sd = apply(Bac_MNTD_null[, !names(Bac_MNTD_null) %in% c("obs", "Var1", "Var2")], 1, sd),
         Bac_select_strength = (obs - MNTD_null_mean) / MNTD_null_sd,
         Bac_select_p = pnorm(-abs(Bac_select_strength), 0, 1))
Bac_Chao_null <- read.table(file = "D:/Research/PdPy_Div_Results/Bac_Chao_null.csv", sep = ",",
                            header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
# Bacterial dispersal (beta-diversity) strength; one-sided lower-tail p.
Bac_BDiv_Chao <- Bac_Chao_null %>%
  select(c(obs, Var1, Var2)) %>%
  mutate(Chao_null_mean = apply(Bac_Chao_null[, !names(Bac_Chao_null) %in% c("obs", "Var1", "Var2")], 1, mean),
         Chao_null_sd = apply(Bac_Chao_null[, !names(Bac_Chao_null) %in% c("obs", "Var1", "Var2")], 1, sd),
         Bac_disp_strength = (obs - Chao_null_mean) / Chao_null_sd,
         Bac_disp_p = pnorm(Bac_disp_strength, 0, 1))
# Same two computations for the HNF community (PR2 annotation).
HNF_MNTD_null <- read.table(file = "D:/Research/PdPy_Div_Results/HNF_MNTD_null_PR2.csv", sep = ",",
                            header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
HNF_MNTD <- HNF_MNTD_null %>% select(c(obs, Var1, Var2)) %>%
  mutate(MNTD_null_mean = apply(HNF_MNTD_null[, !names(HNF_MNTD_null) %in% c("obs", "Var1", "Var2")], 1, mean),
         MNTD_null_sd = apply(HNF_MNTD_null[, !names(HNF_MNTD_null) %in% c("obs", "Var1", "Var2")], 1, sd),
         HNF_select_strength = (obs - MNTD_null_mean) / MNTD_null_sd,
         HNF_select_p = pnorm(-abs(HNF_select_strength), 0, 1))
HNF_Chao_null <- read.table(file = "D:/Research/PdPy_Div_Results/HNF_Chao_null_PR2.csv", sep = ",",
                            header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
HNF_BDiv_Chao <- HNF_Chao_null %>%
  select(c(obs, Var1, Var2)) %>%
  mutate(Chao_null_mean = apply(HNF_Chao_null[, !names(HNF_Chao_null) %in% c("obs", "Var1", "Var2")], 1, mean),
         Chao_null_sd = apply(HNF_Chao_null[, !names(HNF_Chao_null) %in% c("obs", "Var1", "Var2")], 1, sd),
         HNF_disp_strength = (obs - Chao_null_mean) / Chao_null_sd,
         HNF_disp_p = pnorm(HNF_disp_strength, 0, 1))
###############################################################################################
##### Loading nulls ###########################################################################
###############################################################################################
###############################################################################################
##### Loading functions #######################################################################
###############################################################################################
# Panel function for GGally::ggpairs (upper triangle): prints the pairwise
# correlation coefficient, optionally annotated with significance stars,
# centered at the mean of the panel's data.
#
# data, mapping: standard ggpairs panel arguments.
# method: correlation type passed to cor.test().
# ndp:    decimal places for the displayed coefficient.
# sz:     retained for interface compatibility (label size is fixed at 6,
#         as in the original; the sz-scaled size is disabled).
# stars:  if TRUE, append "***"/"**"/"*" by p-value thresholds.
cor_fun <- function(data, mapping, method="pearson", ndp=2, sz=5, stars=TRUE, ...){
  xs <- eval_data_col(data, mapping$x)
  ys <- eval_data_col(data, mapping$y)
  test <- cor.test(xs, ys, method = method)
  coef_est <- test$estimate
  label_size <- 6  # fixed; sz is intentionally unused
  lbl <- if (stars) {
    sig <- c("***", "**", "*", "")[findInterval(test$p.value, c(0, 0.001, 0.01, 0.05, 1))]
    paste0(round(coef_est, ndp), sig)
  } else {
    round(coef_est, ndp)
  }
  ggplot(data = data, mapping = mapping) +
    annotate("text", x = mean(xs, na.rm = TRUE), y = mean(ys, na.rm = TRUE),
             label = lbl, size = label_size, ...) +
    theme(panel.grid = element_blank())
}
# Panel function for GGally::ggpairs (lower triangle): scatterplot with a
# loess smoother (red) and a linear fit (blue) overlaid; extra arguments
# are forwarded to both geom_smooth() layers.
fit_fun <- function(data, mapping, ...){
  base_plot <- ggplot(data = data, mapping = mapping) + geom_point()
  smoothers <- list(
    geom_smooth(method = loess, fill = "red", color = "red", ...),
    geom_smooth(method = lm, fill = "blue", color = "blue", ...)
  )
  base_plot + smoothers
}
###############################################################################################
##### Loading functions #######################################################################
###############################################################################################
###############################################################################################
##### Alpha level analyses ####################################################################
###############################################################################################
##### Prepping data ##########
# Asymptotic alpha-diversity estimates (Hill numbers q=0,1,2) per site via
# iNEXT, extrapolated beyond the largest observed sample size.
Bac_A <- iNEXT(t(Bac_comm), q = 0, datatype = "abundance", size = max(colSums(Bac_comm)) + 100000)$AsyEst %>%
  select(Site, Diversity, Estimator) %>%
  spread(Diversity, Estimator) %>%
  rename(Bac_SR = "Species richness", Bac_Shannon = "Shannon diversity", Bac_Simpson = "Simpson diversity") %>%
  mutate(Site = rownames(Bac_comm))
HNF_A <- iNEXT(t(HNF_comm), q = 0, datatype = "abundance", size = max(colSums(HNF_comm)) + 100000)$AsyEst %>%
  select(Site, Diversity, Estimator) %>%
  spread(Diversity, Estimator) %>%
  rename(HNF_SR = "Species richness", HNF_Shannon = "Shannon diversity", HNF_Simpson = "Simpson diversity") %>%
  mutate(Site = rownames(HNF_comm))
# Per-site mean selection strength, keeping only pairs whose SES is
# significant (p < 0.05) against the null.
Bac_selec <- Bac_MNTD %>%
  filter(Bac_select_p < 0.05) %>%
  group_by(Var2) %>%
  summarize(Bac_select = mean(Bac_select_strength, na.rm = TRUE))
HNF_selec <- HNF_MNTD %>%
  filter(HNF_select_p < 0.05) %>%
  group_by(Var2) %>%
  summarize(HNF_select = mean(HNF_select_strength, na.rm = TRUE))
# Master per-site table: selection strengths + diversity estimates +
# environmental variables, with log transforms (a 0.0001 pseudo-count is
# added where zeros can occur, e.g. nutrients and chlorophyll).
HNF_Bac_A <- Bac_selec %>%
  inner_join(HNF_selec, by = c("Var2" = "Var2")) %>%
  inner_join(Bac_A, by = c("Var2" = "Site")) %>%
  inner_join(HNF_A, by = c("Var2" = "Site")) %>%
  inner_join(Vars, by = c("Var2" = "SampleID")) %>%
  filter(!is.na(NF_Biom)) %>%
  mutate(ln.Bac_SR = log(Bac_SR),
         ln.HNF_SR = log(HNF_SR),
         ln.Bac_Simpson = log(Bac_Simpson),
         ln.HNF_Simpson = log(HNF_Simpson),
         ln.Bac_Shannon = log(Bac_Shannon),
         ln.HNF_Shannon = log(HNF_Shannon),
         ln.Bac_Biom = log(Bac_Biom),
         ln.HNF_Biom = log(HNF_Biom),
         ln.Temp = log(Temp),
         ln.Sal = log(Sal),
         ln.PAR = log(PAR),
         ln.NO2 = log(NO2 + 0.0001),
         ln.NO3 = log(NO3 + 0.0001),
         ln.DIN = log(DIN + 0.0001),
         ln.PO3 = log(PO3 + 0.0001),
         ln.Chla = log(Chla + 0.0001))
HNF_Bac_A <- as.data.frame(HNF_Bac_A)
head(HNF_Bac_A)
##### Prepping data ##########
##### exploratory factor analyses on environmental data ##########
### Pairwise correlation/scatter matrix of the raw environmental variables.
p_Envi_pairs <- HNF_Bac_A %>%
  ggpairs(columns = c("Temp", "Sal", "PAR", "NO2", "NO3", "DIN", "PO3", "Chla"),
          columnLabels = c("Temperature", "Salinity", "PAR", "Nitrite", "Nitrate", "TN", "TP", "Chla"),
          upper = list(continuous = cor_fun),
          lower = list(continuous = fit_fun)) +
  theme(strip.text.x = element_text(color = "black", size = 14),
        strip.text.y = element_text(angle = 45, color = "black", size = 14))
p_Envi_pairs
# ggsave(p_Envi_pairs, file = "D:/Research/PdPy_Div_Results/p_Envi_pairs.jpeg",
#        dpi = 600, width = 34, height = 28, units = "cm")
# EFA on a subset of variables (NO2/NO3 dropped; DIN retained instead).
Envi <- HNF_Bac_A[, c("Temp", "Sal", "PAR", "DIN", "PO3", "Chla")] #, "NO2", "NO3"
# Parallel analysis (scree) to suggest the number of factors, then ML
# factor solutions with varimax rotation for 1-5 factors, for comparison.
fa <- fa.parallel(Envi, fm = "mle", fa = 'fa')
fa1 <- fa(Envi, nfactors = 1, rotate = "varimax", fm = "mle", n.iter = 1000)
print(fa1)
fa2 <- fa(Envi, nfactors = 2, rotate = "varimax", fm = "mle", n.iter = 1000)
print(fa2)
fa3 <- fa(Envi, nfactors = 3, rotate = "varimax", fm = "mle", n.iter = 1000)
print(fa3)
fa4 <- fa(Envi, nfactors = 4, rotate = "varimax", fm = "mle", n.iter = 1000)
print(fa4)
fa5 <- fa(Envi, nfactors = 5, rotate = "varimax", fm = "mle", n.iter = 1000)
print(fa5)
# Diagram of the 3-factor solution (variable-to-factor loadings).
fa.diagram(fa3)
##### exploratory factor analyses on environmental data ##########
##### Pair-wise plot of bio-variables ##########
# Full correlation/scatter matrix of the (log-transformed) diversity,
# biomass and selection variables.
p_Adiv_pairs <- HNF_Bac_A %>%
  ggpairs(columns = c("ln.Bac_SR", "ln.HNF_SR", "ln.Bac_Shannon", "ln.HNF_Shannon", "ln.Bac_Simpson", "ln.HNF_Simpson",
                      "ln.Bac_Biom", "ln.HNF_Biom", "Bac_select", "HNF_select"),
          columnLabels = c("Bacteria\nspecies\nrichness", "HNF\nspecies\nrichness",
                           "Bacteria\nShannon\ndiversity", "HNF\nShannon\ndiversity",
                           "Bacteria\nSimpson\ndiversity", "HNF\nSimpson\ndiversity",
                           "log(Bacteria\nbiomass)", "log(HNF\nbiomass)", "Bacteria\nselection", "HNF\nselection"),
          #mapping = ggplot2::aes(colour = Cruise),
          upper = list(continuous = cor_fun),
          lower = list(continuous = fit_fun)) +
  theme(strip.text.x = element_text(color = "black", size = 14),
        strip.text.y = element_text(angle = 45, color = "black", size = 14))
p_Adiv_pairs
# ggsave(p_Adiv_pairs, file = "D:/Research/PdPy_Div_Results/p_ADiv_pairs_ln.jpeg",
#        dpi = 600, width = 34, height = 28, units = "cm")
# Zoomed-in version: drop the bacterial Shannon/Simpson columns.
p_Adiv_pairs <- HNF_Bac_A %>%
  ggpairs(columns = c("ln.Bac_SR", "ln.HNF_SR", "ln.HNF_Shannon", "ln.HNF_Simpson",
                      "ln.Bac_Biom", "ln.HNF_Biom", "Bac_select", "HNF_select"),
          columnLabels = c("Bacteria\nspecies\nrichness",
                           "HNF\nspecies\nrichness", "HNF\nShannon\ndiversity", "HNF\nSimpson\ndiversity",
                           "log(Bacteria\nbiomass)", "log(HNF\nbiomass)", "Bacteria\nselection", "HNF\nselection"),
          #mapping = ggplot2::aes(colour = Cruise),
          upper = list(continuous = cor_fun),
          lower = list(continuous = fit_fun)) +
  theme(strip.text.x = element_text(color = "black", size = 14),
        strip.text.y = element_text(angle = 45, color = "black", size = 14))
p_Adiv_pairs
# ggsave(p_Adiv_pairs, file = "D:/Research/PdPy_Div_Results/p_ADiv_pairsZoomIn_ln.jpeg",
#        dpi = 600, width = 34, height = 28, units = "cm")
##### Pair-wise plot of bio-variables ##########
##### Path model analysis : Bac_q0 vs HNF_q1 ##########
### Step 1
# Exhaustive structure search: Bacq0_HNFq1_mod1.0 ... mod1.63 enumerate 64
# candidate directed path structures among bacterial species richness
# (ln.Bac_SR), bacterial biomass (ln.Bac_Biom), HNF biomass (ln.HNF_Biom) and
# HNF Shannon diversity (ln.HNF_Shannon). Each string is a lavaan model
# specification (the "# regressions" lines are lavaan syntax comments inside
# the model strings, not R comments); all models are fitted below and then
# compared by AIC. Models 1.0-1.15 in this block.
Bacq0_HNFq1_mod1.0 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.1 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.2 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.3 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.4 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.5 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.6 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.7 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.8 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.9 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.10 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.11 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.12 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.13 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.HNF_Shannon
'
Bacq0_HNFq1_mod1.14 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
Bacq0_HNFq1_mod1.15 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_Biom
'
# Candidate lavaan path structures, continued: models 1.16-1.40 of the
# 64-model search (see the header comment above model 1.0). The strings are
# lavaan model specifications and must remain byte-exact.
Bacq0_HNFq1_mod1.16 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.17 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.18 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.19 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.20 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.21 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.22 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.23 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.24 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.25 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.26 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.27 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.28 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.29 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.HNF_Biom
'
Bacq0_HNFq1_mod1.30 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.31 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Shannon + ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Shannon ~ ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.32 <- '
# regressions
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.33 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.34 <- '
# regressions
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.35 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.36 <- '
# regressions
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.37 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.38 <- '
# regressions
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.39 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.40 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
# Candidate lavaan path structures, continued: models 1.41-1.63 of the
# 64-model search (see the header comment above model 1.0). The strings are
# lavaan model specifications and must remain byte-exact.
Bacq0_HNFq1_mod1.41 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.42 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.43 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.44 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.45 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR
'
Bacq0_HNFq1_mod1.46 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.47 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom
'
Bacq0_HNFq1_mod1.48 <- '
# regressions
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.49 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.50 <- '
# regressions
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.51 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.52 <- '
# regressions
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
# Model 1.53 is the structure ultimately selected and re-fit after the AIC
# comparison below.
Bacq0_HNFq1_mod1.53 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.54 <- '
# regressions
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.55 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.56 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.57 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.58 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.HNF_Biom ~ ln.Bac_Biom + ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.59 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_Biom
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.60 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.61 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.62 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Biom ~ ln.Bac_SR
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
Bacq0_HNFq1_mod1.63 <- '
# regressions
ln.Bac_SR ~ ln.Bac_Biom + ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom
ln.HNF_Shannon ~ ln.Bac_SR + ln.Bac_Biom + ln.HNF_Biom
'
# Fit all 64 candidate path models (Bacq0_HNFq1_mod1.0 ... mod1.63) with
# lavaan::sem(). The original code spelled out one sem() call per model; this
# loop creates the SAME objects (Bacq0_HNFq1_lavaan1.0 ... 1.63) in the global
# environment via assign(), so all downstream code (AIC table, moreFitIndices,
# summary) is unaffected while 64 copy-paste lines are removed.
# (Pass se = "bootstrap" to sem() here if bootstrap standard errors are wanted,
# as the original commented-out options suggested.)
for (i in 0:63) {
  assign(paste0("Bacq0_HNFq1_lavaan1.", i),
         sem(get(paste0("Bacq0_HNFq1_mod1.", i)), data = HNF_Bac_A))
}
# Collect the AIC of all 64 candidate fits into one table and sort ascending,
# so the best-supported path structures appear first.
# Passing SYMBOLS (not evaluated objects) through do.call() keeps the model
# names as the row names of the returned table -- match.call() inside AIC()
# deparses the symbols exactly as the original 64-argument AIC(...) call did,
# while removing the copy-paste argument list.
fit_names <- paste0("Bacq0_HNFq1_lavaan1.", 0:63)
AICstep1 <- do.call(AIC, lapply(fit_names, as.name))
# Append the model names as a column, then order by AIC (best model first).
AICstep1 <- AICstep1 %>% cbind(row.names(AICstep1)) %>%
  arrange(AIC)
# AICstep1[1:10,]
# Closer comparison of the top candidates from the AIC table:
# semTools::moreFitIndices() reports additional information criteria for each
# competing structure before one is selected.
moreFitIndices(Bacq0_HNFq1_lavaan1.21, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.23, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.29, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.31, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.53, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.55, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.61, fit.measures = "all", nPrior = 1)
moreFitIndices(Bacq0_HNFq1_lavaan1.63, fit.measures = "all", nPrior = 1)
# Re-declare and re-fit the selected structure (model 1.53). This repeats the
# earlier definition verbatim so the final model is explicit at this point in
# the script.
Bacq0_HNFq1_mod1.53 <- '
# regressions
ln.Bac_SR ~ ln.HNF_Biom
ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon
ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom
'
Bacq0_HNFq1_lavaan1.53 <- sem(Bacq0_HNFq1_mod1.53, data = HNF_Bac_A)
summary(Bacq0_HNFq1_lavaan1.53)
### Step 2 : include selection processes as the interaction terms and grouping variables (random effects)
# Interaction terms: bacterial richness x selection strength (betaNTI) on the
# bacterial / HNF community, used as predictors in the piecewise SEMs below.
HNF_Bac_A <- HNF_Bac_A %>%
mutate(Bac_Bac_int = ln.Bac_SR * Bac_select,
Bac_HNF_int = ln.Bac_SR * HNF_select)
HNF_Bac_A <- as.data.frame(HNF_Bac_A)
# Piecewise SEM (piecewiseSEM::psem) of the selected structure, with Cruise
# as the random intercept in every component lme() model; environmental
# covariates enter each regression.
Bacq0_HNFq1_mod2.Cr <- psem(
lme(ln.Bac_SR ~ ln.HNF_Biom + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Cruise, data = HNF_Bac_A),
lme(ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Cruise, data = HNF_Bac_A),
lme(ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom + Bac_Bac_int + Bac_HNF_int + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Cruise, data = HNF_Bac_A),
lme(ln.HNF_Biom ~ ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Cruise, data = HNF_Bac_A),
data = HNF_Bac_A
)
# Same structure with Season as the random intercept, for AIC comparison of
# the grouping variable.
Bacq0_HNFq1_mod2.Season <- psem(
lme(ln.Bac_SR ~ ln.HNF_Biom + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Season, data = HNF_Bac_A),
lme(ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Season, data = HNF_Bac_A),
lme(ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom + Bac_Bac_int + Bac_HNF_int + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Season, data = HNF_Bac_A),
lme(ln.HNF_Biom ~ ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
random = ~ 1 | Season, data = HNF_Bac_A),
data = HNF_Bac_A
)
# Station-grouped variant of the step-2 piecewise SEM.
# BUG FIX: the ln.HNF_Biom component originally used `random = ~ 1 | Season`,
# a copy-paste leftover from Bacq0_HNFq1_mod2.Season above. For the AIC
# comparison between grouping structures (Cruise vs Season vs Station) to be
# meaningful, every component model here must use the same random effect.
Bacq0_HNFq1_mod2.St <- psem(
  lme(ln.Bac_SR ~ ln.HNF_Biom + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
      random = ~ 1 | Station, data = HNF_Bac_A),
  lme(ln.Bac_Biom ~ ln.HNF_Biom + ln.Bac_SR + ln.HNF_Shannon + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
      random = ~ 1 | Station, data = HNF_Bac_A),
  lme(ln.HNF_Shannon ~ ln.Bac_SR + ln.HNF_Biom + Bac_Bac_int + Bac_HNF_int + ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
      random = ~ 1 | Station, data = HNF_Bac_A),
  lme(ln.HNF_Biom ~ ln.Temp + ln.Sal + ln.PAR + ln.DIN + ln.PO3 + ln.Chla,
      random = ~ 1 | Station, data = HNF_Bac_A),
  data = HNF_Bac_A
)
# Compare the three random-effect structures by AIC, then inspect coefficients.
AIC(Bacq0_HNFq1_mod2.Cr)
AIC(Bacq0_HNFq1_mod2.Season)
AIC(Bacq0_HNFq1_mod2.St)
summary(Bacq0_HNFq1_mod2.Cr)
summary(Bacq0_HNFq1_mod2.Season)
summary(Bacq0_HNFq1_mod2.St)
##### Path model analysis : Bac_q0 vs HNF_q1 ##########
##### Plotting HNF-Bac A diversity relationship with selection as color code ##########
# Facet labels: one panel per selection metric (betaNTI on each community).
community.labs <- c("Selection on bacteria community", "Selection on HNF community")
names(community.labs) <- c("Bac_select", "HNF_select")
# Scatter of bacterial richness (q = 0) vs HNF Shannon diversity (q = 1),
# points coloured by betaNTI (viridis scale), faceted by community.
p_ADiv_Select <- HNF_Bac_A %>%
select(Bac_SR, HNF_Shannon, Bac_select, HNF_select) %>%
gather(key = "community", value = "Selection", -c(Bac_SR, HNF_Shannon)) %>%
ggplot() +
geom_point(aes(x = Bac_SR, y = HNF_Shannon, color = Selection), size = 3) +
facet_grid(~ community, labeller = labeller(community = community.labs)) +
scale_colour_viridis(alpha = 0.7) +
labs(x = expression("Bacteria species richness (q = 0)"),
y = expression("HNF Shannon diversity (q = 1)"),
colour = expression(paste("\U03B2", "NTI"))) +
theme(
strip.text.x = element_text(size = 12, face = "bold"),
)
p_ADiv_Select
ggsave(p_ADiv_Select, file = "D:/Research/PdPy_Div_Results/p_ADiv_Bacq0_HNFq1_Select.jpeg",
dpi = 600, width = 34, height = 28, units = "cm")
##### Plotting HNF-Bac A diversity relationship with selection as color code ##########
##### Analyzing ##########
### Univariate relationships
# Selection vs alpha diversity
# Bacterial Shannon diversity vs each selection metric, with both a linear
# (blue) and a GAM smooth (red) overlaid for visual comparison.
p_Bac_Selec <- HNF_Bac_A %>%
select(ln.Bac_Shannon, Bac_select, HNF_select) %>%
gather(Community, Select, -ln.Bac_Shannon) %>%
ggplot(aes(x = Select, y = ln.Bac_Shannon)) +
geom_point(size = 3) +
facet_grid(cols = vars(Community), scales = "free") +
geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
geom_smooth(method = mgcv::gam, formula = y ~ s(x), se = TRUE, color = "red")
#geom_smooth(formula = y ~ x, method = lm, se = TRUE, )
ggsave(p_Bac_Selec, file = "D:/Research/PdPy_Div_Results/p_Bac_Selec.jpeg")
# GAM with both linear and smooth terms for each selection metric.
gam0 <- gam(ln.Bac_Shannon ~ Bac_select + HNF_select + s(Bac_select) + s(HNF_select), data = HNF_Bac_A)
summary(gam0)
# Mixed models comparing random-intercept grouping (Season / Cruise / Station).
# NOTE(review): these use the UNLOGGED Bac_Shannon while gam0 above uses
# ln.Bac_Shannon -- confirm this difference is intentional.
lm0_BacADiv_Shannon_Sea <- lme(Bac_Shannon ~ Bac_select + HNF_select, random = ~1 | Season, data = HNF_Bac_A)
lm0_BacADiv_Shannon_Cr <- lme(Bac_Shannon ~ Bac_select + HNF_select, random = ~1 | Cruise, data = HNF_Bac_A)
lm0_BacADiv_Shannon_St <- lme(Bac_Shannon ~ Bac_select + HNF_select, random = ~1 | Station, data = HNF_Bac_A)
AIC(lm0_BacADiv_Shannon_Sea, lm0_BacADiv_Shannon_Cr, lm0_BacADiv_Shannon_St)
summary(lm0_BacADiv_Shannon_Sea)
###############################################################################################
##### Alpha level analyses ####################################################################
###############################################################################################
|
fa31c1c50efe0894f871e94fc967390cc158a7d0
|
c892b24af15e4ca31a72137b3c9ab1db056dfdcc
|
/R/zzz.R
|
44aba989b1e3ecff2fe2aa4d9746f95577201bd0
|
[] |
no_license
|
melimore86/nestR
|
4ac72ca0856aac03cfa5117aaf66fbfe23ee25c4
|
51b094927fbca2b63e357bb71c9d35501c3af835
|
refs/heads/master
| 2020-08-05T03:59:57.996812
| 2019-08-20T15:58:16
| 2019-08-20T15:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 89
|
r
|
zzz.R
|
# Package load hook: prints a greeting when the nestR package is attached.
#
# @param libname Library path the package was loaded from (unused).
# @param pkgname Name of the package being attached (unused).
.onAttach <- function(libname, pkgname) {
  greeting <- "Welcome to nestR!"
  packageStartupMessage(greeting)
}
|
33d69ebec6ea6268441e31d749aff0294d24d50d
|
bbc3754d8900e36146bf80d3cc98f5c36c450cd2
|
/man/ggplot_summary.Rd
|
6f616057dd04936b83d4111abea20708fee59960
|
[] |
no_license
|
netterie/cantrance
|
e9782750337bffd0476b76bfed627df0dddf6c5f
|
a2b29ed44c6dfd50858be06ec03accb2264b85fa
|
refs/heads/master
| 2021-01-25T07:34:45.788607
| 2015-08-26T02:51:45
| 2015-08-26T02:51:45
| 41,400,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 893
|
rd
|
ggplot_summary.Rd
|
\name{ggplot_summary}
\alias{ggplot_summary}
\title{Plots survival results}
\description{Takes a named list of survival summaries and
graphs them as survival curves using ggplot2}
\usage{ggplot_summary(summ, estimate_column, covar.order = NULL, ys = c(0,
1), ybreaks = seq(0, 1, by = 0.1), graph.title = "")}
\arguments{
\item{summ}{A named list of survival summaries}
\item{estimate_column}{Name of the column containing the point estimate}
\item{covar.order}{The order in which the survival summaries should
appear, specified as a vector of names corresponding
to the names of summ}
\item{ys}{Limits for the y-axis}
\item{ybreaks}{Breaks for the y-axis}
\item{graph.title}{Title of graph}
}
\value{A ggplot2 object}
\author{Jeanette Birnbaum & Leslie Mallinger}
\note{I need to refresh my memory on the format of the
survival summary}
|
9c52f938379cd55bde90766eaaba24aabd992215
|
fa1c8875e84c8ea311e839c0e55409745b21744a
|
/기말대비 연습문제 풀이.R
|
bf0aa95f6f18daf89f6e6afaeeb0d0289fa97fa9
|
[] |
no_license
|
HwayoungYoon/2019-2-Statistical-Programming-R
|
e187cdf023c9da3afb4e1b6e8f81ae6af49df361
|
ab3726a778853df006bfbedb6283be5f67a7603a
|
refs/heads/master
| 2021-02-12T20:32:35.525111
| 2020-03-03T12:18:43
| 2020-03-03T12:18:43
| 244,628,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,757
|
r
|
기말대비 연습문제 풀이.R
|
#6.5
# Chapter 6 exercise 5: correlations and plots on the dataset read from
# C:/RR/c6no5.csv (expects columns including x1, y1, y2).
dia=read.csv("C:/RR/c6no5.csv",header=T)
dia
# NOTE(review): attach() puts the data columns on the search path for the
# rest of the session -- convenient in an exercise, error-prone elsewhere.
attach(dia)
#6.5.a
cor(y1,y2)
#6.5.b
cor(x1,y2)
#6.5.c
# Pearson correlation test with p-value and confidence interval.
cor.test(x1,y2)
#6.5.d
# Pie chart of the proportion of observations with y2 >= 90.
# NOTE(review): table() on a logical lists FALSE before TRUE, so the label
# "y2>=90" is attached to the FALSE count here -- the two names look swapped;
# verify against the data before trusting the chart.
tdia=table(dia$y2>=90)
names(tdia)=c("y2>=90","y2<90")
pie(tdia)
#6.5.e
plot(y1,y2)
#6.5.f
plot(x1,y2)
#6.5.g
# Full correlation matrix of all columns.
cor(dia)
# Chapter 7: one- and two-sample location tests ------------------------------
#7.2
li <- c(25, 16, 44, 62, 36, 58, 38)
#7.2.a
mean(li)
#7.2.b
var(li)
#7.2.c
sd(li)
#7.2.d
boxplot(li)
#7.5
x1 <- c(51, 27, 37, 42, 27, 43, 41, 38, 36, 26, 29, 35)
x2 <- c(36, 20, 22, 36, 18, 32, 22, 21, 23, 31, 20, 30)
#7.5.a
mean(x1); var(x1); sd(x1)
mean(x2); var(x2); sd(x2)
#7.5.b
t.test(x1, mu = 30)
#7.5.c
t.test(x2, mu = 25)
#7.8
# One-sample proportion test: 50 successes out of 200 against p = 0.2.
prop.test(x = 50, n = 200, p = 0.2)
#7.9
xx <- c(3000, 2000)
xn <- c(5500, 3000)
prop.test(xx, xn, alternative = "less")
#7.10
ph1 <- c(14, 15, 16, 13, 12, 17, 15, 13, 16, 13)
ph2 <- c(8, 11, 9, 8, 10, 11, 7, 9, 6, 8, 7, 10)
#7.10.a
mean(ph1); sd(ph1)
#7.10.b
mean(ph2); sd(ph2)
#7.10.c
# F-test for equal variances first, then the pooled-variance t-test.
var.test(ph1, ph2)
t.test(ph1, ph2, var.equal = TRUE)
#7.11
h1 <- c(67, 79, 57, 66, 71, 78)
h2 <- c(42, 61, 64, 76, 45, 58)
var.test(h1, h2)
t.test(h1, h2, var.equal = TRUE)
#7.12
b1 <- c(2.1, 5.0, 1.4, 4.6, 3.0, 4.3, 3.2)
b2 <- c(1.9, 0.5, 2.8, 3.1, 2.7, 1.8)
var.test(b1, b2)
t.test(b1, b2, var.equal = TRUE)
#7.14
# Paired comparison of matched scores.
E <- c(90, 88, 78, 65, 78, 60, 89, 73)
F <- c(80, 78, 75, 69, 73, 62, 79, 70)
t.test(E, F, paired = TRUE)
# Chapter 8: chi-squared tests -----------------------------------------------
#8.1
Bg <- matrix(c(30, 30, 10, 10), nrow = 2)
#8.1.b
chisq.test(Bg)
#8.2
Cg <- matrix(c(30, 30, 20, 10), ncol = 2)
#8.2.b
chisq.test(Cg)
#8.5
# Goodness-of-fit: are the daily counts uniform over the 7 weekdays?
day <- c(53, 42, 51, 45, 36, 37, 65)
pd <- rep(1 / 7, 7)
chisq.test(day, p = pd)
#8.6
sb <- matrix(c(60, 5, 10, 20), ncol = 2)
#8.6.a
chisq.test(sb)
#8.6.b
chisq.test(sb)$expected
#8.9
h <- matrix(c(27, 7, 18, 10), ncol = 2)
#8.9.a
h * 100 / sum(h)
#8.9.b
chisq.test(h)$expected
#8.9.c
chisq.test(h)
#8.10
pre <- matrix(c(115, 169, 225, 395, 221, 125), ncol = 2)
chisq.test(pre)
#8.11
co <- matrix(c(24, 32, 54, 20, 30, 60, 50, 20, 15), ncol = 3)
chisq.test(co)
|
de725dec28ebfd1729851d50db54ecf3a5b5ee2d
|
2ba1c86ff2118b2df0b1dfb12f3a28ad7997385a
|
/EMMA200EMMAX+FarmCPU/longtowide.R
|
f936f931829ab7b43324cda223b3d49ad6b2610e
|
[] |
no_license
|
cks2903/FromFabaBaseToGWAS
|
6013403b9eb1d52b279ba9b055754e9b993ea1a2
|
f147e86278b2a6688f693f6c25c288c44b9128d5
|
refs/heads/main
| 2022-12-30T05:46:58.943678
| 2020-10-22T14:12:02
| 2020-10-22T14:12:02
| 306,330,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,430
|
r
|
longtowide.R
|
# Reshape the long-format genotype export from MySQL (one row per sample x SNP)
# into a wide table: one row per SNP, one genotype column per seed lot ("Name").
# load libraries
library(tidyr)
# Load data. `header = FALSE` is spelled out in full: the original `head = F`
# relied on partial argument matching and the reassignable shorthand `F`.
extremelylargetable <- read.csv("genotypes.txt", sep = "\t", header = FALSE)
colnames(extremelylargetable) <- c("GenotypingDate", "GenotypeCall", "Genotyping_Comments", "SNPIdentifier", "ProbeSetID", "Start", "Strand", "Gene", "Annotation", "Chromosome", "Position", "CallRate", "FLD", "HetSO", "HomRO", "GenotypingPlatform", "SNP_Comments", "Name", "AlternativeName", "Donor", "GeographicOrigin", "Maintaining", "Germplasm_Comments")
extremelylargetable <- as.data.frame(extremelylargetable)
# convert table from long to wide
print(paste("This many unique seed lot ids:", length(unique(extremelylargetable$Name)), sep = " "))
print("starting pivot conversion")
# Drop germplasm metadata columns not needed in the wide genotype matrix
# (one subset assignment instead of five individual `<- NULL` statements).
extremelylargetable[c("AlternativeName", "Donor", "GeographicOrigin", "Maintaining", "Germplasm_Comments")] <- NULL
# spread() is superseded by pivot_wider(), but is kept so the column order and
# output of the existing pipeline are unchanged.
wide_DF <- extremelylargetable %>% spread("Name", "GenotypeCall")
head(wide_DF)
# Columns 1-16 are the per-SNP annotation fields; the remainder are genotype
# calls. NOTE(review): this assumes exactly 16 annotation columns come first --
# confirm if the export schema changes.
firsthalf_wide_DF <- wide_DF[, 1:16]
secondhalf_wide_DF <- wide_DF[, 17:ncol(wide_DF)]
# Replace the MySQL NULL marker "\\N". These become the literal strings
# "NA" / "NA/NA" (not real R NA values) for the downstream CSV consumer.
firsthalf_wide_DF[firsthalf_wide_DF == "\\N"] <- "NA"
secondhalf_wide_DF <- data.frame(lapply(secondhalf_wide_DF, as.character), stringsAsFactors = FALSE)
secondhalf_wide_DF[secondhalf_wide_DF == "\\N"] <- "NA/NA"
combined <- cbind(firsthalf_wide_DF, secondhalf_wide_DF)
write.table(combined, "Genotypefile.csv", col.names = TRUE, row.names = FALSE, quote = FALSE, sep = ";")
|
db28ee90bb1594da1ad4986b607d21d87d4beebd
|
3ce2834cbc34f99f3e4b9d062cb296ddb7e24227
|
/LinearModelwithKFold.R
|
2b93afd0f2c07b44da9a2a0a816cd8543313dde1
|
[] |
no_license
|
caitdreis/sys6018-competition-safe-driver
|
7bf9781be312d4b926e92c9bbb73ddc5ff2da160
|
b8fe426f71fb872ae863196e53b03ed1df8bc18f
|
refs/heads/master
| 2021-05-08T07:16:03.041846
| 2017-11-04T01:45:03
| 2017-11-04T01:45:03
| 106,733,620
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,971
|
r
|
LinearModelwithKFold.R
|
#Kaggle Linear Model
#--------------------- Working Directory and Read in Data
# NOTE(review): setwd() with an absolute personal path makes this script
# non-portable; kept so the relative read.csv() below still resolves, but an
# RStudio project (or here::here()) would be preferable.
setwd("~/Dropbox (Personal)/Academic/University of Virginia/Data Science Institute/Fall 2017/SYS 6018/sys6018-competition-safe-driver-master")
train <- read.csv("train.csv")
#--------------------- Packages
library(tidyr)
library(psych)
library(tidyverse)
library(e1071)
library(car)
library(caret) # for cross-validation utilities
# FIX: only install MLmetrics when it is actually missing, instead of
# unconditionally re-installing it on every run of the script.
if (!requireNamespace("MLmetrics", quietly = TRUE)) {
  install.packages("MLmetrics")
}
library(MLmetrics)
#--------------------- Data Cleaning & Imputation
# Count the -1 sentinel codes (the dataset's "missing" marker) per column.
sapply(train, function(col) sum(col == -1))
# ps_car_03_cat has 411,231 missings and ps_car_05_cat has 266,551 missings, so we'll drop these columns.
train <- train[, setdiff(names(train), c("ps_car_03_cat", "ps_car_05_cat"))]
# We will ignore the -1s present in ordinal and categorical variables, on the assumption that keeping
# "-1" as a factor will help our model if missingness is predictive, and it (theoretically) shouldn't
# make much difference if it is not predictive.
# However, we need to deal with significant number of -1s in the continuous variables ps_reg_03 and ps_car_14.
# Mean of each continuous variable computed over its non-missing values only.
ps_reg_03_df <- data.frame(ps_reg_03 = train$ps_reg_03[train$ps_reg_03 != -1])
ps_reg_03_mean <- mean(ps_reg_03_df$ps_reg_03)
# 0.8940473
ps_car_14_df <- data.frame(ps_car_14 = train$ps_car_14[train$ps_car_14 != -1])
ps_car_14_mean <- mean(ps_car_14_df$ps_car_14)
# 0.3746906
# Replace -1s with imputed means
train$ps_reg_03[train$ps_reg_03 == -1] <- ps_reg_03_mean
train$ps_car_14[train$ps_car_14 == -1] <- ps_car_14_mean
#--------------------- Significance Testing of Variables & Simple Linear Regression in Training Set
sig.lm <- aov(target ~., data=train) #testing significance of variables in the training set
summary(sig.lm) #output for only significant variables shown below
# Df Sum Sq Mean Sq F value Pr(>F)
#ps_ind_01 1 7 7.209 206.797 < 2e-16 ***
#ps_ind_05_cat 1 17 17.113 490.917 < 2e-16 ***
#ps_ind_06_bin 1 18 17.769 509.749 < 2e-16 ***
#ps_ind_07_bin 1 11 11.375 326.329 < 2e-16 ***
#ps_ind_08_bin 1 4 4.140 118.764 < 2e-16 ***
#ps_ind_15 1 8 8.407 241.168 < 2e-16 ***
#ps_ind_16_bin 1 8 8.209 235.504 < 2e-16 ***
#ps_ind_17_bin 1 15 15.165 435.051 < 2e-16 ***
#ps_reg_01 1 11 10.685 306.516 < 2e-16 ***
#ps_reg_02 1 9 9.099 261.010 < 2e-16 ***
#ps_car_02_cat 1 5 4.920 141.141 < 2e-16 ***
#ps_car_04_cat 1 5 4.679 134.221 < 2e-16 ***
#ps_car_07_cat 1 16 16.221 465.321 < 2e-16 ***
#ps_car_08_cat 1 4 4.120 118.183 < 2e-16 ***
#ps_car_12 1 3 2.666 76.493 < 2e-16 ***
#ps_car_13 1 6 6.015 172.565 < 2e-16 ***
#Residuals 595157 20747 0.035
# BUG FIX: the original called aov(sig.lm), passing an already-fitted aov
# object back into aov() -- that does not produce an ANOVA table as the
# comment claims. anova() is the function that tabulates a fitted model.
anova_sig.lm <- anova(sig.lm) #creates an ANOVA table to look at significance of the testing
anova_sig.lm #provides the sum of squares across all the columns
#--------------------- Traditional Cross Validation (see K-fold below)
# Simple hold-out validation: 70% of the rows train the model, the remaining
# 30% score it.
set.seed(100)
train_CV <- 0.7
cv_train <- sample(seq_len(nrow(train)), round(train_CV * nrow(train)))
train_sample <- train[cv_train, ]   # training split (70%)
test_sample  <- train[-cv_train, ]  # hold-out split (30%)
# Linear model on the variables flagged as significant above.
line.lm <- lm(
  target ~ ps_car_01_cat + ps_car_02_cat + ps_car_04_cat + ps_car_07_cat +
    ps_car_09_cat + ps_car_13 + ps_ind_04_cat + ps_ind_05_cat + ps_ind_15 +
    ps_ind_16_bin + ps_ind_17_bin + ps_reg_01,
  data = train_sample
)
summary(line.lm)
# Original run: Residual standard error 0.1865 on 416635 df;
# Multiple R-squared 0.006774, Adjusted R-squared 0.006745;
# F-statistic 236.8 on 12 and 416635 DF, p-value < 2.2e-16.
# Most coefficients were highly significant; see summary() output for the
# full table.
# Score the hold-out split and report the sum of squared errors.
pred_test <- predict(line.lm, test_sample)
mse1 <- sum((test_sample$target - pred_test)^2); mse1 #6275.981
#--------------------- Linear models with K-fold cross validation set at 10
# Three caret models, each tuned with 10-fold CV. NOTE: the "MSE" checks below
# are computed on the training data (in-sample), so they are optimistic.
#boosted linear model
bstlm.model <- train(target ~ ps_car_01_cat +
                       ps_car_02_cat+
                       ps_car_04_cat+
                       ps_car_07_cat+
                       ps_car_09_cat+
                       ps_car_11_cat+
                       ps_car_13+
                       ps_ind_04_cat+
                       ps_ind_05_cat+
                       ps_ind_15+
                       ps_ind_16_bin+
                       ps_ind_17_bin+
                       ps_reg_01, train, method = "BstLm", #tuning mstop (# Boosting Iterations)
                     # or nu (Shrinkage)
                     trControl = trainControl(method = "cv", number = 10, #can iterate over best kfold number
                                              verboseIter = TRUE))
summary(bstlm.model)
# In-sample predictions and squared-error total for the boosted linear model.
pred_test1 <- predict(bstlm.model, train) #predict based on the linear model in the testing data
mse2 <- sum((train$target-pred_test1)^2); mse2 #20806.14
#Generalized boosted linear model
train$target <- factor(train$target) #need to make target a factor with 2 levels
glmboost.model <- train(target ~ ps_car_01_cat +
                          ps_car_02_cat+
                          ps_car_04_cat+
                          ps_car_07_cat+
                          ps_car_09_cat+
                          ps_car_11_cat+
                          ps_car_13+
                          ps_ind_04_cat+
                          ps_ind_05_cat+
                          ps_ind_15+
                          ps_ind_16_bin+
                          ps_ind_17_bin+
                          ps_reg_01, train, method='glmboost', #could tune with mtry or #Randomly Selected Predictors
                        trControl = trainControl(method = "cv", number = 10,
                                                 verboseIter = TRUE))
summary(glmboost.model)
# Class predictions (factor) from the boosted GLM; no MSE computed here
# because the outcome is now a factor.
pred_test3 <- predict(glmboost.model, train) #predict based on the linear model in the testing data
#Regularized Logistic Regression, takes significant time
reg.model <- train(target ~ ps_car_01_cat +
                     ps_car_02_cat+
                     ps_car_04_cat+
                     ps_car_07_cat+
                     ps_car_09_cat+
                     ps_car_11_cat+
                     ps_car_13+
                     ps_ind_04_cat+
                     ps_ind_05_cat+
                     ps_ind_15+
                     ps_ind_16_bin+
                     ps_ind_17_bin+
                     ps_reg_01, train, method='regLogistic',
                   trControl = trainControl(method = "cv", number = 10,
                                            verboseIter = TRUE))
summary(reg.model)
pred_test4 <- predict(reg.model, train) #predict based on the linear model in the testing data
# FIX: this line previously reused pred_test1 (the BstLm predictions) instead
# of pred_test4, and subtracted a factor from a numeric vector. Decode both
# factors back to their 0/1 values before computing the squared-error total.
mse4 <- sum((as.numeric(as.character(train$target)) -
               as.numeric(as.character(pred_test4)))^2); mse4
#--------------------- Alternative Coding Structure for Linear models with K-fold cross validation set at 10
# Reference Dr. Gerber: unnormalized Gini index from ground truth and
# predicted probabilities.
unnormalized.gini.index <- function(ground.truth, predicted.probabilities) {
  # Both vectors must describe the same set of observations.
  if (length(ground.truth) != length(predicted.probabilities))
  {
    stop("Actual and Predicted need to be equal lengths!")}
  n <- length(ground.truth)
  # Rank observations from most to least confident, breaking ties by their
  # original position (same as sorting a table on -prediction, then index).
  ord <- order(-predicted.probabilities, seq_len(n))
  truth.sorted <- ground.truth[ord]
  # Per-row share of all positives captured by the model at each rank ...
  gain.model <- truth.sorted / sum(truth.sorted)
  # ... versus the constant per-row share captured by random guessing.
  gain.random <- 1 / n
  # Area between the model's cumulative gain curve and the diagonal.
  sum(cumsum(gain.model - gain.random)) / n
}
# Normalized Gini index: the model's Gini divided by the best achievable Gini
# (obtained by "predicting" the ground truth itself), so a perfect ranking
# scores 1.
normalized.gini.index <- function(ground.truth, predicted.probabilities) {
  achieved <- unnormalized.gini.index(ground.truth, predicted.probabilities)
  best <- unnormalized.gini.index(ground.truth, ground.truth)
  achieved / best
}
# Logistic regression scored by normalized Gini under K-fold cross validation.
train$target <- factor(train$target) # binomial glm needs a factor/0-1 outcome
K <- 10 #can iterate over number of K folds
# Random permutation of the row indices, cut into K roughly equal folds.
rand_nums <- sample(NROW(train), NROW(train))
splits <- cut(1:NROW(train), K)
output <- lapply(1:K, function(X){
  d <- rand_nums[which(levels(splits)[X] == splits)] # rows of held-out fold X
  # FIX: fit on the K-1 retained folds and validate on the held-out fold d.
  # (The original did the reverse, fitting the model on only 1/K of the data.)
  glm.fit <- glm(target ~ ps_car_13 + ps_reg_03 + ps_car_06_cat + ps_car_14,
                 family = 'binomial',
                 data = train[-d,])
  yhat <- predict.glm(glm.fit, newdata = train[d,], type = 'response')
  # FIX: decode the factor back to its 0/1 values; as.numeric(factor) returns
  # the level codes 1/2, which distorts the Gini weighting.
  normalized.gini.index(as.numeric(as.character(train[d,]$target)), yhat)
})
output #Gini indexes for each of the 10 folds
# Reference output from the original (reversed-fold, level-coded) run:
#[1] 0.1693486
#[2] 0.1685148
#[3] 0.1692306
#[4] 0.1656972
#[5] 0.1640496
#[6] 0.1648028
#[7] 0.168505
#[8] 0.1670531
#[9] 0.1694688
#[10] 0.1712042
#--------------------- Preparing Testing Data
test <- read_csv("test.csv")
# Impute continuous missings (-1) with a mean pooled across train and test.
pool_mean <- function(train_col, test_col) {
  pooled <- c(train_col, test_col)
  mean(pooled[pooled != -1])
}
psreg03_mean <- pool_mean(train$ps_reg_03, test$ps_reg_03)
pscar14_mean <- pool_mean(train$ps_car_14, test$ps_car_14)
# Replace -1s with imputed means
test$ps_reg_03[test$ps_reg_03 == -1] <- psreg03_mean
test$ps_car_14[test$ps_car_14 == -1] <- pscar14_mean
# The two categorical columns with a handful of -1s get mode imputation.
table(test$ps_car_02_cat) # mode is 1
table(test$ps_car_11)     # mode is 3
test$ps_car_02_cat[test$ps_car_02_cat == -1] <- 1
test$ps_car_11[test$ps_car_11 == -1] <- 3
#--------------------- Optimal Model Testing in Test Set
# FIX: `glm.fit` from the cross-validation above was local to the lapply()
# closure, so the bare name here resolved to stats::glm.fit (a function), not
# a fitted model. Refit the chosen specification on the full training data.
final.fit <- glm(target ~ ps_car_13 + ps_reg_03 + ps_car_06_cat + ps_car_14,
                 family = 'binomial', data = train)
preds2 <- predict.glm(final.fit, test, type = "response"); preds2
# Raw prediction dump kept for parity with earlier runs.
write.table(preds2, file = "Kaggle.csv", row.names = FALSE, sep = ";")
# Build the submission frame directly: one `id` column (from the test set) and
# one `target` column (predicted probabilities). FIX: the original CSV
# round-trip then overwrote `id` with the predictions (`preds2$id <- preds2$x`),
# producing an invalid submission file.
preds2 <- data.frame(id = test$id, target = as.numeric(preds2))
preds2 <- preds2[, order(names(preds2))] #sort columns to fit identified order
#write to csv for submission
write.table(preds2, file = "Linear_preds_cd2.csv",
            row.names = FALSE, col.names = TRUE, sep = ",")
|
79b30d96b74df868bd0eb572fe0736713a0eebdd
|
97558a08d71b43814c2e8884d40cd13fed3efab3
|
/man/conv_size.Rd
|
bd0ffe2a8df5a73dbc9108daf8395bebe7c4c1bf
|
[
"MIT"
] |
permissive
|
sambold/bagR
|
3cde66501497d3da08209d34d4cfdc19c9ea5a2f
|
eba539bd5aedcb766921a00183592d3d196d8f18
|
refs/heads/master
| 2020-09-25T08:25:55.671750
| 2020-09-08T20:45:36
| 2020-09-08T20:45:36
| 225,961,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 595
|
rd
|
conv_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conv_size.R
\name{conv_size}
\alias{conv_size}
\title{conv_size: konvertiert Fontsize von Theme in size-Einheit}
\usage{
conv_size(x)
}
\arguments{
\item{x}{Font size (as specified in a ggplot2 theme) to be converted into the size unit used by geoms}
}
\value{
konvertierte Fontsize
}
\description{
Converts a font size given in theme units into the size unit used by
geom-level text (e.g. \code{geom_text}), so that text sized via \code{theme()}
and text sized via geoms appear the same/comparable.
}
\examples{
conv_size(14)
}
|
94cbe27778e000eddd32b5bb4e21037dcff0ac68
|
f515db530e0b9879055ddb951afc1fcf510cafec
|
/R/get_pred.R
|
27559b89c6f9e958a275877bc77838b5d3ed9737
|
[
"MIT"
] |
permissive
|
MiguelRodo/modutils
|
cc65a580c087a3427bb6790d170b0a6d403e631a
|
6c3c34e18e58a84b3c60ecec010d12c1d14d1056
|
refs/heads/master
| 2023-08-17T10:07:20.197874
| 2021-10-04T08:40:12
| 2021-10-04T08:40:12
| 413,340,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,939
|
r
|
get_pred.R
|
# Entire file is wrapped in if(FALSE) so nothing runs on source(); this is
# scratch/development code for a hand-rolled predict() replacement.
if(FALSE){
library(lme4)
library(merTools)
library(tibble)
library(magrittr)
library(tidyr)
library(ggplot2)
library(cowplot)
library(dplyr)
theme_set(theme_cowplot())
# Synthetic test data: n_id subjects, 2 rows each; y depends on x (nonlinear,
# group 1 only), z, and a per-subject random effect y_re, then is shifted and
# scaled into (0, 1] so a Gamma GLMM can be fit.
set.seed(2106); n_id <- 30
test_tbl <- tibble::tibble(id = rep(as.character(1:n_id), each = 2),
                           grp = rep(as.character(1:2), each = n_id),
                           y_re = rep(rnorm(n_id), each = 2),
                           x = rnorm(n_id * 2),
                           z = rnorm(n_id * 2),
                           # NOTE(review): rnorm(20, ...) is recycled against
                           # length-60 columns — presumably rnorm(n_id * 2)
                           # was intended; confirm.
                           y = x^4 * ifelse(grp == "1", 3, 0) + z + y_re + rnorm(20, sd = 0.3)) %>%
  mutate(y = y + abs(min(y)) + 0.001) %>%
  mutate(y = y/max(y + 0.001))
# S3 generic: return the inverse-link function appropriate to a fitted model.
get_link_inv_fn <- function(mod) UseMethod("get_link_inv_fn")
# Linear (mixed) models use the identity link.
get_link_inv_fn.lm <- function(mod) identity
get_link_inv_fn.lmerMod <- function(mod) identity
# Generalized linear mixed models: dispatch on the link recorded in summary().
get_link_inv_fn.glmerMod <- function(mod){
  link <- summary(mod)$link
  if (link == "inverse") return(function(x) 1/x)
  if (link == "log") return(exp)
  if (link == "logit") return(function(x) 1/(1+exp(-x)))
  stop(paste0("link ", link, " not recognised in get_link_inv_fn.glmer"))
}
# S3 generic: pull the model formula out of a model summary object.
get_fml <- function(mod_sum) UseMethod("get_fml")
get_fml.summary.merMod <- function(mod_sum) mod_sum$call$formula
# Extract fixed-effect covariate names from a model summary.
# str_split coerces the formula to character c("~", lhs, rhs); element [[3]]
# is therefore the right-hand side split on "~" or "+" (the bracket-heavy
# patterns are ICU nested character classes, equivalent to [~] and [+]).
get_mod_cov <- function(mod_sum){
  fml <- get_fml(mod_sum = mod_sum)
  fml_comps_rhs <- stringr::str_split(fml, pattern = "[[~]]|[[+]]")[[3]]
  # Drop random-effect terms (anything containing "|") and trim whitespace.
  cov_vec <- purrr::map(fml_comps_rhs, function(x){
    if(stringr::str_detect(x, "[[|]]")) return(NULL)
    stringr::str_trim(x)
  }) %>%
    purrr::compact() %>%
    as.character()
  # remove references to removing or specifying intercept
  cov_vec[!cov_vec %in% c("1", "-1")]
}
# S3 generic: extract the point estimates from a model summary object.
get_coef <- function(mod_sum) UseMethod("get_coef")
# merMod summaries store estimates in the "Estimate" column of coefficients().
get_coef.summary.merMod <- function(mod_sum) {
  coefficients(mod_sum)[, "Estimate"]
}
# lm summaries: return coefficients() unchanged.
get_coef.summary.lm <- function(mod_sum) {
  coefficients(mod_sum)
}
#' @title Get linear predictor
#' Rebuilds the linear predictor X %*% beta "by hand" from a vector of
#' covariate names, a named coefficient vector and a data frame.
#'
#' @param cov Character vector of covariate names (from get_mod_cov()).
#' @param coef Named numeric vector of coefficient estimates (from get_coef()).
#' @param data Data frame holding one column per covariate.
#' @return Numeric vector: the linear predictor for each row of `data`.
get_lp <- function(cov, coef, data){
  # Start from zero and accumulate one term per coefficient.
  lp_vec <- rep(0, nrow(data))
  # Intercept, if the model has one.
  if(any(grepl("Intercept", names(coef)))){
    lp_vec <- lp_vec + coef[[grep("Intercept", names(coef))]]
  }
  for(x in cov){
    # Continuous covariates appear verbatim in names(coef): add value * beta.
    if(x %in% names(coef)){
      if(!x %in% colnames(data)){
        stop(paste0("covariate ", x, " not found in colnames(data) in get_lp"))
      }
      # FIX: exact-name lookup instead of grep(x, ...), which could match
      # other coefficients (e.g. "x" matching "x2") and break `[[`.
      lp_vec <- lp_vec + data[[x]] * coef[[x]]
      # FIX: skip the factor-level handling below; previously the loop fell
      # through and added a spurious term for continuous covariates.
      next
    }
    # Spline terms are not supported. FIX: the previous half-written handler
    # referenced undefined variables and crashed with an obscure error; fail
    # loudly instead. (Pattern matches "ns(" / "bs(".)
    if(grepl("ns\\(|bs\\(", x)){
      stop(paste0("spline term ", x, " is not supported in get_lp"))
    }
    # Otherwise treat x as a factor: coefficient names look like <var><level>.
    cov_ind_vec <- grep(x, names(coef))
    if(length(cov_ind_vec) == 0){
      stop(paste0("covariate ", x, " not found in names(coef) in get_lp"))
    }
    # For each matching coefficient, add beta for rows at that factor level.
    for(cov_ind in cov_ind_vec){
      coef_curr <- names(coef)[cov_ind]  # e.g. "grp2"
      cov_level <- sub(x, "", coef_curr) # e.g. "2" (base sub replaces stringr)
      lp_vec <- lp_vec + coef[[cov_ind]] * (data[[x]] == cov_level)
    }
  }
  lp_vec
}
# Variable roles in test_tbl:
# y - outcome
# x - spline term
# z - extra term
# id - random effect term
# grp - interacts with splines
# Fit three reference models whose predictions get_pred() is checked against.
mod_lm <- lm(y ~ x, data = test_tbl)
mod_lmer <- lme4::lmer(y ~ x + (1|id), data = test_tbl)
# Gamma GLMM; default Gamma link is inverse (handled by get_link_inv_fn).
mod_glmer <- lme4::glmer(y ~ x + grp + (1|id), data = test_tbl,
                         family = 'Gamma')
mod_sum_lmer <- summary(mod_lmer)
# Hand-rolled predict(): rebuilds the linear predictor from the model summary
# and applies the inverse link. Random effects are ignored entirely, so this
# is a population-level ("newdata") prediction.
#
# mod  - fitted model (lm / lmer / glmer)
# data - data frame to predict for; defaults to the model frame
# type - "response" (default) or "link" for the raw linear predictor
get_pred <- function(mod, data = NULL, type = "response"){
  # Model summary supplies both covariate names and estimates.
  mod_sum <- summary(mod)
  covariates <- get_mod_cov(mod_sum = mod_sum) # non-re, non-intercept terms
  estimates <- get_coef(mod_sum = mod_sum)     # fixed-effect coefficients
  # Default to the model frame; coerce matrices so `[[` works column-wise.
  if(is.null(data)) data <- mod@frame
  if(class(data)[1] == 'matrix') data <- as.data.frame(data)
  # Linear predictor, then (optionally) map onto the response scale.
  linear.pred <- get_lp(cov = covariates, coef = estimates, data = data)
  if(type == 'link') return(linear.pred)
  get_link_inv_fn(mod = mod)(linear.pred)
}
# Smoke test: manual predictions for the Gamma GLMM.
get_pred(mod = mod_glmer, data = test_tbl)
# Collect manual and built-in predictions side by side in long format so the
# prediction types can be compared on one plot.
plot_tbl <- test_tbl %>%
  mutate(pred_lm = predict(mod_lm),
         pred_lmer = predict(mod_lmer),
         #pred_lmer_man = get_pred(mod = mod_lmer,
         #                        data = test_tbl),
         pred_glmer_man = get_pred(mod = mod_glmer,
                                   data = test_tbl),
         pred_glmer = predict(mod_glmer, type = 'response'),
         # Built-in predict() pinned to a single id to approximate a
         # population-level prediction for comparison with get_pred().
         pred_glmer_newdata = predict(mod_glmer, newdata = test_tbl %>%
                                        mutate(id = "1"),
                                      type = 'resp')) %>%
  pivot_longer(-(id:y),
               names_to = "type",
               values_to = "pred")
# Visual check of one prediction type against the raw data.
ggplot(plot_tbl %>%
         filter(type == 'pred_glmer_newdata')) +
  geom_point(aes(x, y, col = grp)) +
  geom_line(aes(x, y = pred, col = grp, linetype = type), size = 2, alpha = 0.5) +
  scale_colour_brewer(palette = "Set1")
}
|
9c69bd19fac72d0b973e5e543af8ab464292811b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BioGeoBEARS/examples/lrttest_on_summary_table.Rd.R
|
c0ded77f2502b557efd2d49cb40e00294797d5cc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
lrttest_on_summary_table.Rd.R
|
# Auto-extracted example stub for BioGeoBEARS::lrttest_on_summary_table; the
# help page ships no runnable demo, only this placeholder assignment.
library(BioGeoBEARS)
### Name: lrttest_on_summary_table
### Title: Calculate Likelihood Ratio Test (LRT) results, and add to table
### Aliases: lrttest_on_summary_table
### ** Examples
test=1
|
71c7ea0cf77192e29439b70fb08ae6e918823e8e
|
d1668835b545c49d79c5e37b8b8132c82db14e70
|
/Plot6.R
|
f36075655a1ea5fa30de4b9b36c00a49f287348d
|
[] |
no_license
|
igembitsky/P2.5-Emissions
|
e62328c402b97d6a9be1c3282a9c8f6521dfcc5e
|
3856fd1dd35c1462ab86c5ab08341d61c11f516a
|
refs/heads/master
| 2020-04-13T09:50:42.052904
| 2015-02-21T04:56:28
| 2015-02-21T04:56:28
| 31,111,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,481
|
r
|
Plot6.R
|
## Set working directory (machine-specific; adjust before running elsewhere)
setwd("/Users/Igor/Coursera/ExploratoryAnalysis/2/")
## Read data
NEI <- readRDS("summarySCC_PM25.rds")
## Compare emissions from motor vehicle sources in Baltimore City with emissions
## from motor vehicle sources in Los Angeles County, California (fips == "06037").
## Which city has seen greater changes over time in motor vehicle emissions?
# Motor-vehicle ("ON-ROAD") records for each city.
balt.veh.emit <- NEI[(NEI$fips == "24510") & (NEI$type == "ON-ROAD"),]
la.veh.emit <- NEI[(NEI$fips == "06037") & (NEI$type == "ON-ROAD"),]
# Total emissions per year, per city.
balt.veh.emit.by.year <- aggregate(Emissions ~ year, balt.veh.emit, sum)
la.veh.emit.by.year <- aggregate(Emissions ~ year, la.veh.emit, sum)
# Label and stack the two yearly summaries for faceting.
balt.veh.emit.by.year$city <- "Baltimore"
la.veh.emit.by.year$city <- "Los Angeles"
la.balt.emit <- rbind(balt.veh.emit.by.year, la.veh.emit.by.year)
#ggplot Plot6.PNG to compare emissions from motor vehicles in Baltimore and LA
library(ggplot2)
png(filename = "plot6.png", width = 480, height = 480)
# FIX: `fill = city` must be inside aes(); as a ggplot() argument it was
# silently ignored, so the bars were never coloured by city.
# FIX: wrap in print() so the plot renders even when this script is source()d
# (top-level ggplot objects are only auto-printed at the console/Rscript).
print(
  ggplot(la.balt.emit,
         aes(x = factor(year),
             y = Emissions,
             fill = city)) +
    geom_bar(stat = "identity") +
    ggtitle("Total On-Road Emissions per Year in Baltimore/LA") +
    xlab("Year") +
    ylab("Aggregate On-Road Emissions") +
    facet_grid(. ~ city, scales = "free")
)
dev.off()
615e12650e32f9722dfafffa465473e609806e1e
|
210683b5347b6f584b258f26c7d48ab51a518fe3
|
/man/MatrixPaste.Rd
|
ab695a6e0fa304bf5a21de581aa2d271d8beda84
|
[
"MIT"
] |
permissive
|
statisticsnorway/SSBtools
|
6b95eab7f46c1096cd7d6ee3f61d3898150d49d0
|
aa2728571e0840e1965f3e7ed0f1984c818ca7a1
|
refs/heads/master
| 2023-06-24T02:48:17.178606
| 2023-06-23T08:05:58
| 2023-06-23T08:05:58
| 137,074,899
| 5
| 0
|
Apache-2.0
| 2023-06-23T08:06:00
| 2018-06-12T13:21:36
|
R
|
UTF-8
|
R
| false
| true
| 952
|
rd
|
MatrixPaste.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MatrixPaste.R
\name{MatrixPaste}
\alias{MatrixPaste}
\alias{MatrixPaste1}
\title{Combining columns of a matrix}
\usage{
MatrixPaste(x, sep = "_", forceCharacter = FALSE, stringEmpty = " ")
MatrixPaste1(x, stringEmpty = "1")
}
\arguments{
\item{x}{Matrix or vector}
\item{sep}{String used to combine columns}
\item{forceCharacter}{When FALSE single column input will keep to original class in output.}
\item{stringEmpty}{String used when input is empty (can be set to NULL)}
}
\value{
Character vector or possibly same vector as input
}
\description{
Combining columns of a matrix
}
\details{
Each row in input will be combined to a single string using sep.
}
\examples{
\dontrun{
MatrixPaste(matrix(1:12,3,4))
MatrixPaste(1:5)
MatrixPaste(1:5, forceCharacter=TRUE)
MatrixPaste(matrix(integer(0),3,0))
MatrixPaste(NULL)
}
}
\author{
Øyvind Langsrud
}
\keyword{internal}
|
c7fbadde7738b96a285e459f10b651e93b27c23e
|
57d62d29e15c2c4adaddedaa94dfdb07cf75dea7
|
/R/prettyTable.R
|
7effaecd8d848156209cafd6ba3446fcab7d2e83
|
[] |
no_license
|
cran/admisc
|
c604fc786939c73f9b369ae11e96966ed7c640ef
|
448868ac6bffbff436f45ae43712cc2cadfe0373
|
refs/heads/master
| 2023-07-05T20:06:42.303908
| 2023-06-30T20:30:13
| 2023-06-30T20:30:13
| 183,607,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,297
|
r
|
prettyTable.R
|
# Copyright (c) 2019 - 2023, Adrian Dusa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, in whole or in part, are permitted provided that the
# following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The names of its contributors may NOT be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ADRIAN DUSA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
`prettyTable` <-
function(input) {
    # Pretty-print a matrix for console display: logical entries are shown as
    # "x" (TRUE) / "-" (FALSE), headers and cells are centre-padded so columns
    # line up, and quoting is suppressed in the returned object.
    if (methods::is(input, "QCA_pic")) {
        # Strip the QCA class so plain matrix methods apply.
        class(input) <- "matrix"
    }
    else {
        input <- as.matrix(input)
    }
    if (is.logical(input)) {
        # Convert to display glyphs before any formatting.
        input2 <- input
        input[input2] <- "x"
        input[!input2] <- "-"
    }
    # Ensure there is a header row to measure and format against.
    if (is.null(colnames(input))) colnames(input) <- rep(" ", ncol(input))
    nchars <- nchar(colnames(input))
    # Single-character headers are widened to two so columns never collapse.
    colnames(input)[nchars == 1] <- format(colnames(input)[nchars == 1], width = 2, justify = "centre")
    nchars[nchars == 1] <- 2
    # Centre every data column to its header width; a trailing "lines" column,
    # if present, is left unformatted.
    for (i in seq_len(ncol(input) - as.integer(any(colnames(input) == "lines")))) {
        input[, i] <- format(format(input[, i]), width = nchars[i], justify = "centre")
    }
    # FIX: guard against matrices without row names; the previous
    # unconditional paste() produced a length-1 name vector and errored for
    # unnamed inputs with more than one row.
    if (!is.null(rownames(input))) {
        rownames(input) <- paste(rownames(input), "")
    }
    return(noquote(input))
}
|
d45dc02d4a7071bf04e1b8c613b8b488bd0a8d46
|
f6ef5e5c0722748229da8dc257e48650e27f7576
|
/inst/shiny/ui.R
|
013971cf9e67215dec918f67f24d23461cd1624b
|
[] |
no_license
|
jemus42/gridsampler
|
5bb6e1151c185f2638d434bad248ef939d9d50fc
|
a4a7440dd995524ac7765671b32771eaa6b5cf3c
|
refs/heads/master
| 2020-05-20T12:58:03.642667
| 2016-08-21T16:35:24
| 2016-08-21T16:35:24
| 58,479,108
| 0
| 0
| null | 2016-05-10T17:03:09
| 2016-05-10T17:03:09
| null |
UTF-8
|
R
| false
| false
| 10,219
|
r
|
ui.R
|
#### Shiny UI definition ####
#### Intro Tour using bootstrap-tour library (http://bootstraptour.com/) ####
# Add a tour to GUI to explain the panels and basic steps.
# The tour is defined in www/tour.js
# `header` is injected on every page: the tour JS/CSS assets plus a script
# that rewires the "Tour" navbar entry to restart the tour instead of
# navigating away.
header <- list(tags$script(src = "bootstrap-tour-0.10.3/js/bootstrap-tour.min.js"),
               tags$link(rel = "stylesheet", type = "text/css", href = "style.css"),
               tags$script("var a = $('#ourNavbar a[data-value=\"Tour\"]');
                           a.attr('data-toggle', 'noclass');
                           a.click(function() {
                           tour.restart();
                           console.log('Tour started');
                           });") )
# `footer` loads the tour definition itself.
footer <- tags$script(src = "tour.js") # add tour
#### End Intro Tour components, begin shinyUI ####
# Main page: a navbar with the simulation workspace, an About page, and the
# Tour trigger. FIX: reassignable T/F shortcuts replaced with TRUE/FALSE, and
# the user-facing "Lamda" label typo corrected to "Lambda".
shinyUI(navbarPage(title = gridsampler_version,
                   id = "ourNavbar",
                   inverse = TRUE,
                   theme = shinytheme("flatly"),
                   header = header,
                   footer = footer,
  #### Main tab ####
  tabPanel("Simulate", icon = icon("tasks"),
    wellPanel(
      fluidPage(
        #### Column 1: distribution of attributes per respondent ####
        column(3,
               h3("1. Number of Attributes"),
               desc_col1,
               # Attribute min / max selection
               fluidRow(
                 column(6, numericInput("minimum1", "Minimum",
                                        value = default_attributes_min,
                                        min = 1, max = 100, step = 1, width = "100%")),
                 column(6, numericInput("maximum1", "Maximum",
                                        value = default_attributes_max
                                        , min = 1, max = 100, step = 1, width = "100%"))
               ),
               plotOutput("plot1", height = "300px"),
               checkboxInput("plot1_fixy", "Fix y to [0, 1]", value = FALSE),
               # Manual item probability adjustment
               fluidRow(
                 column(6, numericInput("attribute_num", "No. of Attributes",
                                        value = default_attributes_min,
                                        min = default_attributes_min, max = default_attributes_max, step = 1, width = "100%")
                 ),
                 column(6, numericInput("probability1", "Probability",
                                        value = round(default_attributes_probs[1], 3),
                                        min = 0.001, max = 1, step = 0.001, width = "100%")
                 )),
               # Preset section of column 1
               wellPanel(
                 h4("Probability Presets"),
                 fluidRow(
                   # Selection of probability types in column 1
                   column(6, selectInput("preset_types1", "Type",
                                         choices = c("Uniform", "Normal", "Poisson", "Exponential"),
                                         selected = "Normal", selectize = FALSE)
                   ),
                   column(6,
                          # Show preset arguments depending on distribution selection
                          conditionalPanel("input.preset_types1 == 'Normal'",
                                           numericInput("1_norm_mean", "Mean",
                                                        value = default_attributes_norm_mean,
                                                        step = .1),
                                           numericInput("1_norm_sd", "SD",
                                                        value = default_attributes_norm_sd,
                                                        min = 0.01, step = 0.01)
                          ),
                          conditionalPanel("input.preset_types1 == 'Poisson'",
                                           numericInput("1_pois_lambda", "Lambda",
                                                        value = default_attributes_lambda)
                          ),
                          conditionalPanel("input.preset_types1 == 'Exponential'",
                                           numericInput("1_exp_rate", "Rate",
                                                        value = default_attributes_exp_rate,
                                                        step = 0.01)
                          )
                   )
                 ),
                 fluidRow(
                   # Action button in column 1
                   column(12, actionButton("preset_go1", "Apply Preset", width = "100%"))
                 )
               ) # wellPanel ends here
        ),
        #### Column 2: category probability distribution ####
        column(3,
               h3("2. Probability of Categories"),
               desc_col2,
               # Panel 2 top input controls
               fluidRow(
                 column(12, numericInput("maximum2", "No. of Categories",
                                         value = default_category_count,
                                         min = 1, step = 1, width = "100%"))
               ),
               # Panel 2 plot output
               plotOutput("plot2", height = "300px"),
               checkboxInput("plot2_fixy", "Fix y to [0, 1]", value = FALSE),
               # Panel 2 manual prob adjustment
               fluidRow(
                 column(6, numericInput("category", "Category",
                                        value = 1, min = 1, max = 500,
                                        step = 1, width = "100%")),
                 column(6, numericInput("probability2", "Probability",
                                        value = round(default_category_probs[1], 3),
                                        min = 0, max = 1, step = 0.001, width = "100%"))
               ),
               # Preset section of panel 2
               wellPanel(
                 h4("Probability Presets"),
                 fluidRow(
                   # Selection of probability types in column 2
                   column(6, selectInput("preset_types2", "Type",
                                         choices = c("Uniform", "Exponential", "Linear"),
                                         selected = "Exponential", selectize = FALSE)
                   ),
                   column(6,
                          # Show preset arguments depending on distribution selection
                          conditionalPanel("input.preset_types2 == 'Exponential'",
                                           numericInput("2_exp_rate", "Rate",
                                                        value = default_category_exp_rate,
                                                        step = 0.01)
                          ),
                          conditionalPanel("input.preset_types2 == 'Linear'",
                                           numericInput("2_lin_min", "Minimum",
                                                        value = default_category_lin_min,
                                                        min = 0.001, step = 0.001)
                          )
                   )
                 ),
                 fluidRow(
                   # Action button in column 2
                   column(12, actionButton("preset_go2", "Apply Preset", width = "100%"))
                 )
               ) # wellPanel ends here
        ),
        #### Column 3: simulation controls and result plots ####
        column(6,
               h3("3. Simulation"),
               desc_col3,
               # UI controls in panel 3, top
               fluidRow(
                 column(6, numericInput("sample_size", "Sample Size (N)", value = "100")),
                 column(6, numericInput("run_times", "Simulation Runs (R)", value = "10"))
               ),
               fluidRow(
                 column(6, actionButton("sample_random", "Random Sample", width = "100%")),
                 column(6, actionButton("run_button", "Run", width = "100%"))
               ),
               tags$br(),
               # Plot in panel 1, top
               plotOutput("plot3_1", height = "250px"),
               tags$hr(),
               fluidRow(
                 # Panel 3 bottom input controls, left side
                 column(6,
                        wellPanel(
                          fluidRow(column(12, textInput("sample_size2", "Sample Size (N)",
                                                        value = "10, 20, 30, 40, 50, 60, 70, 80"))),
                          fluidRow(column(12, numericInput("runs_per_sample", "Simulation Runs (R)",
                                                           value = 100, step = 1))),
                          fluidRow(column(12, actionButton("simulate", "Simulate", width = "100%"))
                          ))),
                 # Panel 3 bottom input controls, right side
                 column(6,
                        wellPanel(
                          fluidRow(column(12, textInput("mincount_m", "Minimum Count (M)",
                                                        value = "4, 5, 6"))),
                          fluidRow(column(12, textInput("proportion_k", "Coverage (C)",
                                                        value = "0.9, 0.95, 1"))),
                          fluidRow(column(12, actionButton("redraw", "Redraw", width = "100%"))
                          )))
               ),
               tags$br(),
               # Only show plot if the simulate button has been pressed, show text otherwise
               conditionalPanel("input.simulate == 0",
                                tags$span(class = "help-block", "No simulations run yet!",
                                          tags$br(),
                                          "A plot will appear here after you press “Simulate“.")),
               conditionalPanel("input.simulate > 0", plotOutput("plot3_2", height = "300px")),
               tags$br()
        )
        #### End of column 3 ####
      ))
  ), # End of first tabPanel, "Simulate" tab
  #### About tab ####
  tabPanel("About", icon = icon("question-circle"),
           fluidPage(
             fluidRow(
               # Use a smaller column size for better readability
               column(10, offset = 1,
                      includeHTML("text/index.html")
               )
             )
           )
  ),
  tabPanel("Tour", icon = icon("question-circle") # trigger intro tour
  )
))
|
633bf88803324c78586ca9a35787ee3d1eacbc5d
|
b873d459c9abb805ae9bcf70a012be294a5beb11
|
/ClusterAnalysis_cereals.R
|
22da549c1763b5f190f556d3e05dfc0605796adf
|
[] |
no_license
|
suman083/ImarticusProject2
|
f28e986ef599bc947ba5b590f5b1b179a49f1088
|
671510eef6983b2ce14af414776113bd1a320827
|
refs/heads/master
| 2023-02-17T18:39:26.125074
| 2021-01-17T17:12:55
| 2021-01-17T17:12:55
| 317,057,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
ClusterAnalysis_cereals.R
|
# Hierarchical and k-means cluster analysis of the cereals data.
# NOTE: hard-coded local path; adjust before running elsewhere.
cereals_data <- read.csv("G:/Suman/batch34/cluster analysis exercise/cereals_data.csv", row.names=1, stringsAsFactors=TRUE)
# Keep the four numeric columns used for clustering.
df<-cereals_data[,c(4,5,6,7)]
df
library(cluster)
library(factoextra)
dim(df)
# Drop incomplete rows, then standardise so distances are scale-free.
df<-na.omit(df)
df<-scale(df)
df
#sample of 15 rows for a readable distance heatmap
# FIX: na.omit() above can drop rows, so sampling from the hard-coded 1:77
# could index past the end of df (subscript out of bounds). Sample from the
# actual post-cleaning row count instead.
ss<-sample(seq_len(nrow(df)),15)
df1<-df[ss,]
df1
df1.scaled<-scale(df1)
df1.scaled
#Euclidean Distance between the 15 sampled cereals
dist.euc1_16<-dist(df1.scaled,method = 'euclidean')
dist.euc1_16
round(as.matrix(dist.euc1_16)[1:3, 1:3], 1)
dist.euc1_16
fviz_dist(dist.euc1_16)
# Elbow plot to choose k; the reference line marks the chosen k = 5.
fviz_nbclust(df,kmeans, method = 'wss')+geom_vline(xintercept = 5, linetype=5,col='red')
set.seed(123)
km.res<-kmeans(df,5,nstart = 25)
km.res
km.res$totss
km.res$betweenss
### Cluster plot for the k-means solution.
fviz_cluster(km.res,data = df, palette= c('#2E9FDF', '#00AFBB','#E7B800', '#FC4E07','#999999'),
             ellipse.type = 'euclid', stars.plot=T,
             repel = T,
             ggtheme = theme(),main = 'Cluster Analysis')
# Interactive palette exploration (no effect in batch runs).
?palette
palette.pals()
palette.colors()
##Try more
res.dist<-dist(df,method = 'euclidean')
head(res.dist)
#round(as.matrix(res.dist)[1:3,1:3],1)
##Hierarchical Clustering: (Agglomeration) Linkage Methods
fviz_dist(res.dist)
res.hc<-hclust(d=res.dist, method = 'ward.D2')
fviz_dend(res.hc,cex = 0.5)
fviz_dend(res.hc,k=4,cex=0.5,k_colors =c("#000000","#E69F00","#56B4E9","#009E73","#F0E442","#0072B2","#D55E00","#CC79A7","#999999"),
          color_labels_by_k = T,
          rect = T)
|
5394e64cf196ad8d9b54ef2d7941b6ddec19b411
|
74e89fd67001e7268fb0f7fb55a1b47f76f7b84b
|
/man/hbbrPilotResp.Rd
|
2a01a7c9f8bd346920ac9e62bdc15d98712a1d70
|
[] |
no_license
|
cran/hbbr
|
49591e0deb23c4e495f518507f98fe02ec9531b5
|
39f252c403b158e0fe01fe29be92960d1beeb768
|
refs/heads/master
| 2020-08-27T22:26:12.258942
| 2019-10-25T07:20:02
| 2019-10-25T07:20:02
| 217,504,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,296
|
rd
|
hbbrPilotResp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbbrPilotResponse.R
\docType{data}
\name{hbbrPilotResp}
\alias{hbbrPilotResp}
\title{A list consisting of pilot data and associated discrete choice design information for the HBBR model framework.}
\format{A list consisting of pilot response data from 23 experts and design information
to be used to fit the HBBR model
\describe{
\item{brdta}{A data frame with 23x18 = 414 rows and 15 columns consists of responses from
23 experts each providing tradeoff responses to 18 choice pairs. The 1st column consists of
responders' id. The 2nd column contains binary responses (1 indicating 1st of the choice
pair was selected, 0 indicating 2nd was selected). Remaining 13 columns
contain the design matrix X taking values 0, 1, or -1; a value of 1 or -1 is used to
indicate presence of an attribute level in the 1st choice or in the 2nd choice of the choice
pair, respectively; a value of 0 is used to indicate absence of an attribute in the
choice pair. See Details below for more about the discrete choice experiment that is coded as
design matrix X.}
\item{design}{A list of structure (b, r, bl, rl), where b and r indicate number of benefit
and risk attributes, bl is a vector of integers of size b consisting number of levels within
each benefit attribute; similarly rl is a vector of integers of size r consisting number
of levels within each risk attribute.}
}}
\usage{
data(hbbrPilotResp)
}
\description{
Data from 23 respondents each choosing preference from 18 choice cards.
Choice cards were randomly generated from 108 total choices. For details see article by
Mukhopadhyay, S., et al. "Hierarchical Bayesian Benefit-Risk Modeling
and Assessment Using Choice Based Conjoint." Statistics in Biopharmaceutical
Research 11.1 (2019): 52-60.
}
\details{
The discrete choice experiment (DCE) included 3 benefit
attributes (b=3): overall survival (OS), objective response rate (ORR),
fatigue reduction (FTG);
and 2 risk attributes (r=2): febrile neutropenia (FebNEU) and severe pneumonia (SevPNA).
There were 4 levels for each of the benefit attributes (ORR, OS, and FTG)
(i.e. bl= rep(4,3)) and
3 levels for each of the 2 risk attributes (FebNEU and SevPNA)
(i.e. rl = rep(3,2)).
The DCE produced b*r*(4 choose 2)*(3 choose 2) = 108 distinct non-dominant choice
pairs each with one benefit and one
risk attribute. Panels (questionnaires) were generated with 18 randomly selected
choice pairs per panel from the set of 108 choice pairs.
Since the part-worth of various levels within each attribute are to be measured
relatively to the part-worth of the 1st level of the attribute, columns for
the 1st level of the attributes are not required. Thus, we have
sum(bl)-b + sum(br)-r = 13 columns are needed to
obtain information on the X matrix which are stored as the last 13 columns of brdta.
}
\examples{
data(hbbrPilotResp)
}
\references{
Mukhopadhyay, S. et al. "Hierarchical Bayesian Benefit–Risk
Modeling and Assessment Using Choice Based Conjoint." Statistics in
Biopharmaceutical Research 11.1 (2019): 52-60.
}
\keyword{datasets}
|
0230ca0e2a6459daa06a8f03b94dc3b651addd70
|
025083fb3f57193e94ea887ad8ee16dc41ac275e
|
/man/structure_control.Rd
|
963f2853a6bf9b116f29557156ef3084fc6925f8
|
[] |
no_license
|
bridachristian/DataQualityCheckEuracAlpEnv
|
891671f84e6036c60d7733bfecce3fc9dd50ddc8
|
8941a20caf657cbc66e7f38ef5d553665457feb8
|
refs/heads/master
| 2020-08-26T20:17:15.864913
| 2019-10-23T15:08:38
| 2019-10-23T15:08:38
| 112,318,390
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,383
|
rd
|
structure_control.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/structure_control.R
\name{structure_control}
\alias{structure_control}
\title{This function check if in input file there is empty rows and if a row is shifted in the previous line}
\usage{
structure_control(FILE_PATH, FILE_NAME, DATETIME_HEADER = "TIMESTAMP",
DATETIME_FORMAT = "yyyy-mm-dd HH:MM", DATA_FROM_ROW = 5,
HEADER_ROW_NUMBER = 2)
}
\description{
@param FILE_PATH directory where input file is stored
@param FILE_NAME name of file to read. Admitted files .csv and .dat
@param DATETIME_HEADER header corresponding to datetime
@param DATETIME_FORMAT format of datetime (E.g. "yyyy-mm-dd HH:MM")
@param DATA_FROM_ROW the number of row of the first value
@param HEADER_ROW_NUMBER the number of row of the header
}
\details{
@return a list containing a data.frame of header, a data.frame of column names, a data.frame of data
@export
@examples
structure_control(FILE_PATH = "~/Data/Input/", FILE_NAME = "M4s.dat", DATETIME_HEADER = "TIMESTAMP" , DATETIME_FORMAT = "yyyy-mm-dd HH:MM", DATA_FROM_ROW = 5, HEADER_ROW_NUMBER = 2)
structure_control(FILE_PATH = "Your input file storage", FILE_NAME = "Your data file name", DATETIME_HEADER = "Your datetime header" , DATETIME_FORMAT = "Your datetime format", DATA_FROM_ROW = "The row of your first data value", HEADER_ROW_NUMBER = "The row of your data column names")
}
|
5e93301ca2b47d802edc583e40fd442ed19e2939
|
22ad2af5206643829c70b65a63b331891ed5af8a
|
/2019_Nature_Communications_Chloropicon_primus/Synteny/vsCHROMOSOMES/CONCATENATED/OLUCI.R
|
54e7604e40ed5becc6c0aa2b475822afd2766cfe
|
[
"MIT"
] |
permissive
|
PombertLab/Publication_scripts
|
ef93c8ede6be8ab3d57313ae65eca57478227a1b
|
0cc4c36a5522525e4a9c7853a755e09af1c3b491
|
refs/heads/master
| 2023-06-22T11:13:38.866526
| 2023-06-14T15:02:45
| 2023-06-14T15:02:45
| 56,014,929
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,049
|
r
|
OLUCI.R
|
#!/usr/bin/Rscript
## Draw a row of synteny heatmaps comparing OLUCI chromosomes against other
## genomes. Each "<comparison>.matrix" CSV in the working directory holds the
## pairwise values (first column = row labels, remaining columns = data).
## Output: OLUCI.pdf with all panels side by side and a shared legend.
library(ComplexHeatmap)
library(RColorBrewer)
library(methods)
library(circlize)

## Shared colour ramp over 0-100 from white through yellow/blue to red.
colors <- colorRamp2(c(0, 20, 40, 60, 80, 100),
                     c("white", "yellow", "lightblue", "blue", "magenta", "red"))
ht_global_opt(heatmap_row_names_gp = gpar(fontsize = 1, fontface = "italic"),
              heatmap_column_names_gp = gpar(fontsize = 1),
              heatmap_column_title_gp = gpar(fontsize = 1))

## Build one Heatmap panel from "<name>.matrix".
##   name           : comparison label; also the CSV basename, legend name and
##                    column title
##   width_mm       : panel body width in millimetres
##   show_row_names : display row labels (only wanted on the last panel)
## Echoes the column names and the object class, as the original per-panel
## code did at top level, then returns the Heatmap object.
make_synteny_heatmap <- function(name, width_mm, show_row_names = FALSE) {
    tab <- read.csv(paste0(name, ".matrix"), header = TRUE)
    rownames(tab) <- tab[, 1]
    print(colnames(tab))
    mat <- data.matrix(tab[, 2:ncol(tab)])
    ht <- Heatmap(mat, name = name, width = unit(width_mm, "mm"),
                  show_row_names = show_row_names,
                  cluster_rows = FALSE, cluster_columns = FALSE,
                  rect_gp = gpar(col = "white", lty = 1, lwd = 0.01),
                  column_title = name, col = colors)
    print(class(ht))
    ht
}

## Panel widths in mm; the final OTAURI panel is the only one with row names.
widths <- c(OLUCIvsBATHY = 57, OLUCIvsCCMP = 60, OLUCIvsCOCCO = 87,
            OLUCIvsMCOMMODA = 51, OLUCIvsMPUSI = 60, OLUCIvsOLUCI = 63,
            OLUCIvsOTAURI = 66)
heatmaps <- lapply(names(widths), function(nm)
    make_synteny_heatmap(nm, widths[[nm]],
                         show_row_names = (nm == "OLUCIvsOTAURI")))

pdf(file = "OLUCI.pdf", useDingbats = FALSE, width = 23, height = 5)
## ComplexHeatmap's "+" concatenates panels into a HeatmapList.
ht_list <- Reduce(`+`, heatmaps)
draw(ht_list, gap = unit(0.75, "mm"), heatmap_legend_side = "bottom")
dev.off()
|
986dc2f9ad0c535a8f1d4a2378a56cd0e710ad05
|
401d698eee53b3829ad9bd02579b797270d50253
|
/Analysis/Exp_1/Before_while_after.R
|
338ad2f66f7340e37ddebc7eea36e3cd32c16b70
|
[] |
no_license
|
maxgotts/Rubenstein_Mpala
|
2c98937afcdbdf8770b992eacc54a060dfb03af0
|
25ef37f35b0f6f265e29885e8c5a54bd2d4df714
|
refs/heads/main
| 2023-07-19T23:02:31.025276
| 2021-09-05T20:42:31
| 2021-09-05T20:42:31
| 403,402,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,050
|
r
|
Before_while_after.R
|
## Experiment 1 analysis: compare observations before, during ("while") and
## after a phalanx event (2021-06-14 to 2021-06-19) using ANOVAs on movement,
## habitat and vegetation variables.
## NOTE(review): is.before(), is.after(), is.strictly.*(), duplicate.rows(),
## Anova(), find_replace(), and the objects `df`, `blacklist` and `bushland`
## all come from the sourced mpala.R -- confirm their semantics there.
rm(list=ls())
library(dplyr)
library(lubridate)
library(ggplot2)
source('~/Desktop/MPALA/mpala.R')
# Keep observations before 2021-07-05 and drop blacklisted identifiers
df <- filter(df, is.before(mdy(df$Date),ymd("2021-07-05")), !(Identifier%in%blacklist)) #Species%in%zebra.abbr
df <- duplicate.rows(df)
# Coordinates arrive as text; convert for the arithmetic below
df$Latitude <- as.numeric(df$Latitude)
df$Longitude <- as.numeric(df$Longitude)
# "Origin" = mean position over the pre-experiment period (before 2021-06-14)
mean.lat <- mean(filter(df, is.before(mdy(df$Date),ymd("2021-06-14")))$Latitude)
mean.long <- mean(filter(df, is.before(mdy(df$Date),ymd("2021-06-14")))$Longitude)
# Euclidean distance (in degrees) and bearing from the origin;
# the +1e-12 guards against division by zero when longitudes coincide
df$Distance.from.origin <- sqrt((df$Longitude-mean.long)^2+(df$Latitude-mean.lat)^2)
df$Angle.from.origin <- atan((df$Latitude-mean.lat)/(df$Longitude-mean.long+1e-12))
# Split the data into the three experimental phases
df.before <- filter(df, is.before(mdy(df$Date),ymd("2021-06-14")))
df.while <- filter(df, is.strictly.after(mdy(df$Date),ymd("2021-06-14")), is.strictly.before(mdy(df$Date),ymd("2021-06-19")))
df.after <- filter(df, is.after(mdy(df$Date),ymd("2021-06-19")))
# Label each phase, then recombine into one analysis table
df.before$Exp.1 <- "before"
df.while$Exp.1 <- "while"
df.after$Exp.1 <- "after"
df.exp.1 <- rbind(df.before,df.while,df.after)
# Ensure the response variables used in the ANOVAs below are numeric
df.exp.1$Angle.from.origin <- as.numeric(df.exp.1$Angle.from.origin)
df.exp.1$Distance.from.origin <- as.numeric(df.exp.1$Distance.from.origin)
df.exp.1$Distance.to.water <- as.numeric(df.exp.1$Distance.to.water)
df.exp.1$Distance.secondary <- as.numeric(df.exp.1$Distance.secondary)
df.exp.1$Distance.tertiary <- as.numeric(df.exp.1$Distance.tertiary)
df.exp.1$NDVI <- as.numeric(df.exp.1$NDVI)
df.exp.1$EVI <- as.numeric(df.exp.1$EVI)
df.exp.1$Distance.to.mob <- as.numeric(df.exp.1$Distance.to.mob)
# Display options passed to Anova(): phase ordering, axis labels and titles
GROUP_ITEMS <- function(VARNAME) { return(list(valid=T,order=c("before","while","after"), limits=c("Before phalanx arrival","During phalanx invasion","After phalanx dispersion"), group_name="Phase of experiment", variable_name=VARNAME)) }
# One-way ANOVAs of each response variable against experiment phase
Anova(df.exp.1, "Angle.from.origin", "Exp.1",GROUP_ITEMS("Angle from original position"))
Anova(df.exp.1, "Distance.from.origin", "Exp.1",GROUP_ITEMS("Distance from original position"))
Anova(df.exp.1, "Distance.to.water", "Exp.1",GROUP_ITEMS("Distance from water"))
Anova(df.exp.1, "NDVI", "Exp.1",GROUP_ITEMS("NDVI"))
Anova(df.exp.1, "EVI", "Exp.1",GROUP_ITEMS("EVI"))
Anova(filter(df.exp.1,!is.na(Distance.to.mob)), "Distance.to.mob", "Exp.1",GROUP_ITEMS("Distance to closest mob"))
# Map habitat categories onto approximate fractional tree cover
# (presumably `bushland` holds three habitat levels -- confirm in mpala.R)
df.exp.1$Tree.cover <- find_replace(df.exp.1$Primary.habitat,data.frame(x=bushland,y=c(0.1,0.5,0.9)))
Anova(df.exp.1, "Tree.cover", "Exp.1",GROUP_ITEMS("Primary habitat tree cover"))
df.exp.1$Secondary.tree.cover <- find_replace(df.exp.1$Secondary.habitat,data.frame(x=bushland,y=c(0.1,0.5,0.9)))
Anova(df.exp.1, "Secondary.tree.cover", "Exp.1",GROUP_ITEMS("Secondary habitat tree cover"))
Anova(df.exp.1, "Distance.secondary", "Exp.1",GROUP_ITEMS("Distance to secondary habitat"))
df.exp.1$Tertiary.tree.cover <- find_replace(df.exp.1$Tertiary.habitat,data.frame(x=bushland,y=c(0.1,0.5,0.9)))
Anova(df.exp.1, "Tertiary.tree.cover", "Exp.1",GROUP_ITEMS("Tertiary habitat tree cover"))
Anova(df.exp.1, "Distance.tertiary", "Exp.1",GROUP_ITEMS("Distance to tertiary habitat"))
|
14cec58226e0760ac0e6b36bd73c8402a1ae5ce2
|
0acd7d90e625234ef2d3b8fdbb505bd0d92fbef4
|
/Module_5.R
|
29871216f82f14b9d142e110e07b2ae02a30c9db
|
[] |
no_license
|
geochemica/Working_Repository
|
3e8e94010c9f12d786dc1240190c8e8ef075ad08
|
f6a7f76e022e97ba6140581e1c0914fea9720efe
|
refs/heads/master
| 2021-07-17T04:21:54.872771
| 2017-10-23T02:06:45
| 2017-10-23T02:06:45
| 103,066,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,296
|
r
|
Module_5.R
|
## Course exercise log: trying out the many ways of importing tabular data in R
## (base read.* functions, readr, curl, Dropbox via rdrop2, Google Sheets).
## The student's first-person notes are kept as-is; NOTE(review) comments mark
## likely causes of the reported errors.
rm(list=ls())
#I can't clear the variables#
## --- Base R: local text/CSV files -------------------------------------------
f <- "~/Desktop/CPDS-1960-2014-reduced.txt"
d <- read.table(f, header = TRUE, sep = "\t", stringsAsFactors = FALSE)
d
head(d) # lists the first 6 lines of data
#It's saying that head(d): object 'd' not found
# NOTE(review): 'd' is only missing when the read.table() above failed
# (e.g. the file is not at that path) -- check the file location first.
tail(d) # shows the last 6 lines of data
#It's saying that head(d): object 'd' not found
class(d) # shows that tables are typically loaded as data frames
d <- read.delim(f, header = TRUE, stringsAsFactors = FALSE)
head(d)
f <- "~/Desktop/CPDS-1960-2014-reduced.csv"
d <- read.table(f, header = TRUE, sep = ",", stringsAsFactors = FALSE)
head(d)
#again it's still saying that it cannot locate object 'd'
d <- read.csv(f, header = TRUE, stringsAsFactors = FALSE)
head(d)
#same error message agin about not finding object 'd'
## --- readr: local files -----------------------------------------------------
# NOTE(review): read_tsv() is called here before require(readr) below --
# these calls fail with "could not find function" unless readr was already
# attached in the session.
f <- "~/Desktop/CPDS-1960-2014-reduced.txt"
d <- read_tsv(f, col_names = TRUE) # for tab-separated files
#The files are on the desktop so I don't understand what's wrong. Do I need to change the file path?#
head(d)
class(d)
d <- read_delim(f, delim = "\t", col_names = TRUE)
head(d)
require(readr)
f <- "~/Desktop/CPDS-1960-2014-reduced.csv"
d <- read_csv(f, col_names = TRUE) # for comma-separated files
#The error message about not being able to locate object 'd' continues#
head(d)
d <- read_delim(f, delim = ",", col_names = TRUE)
head(d)
#I DON'T HAVE EXCEL, DO I NEED IT FOR THIS CLASS OR CAN I CONVERT IT?#
#SO I WON'T BE ABLE TO DO THE XLCONNECT PORTION EITHER#
## --- Remote files over HTTP via curl ----------------------------------------
library(curl)
f <- curl("https://raw.githubusercontent.com/fuzzyatelin/fuzzyatelin.github.io/master/AN597_Fall17/CPDS-1960-2014-reduced.csv")
d <- read.csv(f, header = TRUE, sep = ",", stringsAsFactors = FALSE)
head(d)
f <- curl("https://raw.githubusercontent.com/fuzzyatelin/fuzzyatelin.github.io/master/AN597_Fall17/CPDS-1960-2014-reduced.txt")
d <- read.table(f, header = TRUE, sep = "\t", stringsAsFactors = FALSE)
head(d)
## --- Remote files via readr (handles URLs directly) -------------------------
library(readr)
f <- "https://raw.githubusercontent.com/fuzzyatelin/fuzzyatelin.github.io/master/AN597_Fall17/CPDS-1960-2014-reduced.csv"
d <- read_csv(f, col_names = TRUE)
head(d)
f <- "https://raw.githubusercontent.com/fuzzyatelin/fuzzyatelin.github.io/master/AN597_Fall17/CPDS-1960-2014-reduced.txt"
d <- read_tsv(f, col_names = TRUE)
head(d)
## --- Dropbox via rdrop2 (interactive authentication) ------------------------
require(rdrop2)
drop_auth() # opens a browser dialog box to ask for authorization...
drop_dir() # lists the contents of your dropbox folder
f <- "CPDS-1960-2014-reduced.csv" # name of the file to read from
f <- drop_search(f) # searches your dropbox directory for file or directory names; this can be slow
f <- f$path # $path is the location of the results returned above
d <- drop_read_csv(f, header = TRUE, sep = ",", stringsAsFactors = FALSE)
head(d)
str(d)
## --- Dropbox public share link (dl=1 forces a direct download) --------------
link <- "https://www.dropbox.com/s/hft09jnpjepy1a1/CPDS-1960-2014-reduced.csv?dl=0"
link <- gsub(pattern = "dl=0", replacement = "dl=1", x = link)
d <- read.csv(link, header = TRUE, sep = ",", stringsAsFactors = FALSE)
head(d)
str(d)
require(repmis)
d <- source_data(link, header = TRUE, sep = ",")
head(d)
str(d)
#This one did not work and the error message says "Unknown reply."
## --- Google Sheets via googlesheets (interactive authentication) ------------
require(googlesheets)
gs_ls() # gives you the option to log in to a google sheets account and see options for download
get_title("CPDS-1960-2014-reduced") # shows you worksheets from the file to which you have access
d <- gs_read("CPDS-1960-2014-reduced")
head(d)
#This one also came up with the unknown reply message#
## --- Downloading files (rather than reading) from Dropbox/Sheets ------------
filename <- "CPDS-1960-2014-reduced.csv" # name of file to download
f <- drop_search(filename) # searches your dropbox directory for that file or directory name
f <- f$path # $path is the location of the results returned above
drop_get(f, local_file = paste0("~/Desktop/", filename), overwrite = TRUE, progress = TRUE)
# this will save the file to the desktop
filename <- "CPDS-1960-2014-reduced.xlsx"
f <- drop_search(filename) # searches your dropbox directory for file or directory names
f <- f$path # $path is the location of the results returned above
drop_get(f, local_file = paste0("~/Desktop/", filename), overwrite = TRUE, progress = TRUE)
# again, saves to the desktop
gs_title("CPDS-1960-2014-reduced") #identifies the sheet you wish to download
gs_download("CPDS-1960-2014-reduced", to = "CPDS-1960-2014-reduced.xlsx")
# in this case, should save to your working directory
str(d)
|
40b918d3bbf07ca15d84fdf6a663098506ca1467
|
863f12248c5d33c96d06ce2c5fcd6a0f852e606f
|
/run_analysis.R
|
0772829c239f32af24026044949b842eb4e86391
|
[] |
no_license
|
pedelin/Getting-And-Cleaning-Data-Course-Project
|
0226123a0864a90bc06d042edb1fa2de68467b48
|
d125cd8ae79316aaeb809083cbc92ad8a20a168f
|
refs/heads/master
| 2021-01-18T08:12:05.841901
| 2016-09-15T11:29:31
| 2016-09-15T11:29:31
| 68,284,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,229
|
r
|
run_analysis.R
|
library(data.table)
## Getting & Cleaning Data course project:
## download the UCI HAR dataset (when absent), merge the training and test
## sets, keep the mean/std measurements, attach descriptive activity and
## variable names, and write the per-subject/per-activity averages to
## "FinalTidy.txt".

# Remote archive URL
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Local name for the downloaded archive
filename <- "getdata_projectfiles.zip"
# Download and unzip only when the extracted folder is not already present
if(!file.exists("UCI HAR Dataset")){
    download.file(fileUrl,filename)
    unzip(filename)
    # Remove the zip file once extracted
    file.remove(filename)
}
# Read the Train data: measurements (X), activity ids (y), subject ids
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", sep = " ", header = FALSE)
# Read the Test data
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", sep = " ", header = FALSE)

## 1. Merges the training and the test sets to create one data set.
# Column-bind measurements, subject and activity, then row-bind train and test
Total <- rbind(cbind(x_train,subject_train,y_train),cbind(x_test,subject_test,y_test))
# Read the feature and activity lookup tables
features <- read.table("UCI HAR Dataset/features.txt", sep = " ", header = FALSE)
activities <- read.table("UCI HAR Dataset/activity_labels.txt", sep = " ", header = FALSE)
colnames(Total) <- c(as.character(features[,2]),"SubjectID","ActivityID")

## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# "std()" is matched literally (fixed = TRUE): as a regex the "()" would be an
# empty group and the pattern would match any "std".
MeanSTD <- Total[,(grepl("-mean",colnames(Total))|grepl("std()",colnames(Total), fixed = TRUE)|grepl("ActivityID",colnames(Total))|grepl("SubjectID",colnames(Total)))==TRUE]

## 3. Uses descriptive activity names to name the activities in the data set
# Define column names for the activities lookup table
colnames(activities)<-c("ActivityID","Activity")
# Merge the activity labels onto the mean/std data
Activity_MeanSTD <- merge(MeanSTD,activities,by='ActivityID')
# Drop the numeric ActivityID; the descriptive Activity column remains
Activity_MeanSTD$ActivityID<-NULL

## 4. Appropriately labels the data set with descriptive variable names.
# Work on a temporary copy of the column names
columns_tmp <- colnames(Activity_MeanSTD)
# Reshape the names into a consistent "-"-separated CamelCase form
columns_tmp<- gsub("\\()","",columns_tmp)
columns_tmp<- gsub("(-[Mm]ean)","-Mean",columns_tmp)
columns_tmp<- gsub("(-[Ss]td)","-STD",columns_tmp)
columns_tmp<- gsub("([Gg]ravity)","-Gravity",columns_tmp)
columns_tmp<- gsub("([Ff]req)","-Freq",columns_tmp)
columns_tmp<- gsub("([Jj]erk)","-Jerk",columns_tmp)
columns_tmp<- gsub("([Gg]yro)","-Gyro",columns_tmp)
columns_tmp<- gsub("([Aa]cc)","-Acc",columns_tmp)
columns_tmp<- gsub("([Mm]ag)","-Mag",columns_tmp)
columns_tmp<- gsub("([Bb]ody[Bb]ody|[Bb]ody)","-Body",columns_tmp)
# Leading "t"/"f" encode the time and frequency domains
columns_tmp<- gsub("^(t)","Time",columns_tmp)
columns_tmp<- gsub("^(f)","Freq",columns_tmp)
# Apply the cleaned-up column names
colnames(Activity_MeanSTD)<-columns_tmp

## 5. From the data set in step 4, creates a second, independent tidy data set
##    with the average of each variable for each activity and each subject.
FinalTidy <- aggregate(. ~ Activity+SubjectID, Activity_MeanSTD,mean)
# "row.names" spelled out in full (the original relied on partial matching
# of the abbreviated "row.name" argument)
write.table(FinalTidy,file="FinalTidy.txt",row.names=FALSE)
|
49fd0e9d99663ee693d3f75b32c382d1b79487df
|
f501869c8aa8dee755092ce12ed5eeac31658374
|
/AFC/AFD-bac-dataset.R
|
bf3fc42247e91dfcf8d9ab56b3a094414fe99606
|
[
"MIT"
] |
permissive
|
Vergil592/R
|
7d8e65b05fca1646789aa8eb14504bda0b0f363e
|
2c86c8b7fe32ed54a0286b0afa505d01c1ca4170
|
refs/heads/master
| 2020-03-19T22:00:55.625206
| 2018-11-09T13:28:05
| 2018-11-09T13:28:05
| 136,957,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
AFD-bac-dataset.R
|
## AFD: correspondence analysis of the bacteria contingency table read from
## "bacdata.csv" (tab-separated counts).
require("MASS")

counts <- read.csv("bacdata.csv", header = TRUE, sep = "\t")
print(dim(counts))                     # table dimensions, echoed as before

n <- sum(counts)                       # grand total of the table
row_metric <- diag(rowSums(counts))    # D1: diagonal of row margins
col_metric <- diag(colSums(counts))    # D2: diagonal of column margins
M <- solve(col_metric) * n             # chi-square metric (computed, not reused below)

## VM = t(X) D1^-1 X D2^-1 : the matrix whose eigendecomposition gives the
## correspondence-analysis axes.
VM <- t(counts) %*% solve(row_metric) %*% as.matrix(counts) %*% solve(col_metric)
decompo_VM <- eigen(VM)

# Plot the first two factors via MASS::corresp
afc_fit <- corresp(counts, nf = 2)
plot(afc_fit)
|
804784fd28f73affb6628d10b3b5d9769ab35c36
|
af4bc29575bc35732de9f9dffe6e55a336467101
|
/R/timeseries.R
|
a3518bdfd600bc979f6e83a1ebaa5ae8d3648bff
|
[] |
no_license
|
tanvi-arora/timeseries
|
8bc574ac3b2faa431ac566dd839d96830e04dd08
|
170e5a7c050a28533a86107d3b2c125482245c82
|
refs/heads/master
| 2020-03-24T12:49:43.425272
| 2018-07-31T04:08:20
| 2018-07-31T04:08:20
| 142,725,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
timeseries.R
|
#################################
# Script name : timeseries.R
# Author : Tanvi Arora
# Date created : 2018/07/29
# Description : This script contains all code related to timeseries functions
#
# NOTE(review): the "##@knitr <label>" lines are chunk labels consumed by
# knitr::read_chunk() from a calling Rmd document -- keep them exactly as-is.
# ses() and holt() presumably come from the 'forecast' package, and `mt`
# (apparently a max-temperature time series) is defined by the calling
# document -- confirm both before running this file standalone.
#################################
##@knitr ses_maxtemp
# ses() or simple exponential smoothing: forecast the next 5 periods
fit_ses <- ses(mt, h=5)
# Plot the forecast, then overlay the fitted values in blue
plot(fit_ses, plot.conf=FALSE, ylab="Temperature in Celsius", xlab="Year", main="", fcol="white",type="o")
lines(fitted(fit_ses), col="blue", type="o")
##@knitr ses_stats
# summary statistics for the SES model
# AIC
fit_ses$model$aic
# AICC (small-sample corrected AIC)
fit_ses$model$aicc
##@knitr holt_maxtemp
# holt's linear trend method (level smoothing 0.8, trend smoothing 0.2)
fit_holt <- holt(mt, alpha=0.8, beta=0.2, initial="optimal", h=5)
plot(fit_holt, type="o", ylab="Temperature in Celsius", xlab="Year", fcol="white", plot.conf=FALSE, main="Forecast Using Holt's Method")
lines(fitted(fit_holt), col="blue")
##@knitr holt_stats
# Summary statistics for the Holt model
# AIC
fit_holt$model$aic
# AICC
fit_holt$model$aicc
|
a7d8c3247a6aae1ab4dd7d63a9105fb86f995484
|
a0e6041a0b64d2b60cdc5ce6ec127b027309d788
|
/data-raw/scraping.R
|
355814e034a85b49c4a1a835971ae33f7c162adc
|
[] |
no_license
|
davben/arvig
|
88c8593d2bdfba159bca740471b1b19f6834e276
|
18786b90455dd7668713c5cf6c4083d96b07ee50
|
refs/heads/master
| 2020-12-28T14:47:53.406961
| 2018-06-26T13:47:16
| 2018-06-26T13:47:16
| 45,528,107
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,631
|
r
|
scraping.R
|
#' Scrape Events of Anti-Refugee Violence from "Mut gegen rechte Gewalt" Chronicle.
#'
#' This function is built only for one specific purpose:
#' Scraping the website \url{https://www.mut-gegen-rechte-gewalt.de/service/chronik-vorfaelle}
#' for all events a specified year (currently, 2015 and 2016). It retrieves the date, location,
#' bundesland, category, summary and source for each event and returns a data frame.
#'
#' @param years numeric vector of years for which to retrieve data from the chronicle.
#' @return A data frame of events as listed on the website, consisting of columns for date, location, bundesland,
#' category, summary and source.
#' @examples
#' \dontrun{
#' chronicle <- read_chronicle(c(2015, 2016))
#' }
read_chronicle <- function(years) {
  if (!requireNamespace("purrr", quietly = TRUE)) {
    stop("purrr is needed for this function to work. Please install it.",
         call. = FALSE)
  } else if (!requireNamespace("lubridate", quietly = TRUE)) {
    stop("lubridate is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  ## Fetch every requested year and bind the per-year frames together.
  ## All calls are namespace-qualified: requireNamespace() loads but does not
  ## attach, so the original unqualified `%>%`/mutate() only worked when the
  ## user happened to have magrittr/dplyr attached.
  events <- purrr::map_df(years, read_chronicle_year)
  ## Parse the day-month-year date strings into Date objects in place.
  events$date <- lubridate::dmy(events$date)
  events
}
#' Scrape Events of Anti-Refugee Violence in a Given Year.
#'
#' This function scrapes all events in a specified year from the "Mut gegen rechte Gewalt" chronicle.
#' It retrieves the date, location,
#' bundesland, category, summary and source for each event and returns a data frame.
#'
#' @param year numeric value specifying the year for which to retrieve data from the chronicle.
#' Must be 2015 or later (and not in the future).
#' @return A data frame of events from a single year as listed in the chronicle,
#' consisting of columns for date, location, bundesland, category, summary and source.
read_chronicle_year <- function(year) {
  if (!requireNamespace("rvest", quietly = TRUE)) {
    stop("rvest is needed for this function to work. Please install it.",
         call. = FALSE)
  } else if (!requireNamespace("stringi", quietly = TRUE)) {
    stop("stringi is needed for this function to work. Please install it.",
         call. = FALSE)
  }
  current_year <- lubridate::year(Sys.Date())
  if (!(year >= 2015)) stop("Data can only be retrieved from the year 2015 onwards.")
  if (!(year <= current_year)) stop("The chronicle does not contain events from the future.")

  base_page <- paste0("https://mut-gegen-rechte-gewalt.de/service/chronik-vorfaelle?&&field_date_value[value][year]=", year, "&page=0")
  chronicle <- xml2::read_html(base_page)

  ## The pager's "last" link encodes the zero-based index of the final page.
  ## rvest calls are namespace-qualified because requireNamespace() above
  ## loads but does not attach rvest, so unqualified html_node() would fail.
  last_link <- rvest::html_attr(rvest::html_node(chronicle, ".pager-last a"), "href")
  last_page <- as.numeric(gsub("page=", "",
                               stringi::stri_extract(last_link, regex = "page=[0-9]+")))

  page_seq <- 0:last_page
  print(paste0("Retrieving data for the year ", year, " from ", last_page + 1, " pages."))
  ## Scrape every page of the year and bind the results.
  out <- purrr::map_df(page_seq, read_chronicle_page, chronicle_year = year)
  out
}
#' Scrape Individual Chronicle Page for Events of Anti-Refugee Violence in a Given Year.
#'
#' This function scrapes a single page from the "Mut gegen rechte Gewalt" chronicle.
#' It retrieves the date, location,
#' bundesland, category, summary and source for each event and returns a data frame.
#'
#' @param chronicle_year numeric value specifying the year for which to retrieve data from the chronicle.
#' @param page_nr numeric value specifying chronicle page from which to retrieve data.
#' @return A data frame (tibble) of events from a single chronicle page as listed in the chronicle,
#' consisting of columns for date, location, bundesland, category, summary and source.
read_chronicle_page <- function(page_nr, chronicle_year) {
  page <- xml2::read_html(paste0("https://www.mut-gegen-rechte-gewalt.de/service/chronik-vorfaelle?field_date_value[value][year]=", chronicle_year, "&page=", page_nr))

  ## Extract the concatenated text of all nodes matching a CSS selector.
  ## rvest calls are namespace-qualified: requireNamespace() in the callers
  ## loads but does not attach rvest, so unqualified html_nodes() would fail.
  extract_text <- function(css) {
    rvest::html_text(rvest::html_nodes(page, css))
  }

  location <- extract_text(".field-name-field-city")
  state <- extract_text(".field-name-field-bundesland")
  ## Each chronicle entry's source footnote is cleaned per node by the
  ## helper clean_source() (defined elsewhere in the package).
  source_raw <- rvest::html_nodes(page, ".node-chronik-eintrag")
  source <- purrr::map_chr(source_raw, clean_source)
  date <- extract_text(".field-name-field-date")
  category_de <- extract_text(".field-name-field-art")
  description <- extract_text(".field-type-text-with-summary")

  result <- tibble::tibble(date, location, state, category_de, description, source)
  ## Strip leading/trailing whitespace from every (character) column;
  ## this replaces the deprecated mutate_all(funs(...)) construct.
  result[] <- lapply(result, function(x) gsub("^\\s+|\\s+$", "", x))
  ## Return the tibble visibly (the original ended on an assignment and
  ## therefore returned it invisibly).
  result
}
|
5dc1e64fb751b9f5406829eb002decef72f8ae7e
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/tools/R/check.R
|
38bd2fa4691682e90dfd9d96663a9402dd21ce4a
|
[
"Artistic-2.0",
"GPL-2.0-or-later",
"LGPL-2.0-or-later",
"Artistic-1.0",
"CECILL-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"GPL-2.0-only",
"BSL-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 267,019
|
r
|
check.R
|
# File src/library/tools/R/check.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2018 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
###- R based engine for R CMD check
## R developers can use this to debug the function by running it
## directly as tools:::.check_packages(args), where the args should
## be what commandArgs(TRUE) would return, that is a character vector
## of (space-delimited) terms that would be passed to R CMD check.
get_timeout <- function(tlim)
{
    ## Normalize a timeout specification to seconds.
    ## Character values may carry a trailing unit letter as GNU timeout does:
    ## "m" (minutes), "h" (hours) or "s" (seconds); anything else is taken
    ## as a plain number. Invalid or negative values map to 0 (no limit).
    if (is.character(tlim)) {
        unit <- substring(tlim, nchar(tlim))
        scale <- switch(unit, m = 60, h = 3600, s = 1)
        if (!is.null(scale))
            tlim <- scale * as.numeric(substring(tlim, 1L, nchar(tlim) - 1L))
    }
    tlim <- as.numeric(tlim)
    if (is.na(tlim) || tlim < 0) 0 else tlim
}
report_timeout <- function(tlim)
{
    ## Emit a warning that a sub-process hit its elapsed-time limit,
    ## expressing the limit in the largest sensible unit (hours >= 1h,
    ## minutes >= 1min, otherwise seconds), rounded to one decimal place.
    tlim <- trunc(tlim)
    if (tlim >= 3600) {
        amount <- round(tlim/3600, 1L)
        unit <- "hours"
    } else if (tlim >= 60) {
        amount <- round(tlim/60, 1L)
        unit <- "minutes"
    } else {
        amount <- tlim
        unit <- "seconds"
    }
    warning(gettextf("elapsed-time limit of %g %s reached for sub-process",
                     amount, unit),
            domain = NA, call. = FALSE)
}
## Find serialized objects (for load() and for readRDS()) in "allfiles" and
## report serialization versions (0 means not a serialized object,
## 1 means either version-1 or not a serialized object, 2 and more means
## serialized object of that version).
##
## These are most commonly data/*.{Rdata,rda}, R/sysdata.rda files,
## and build/vignette.rds
## But packages have other .rds files in many places.
## Despite its name, build/partial.rdb is created by saveRDS.
##
get_serialization_version <- function(allfiles)
{
    ## Version of a file written by save() (load()-style workspaces).
    getVerLoad <- function(file)
    {
        ## This could look at the magic number, but for a short
        ## while version 3 files were produced with a version-2
        ## magic number. loadInfoFromConn2 checks if the magic number
        ## is sensible.
        con <- gzfile(file, "rb"); on.exit(close(con))
        ## The .Internal gives an error on version-1 files
        ## (and on non-serialized files)
        tryCatch(.Internal(loadInfoFromConn2(con))$version,
                 error = function(e) 1L)
    }
    ## Version of a file written by saveRDS() (readRDS()-style objects).
    getVerSer <- function(file)
    {
        con <- gzfile(file, "rb"); on.exit(close(con))
        ## In case this is not a serialized object
        tryCatch(.Internal(serializeInfoFromConn(con))$version,
                 error = function(e) 0L)
    }
    ## Extensions conventionally produced by save() ...
    loadfiles <- grep("[.](rda|RData|Rdata|rdata|Rda|bam|Rbin)$",
                      allfiles, value = TRUE)
    ## ... and by saveRDS(); build/partial.rdb is saveRDS output despite
    ## its name.
    serfiles <- c(grep("[.](rds|RDS|Rds|rdx)$", allfiles, value = TRUE),
                  grep("build/partial[.]rdb$", allfiles, value = TRUE))
    vers1 <- sapply(loadfiles, getVerLoad)
    vers2 <- sapply(serfiles, getVerSer)
    ## Named vector: file path -> version (0 = not serialized, 1 = version 1
    ## or not serialized, >= 2 = that serialization version).
    c(vers1, vers2)
}
## Used for INSTALL and Rd2pdf
## Run "R CMD <args>" (used for INSTALL and Rd2pdf), capturing stdout and
## stderr to `out`. On Windows this goes through Rcmd.exe (no env support);
## elsewhere through "R CMD" with `env` prepended. Returns system2()'s exit
## status, warning first when the elapsed-time limit (124) was hit.
run_Rcmd <- function(args, out = "", env = "", timeout = 0)
{
    tlim <- get_timeout(timeout)
    status <- if (.Platform$OS.type == "windows") {
        system2(file.path(R.home("bin"), "Rcmd.exe"), args,
                stdout = out, stderr = out, timeout = tlim)
    } else {
        system2(file.path(R.home("bin"), "R"), c("CMD", args),
                stdout = out, stderr = out, env = env, timeout = tlim)
    }
    if (identical(status, 124L)) report_timeout(timeout)
    status
}
## Run an R subprocess, optionally feeding it `cmd` as the script to execute.
##   cmd     : character vector of R code (NULL to read from `stdin` instead)
##   Ropts   : extra command-line options for R/Rterm
##   env     : environment settings forwarded to system2()
##   stdout/stderr/stdin : forwarded to system2()
##   arch    : sub-architecture to run (e.g. "i386"); "" for the default
##   timeout : elapsed-time limit; accepted forms as per get_timeout()
## Returns system2()'s result (captured output, possibly with a "status"
## attribute) and warns via report_timeout() when the limit (124) was hit.
R_runR <- function(cmd = NULL, Ropts = "", env = "",
                   stdout = TRUE, stderr = TRUE, stdin = NULL,
                   arch = "", timeout = 0)
{
    timeout <- get_timeout(timeout)
    out <- if (.Platform$OS.type == "windows") {
        ## workaround Windows problem with input = cmd:
        ## write the commands to a temp file and pass it to Rterm via -f
        if (!is.null(cmd)) {
            ## In principle this should escape \
            Rin <- tempfile("Rin"); on.exit(unlink(Rin)); writeLines(cmd, Rin)
        } else Rin <- stdin
        suppressWarnings(system2(if(nzchar(arch)) file.path(R.home(), "bin", arch, "Rterm.exe")
                                 else file.path(R.home("bin"), "Rterm.exe"),
                                 c(Ropts, paste("-f", Rin)), stdout, stderr,
                                 env = env, timeout = timeout))
    } else {
        suppressWarnings(system2(file.path(R.home("bin"), "R"),
                                 c(if(nzchar(arch)) paste0("--arch=", arch), Ropts),
                                 stdout, stderr, stdin, input = cmd, env = env,
                                 timeout = timeout))
    }
    ## 124 is the timeout status: either the bare exit code or, when output
    ## is captured, the "status" attribute on the character result.
    if(identical(out, 124L) || identical(attr(out, "status"), 124L))
        report_timeout(timeout)
    out
}
## Compute environment-variable settings (R_LIBS and friends) for check
## subprocesses, using a private temporary library populated with links
## to (only) the package's declared dependencies.
##
## Arguments:
##   lib0      - an extra library path to prepend to R_LIBS, if non-empty.
##   pkgdir    - path to the package sources (contains DESCRIPTION).
##   suggests  - make all Suggests available (not just vignette builders
##               and, with tests = TRUE, test-suite packages)?
##   libdir    - library containing the installed copy of this package.
##   self      - link the package *sources* into the temporary library?
##   self2     - link the *installed* package from 'libdir'?
##   quote     - shell-quote the R_LIBS value?
##   LinkingTo - also provide LinkingTo dependencies?
##   tests     - make test-suite packages (RUnit/testthat) available?
##
## Returns a character vector of NAME=VALUE settings suitable for
## prefixing to a command line.
setRlibs <-
    function(lib0 = "", pkgdir = ".", suggests = FALSE, libdir = NULL,
             self = FALSE, self2 = TRUE, quote = FALSE, LinkingTo = FALSE,
             tests = FALSE)
{
    WINDOWS <- .Platform$OS.type == "windows"
    useJunctions <- WINDOWS && !nzchar(Sys.getenv("R_WIN_NO_JUNCTIONS"))
    ## Make an installed package visible in the temporary library via a
    ## symlink (Unix), junction (Windows default) or full copy (Windows
    ## fallback).
    flink <- function(from, to) {
        res <- if(WINDOWS) {
            if(useJunctions) Sys.junction(from, to)
            else file.copy(from, to, recursive = TRUE)
        } else file.symlink(from, to)
        if (!res) stop(gettextf("cannot link from %s", from), domain = NA)
    }
    pi <- .split_description(.read_description(file.path(pkgdir, "DESCRIPTION")))
    thispkg <- unname(pi$DESCRIPTION["Package"])
    ## We need to make some assumptions about layout: this version
    ## assumes .Library contains standard and recommended packages
    ## and nothing else.
    tmplib <- tempfile("RLIBS_")
    dir.create(tmplib)
    ## Since this is under the session directory and only contains
    ## symlinks and dummies (hence will be small) we never clean it up.
    test_recommended <-
        config_val_to_logical(Sys.getenv("_R_CHECK_NO_RECOMMENDED_", "FALSE"))
    if(test_recommended) {
        ## Now add dummies for recommended packages (removed later if
        ## declared as dependencies), so undeclared uses fail to load.
        recommended <- .get_standard_package_names()$recommended
        ## grDevices has :: to KernSmooth
        ## stats has ::: to Matrix, Matrix depends on lattice
        ## which gives false positives in MASS and Rcpp
        ## codetools is really part of tools
        exceptions <- "codetools"
        if (thispkg %in% c("MASS", "Rcpp"))
            exceptions <- c(exceptions, "Matrix", "lattice")
        if (thispkg %in%
            c("Modalclust", "aroma.core", "iWebPlots",
              "openair", "oce", "pcalg", "tileHMM"))
            exceptions <- c(exceptions, "KernSmooth")
        recommended <- recommended %w/o% exceptions
        for(pkg in recommended) {
            if(pkg == thispkg) next
            dir.create(pd <- file.path(tmplib, pkg))
            ## some people remove recommended packages ....
            f <- file.path(.Library, pkg, "DESCRIPTION")
            if(file.exists(f)) file.copy(f, pd)
            ## to make sure find.package throws an error:
            close(file(file.path(pd, "dummy_for_check"), "w"))
        }
    }
    ## Determine which Suggests to provide: all of them, or just the
    ## vignette-builder packages (plus test-suite packages if requested).
    sug <- if (suggests) names(pi$Suggests)
    else {
        ## we always need to be able to recognise 'vignettes'
        VB <- unname(pi$DESCRIPTION["VignetteBuilder"])
        sug <- if(is.na(VB)) character()
        else {
            VB <- unlist(strsplit(VB, ","))
            sug <- unique(gsub('[[:space:]]', '', VB))
            ## too many people forgot this, but it will never get fixed if made an exception.
            ## if("knitr" %in% VB) sug <- c(sug, "rmarkdown")
            sug
        }
        if(tests) ## we need the test-suite package available
            c(sug, intersect(names(pi$Suggests), c("RUnit", "testthat")))
        else sug
    }
    deps <- unique(c(names(pi$Depends), names(pi$Imports),
                     if(LinkingTo) names(pi$LinkingTo),
                     sug))
    if(length(libdir) && self2) flink(file.path(libdir, thispkg), tmplib)
    ## .Library is not necessarily canonical, but the .libPaths version is.
    lp <- .libPaths()
    poss <- c(lp[length(lp)], .Library)
    ## Breadth-first traversal of the dependency graph, linking each
    ## dependency (and its recursive dependencies) into tmplib.
    already <- thispkg
    more <- unique(deps %w/o% already) # should not depend on itself ...
    while(length(more)) {
        m0 <- more; more <- character()
        for (pkg in m0) {
            if (test_recommended) {
                ## A declared dependency: remove the blocking dummy again.
                if (pkg %in% recommended) unlink(file.path(tmplib, pkg), TRUE)
                ## hard-code dependencies for now.
                if (pkg == "mgcv")
                    unlink(file.path(tmplib, c("Matrix", "lattice", "nlme")), TRUE)
                if (pkg == "Matrix") unlink(file.path(tmplib, "lattice"), TRUE)
                if (pkg == "class") unlink(file.path(tmplib, "MASS"), TRUE)
                if (pkg == "nlme") unlink(file.path(tmplib, "lattice"), TRUE)
            }
            where <- find.package(pkg, quiet = TRUE)
            if(length(where)) {
                if (dirname(where) %notin% poss)
                    flink(where, tmplib)
                else if (!test_recommended)
                    # If the package is in the standard library we can
                    # assume dependencies have been met, but we can
                    # only skip the traversal if we aren't testing recommended
                    # packages, because loading will fail if there is
                    # an indirect dependency to one that has been hidden
                    # by a dummy in tmplib.
                    next
                pi <- readRDS(file.path(where, "Meta", "package.rds"))
                more <- c(more, names(pi$Depends), names(pi$Imports),
                          names(pi$LinkingTo))
            }
        }
        already <- c(already, m0)
        more <- unique(more %w/o% already)
    }
    if (self) flink(normalizePath(pkgdir), tmplib)
    # print(dir(tmplib))
    ## Assemble the R_LIBS value and point the user/site libraries and
    ## user environment file at nonexistent locations so the check only
    ## sees what was set up above.
    rlibs <- tmplib
    if (nzchar(lib0)) rlibs <- c(lib0, rlibs)
    rlibs <- paste(rlibs, collapse = .Platform$path.sep)
    if(quote) rlibs <- shQuote(rlibs)
    c(paste0("R_LIBS=", rlibs),
      if(WINDOWS) " R_ENVIRON_USER='no_such_file'" else "R_ENVIRON_USER=''",
      if(WINDOWS) " R_LIBS_USER='no_such_dir'" else "R_LIBS_USER=''",
      " R_LIBS_SITE='no_such_dir'")
}
## Prepend a directory of dummy 'R'/'Rscript' scripts to PATH so that
## check steps which invoke R without a full path fail loudly (cf. par.
## 1.6 of the manual).  Logs a message and returns early if the
## directory already exists or cannot be created.
add_dummies <- function(dir, Log)
{
    bindir <- file.path(dir, "R_check_bin")
    if (dir.exists(bindir)) {
        messageLog(Log, "directory ", sQuote(bindir), " already exists")
        return()
    }
    dir.create(bindir)
    if (!dir.exists(bindir)) {
        messageLog(Log, "creation of directory ", sQuote(bindir), " failed")
        return()
    }
    ## The dummies must be found before any real R on the path.
    Sys.setenv(PATH = env_path(bindir, Sys.getenv("PATH")))
    if (.Platform$OS.type != "windows") {
        ## Write one shell-script dummy and return its path.
        write_dummy <- function(lines, name) {
            path <- file.path(bindir, name)
            writeLines(lines, path)
            path
        }
        scripts <- c(
            write_dummy(c('echo "\'R\' should not be used without a path -- see par. 1.6 of the manual"',
                          'exit 1'),
                        "R"),
            write_dummy(c('echo "\'Rscript\' should not be used without a path -- see par. 1.6 of the manual"',
                          'exit 1'),
                        "Rscript"))
        Sys.chmod(scripts, "0755")
    } else {
        ## currently untested
        writeLines(c('@ECHO OFF',
                     'echo "\'R\' should not be used without a path -- see par. 1.6 of the manual"',
                     'exit /b 1'),
                   file.path(bindir, "R.bat"))
        writeLines(c('@ECHO OFF',
                     'echo "\'Rscript\' should not be used without a path -- see par. 1.6 of the manual"',
                     'exit /b 1'),
                   file.path(bindir, "Rscript.bat"))
    }
}
###- The main function for "R CMD check"
.check_packages <- function(args = NULL, no.q = interactive())
{
WINDOWS <- .Platform$OS.type == "windows"
## this requires on Windows: file.exe (optional)
## Re-flow message fragments and write them to the check log, wrapping
## long lines.
wrapLog <- function(...) {
    msg <- paste(..., collapse = " ")
    ## strwrap() expects paragraphs separated by blank lines; split on
    ## embedded newlines first (Perl's wrap split on \n).
    paras <- strsplit(msg, "\n", useBytes = TRUE)[[1L]]
    wrapped <- paste(strwrap(paras), collapse = "\n")
    printLog(Log, wrapped, "\n")
}
## used for R_runR2 and
## .check_package_description
## .check_package_description_encoding
## .check_package_license
## .check_demo_index
## .check_vignette_index
## .check_package_subdirs
## .check_citation
## .check_package_ASCII_code
## .check_package_code_syntax
## .check_packages_used
## .check_package_code_shlib
## .check_package_code_startup_functions
## .check_package_code_unload_functions
## .check_package_code_tampers
## .check_package_code_assign_to_globalenv
## .check_package_code_attach
## .check_package_code_data_into_globalenv
## .check_package_parseRd
## .check_Rd_metadata
## .check_Rd_line_widths
## .check_Rd_xrefs
## .check_Rd_contents
## .check_package_datasets
## .check_package_compact_datasets
## .check_package_compact_sysdata
## .check_make_vars
## check_compiled_code
## Checking loading
## Rdiff on reference output
## Creating -Ex.R
## Running examples (run_one_arch)
## .runPackageTestsR
## .run_one_vignette
## buildVignettes
## Default elapsed-time limit for child R processes, taken from the
## _R_CHECK_ELAPSED_TIMEOUT_ environment variable.
def_tlim <- get_timeout(Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_"))
## Run R code in a child process, applying the default check timeout
## unless the caller overrides it.
R_runR0 <- function(..., timeout = def_tlim)
    R_runR(..., timeout = timeout)
## Used for
## .check_packages_used_in_examples
## .check_packages_used_in_tests
## .check_packages_used_in_vignettes
## checkS3methods
## checkReplaceFuns
## checkFF
## .check_code_usage_in_package (with full set)
## .check_T_and_F (with full set)
## .check_dotInternal (with full set)
## undoc, codoc, codocData, codocClasses
## checkDocFiles, checkDocStyle
## The default set of packages here are as they are because
## .get_S3_generics_as_seen_from_package needs utils,graphics,stats
## Used by checkDocStyle (which needs the generic visible) and checkS3methods.
## Run R code in a child process with the standard default packages
## (utils, grDevices, graphics, stats) attached, filtering known
## platform-specific noise from the captured output.
## NOTE(review): the extra 'status <-' below binds the same function to
## a second name; it looks accidental -- confirm nothing later in this
## file relies on 'status' before removing it.
R_runR2 <-
    status <- if(WINDOWS) {
        function(cmd,
                 env = "R_DEFAULT_PACKAGES=utils,grDevices,graphics,stats",
                 timeout = 0)
        {
            out <- R_runR(cmd, R_opts2, env, timeout = timeout)
            ## pesky gdata ....
            filtergrep("^(ftype: not found|File type)", out)
        }
    } else
        function(cmd,
                 env = "R_DEFAULT_PACKAGES='utils,grDevices,graphics,stats'",
                 timeout = 0)
        {
            out <- R_runR(cmd, R_opts2, env, timeout = timeout)
            ## htmltools produced non-UTF-8 output in Dec 2015
            if (R_check_suppress_RandR_message)
                filtergrep('^Xlib: *extension "RANDR" missing on display',
                           out, useBytes = TRUE)
            else out
        }
td0 <- Inf # updated below
## Report the time taken by a check step (difference of proc.time()
## values t1 and t2) on the console and, when the log file is open, in
## the log.  Steps whose elapsed time is below td0 are not reported;
## times over ten minutes are shown in minutes, otherwise in seconds.
print_time <- function(t1, t2, Log)
{
    delta <- t2 - t1
    elapsed <- delta[3L]
    if (elapsed < td0) return()
    stamp <- if (elapsed > 600) {
        mins <- delta/60
        if (WINDOWS) sprintf(" [%dm]", round(mins[3L]))
        else sprintf(" [%dm/%dm]", round(sum(mins[-3L])), round(mins[3L]))
    } else {
        if (WINDOWS) sprintf(" [%ds]", round(elapsed))
        else sprintf(" [%ds/%ds]", round(sum(delta[-3L])), round(elapsed))
    }
    cat(stamp)
    if (!is.null(Log) && Log$con > 0L) cat(stamp, file = Log$con)
}
## Interpret a DESCRIPTION field as a logical value, falling back to
## 'default' when the field is missing or not interpretable.
parse_description_field <- function(desc, field, default) {
    str_parse_logic(desc[field], default = default)
}
## Run the full sequence of per-package checks for one package.  The
## order of the calls below *is* the check protocol; it determines the
## order of the log output.
##
## Arguments:
##   pkg         - package path as given on the command line.
##   pkgname     - name of the package.
##   pkgoutdir   - directory for check output ('<pkgname>.Rcheck').
##   startdir    - directory the check run was started from.
##   libdir      - library the package was installed into.
##   desc        - the package DESCRIPTION metadata.
##   is_base_pkg - is this a base package?
##   is_rec_pkg  - is this a recommended package?
##   subdirs     - subdirectory-check selection passed through to
##                 check_subdirectories().
##   extra_arch  - checking an additional architecture only?
check_pkg <- function(pkg, pkgname, pkgoutdir, startdir, libdir, desc,
                      is_base_pkg, is_rec_pkg, subdirs, extra_arch)
{
    ## Export the package name for subprocesses; undone on exit.
    Sys.setenv("_R_CHECK_PACKAGE_NAME_" = pkgname)
    on.exit(Sys.unsetenv("_R_CHECK_PACKAGE_NAME_"))
    ## pkg is the argument we received from the main loop.
    ## pkgdir is the corresponding absolute path,
    checkingLog(Log, "package directory")
    setwd(startdir)
    pkg <- sub("/$", "", pkg)
    if (dir.exists(pkg)) {
        setwd(pkg) ## wrap in try()?
        pkgdir <- getwd()
        resultLog(Log, "OK")
    } else {
        errorLog(Log, "Package directory ", sQuote(pkg), "does not exist.")
        summaryLog(Log)
        do_exit(1L)
    }
    haveR <- dir.exists("R") && !extra_arch
    ## Source-level checks are skipped when only checking an extra
    ## architecture.
    if (!extra_arch) {
        if(dir.exists("build")) check_build()
        db <- check_meta() # Check DESCRIPTION meta-information.
        check_top_level()
        check_detritus()
        check_indices()
        check_subdirectories(haveR, subdirs)
        ## Check R code for non-ASCII chars which
        ## might be syntax errors in some locales.
        if (!is_base_pkg && haveR && R_check_ascii_code) check_non_ASCII()
    } # end of !extra_arch
    ## Check we can actually load the package: base is always loaded
    if (do_install && pkgname != "base") {
        if (this_multiarch) {
            ## Loading is checked once per installed sub-architecture.
            Log$stars <<- "**"
            for (arch in inst_archs) {
                printLog(Log, "* loading checks for arch ", sQuote(arch), "\n")
                check_loading(arch)
            }
            Log$stars <<- "*"
        } else {
            check_loading()
        }
    }
    if (haveR) {
        check_R_code() # unstated dependencies, S3 methods, replacement, foreign
        check_R_files(is_rec_pkg) # codetools etc
    }
    check_Rd_files(haveR)
    check_data() # 'data' dir and sysdata.rda
    if (!is_base_pkg && !extra_arch) check_src_dir(desc)
    check_src()
    if(do_install &&
       dir.exists("src") &&
       length(so_symbol_names_table)) # suitable OS
        check_sos()
    ## Rplots.* under inst/doc are stray output from running vignettes.
    miss <- file.path("inst", "doc", c("Rplots.ps", "Rplots.pdf"))
    if (any(f <- file.exists(miss))) {
        checkingLog(Log, "for left-overs from vignette generation")
        warningLog(Log)
        printLog(Log,
                 paste(" file", paste(sQuote(miss[f]), collapse = ", "),
                       "will not be installed: please remove it\n"))
    }
    if (dir.exists("inst/doc")) {
        if (R_check_doc_sizes) check_doc_size()
        else if (as_cran)
            warningLog(Log, "'qpdf' is needed for checks on size reduction of PDFs")
    }
    if (dir.exists("inst/doc") && do_install) check_doc_contents()
    if (dir.exists("vignettes")) check_vign_contents(ignore_vignettes)
    if (!ignore_vignettes) {
        ## Vignette sources belong in 'vignettes' as from R 3.1.0:
        ## detect sources present only in 'inst/doc' (for Sweave and for
        ## any declared non-Sweave vignette engines).
        if (dir.exists("inst/doc") && !dir.exists("vignettes")) {
            pattern <- vignetteEngine("Sweave")$pattern
            sources <- setdiff(list.files(file.path("inst", "doc"),
                                          pattern = pattern),
                               list.files("vignettes", pattern = pattern))
            buildPkgs <- .get_package_metadata(".")["VignetteBuilder"]
            if (!is.na(buildPkgs)) {
                buildPkgs <- unlist(strsplit(buildPkgs, ","))
                buildPkgs <- unique(gsub('[[:space:]]', '', buildPkgs))
                engineList <- vignetteEngine(package = buildPkgs)
                for(nm in names(engineList)) {
                    pattern <- engineList[[nm]]$pattern
                    sources <- c(sources,
                                 setdiff(list.files(file.path("inst", "doc"),
                                                    pattern = pattern),
                                         list.files("vignettes", pattern = pattern)))
                }
            }
            sources <- unique(sources)
            if(length(sources)) {
                checkingLog(Log, "for old-style vignette sources")
                msg <- c("Vignette sources only in 'inst/doc':",
                         strwrap(paste(sQuote(sources), collapse = ", "),
                                 indent = 2L, exdent = 2L),
                         "A 'vignettes' directory is required as from R 3.1.0",
                         "and these will not be indexed nor checked")
                ## warning or error eventually
                noteLog(Log, paste(msg, collapse = "\n"))
            }
        }
    }
    setwd(pkgoutdir)
    ## Run the examples: this will be skipped if installation was
    ## (the installed 'help' directory is the marker for a usable install).
    if (dir.exists(file.path(libdir, pkgname, "help"))) {
        run_examples()
    } else if (dir.exists(file.path(pkgdir, "man"))) {
        checkingLog(Log, "examples")
        resultLog(Log, "SKIPPED")
    }
    ## Run the package-specific tests.
    tests_dir <- file.path(pkgdir, test_dir)
    if (test_dir != "tests" && !dir.exists(tests_dir)) {
        warningLog(Log)
        printLog(Log, "directory ", sQuote(test_dir), " not found\n")
    }
    if (dir.exists(tests_dir) && # trackObjs has only *.Rin
        length(dir(tests_dir, pattern = "\\.(R|r|Rin)$")))
        run_tests()
    ## Check package vignettes.
    setwd(pkgoutdir)
    if (!ignore_vignettes) run_vignettes(desc)
} ## end{ check_pkg }
## Check that file names in the package are portable to all platforms
## R runs on: no characters invalid on Windows, no DOS device names,
## ASCII-only help-file names, no clashes on case-insensitive file
## systems, a warning for other not-fully-portable names, and paths
## short enough to be stored in a (ustar) tarball.  Errors exit via
## maybe_exit(1L).  Returns the character vector of checked paths.
check_file_names <- function()
{
    ## Check for portable file names.
    checkingLog(Log, "for portable file names")
    ## Build list of exclude patterns.
    ignore <- get_exclude_patterns()
    ignore_file <- ".Rbuildignore"
    if (ignore_file %in% dir())
        ignore <- c(ignore, readLines(ignore_file))
    ## Ensure that the names of the files in the package are valid for
    ## at least the supported OS types.  Under Unix, we definitely
    ## cannot have '/'.  Under Windows, the control characters as well
    ## as " * : < > ? \ | are or can be invalid, one-character names
    ## consisting of just ' ', '.' or '~' are not allowed, and '~' is
    ## special for 8.3 short file names; see
    ## http://msdn.microsoft.com/en-us/library/aa365247%28VS.85%29.aspx
    ## Windows also disallows the DOS device names (CON, PRN, AUX, NUL,
    ## CLOCK$, COM1-COM9, LPT1-LPT9), by themselves or with any
    ## extension.  In addition, the names of help files get converted
    ## to HTML file names and so should be ASCII and not contain '%'.
    allfiles <- dir(".", all.files = TRUE,
                    full.names = TRUE, recursive = TRUE)
    allfiles <- c(allfiles, unique(dirname(allfiles)))
    allfiles <- af <- sub("^./", "", allfiles)
    ignore_re <- paste0("(", paste(ignore, collapse = "|"), ")")
    allfiles <- filtergrep(ignore_re, allfiles)
    ## Characters which are or can be invalid on Windows.
    bad_files <- allfiles[grepl("[[:cntrl:]\"*/:<>?\\|]",
                                basename(allfiles))]
    ## Non-ASCII characters or '%' in help file names.
    is_man <- grepl("man$", dirname(allfiles))
    bad <- vapply(strsplit(basename(allfiles[is_man]), ""),
                  function(x) any(grepl("[^ -~]|%", x)),
                  logical(1L))
    if (length(bad))
        bad_files <- c(bad_files, (allfiles[is_man])[bad])
    ## DOS device names, with any extension(s) removed
    ## (see 'Writing R Extensions').
    bad <- tolower(basename(allfiles))
    bad <- sub("[.].*", "", bad)
    bad <- grepl("^(con|prn|aux|clock[$]|nul|lpt[1-9]|com[1-9])$", bad)
    bad_files <- c(bad_files, allfiles[bad])
    if (nb <- length(bad_files)) {
        errorLog(Log)
        msg <- ngettext(nb,
                        "Found the following file with a non-portable file name:\n",
                        "Found the following files with non-portable file names:\n",
                        domain = NA)
        wrapLog(msg)
        printLog0(Log, .format_lines_with_indent(bad_files), "\n")
        wrapLog("These are not valid file names",
                "on all R platforms.\n",
                "Please rename the files and try again.\n",
                "See section 'Package structure'",
                "in the 'Writing R Extensions' manual.\n")
        maybe_exit(1L)
    }
    ## Next check for name clashes on case-insensitive file systems
    ## (that is on Windows and (by default) on macOS).
    dups <- unique(allfiles[duplicated(tolower(allfiles))])
    if (nb <- length(dups)) {
        errorLog(Log)
        wrapLog("Found the following files with duplicate lower-cased file names:\n")
        printLog0(Log, .format_lines_with_indent(dups), "\n")
        wrapLog("File names must not differ just by case",
                "to be usable on all R platforms.\n",
                "Please rename the files and try again.\n",
                "See section 'Package structure'",
                "in the 'Writing R Extensions' manual.\n")
        maybe_exit(1L)
    }
    ## NB: the omission of ' ' is deliberate.
    non_ASCII_files <-
        allfiles[grepl("[^-A-Za-z0-9._!#$%&+,;=@^(){}\'[\\]]", #
                       basename(allfiles), perl = TRUE)]
    any <- FALSE
    if (nb <- length(non_ASCII_files)) {
        any <- TRUE
        warningLog(Log)
        msg <- ngettext(nb,
                        "Found the following file with a non-portable file name:\n",
                        "Found the following files with non-portable file names:\n",
                        domain = NA)
        wrapLog(msg)
        printLog0(Log, .format_lines_with_indent(non_ASCII_files), "\n")
        wrapLog("These are not fully portable file names.\n",
                "See section 'Package structure'",
                "in the 'Writing R Extensions' manual.\n")
    }
    ## Now check lengths, as (ustar) tarballs can only record a final
    ## component of up to 100 bytes, plus perhaps a prefix of up to
    ## 155 bytes followed by '/'.
    af <- file.path(pkgname, af)
    lens <- nchar(af, "b")
    if (any(lens > 100L)) {
        bad_files <- af[lens > 100L]
        OK <- TRUE
        if (any(lens > 256L)) OK <- FALSE
        else { # check if the path can be split into prefix + name
            for (f in bad_files) {
                name <- charToRaw(f)
                ## Last '/' within the first 155 bytes that could serve
                ## as the prefix split point; max(integer(0)) is -Inf.
                s <- max(which(name[1:155] == charToRaw("/")))
                if(is.infinite(s) || s+100 < length(name)) {
                    OK <- FALSE; break
                }
            }
        }
        ## Log the result for *both* branches above.  (Previously this
        ## was nested inside the 'else', so paths over 256 bytes exited
        ## with an error status but no ERROR marker in the log.)
        if (!OK) errorLog(Log)
        else if(!any) {
            noteLog(Log)
            any <- TRUE
        }
        msg <- ngettext(length(bad_files),
                        "Found the following non-portable file path:\n",
                        "Found the following non-portable file paths:\n",
                        domain = NA)
        wrapLog(msg)
        printLog0(Log, .format_lines_with_indent(bad_files), "\n\n")
        wrapLog("Tarballs are only required to store paths of up to 100",
                "bytes and cannot store those of more than 256 bytes,",
                "with restrictions including to 100 bytes for the",
                "final component.\n",
                "See section 'Package structure'",
                "in the 'Writing R Extensions' manual.\n")
        if (!OK)
            maybe_exit(1L)
    }
    if (!any) resultLog(Log, "OK")
    allfiles
}
## Check that package files and directories carry sufficient
## permissions (at least 400 for files and 700 for directories --
## insufficient permissions are fixed up by INSTALL on Unix), and that
## top-level 'configure'/'cleanup' scripts, when present, are
## executable (at least 500) so they will actually be used.
check_permissions <- function(allfiles)
{
    checkingLog(Log, "for sufficient/correct file permissions")
    ## Phase A: directories at least 700, files at least 400.
    insufficient <- character()
    if (length(allfiles)) {
        fmodes <- file.mode(allfiles)
        insufficient <- allfiles[(fmodes & "400") < as.octmode("400")]
    }
    alldirs <- unique(dirname(allfiles))
    if (length(alldirs)) {
        dmodes <- file.mode(alldirs)
        insufficient <- c(insufficient,
                          alldirs[(dmodes & "700") < as.octmode("700")])
    }
    if (length(insufficient)) {
        errorLog(Log)
        wrapLog("Found the following files with insufficient permissions:\n")
        printLog0(Log, .format_lines_with_indent(insufficient), "\n")
        wrapLog("Permissions should be at least 700 for directories and 400 for files.\nPlease fix permissions and try again.\n")
        maybe_exit(1L)
    }
    ## Phase B: top-level scripts 'configure' and 'cleanup' should be
    ## at least mode 500, or they will not necessarily be used.
    not_exec <- character()
    for (script in c("configure", "cleanup")) {
        if (!file.exists(script)) next
        if ((file.mode(script) & "500") < as.octmode("500"))
            not_exec <- c(not_exec, script)
    }
    if (length(not_exec)) {
        warningLog(Log)
        wrapLog("The following files should most likely be executable (for the owner):\n")
        printLog0(Log, .format_lines_with_indent(not_exec), "\n")
        printLog(Log, "Please fix their permissions\n")
    } else resultLog(Log, "OK")
}
## Look for serialized objects and check their serialization format
## version.  This must run before installation, which may create
## src/symbols.rds in the sources.
check_serialization <- function(allfiles)
{
    checkingLog(Log, "serialization versions")
    versions <- get_serialization_version(allfiles)
    v3_files <- names(versions[versions >= 3L])
    if (length(v3_files)) {
        warningLog(Log, "Found file(s) with version 3 serialization:")
        printLog0(Log, paste0(.pretty_format(sort(v3_files)), "\n"))
        wrapLog("Such files are only readable in R >= 3.5.0.\n",
                "Recreate them with R < 3.5.0 or",
                "save(version = 2) or saveRDS(version = 2)",
                "as appropriate")
    } else resultLog(Log, "OK")
}
## Check DESCRIPTION meta-information: encoding, required fields,
## license, Authors@R consistency, NeedsCompilation, BugReports, the
## formatted field check, and (optionally) the versioned dependence on
## R itself.  Returns the parsed DESCRIPTION database.
check_meta <- function()
{
    ## If we just installed the package (via R CMD INSTALL), we already
    ## validated most of the package DESCRIPTION metadata.  Otherwise,
    ## let us be defensive about this ...
    checkingLog(Log, "DESCRIPTION meta-information")
    dfile <- if (is_base_pkg) "DESCRIPTION.in" else "DESCRIPTION"
    ## 'any' records whether a WARNING/NOTE header has already been
    ## written for this check, so later findings only print the body.
    any <- FALSE
    ## Check the encoding -- do first as it gives a WARNING
    Rcmd <- sprintf("tools:::.check_package_description_encoding(\"%s\")", dfile)
    out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
    if (length(out)) {
        warningLog(Log)
        any <- TRUE
        printLog0(Log, paste(out, collapse = "\n"), "\n")
    }
    ## FIXME: this does not need to be run in another process
    ## but that needs conversion to format().
    Rcmd <- sprintf("tools:::.check_package_description(\"%s\", TRUE)",
                    dfile)
    out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
    if (length(out)) {
        ## Malformed Title/Description only give a NOTE; any other
        ## problem with the DESCRIPTION is fatal.
        if(any(!grepl("^Malformed (Title|Description)", out))) {
            errorLog(Log)
            printLog0(Log, paste(out, collapse = "\n"), "\n")
            summaryLog(Log)
            do_exit(1L)
        } else {
            noteLog(Log)
            any <- TRUE
            printLog0(Log, paste(out, collapse = "\n"), "\n")
        }
    }
    ## Check the license.
    ## For base packages, the DESCRIPTION.in files have non-canonical
    ##   License: Part of R @VERSION@
    ## entries because these really are a part of R: hence, skip the
    ## check.
    check_license <- if (!is_base_pkg) {
        Check_license <- Sys.getenv("_R_CHECK_LICENSE_", NA_character_)
        if(is.na(Check_license)) {
            ## The check code conditionalizes *output* on _R_CHECK_LICENSE_.
            Sys.setenv('_R_CHECK_LICENSE_' = "TRUE")
            TRUE
        } else config_val_to_logical(Check_license)
    } else FALSE
    if (!isFALSE(check_license)) {
        Rcmd <- sprintf("tools:::.check_package_license(\"%s\", \"%s\")",
                        dfile, pkgdir)
        ## FIXME: this does not need to be run in another process
        out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
        if (length(out)) {
            ## Severity depends on what the license check found.
            if (check_license == "maybe") {
                if (!any) warningLog(Log)
            } else if(any(startsWith(out, "Standardizable: FALSE"),
                          startsWith(out, "Invalid license file pointers:"))) {
                if (!any) warningLog(Log)
            } else {
                if (!any) noteLog(Log)
            }
            any <- TRUE
            printLog0(Log, paste(out, collapse = "\n"), "\n")
        }
    }
    ## .check_package_description() only checks Authors@R "if needed",
    ## and does not check for persons with no valid roles.
    db <- .read_description(dfile)
    if(!is.na(aar <- db["Authors@R"])) {
        ## Incoming (CRAN submission) checks use the stricter level.
        lev <- if(check_incoming) 2L else 1L
        out <- .check_package_description_authors_at_R_field(aar,
                                                             strict = lev)
        if(length(out)) {
            if(!any) noteLog(Log)
            any <- TRUE
            out <- .format_check_package_description_authors_at_R_field_results(out)
            printLog0(Log, paste(out, collapse = "\n"), "\n")
        }
        ## and there might be stale Authors and Maintainer fields
        yorig <- db[c("Author", "Maintainer")]
        if(check_incoming && any(!is.na(yorig))) {
            enc <- db["Encoding"]
            aar <- utils:::.read_authors_at_R_field(aar)
            y <- c(Author =
                       utils:::.format_authors_at_R_field_for_author(aar),
                   Maintainer =
                       utils:::.format_authors_at_R_field_for_maintainer(aar))
            ## ignore formatting as far as possible
            clean_up <- function(x) trimws(gsub("[[:space:]]+", " ", x))
            yorig <- sapply(yorig, clean_up)
            y <- sapply(y, clean_up)
            diff <- y != yorig
            ## <FIXME>
            if(diff[1L]
               && grepl("https://orcid.org/", y[1L], fixed = TRUE)) {
                ## Argh. Might be from using the new ORCID id
                ## mechanism but having built with R < 3.5.0.
                ## Let's ignore ...
                ## Remove eventually.
                aar$comment <- lapply(aar$comment, unname)
                y1 <- utils:::.format_authors_at_R_field_for_author(aar)
                diff[1L] <- clean_up(y1) != yorig[1L]
            }
            ## </FIXME>
            if(any(diff)) {
                if(!any) noteLog(Log)
                any <- TRUE
                if(diff[1L]) {
                    printLog(Log, "Author field differs from that derived from Authors@R", "\n")
                    printLog(Log, " Author: ", sQuote(yorig[1L]), "\n")
                    printLog(Log, " Authors@R: ", sQuote(y[1L]), "\n")
                    printLog(Log, "\n")
                }
                if(diff[2L]) {
                    printLog(Log, "Maintainer field differs from that derived from Authors@R", "\n")
                    printLog(Log, " Maintainer: ", sQuote(yorig[2L]), "\n")
                    printLog(Log, " Authors@R: ", sQuote(y[2L]), "\n")
                    printLog(Log, "\n")
                }
            }
        }
    }
    ## A missing Packaged field means the sources were not prepared by
    ## 'R CMD build'.
    if(!is_base_pkg && is.na(db["Packaged"])) {
        if(!any) (noteLog(Log))
        any <- TRUE
        printLog(Log,
                 "Checking should be performed on sources prepared by 'R CMD build'.",
                 "\n")
    }
    if(!is.na(ncomp <- db["NeedsCompilation"])) {
        if (ncomp %notin% c("yes", "no")) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog(Log, "NeedsCompilation field must take value 'yes' or 'no'", "\n")
        }
        ## A 'src' directory almost always implies compilation.
        if((ncomp == "no") && dir.exists("src")) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog(Log, "NeedsCompilation field should likely be 'yes'", "\n")
        }
    }
    ## check for BugReports field added at R 3.4.0
    ## This used to check for empty first line as that
    ## breaks bug.report() in R <= 3.3.2 -- but read.dcf in those
    ## versions adds back the newline.
    if(!is.na(BR <- db["BugReports"])) {
        if (nzchar(BR)) {
            msg <- ""
            ## prior to 3.4.0 this was said to be
            ## 'a URL to which bug reports about the package
            ## should be submitted'
            ## We will take that to mean a http[s]:// URL,
            isURL <- grepl("^https?://[^ ]*$", BR)
            ## As from 3.4.0 bug.report() is able to extract
            ## an email addr.
            if(!isURL) {
                ## Extract an email address given as 'mailto:addr' or
                ## enclosed in angle brackets, if any.
                findEmail <- function(x) {
                    x <- paste(x, collapse = " ")
                    if (grepl("mailto:", x))
                        sub(".*mailto:([^ ]+).*", "\\1", x)
                    else if (grepl("[^<]*<([^>]+)", x))
                        sub("[^<]*<([^>]+)>.*", "\\1", x)
                    else NA_character_
                }
                msg <- if (is.na(findEmail(BR))) {
                    if (grepl("(^|.* )[^ ]+@[[:alnum:]._]+", BR))
                        "BugReports field is not a suitable URL but appears to contain an email address\n not specified by mailto: nor contained in < >"
                    else
                        "BugReports field should be the URL of a single webpage"
                } else
                    "BugReports field is not a suitable URL but contains an email address\n which will be used as from R 3.4.0"
            }
        } else {
            msg <- "BugReports field should not be empty"
        }
        if (nzchar(msg)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog(Log, msg, "\n")
        }
    }
    out <- format(.check_package_description2(dfile))
    if (length(out)) {
        if(!any) noteLog(Log)
        any <- TRUE
        printLog0(Log, paste(out, collapse = "\n"), "\n")
    }
    ## Dependence on say R >= 3.4.3 when 3.4 is current can
    ## cause problems with revdeps (and did for 3.2.x).
    ## We only check recent ones: maybe previous two
    ## (R-release and R-old-release) while this is R-devel
    Check_R_deps <- Sys.getenv("_R_CHECK_R_DEPENDS_", "FALSE")
    act <- if(Check_R_deps %in% c("note", "warn")) TRUE
    else config_val_to_logical(Check_R_deps)
    if(act) {
        Rver <- .split_description(db, verbose = TRUE)$Rdepends2
        if(length(Rver) && Rver[[1L]]$op == ">=") {
            ver <- unclass(Rver[[1L]]$version)[[1L]]
            thisver <- unclass(getRversion())[[1L]]
            ## needs updating if we ever go to 4.0
            tv <- if(thisver[1L] == 3L) thisver[2L] - 2L else 4L
            if (length(ver) == 3L && ver[3L] != 0 &&
                ((ver[1L] > 3L) ||
                 (ver[1L] == 3L) && (ver[2L] >= tv) )) {
                ## This is not quite right: may have NOTE-d above
                if(Check_R_deps == "warn") warningLog(Log)
                else if(!any) noteLog(Log)
                any <- TRUE
                printLog0(Log,
                          sprintf("Dependence on R version %s not with patchlevel 0\n",
                                  sQuote(format(Rver[[1L]]$version))))
            }
        }
    }
    if (!any) resultLog(Log, "OK")
    return(db)
}
## Check the 'build' directory (currently vignettes only): every
## vignette listed in build/vignette.rds should have produced output,
## and every listed output should be present under inst/doc.
check_build <- function()
{
    if (ignore_vignettes) return()
    meta_file <- file.path("build", "vignette.rds")
    if (!file.exists(meta_file)) return()
    checkingLog(Log, "'build' directory")
    flagged <- FALSE
    vigns <- readRDS(meta_file)
    ## do as CRAN-pack does
    has_output <- nzchar(vigns$PDF)
    if (!all(has_output)) {
        warningLog(Log)
        flagged <- TRUE
        msg <- c("Vignette(s) without any output listed in 'build/vignette.rds'",
                 strwrap(sQuote(vigns$file[!has_output]), indent = 2L, exdent = 2L))
        printLog0(Log, paste(msg, collapse = "\n"), "\n")
    }
    outputs <- file.path("inst", "doc", vigns[has_output, ]$PDF)
    absent <- !file.exists(outputs)
    if (any(absent)) {
        if (!flagged) warningLog(Log)
        flagged <- TRUE
        msg <- c("Output(s) listed in 'build/vignette.rds' but not in package:",
                 strwrap(sQuote(outputs[absent]), indent = 2L, exdent = 2L))
        printLog0(Log, paste(msg, collapse = "\n"), "\n")
    }
    if (!flagged) resultLog(Log, "OK")
}
## Check top-level files: defunct files, conversion of README.md /
## NEWS.md for CRAN, LICENCE/LICENSE files not referenced from the
## License field, and other non-standard files or directories.
check_top_level <- function()
{
    checkingLog(Log, "top-level files")
    ## These files were once supported and are now defunct.
    topfiles <- Sys.glob(c("install.R", "R_PROFILE.R"))
    any <- FALSE
    if (length(topfiles)) {
        any <- TRUE
        warningLog(Log)
        printLog0(Log, .format_lines_with_indent(topfiles), "\n")
        wrapLog("These files are defunct.",
                "See manual 'Writing R Extensions'.\n")
    }
    if(check_incoming) {
        ## CRAN must be able to convert
        ## inst/README.md or README.md
        ## inst/NEWS.md or NEWS.md
        ## to HTML using pandoc: check that this works fine.
        md_files <-
            c(Filter(file.exists,
                     c(file.path("inst", "README.md"),
                       "README.md"))[1L],
              Filter(file.exists,
                     c(file.path("inst", "NEWS.md"),
                       "NEWS.md"))[1L])
        md_files <- md_files[!is.na(md_files)]
        if(length(md_files)) {
            if(nzchar(Sys.which("pandoc"))) {
                for(ifile in md_files) {
                    ofile <- tempfile("pandoc", fileext = ".html")
                    out <- .pandoc_md_for_CRAN(ifile, ofile)
                    ## Non-zero status means the conversion failed.
                    if(out$status) {
                        if(!any) warningLog(Log)
                        any <- TRUE
                        printLog(Log,
                                 sprintf("Conversion of '%s' failed:\n",
                                         ifile),
                                 paste(out$stderr, collapse = "\n"),
                                 "\n")
                    }
                    unlink(ofile)
                }
            } else {
                if(!any) noteLog(Log)
                any <- TRUE
                printLog(Log,
                         "Files 'README.md' or 'NEWS.md' cannot be checked without 'pandoc' being installed.\n")
            }
        }
    }
    ## Top-level LICENCE/LICENSE files must be referenced from the
    ## License field of the DESCRIPTION.
    topfiles <- Sys.glob(c("LICENCE", "LICENSE"))
    if (length(topfiles)) {
        ## Are these mentioned in DESCRIPTION?
        lic <- desc["License"]
        if(!is.na(lic)) {
            found <- sapply(topfiles,
                            function(x) grepl(x, lic, fixed = TRUE))
            topfiles <- topfiles[!found]
            if (length(topfiles)) {
                if(!any) noteLog(Log)
                any <- TRUE
                one <- (length(topfiles) == 1L)
                msg <- c(if(one) "File" else "Files",
                         "\n",
                         .format_lines_with_indent(topfiles),
                         "\n",
                         if(one) {
                             "is not mentioned in the DESCRIPTION file.\n"
                         } else {
                             "are not mentioned in the DESCRIPTION file.\n"
                         })
                printLog(Log, msg)
            }
        }
    }
    ## The same applies to inst/LICENCE and inst/LICENSE, which would
    ## be installed at top level.
    topfiles <- Sys.glob(file.path("inst", c("LICENCE", "LICENSE")))
    if (length(topfiles)) {
        ## Are these mentioned in DESCRIPTION?
        lic <- desc["License"]
        if(!is.na(lic)) {
            found <- sapply(basename(topfiles),
                            function(x) grepl(x, lic, fixed = TRUE))
            topfiles <- topfiles[!found]
            if (length(topfiles)) {
                if(!any) noteLog(Log)
                any <- TRUE
                one <- (length(topfiles) == 1L)
                msg <- c(if(one) "File" else "Files",
                         "\n",
                         .format_lines_with_indent(topfiles),
                         "\n",
                         if(one) {
                             "will install at top-level and is not mentioned in the DESCRIPTION file.\n"
                         } else {
                             "will install at top-level and are not mentioned in the DESCRIPTION file.\n"
                         })
                printLog(Log, msg)
            }
        }
    }
    if (!is_base_pkg && R_check_toplevel_files) {
        ## any others?
        if(is.null(topfiles0)) {
            topfiles <- dir()
            ## Now check if any of these were created since we started
            topfiles <-
                topfiles[file.info(topfiles, extra_cols = FALSE)$ctime
                         <= .unpack.time]
        } else topfiles <- topfiles0
        ## Files and directories with a standard meaning at top level.
        known <- c("DESCRIPTION", "INDEX", "LICENCE", "LICENSE",
                   "LICENCE.note", "LICENSE.note",
                   "MD5", "NAMESPACE", "NEWS", "PORTING",
                   "COPYING", "COPYING.LIB", "GPL-2", "GPL-3",
                   "BUGS", "Bugs",
                   "ChangeLog", "Changelog", "CHANGELOG", "CHANGES", "Changes",
                   "INSTALL", "README", "THANKS", "TODO", "ToDo",
                   "INSTALL.windows",
                   "README.md", "NEWS.md",
                   "configure", "configure.win", "cleanup", "cleanup.win",
                   "configure.ac", "configure.in",
                   "datafiles",
                   "R", "data", "demo", "exec", "inst", "man",
                   "po", "src", "tests", "vignettes",
                   "build", # used by R CMD build
                   ".aspell", # used for spell checking packages
                   "java", "tools", "noweb") # common dirs in packages.
        topfiles <- setdiff(topfiles, known)
        if (file.exists(file.path("inst", "AUTHORS")))
            topfiles <- setdiff(topfiles, "AUTHORS")
        if (file.exists(file.path("inst", "COPYRIGHTS")))
            topfiles <- setdiff(topfiles, "COPYRIGHTS")
        if (lt <- length(topfiles)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog(Log, ## dirs are files, but maybe not on Windows
                     if(lt > 1L) "Non-standard files/directories found at top level:\n"
                     else "Non-standard file/directory found at top level:\n" )
            msg <- strwrap(paste(sQuote(topfiles), collapse = " "),
                           indent = 2L, exdent = 2L)
            printLog0(Log, paste(c(msg, ""), collapse="\n"))
            cp <- grep("^copyright", topfiles,
                       ignore.case = TRUE, value = TRUE)
            if (length(cp))
                printLog(Log, "Copyright information should be in file inst/COPYRIGHTS\n")
            if("AUTHORS" %in% topfiles)
                printLog(Log, "Authors information should be in file inst/AUTHORS\n")
        }
    }
    if (!any) resultLog(Log, "OK")
}
## Look for files which are probably leftovers: saved svn commit
## messages, and Rd files at the top level (instead of under man/).
check_detritus <- function()
{
    checkingLog(Log, "for left-over files")
    all_paths <- dir(".", full.names = TRUE, recursive = TRUE)
    leftovers <- c(grep("svn-commit[.].*tmp$", all_paths, value = TRUE),
                   grep("^[.]/[^/]*[.][rR]d$", all_paths, value = TRUE))
    if (length(leftovers) == 0L) {
        resultLog(Log, "OK")
    } else {
        ## Report paths relative to the package name rather than "."
        leftovers <- sub("^[.]/", paste0(pkgname, "/"), leftovers)
        noteLog(Log)
        printLog0(Log,
                  "The following files look like leftovers:\n",
                  paste(strwrap(paste(sQuote(leftovers), collapse = ", "),
                                indent = 2, exdent = 2), collapse = "\n"),
                  "\nPlease remove them from your package.\n")
    }
}
## Check index information: a non-empty INDEX file (when present), the
## demo index (demo/00Index), and the vignette index under inst/doc.
check_indices <- function()
{
    checkingLog(Log, "index information")
    problems <- FALSE
    ## An INDEX file which exists but is empty is suspicious.
    if (file.exists("INDEX") &&
        !length(readLines("INDEX", warn = FALSE))) {
        problems <- TRUE
        warningLog(Log, "Empty file 'INDEX'.")
    }
    if (dir.exists("demo")) {
        demo_index <- file.path("demo", "00Index")
        if (!file.exists(demo_index) ||
            !length(readLines(demo_index, warn = FALSE))) {
            if (!problems) warningLog(Log)
            problems <- TRUE
            printLog0(Log,
                      sprintf("Empty or missing file %s.\n",
                              sQuote(demo_index)))
        } else {
            ## FIXME: this does not need to be run in another process
            res <- R_runR0("options(warn=1)\ntools:::.check_demo_index(\"demo\")\n",
                           R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(res)) {
                if (!problems) warningLog(Log)
                problems <- TRUE
                printLog0(Log, paste(c(res, ""), collapse = "\n"))
            }
        }
    }
    if (dir.exists(file.path("inst", "doc"))) {
        ## FIXME: this does not need to be run in another process
        res <- R_runR0("options(warn=1)\ntools:::.check_vignette_index(\"inst/doc\")\n",
                       R_opts2, "R_DEFAULT_PACKAGES=NULL")
        if (length(res)) {
            if (!problems) warningLog(Log)
            problems <- TRUE
            printLog0(Log, paste(c(res, ""), collapse = "\n"))
        }
    }
    if (problems)
        wrapLog("See sections 'The INDEX file' and 'Package subdirectories' in the 'Writing R Extensions' manual.\n")
    else resultLog(Log, "OK")
}
    ## Run a battery of checks over the package's subdirectories:
    ## wrongly-cased standard directories, leftover check/Rd2pdf/VC
    ## directories, empty standard directories, demos that are
    ## non-ASCII or unparseable, 'inst' subdirectories that clash with
    ## directories R itself installs, and validity of inst/NEWS.Rd and
    ## inst/CITATION.
    ## haveR: whether the package has an 'R' directory with code;
    ## subdirs: the --no-* style option controlling the child-process
    ## subdirectory check ("no" disables it).
    ## 'any' tracks whether a warning/note header was already emitted.
    check_subdirectories <- function(haveR, subdirs)
    {
        checkingLog(Log, "package subdirectories")
        any <- FALSE
        if (haveR && !length(list_files_with_type("R", "code")) &&
            !file.exists(file.path("R", "sysdata.rda"))) {
            haveR <- FALSE
            warningLog(Log, "Found directory 'R' with no source files.")
            any <- TRUE
        }
        if (R_check_subdirs_nocase) {
            ## Argh.  We often get submissions where 'R' comes out as 'r',
            ## or 'man' comes out as 'MAN', and we've just ran into 'DATA'
            ## instead of 'data' (2007-03-31).  Maybe we should warn about
            ## this unconditionally ...
            ## <FIXME>
            ## Actually, what we should really do is check whether there is
            ## any directory with lower-cased name matching a lower-cased
            ## name of a standard directory, while differing in name.
            ## </FIXME>
            ## Watch out for case-insensitive file systems
            if ("./r" %in% list.dirs(recursive = FALSE)) {
                if (!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Found subdirectory 'r'.\n",
                         "Most likely, this should be 'R'.\n")
            }
            if ("./MAN" %in% list.dirs(recursive = FALSE)) {
                if (!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Found subdirectory 'MAN'.\n",
                         "Most likely, this should be 'man'.\n")
            }
            if ("./DATA" %in% list.dirs(recursive = FALSE)) {
                if (!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Found subdirectory 'DATA'.\n",
                         "Most likely, this should be 'data'.\n")
            }
        }
        all_dirs <- list.dirs(".")
        ## several packages have had check dirs in the sources, e.g.
        ## ./languageR/languageR.Rcheck
        ## ./locfdr/man/locfdr.Rcheck
        ## ./clustvarsel/inst/doc/clustvarsel.Rcheck
        ## ./bicreduc/OldFiles/bicreduc.Rcheck
        ## ./waved/man/waved.Rcheck
        ## ./waved/..Rcheck
        ind <- grepl("\\.Rcheck$", all_dirs)
        if(any(ind)) {
            if(!any) warningLog(Log)
            any <- TRUE
            msg <- ngettext(sum(ind),
                            "Found the following directory with the name of a check directory:\n",
                            "Found the following directories with names of check directories:\n", domain = NA)
            printLog0(Log, msg,
                      .format_lines_with_indent(all_dirs[ind]),
                      "\n",
                      "Most likely, these were included erroneously.\n")
        }
        ## Several packages had leftover Rd2dvi build directories in
        ## their sources
        ind <- grepl("^\\.Rd2(dvi|pdf)", basename(all_dirs))
        if(any(ind)) {
            if(!any) warningLog(Log)
            any <- TRUE
            msg <- ngettext(sum(ind),
                            "Found the following directory with the name of a Rd2pdf directory:\n",
                            "Found the following directories with names of Rd2pdf directories:\n", domain = NA)
            printLog0(Log, msg,
                      .format_lines_with_indent(all_dirs[ind]),
                      "\n",
                      "Most likely, these were included erroneously.\n")
        }
        if(!is_base_pkg && (istar || R_check_vc_dirs)) {
            ## Packages also should not contain version control subdirs
            ## provided that we check a .tar.gz or know we unpacked one.
            ind <- basename(all_dirs) %in% .vc_dir_names
            if(any(ind)) {
                if(!any) warningLog(Log)
                any <- TRUE
                msg <- ngettext(sum(ind),
                                "Found the following directory with the name of a version control directory:\n",
                                "Found the following directories with names of version control directories:\n", domain = NA)
                printLog0(Log, msg,
                          .format_lines_with_indent(all_dirs[ind]),
                          "\n",
                          "These should not be in a package tarball.\n")
            }
        }
        if (subdirs != "no") {
            ## Delegate the detailed file-name/type checks per
            ## subdirectory to a child R process.
            Rcmd <- "tools:::.check_package_subdirs(\".\")\n"
            ## We don't run this in the C locale, as we only require
            ## certain filenames to start with ASCII letters/digits, and not
            ## to be entirely ASCII.
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if(length(out)) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("Please remove or rename the files.\n",
                        "See section 'Package subdirectories'",
                        "in the 'Writing R Extensions' manual.\n")
            }
        }
        ## Subdirectory 'data' without data sets?
        if (dir.exists("data") &&
            !length(list_files_with_type("data", "data"))) {
            if (!any) warningLog(Log)
            any <- TRUE
            printLog(Log, "Subdirectory 'data' contains no data sets.\n")
        }
        ## Subdirectory 'demo' without demos?
        if (dir.exists("demo")) {
            demos <- list_files_with_type("demo", "demo")
            if(!length(demos)) {
                if (!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Subdirectory 'demo' contains no demos.\n")
            } else {
                ## check for non-ASCII code in each demo
                bad <- character()
                for(d in demos) {
                    x <- readLines(d, warn = FALSE)
                    asc <- iconv(x, "latin1", "ASCII")
                    ## NA from iconv or a changed line means non-ASCII
                    ## content was present.
                    ind <- is.na(asc) | asc != x
                    if (any(ind)) bad <- c(bad, basename(d))
                }
                if (length(bad)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    printLog(Log, "Demos with non-ASCII characters:")
                    if(length(bad) > 1L)
                        printLog0(Log, "\n",
                                  .format_lines_with_indent(bad), "\n")
                    else printLog0(Log, " ", bad, "\n")
                    wrapLog("Portable packages must use only ASCII",
                            "characters in their demos.\n",
                            "Use \\uxxxx escapes for other characters.\n")
                    ## Skip non-ASCII demos in the parse check below.
                    demos <- demos[basename(demos) %notin% bad]
                }
                ## check we can parse each demo.
                bad <- character()
                for(d in demos)
                    tryCatch(parse(file = d),
                             error = function(e) bad <<- c(bad, basename(d)))
                if (length(bad)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    printLog(Log, "Demos which do not contain valid R code:")
                    if(length(bad) > 1L)
                        printLog0(Log, "\n",
                                  .format_lines_with_indent(bad), "\n")
                    else printLog0(Log, " ", bad, "\n")
                }
            }
        }
        ## Subdirectory 'exec' without files?
        if (dir.exists("exec") && !length(dir("exec"))) {
            if (!any) warningLog(Log)
            any <- TRUE
            printLog(Log, "Subdirectory 'exec' contains no files.\n")
        }
        ## Subdirectory 'inst' without files?
        if (dir.exists("inst") && !length(dir("inst", recursive = TRUE))) {
            if (!any) warningLog(Log)
            any <- TRUE
            printLog(Log, "Subdirectory 'inst' contains no files.\n")
        }
        ## Subdirectory 'src' without sources?
        if (dir.exists("src")) {
            ## <NOTE>
            ## If there is a Makefile (or a Makefile.win), we cannot assume
            ## that source files have the predefined extensions.
            ## </NOTE>
            if (!any(file.exists(file.path("src",
                                           c("Makefile", "Makefile.win",
                                             "install.libs.R"))))) {
                if (!length(dir("src", pattern = "\\.([cfmM]|cc|cpp|f90|f95|mm)"))) {
                    if (!any) warningLog(Log)
                    printLog(Log, "Subdirectory 'src' contains no source files.\n")
                    any <- TRUE
                }
            }
        }
        ## Do subdirectories of 'inst' interfere with R package system
        ## subdirectories?
        if (dir.exists("inst")) {
            ## These include pre-2.10.0 ones
            R_system_subdirs <-
                c("Meta", "R", "data", "demo", "exec", "libs",
                  "man", "help", "html", "latex", "R-ex", "build")
            allfiles <- dir("inst", full.names = TRUE)
            alldirs <- allfiles[dir.exists(allfiles)]
            suspect <- basename(alldirs) %in% R_system_subdirs
            if (any(suspect)) {
                ## check they are non-empty
                suspect <- alldirs[suspect]
                ## dir(all.files = TRUE) always includes "." and "..",
                ## so > 2 entries means the directory is non-empty.
                suspect <- suspect[sapply(suspect, function(x) {
                    length(dir(x, all.files = TRUE)) > 2L
                })]
                if (length(suspect)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    wrapLog("Found the following non-empty",
                            "subdirectories of 'inst' also",
                            "used by R:\n")
                    printLog0(Log, .format_lines_with_indent(suspect), "\n")
                    wrapLog("It is recommended not to interfere",
                            "with package subdirectories used by R.\n")
                }
            }
        }
        ## Valid NEWS.Rd?
        nfile <- file.path("inst", "NEWS.Rd")
        if(file.exists(nfile)) {
            ## Catch all warning and error messages.
            ## We use the same construction in at least another place,
            ## so maybe factor out a common utility function
            ##   .try_catch_all_warnings_and_errors
            ## eventually.
            ## For testing package NEWS.Rd files, we really need a real
            ## QC check function eventually ...
            .warnings <- NULL
            .error <- NULL
            withCallingHandlers(tryCatch(.build_news_db_from_package_NEWS_Rd(nfile),
                                         error = function(e)
                                         .error <<- conditionMessage(e)),
                                warning = function(e) {
                                    .warnings <<- c(.warnings,
                                                    conditionMessage(e))
                                    invokeRestart("muffleWarning")
                                })
            msg <- c(.warnings, .error)
            if(length(msg)) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Problems with news in 'inst/NEWS.Rd':\n")
                printLog0(Log,
                          paste0("  ",
                                 unlist(strsplit(msg, "\n", fixed = TRUE)),
                                 collapse = "\n"),
                          "\n")
            }
        }
        ## Valid CITATION metadata?
        if (file.exists(file.path("inst", "CITATION"))) {
            ## When installed, the citation can be checked against the
            ## installed package's metadata; otherwise check standalone.
            Rcmd <- if(do_install)
                sprintf("tools:::.check_citation(\"inst/CITATION\", \"%s\")\n",
                        file.path(if(is_base_pkg) .Library else libdir,
                                  pkgname))
            else
                "tools:::.check_citation(\"inst/CITATION\")\n"
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=utils")
            if(length(out)) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog(Log,
                         "Invalid citation information in 'inst/CITATION':\n")
                printLog0(Log, .format_lines_with_indent(out), "\n")
            }
        }
        ## CITATION files in non-standard places?
        ## Common problems: rather than inst/CITATION, have
        ##   CITATION
        ##   CITATION.txt
        ##   inst/doc/CITATION
        ## Of course, everything in inst is justifiable, so only give a
        ## note for now.
        files <- dir(".", pattern = "^CITATION.*", recursive = TRUE)
        files <- files[file_path_sans_ext(basename(files)) == "CITATION" &
                       files != file.path("inst", "CITATION")]
        if(length(files)) {
            if(!any) noteLog(Log)
            any <- TRUE
            msg <- ngettext(length(files),
                            "Found the following CITATION file in a non-standard place:\n",
                            "Found the following CITATION files in a non-standard place:\n", domain = NA)
            wrapLog(msg)
            printLog0(Log, .format_lines_with_indent(files), "\n")
            wrapLog("Most likely 'inst/CITATION' should be used instead.\n")
        }
        if(!any) resultLog(Log, "OK")
    }
check_non_ASCII <- function()
{
checkingLog(Log, "R files for non-ASCII characters")
out <- R_runR0("tools:::.check_package_ASCII_code('.')",
R_opts2, "R_DEFAULT_PACKAGES=NULL")
if (length(out)) {
warningLog(Log)
msg <- ngettext(length(out),
"Found the following file with non-ASCII characters:\n",
"Found the following files with non-ASCII characters:\n",
domain = NA)
wrapLog(msg)
printLog0(Log, .format_lines_with_indent(out), "\n")
wrapLog("Portable packages must use only ASCII",
"characters in their R code,\n",
"except perhaps in comments.\n",
"Use \\uxxxx escapes for other characters.\n")
} else resultLog(Log, "OK")
checkingLog(Log, "R files for syntax errors")
Rcmd <- "options(warn=1);tools:::.check_package_code_syntax(\"R\")"
out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
if (any(startsWith(out, "Error"))) {
errorLog(Log)
printLog0(Log, paste(c(out, ""), collapse = "\n"))
maybe_exit(1L)
} else if (length(out)) {
warningLog(Log)
printLog0(Log, paste(c(out, ""), collapse = "\n"))
} else resultLog(Log, "OK")
}
    ## Static checks on the package's R code, each run in a child R
    ## process: declared dependencies actually used, S3
    ## generic/method consistency, replacement-function 'value'
    ## arguments, and foreign function (.C/.Call/...) calls.
    check_R_code <- function()
    {
        ## if (!is_base_pkg) {
        checkingLog(Log, "dependencies in R code")
        if (do_install) {
            Rcmd <- paste("options(warn=1, showErrorCalls=FALSE)\n",
                          sprintf("tools:::.check_packages_used(package = \"%s\")\n", pkgname))
            out <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                ## Only some kinds of output are severe enough for a
                ## warning; the rest get a note.
                if(any(grepl("(not declared from|Including base/recommended)", out))) warningLog(Log)
                else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                ## wrapLog(msg_DESCRIPTION)
            } else resultLog(Log, "OK")
        } else {
            ## this needs to read the package code, and will fail on
            ## syntax errors such as non-ASCII code.
            Rcmd <- paste("options(warn=1, showErrorCalls=FALSE)\n",
                          sprintf("tools:::.check_packages_used(dir = \"%s\")\n", pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                if(any(grepl("not declared from", out))) warningLog(Log)
                else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                ## wrapLog(msg_DESCRIPTION)
            } else resultLog(Log, "OK")
        }
        ## }
        ## Check whether methods have all arguments of the corresponding
        ## generic.
        checkingLog(Log, "S3 generic/method consistency")
        Rcmd <- paste("options(warn=1)\n",
                      "options(expressions=1000)\n",
                      if (do_install)
                      sprintf("tools::checkS3methods(package = \"%s\")\n", pkgname)
                      else
                      sprintf("tools::checkS3methods(dir = \"%s\")\n", pkgdir))
        out <- R_runR2(Rcmd)
        if (length(out)) {
            ## Split the output at the "apparent S3 methods" marker:
            ## everything before it is a warning, the rest only a note.
            pos <- grep("^Found the following apparent S3 methods", out)
            if(!length(pos)) {
                out1 <- out
                out2 <- character()
            } else {
                pos <- pos[1L]
                out1 <- out[seq_len(pos - 1L)]
                out2 <- out[seq.int(pos, length(out))]
            }
            if(length(out1)) {
                warningLog(Log)
                printLog0(Log, paste(c(out1, ""), collapse = "\n"))
                wrapLog("See section 'Generic functions and methods'",
                        "in the 'Writing R Extensions' manual.\n")
            } else
                noteLog(Log)
            if(length(out2)) {
                printLog0(Log,
                          paste(c(if(length(out1)) "", out2, ""),
                                collapse = "\n"))
                wrapLog("See section 'Registering S3 methods'",
                        "in the 'Writing R Extensions' manual.\n")
            }
        } else resultLog(Log, "OK")
        ## Check whether replacement functions have their final argument
        ## named 'value'.
        checkingLog(Log, "replacement functions")
        Rcmd <- paste("options(warn=1)\n",
                      if (do_install)
                      sprintf("tools::checkReplaceFuns(package = \"%s\")\n", pkgname)
                      else
                      sprintf("tools::checkReplaceFuns(dir = \"%s\")\n", pkgdir))
        out <- R_runR2(Rcmd)
        if (length(out)) {
            ## <NOTE>
            ## We really want to stop if we find offending replacement
            ## functions.  But we cannot use error() because output may
            ## contain warnings ...
            warningLog(Log)
            ## </NOTE>
            printLog0(Log, paste(c(out, ""), collapse = "\n"))
            wrapLog("The argument of a replacement function",
                    "which corresponds to the right hand side",
                    "must be named 'value'.\n")
        } else resultLog(Log, "OK")
        ## Check foreign function calls.
        ## The neverending story ...
        ## For the time being, allow to turn this off by setting the environment
        ## variable _R_CHECK_FF_CALLS_ to an empty value.
        if (nzchar(R_check_FF)) {
            ## Registration information can only be checked against a
            ## real (non-fake) install.
            registration <-
                identical(R_check_FF, "registration") && install != "fake"
            checkingLog(Log, "foreign function calls")
            DUP <- R_check_FF_DUP
            if(as_cran) {
                Sys.setenv("_R_CHECK_FF_AS_CRAN_" = "TRUE")
                DUP <- TRUE
            }
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools::checkFF(package = \"%s\", registration = %s, check_DUP = %s)\n",
                                  pkgname, registration, DUP)
                          else
                          sprintf("tools::checkFF(dir = \"%s\", registration = %s, check_DUP = %s)\n",
                                  pkgdir, "FALSE", DUP))
            out <- R_runR2(Rcmd)
            Sys.unsetenv("_R_CHECK_FF_AS_CRAN_")
            if (length(out)) {
                ## Classify severity from the child-process output text.
                if(any(grepl("^Foreign function calls? with(out| empty)", out)) ||
                   (!is_base_pkg && any(grepl("to a base package:", out))) ||
                   any(grepl("^Undeclared packages? in", out)) ||
                   any(grepl("parameter[s]*, expected ", out))
                   ) warningLog(Log)
                else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                if(!is_base_pkg && any(grepl("to a base package:", out)))
                    wrapLog("Packages should not make",
                            ".C/.Call/.External/.Fortran",
                            "calls to a base package.",
                            "They are not part of the API,",
                            "for use only by R itself",
                            "and subject to change without notice.")
                else if(any(grepl("with DUP:", out)))
                    wrapLog("DUP is no longer supported and will be ignored.")
                else
                    wrapLog("See chapter 'System and foreign language interfaces' in the 'Writing R Extensions' manual.\n")
            } else resultLog(Log, "OK")
        }
    }
    ## Check the R code for possible problems: shared-object
    ## (un)loading, startup/unload functions, unsafe calls, codetools
    ## usage analysis, .Internal calls, assignments to the global
    ## environment, attach()/data() misuse, and deprecated/defunct
    ## functions.  Results are collected in out1..out8 and reported
    ## together at the end with warning/note severity depending on
    ## which checks fired.
    check_R_files <- function(is_rec_pkg)
    {
        checkingLog(Log, "R code for possible problems")
        t1 <- proc.time()
        if (!is_base_pkg) {
            ## Incorrect (un)loading of the package's shared object is
            ## fatal: log an error and possibly stop.
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_code_shlib(dir = \"%s\")\n",
                                  pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                errorLog(Log)
                wrapLog("Incorrect (un)loading of package",
                        "shared object.\n")
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("The system-specific extension for",
                        "shared objects must not be added.\n",
                        "See ?library.dynam.\n")
                maybe_exit(1L)
            }
        }
        Rcmd <- paste("options(warn=1)\n",
                      sprintf("tools:::.check_package_code_startup_functions(dir = \"%s\")\n",
                              pkgdir))
        out1 <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=")
        Rcmd <- paste("options(warn=1)\n",
                      sprintf("tools:::.check_package_code_unload_functions(dir = \"%s\")\n",
                              pkgdir))
        out1a <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=")
        ## Merge startup and unload results, blank-line separated when
        ## both produced output.
        out1 <- if (length(out1) && length(out1a)) c(out1, "", out1a)
        else c(out1, out1a)
        out2 <- out3 <- out4 <- out5 <- out6 <- out7 <- out8 <- NULL
        if (!is_base_pkg && R_check_unsafe_calls) {
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_code_tampers(dir = \"%s\")\n",
                                  pkgdir))
            out2 <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
        }
        if (R_check_use_codetools && do_install) {
            ## codetools-based usage analysis of the installed package.
            Rcmd <-
                paste("options(warn=1)\n",
                      sprintf("tools:::.check_code_usage_in_package(package = \"%s\")\n", pkgname))
            if(config_val_to_logical(Sys.getenv("_R_CHECK_CODE_USAGE_WITH_ONLY_BASE_ATTACHED_",
                                                "true"))) {
                out3 <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=NULL")
                ## If undefined globals were reported, suggest the
                ## importFrom() directives that would resolve them.
                if(length(pos <-
                          grep("^Undefined global functions or variables:",
                               out3))) {
                    Rcmd <-
                        sprintf("writeLines(strwrap(tools:::imports_for_undefined_globals(\"%s\"), exdent = 11))\n",
                                paste(utils::tail(out3, -pos),
                                      collapse = " "))
                    miss <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=")
                    ## base has no NAMESPACE
                    if(length(miss) && pkgname != "base") {
                        msg3 <- if(length(grep("^importFrom\\(\"methods\"",
                                               miss))) {
                            strwrap("to your NAMESPACE file (and ensure that your DESCRIPTION Imports field contains 'methods').")
                        } else "to your NAMESPACE file."
                        out3 <- c(out3,
                                  c("Consider adding",
                                    paste0("  ", miss),
                                    msg3))
                    }
                }
            } else
                out3 <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=")
        }
        if(!is_base_pkg && R_check_use_codetools && R_check_dot_internal) {
            details <- pkgname != "relax" # has .Internal in a 10,000 line fun
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools:::.check_dotInternal(package = \"%s\",details=%s)\n", pkgname, details)
                          else
                          sprintf("tools:::.check_dotInternal(dir = \"%s\",details=%s)\n", pkgdir, details))
            out4 <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=")
            ## Hmisc, gooJSON, quantmod give spurious output
            if (!any(grepl("^Found.* .Internal call", out4))) out4 <- NULL
        }
        if(!is_base_pkg && R_check_code_assign_to_globalenv) {
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_code_assign_to_globalenv(dir = \"%s\")\n",
                                  pkgdir))
            out5 <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=")
        }
        if(!is_base_pkg && R_check_code_attach) {
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_code_attach(dir = \"%s\")\n",
                                  pkgdir))
            out6 <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=")
        }
        if(!is_base_pkg && R_check_code_data_into_globalenv) {
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_code_data_into_globalenv(dir = \"%s\")\n",
                                  pkgdir))
            out7 <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=")
        }
        ## Use of deprecated, defunct and platform-specific devices?
        if(!is_base_pkg && R_check_use_codetools && R_check_depr_def) {
            win <- !is.na(OS_type) && OS_type == "windows"
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools:::.check_depdef(package = \"%s\", WINDOWS = %s)\n", pkgname, win)
                          else
                          sprintf("tools:::.check_depdef(dir = \"%s\", WINDOWS = %s)\n", pkgdir, win))
            out8 <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=")
        }
        t2 <- proc.time()
        print_time(t1, t2, Log)
        if (length(out1) || length(out2) || length(out3) ||
            length(out4) || length(out5) || length(out6) ||
            length(out7) || length(out8)) {
            ## Only .Internal calls (out4) and defunct functions (in
            ## out8) warrant a warning; everything else is a note.
            ini <- character()
            if(length(out4) ||
               length(grep("^Found the defunct/removed function", out8)))
                warningLog(Log) else noteLog(Log)
            if (length(out4)) {
                first <- grep("^Found.* .Internal call", out4)[1L]
                if(first > 1L) out4 <- out4[-seq_len(first-1)]
                printLog0(Log, paste(c(ini, out4, "", ""), collapse = "\n"))
                wrapLog(c("Packages should not call .Internal():",
                          "it is not part of the API,",
                          "for use only by R itself",
                          "and subject to change without notice."))
                ini <- ""
            }
            if (length(out8)) {
                printLog0(Log, paste(c(ini, out8, ""), collapse = "\n"))
                if(length(grep("^Found the defunct/removed function", out8)))
                    ini <- ""
            }
            ## All remaining checks give notes and not warnings.
            ## 'ini' is non-empty iff a warning was logged above, in
            ## which case the notes get an explanatory separator.
            if(length(ini))
                ini <- c("",
                         "In addition to the above warning(s), found the following notes:",
                         "")
            if (length(out1)) {
                printLog0(Log, paste(c(ini, out1, ""), collapse = "\n"))
                ini <- ""
            }
            if (length(out2)) {
                printLog0(Log,
                          paste(c(ini,
                                  "Found the following possibly unsafe calls:",
                                  out2, ""),
                                collapse = "\n"))
                ini <- ""
            }
            if (length(out3)) {
                printLog0(Log, paste(c(ini, out3, ""), collapse = "\n"))
                ini <- ""
            }
            if (length(out5)) {
                printLog0(Log, paste(c(ini, out5, ""), collapse = "\n"))
                ini <- ""
            }
            if (length(out6)) {
                printLog0(Log, paste(c(ini, out6, ""), collapse = "\n"))
                ini <- ""
                wrapLog(gettextf("See section %s in '%s'.",
                                 sQuote("Good practice"), "?attach"))
            }
            if (length(out7)) {
                printLog0(Log, paste(c(ini, out7, ""), collapse = "\n"))
                ini <- ""
                wrapLog(gettextf("See section %s in '%s'.",
                                 sQuote("Good practice"), "?data"))
            }
        } else resultLog(Log, "OK")
    }
    ## Run the documentation (Rd) checks: parseability, metadata, line
    ## widths, cross-references, missing documentation entries,
    ## code/documentation mismatches, \usage sections, Rd contents,
    ## and unstated dependencies in examples.  Each sub-check runs in
    ## a child R process.  haveR: whether the package has R code.
    check_Rd_files <- function(haveR)
    {
        msg_writing_Rd <-
            c("See chapter 'Writing R documentation files' in the 'Writing R Extensions' manual.\n")
        if (dir.exists("man") && !extra_arch) {
            checkingLog(Log, "Rd files")
            minlevel <- Sys.getenv("_R_CHECK_RD_CHECKRD_MINLEVEL_", "-1")
            Rcmd <- paste("options(warn=1)\n",
                          sprintf("tools:::.check_package_parseRd('.', minlevel=%s)\n", minlevel))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                ## Dropped empty sections alone only rate a note.
                if(length(grep("^prepare.*Dropping empty section", out,
                               invert = TRUE)))
                    warningLog(Log)
                else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
            checkingLog(Log, "Rd metadata")
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools:::.check_Rd_metadata(package = \"%s\")\n", pkgname)
                          else
                          sprintf("tools:::.check_Rd_metadata(dir = \"%s\")\n", pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        ## Check Rd line widths.
        if(dir.exists("man") && R_check_Rd_line_widths) {
            checkingLog(Log, "Rd line widths")
            Rcmd <- paste("options(warn=1)\n",
                          if(do_install)
                          sprintf("tools:::.check_Rd_line_widths(\"%s\", installed = TRUE)\n",
                                  file.path(if(is_base_pkg) .Library else libdir,
                                            pkgname))
                          else
                          sprintf("tools:::.check_Rd_line_widths(\"%s\")\n",
                                  pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if(length(out)) {
                noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("These lines will be truncated in the PDF manual.\n")
            } else resultLog(Log, "OK")
        }
        ## Check cross-references in R documentation files.
        ## <NOTE>
        ## Installing a package warns about missing links (and hence R CMD
        ## check knows about this too provided an install log is used).
        ## However, under Windows the install-time check verifies the links
        ## against what is available in the default library, which might be
        ## considerably more than what can be assumed to be available.
        ##
        ## The formulations in section "Cross-references" of R-exts are not
        ## quite clear about this, but CRAN policy has for a long time
        ## enforced anchoring links to targets (aliases) from non-base
        ## packages.
        ## </NOTE>
        if (dir.exists("man") && R_check_Rd_xrefs) {
            checkingLog(Log, "Rd cross-references")
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools:::.check_Rd_xrefs(package = \"%s\")\n", pkgname)
                          else
                          sprintf("tools:::.check_Rd_xrefs(dir = \"%s\")\n", pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                ## Merely-unavailable packages are only a note.
                if (!all(grepl("Package[s]? unavailable to check", out)))
                    warningLog(Log)
                else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        ## Check for missing documentation entries.
        if (!extra_arch && (haveR || dir.exists("data"))) {
            checkingLog(Log, "for missing documentation entries")
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools::undoc(package = \"%s\")\n", pkgname)
                          else
                          sprintf("tools::undoc(dir = \"%s\")\n", pkgdir))
            ## This is needed to pick up undocumented S4 classes.
            ## even for packages which only import methods.
            ## But as that check needs to run get() on all the lazy-loaded
            ## promises, avoid if possible.
            ## desc exists in the body of this function.
            use_methods <- if(pkgname == "methods") TRUE else {
                pi <- .split_description(desc)
                "methods" %in% c(names(pi$Depends), names(pi$Imports))
            }
            out <- if (use_methods) {
                env <- if(WINDOWS) "R_DEFAULT_PACKAGES=utils,grDevices,graphics,stats,methods" else "R_DEFAULT_PACKAGES='utils,grDevices,graphics,stats,methods'"
                R_runR2(Rcmd, env = env)
            } else R_runR2(Rcmd)
            ## Grr, get() in undoc can change the search path
            ## Current example is TeachingDemos
            out <- filtergrep("^Loading required package:", out)
            err <- grep("^Error", out)
            if (length(err)) {
                errorLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                maybe_exit(1L)
            } else if (length(out)) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("All user-level objects",
                        "in a package",
                        if (any(startsWith(out, "Undocumented S4")))
                        "(including S4 classes and methods)",
                        "should have documentation entries.\n")
                wrapLog(msg_writing_Rd)
            } else resultLog(Log, "OK")
        }
        ## Check for code/documentation mismatches.
        if (dir.exists("man") && !extra_arch) {
            checkingLog(Log, "for code/documentation mismatches")
            if (!do_codoc) resultLog(Log, "SKIPPED")
            else {
                any <- FALSE
                ## Check for code/documentation mismatches in functions.
                if (haveR) {
                    Rcmd <- paste("options(warn=1)\n",
                                  if (do_install)
                                  sprintf("tools::codoc(package = \"%s\")\n", pkgname)
                                  else
                                  sprintf("tools::codoc(dir = \"%s\")\n", pkgdir))
                    out <- R_runR2(Rcmd)
                    if (length(out)) {
                        any <- TRUE
                        warningLog(Log)
                        printLog0(Log, paste(c(out, ""), collapse = "\n"))
                    }
                }
                ## Check for code/documentation mismatches in data sets.
                if (do_install) {
                    Rcmd <- paste("options(warn=1)\n",
                                  sprintf("tools::codocData(package = \"%s\")\n", pkgname))
                    out <- R_runR2(Rcmd)
                    if (length(out)) {
                        if (!any) warningLog(Log)
                        any <- TRUE
                        printLog0(Log, paste(c(out, ""), collapse = "\n"))
                    }
                }
                ## Check for code/documentation mismatches in S4 classes.
                if (do_install && haveR) {
                    Rcmd <- paste("options(warn=1)\n",
                                  sprintf("tools::codocClasses(package = \"%s\")\n", pkgname))
                    out <- R_runR2(Rcmd)
                    if (length(out)) {
                        if (!any) warningLog(Log)
                        any <- TRUE
                        printLog0(Log, paste(c(out, ""), collapse = "\n"))
                    }
                }
                if (!any) resultLog(Log, "OK")
            }
        }
        ## Check Rd files, for consistency of \usage with \arguments (are
        ## all arguments shown in \usage documented in \arguments?) and
        ## aliases (do all functions shown in \usage have an alias?)
        if (dir.exists("man") && !extra_arch) {
            checkingLog(Log, "Rd \\usage sections")
            msg_doc_files <-
                c("Functions with \\usage entries",
                  "need to have the appropriate \\alias entries,",
                  "and all their arguments documented.\n",
                  "The \\usage entries must correspond to syntactically",
                  "valid R code.\n")
            any <- FALSE
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools::checkDocFiles(package = \"%s\")\n", pkgname)
                          else
                          sprintf("tools::checkDocFiles(dir = \"%s\")\n", pkgdir))
            out <- R_runR2(Rcmd)
            if (length(out)) {
                any <- TRUE
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog(msg_doc_files)
                wrapLog(msg_writing_Rd)
            }
            if (R_check_Rd_style && haveR) {
                ## Style check: S3 methods in \usage should use \method.
                msg_doc_style <-
                    c("The \\usage entries for S3 methods should use",
                      "the \\method markup and not their full name.\n")
                Rcmd <- paste("options(warn=1)\n",
                              if (do_install)
                              sprintf("tools::checkDocStyle(package = \"%s\")\n", pkgname)
                              else
                              sprintf("tools::checkDocStyle(dir = \"%s\")\n", pkgdir))
                out <- R_runR2(Rcmd)
                if (length(out)) {
                    if (!any) noteLog(Log)
                    any <- TRUE
                    printLog0(Log, paste(c(out, ""), collapse = "\n"))
                    wrapLog(msg_doc_style)
                    wrapLog(msg_writing_Rd)
                }
            }
            if (!any) resultLog(Log, "OK")
        }
        ## Check Rd contents
        if (dir.exists("man") && R_check_Rd_contents && !extra_arch) {
            checkingLog(Log, "Rd contents")
            Rcmd <- paste("options(warn=1)\n",
                          if (do_install)
                          sprintf("tools:::.check_Rd_contents(package = \"%s\")\n", pkgname)
                          else
                          sprintf("tools:::.check_Rd_contents(dir = \"%s\")\n", pkgdir))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        ## Check undeclared dependencies in examples (if any)
        if (dir.exists("man") && do_install && !extra_arch && !is_base_pkg) {
            checkingLog(Log, "for unstated dependencies in examples")
            Rcmd <- paste("options(warn=1, showErrorCalls=FALSE)\n",
                          sprintf("tools:::.check_packages_used_in_examples(package = \"%s\")\n", pkgname))
            out <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                # wrapLog(msg_DESCRIPTION)
            } else resultLog(Log, "OK")
        } ## FIXME, what if no install?
    }
    ## Check the 'data' directory and R/sysdata.rda: disallowed file
    ## types, non-ASCII characters in datasets, and datasets saved
    ## without (re)compression.  All sub-checks are skipped for base
    ## packages.
    check_data <- function()
    {
        ## Check contents of 'data'
        if (!is_base_pkg && dir.exists("data")) {
            checkingLog(Log, "contents of 'data' directory")
            fi <- list.files("data")
            if (!any(grepl("\\.[Rr]$", fi))) { # code files can do anything
                dataFiles <- basename(list_files_with_type("data", "data"))
                ## Anything that is neither a recognized data file nor
                ## the optional 'datalist' index is flagged.
                odd <- fi %w/o% c(dataFiles, "datalist")
                if (length(odd)) {
                    warningLog(Log)
                    msg <-
                        c(sprintf("Files not of a type allowed in a %s directory:\n",
                                  sQuote("data")),
                          paste0(.pretty_format(odd), "\n"),
                          sprintf("Please use e.g. %s for non-R data files\n",
                                  sQuote("inst/extdata")))
                    printLog0(Log, msg)
                } else resultLog(Log, "OK")
            } else resultLog(Log, "OK")
        }
        ## Check for non-ASCII characters in 'data'
        if (!is_base_pkg && R_check_ascii_data && dir.exists("data")) {
            checkingLog(Log, "data for non-ASCII characters")
            out <- R_runR0("tools:::.check_package_datasets('.')", R_opts2)
            ## Strip noise lines emitted while loading the datasets.
            out <- filtergrep("Loading required package", out)
            out <- filtergrep("Warning: changing locked binding", out, fixed = TRUE)
            if (length(out)) {
                ## Lines starting "Warning:" upgrade the note to a warning.
                bad <- grep("^Warning:", out)
                if (length(bad)) warningLog(Log) else noteLog(Log)
                printLog0(Log, .format_lines_with_indent(out), "\n")
            } else resultLog(Log, "OK")
        }
        ## Check for ASCII and uncompressed/unoptimized saves in 'data'
        if (!is_base_pkg && R_check_compact_data && dir.exists("data")) {
            checkingLog(Log, "data for ASCII and uncompressed saves")
            out <- R_runR0("tools:::.check_package_compact_datasets('.', TRUE)",
                           R_opts2)
            out <- filtergrep("Warning: changing locked binding", out, fixed = TRUE)
            if (length(out)) {
                warningLog(Log)
                printLog0(Log, .format_lines_with_indent(out), "\n")
            } else resultLog(Log, "OK")
        }
        ## Check for ASCII and uncompressed/unoptimized saves in 'sysdata':
        ## no base package has this
        if (R_check_compact_data && file.exists(file.path("R", "sysdata.rda"))) {
            checkingLog(Log, "R/sysdata.rda")
            out <- R_runR0("tools:::.check_package_compact_sysdata('.', TRUE)",
                           R_opts2)
            if (length(out)) {
                bad <- grep("^Warning:", out)
                if (length(bad)) warningLog(Log) else noteLog(Log)
                printLog0(Log, .format_lines_with_indent(out), "\n")
            } else resultLog(Log, "OK")
        }
    }
    ## Check the installed 'inst/doc' contents for common problems:
    ## style files already shipped with R, files whose embedded
    ## license requires distributing sources, TeX build leftovers,
    ## and vignette sources/figures that should not be installed.
    ## All problems are notes; returns early if the package installed
    ## no 'doc' directory.
    check_doc_contents <- function()
    {
        ## Have already checked that inst/doc exists
        doc_dir <- file.path(libdir, pkgname, "doc")
        if (!dir.exists(doc_dir)) return()
        checkingLog(Log, "installed files from 'inst/doc'")
        ## special case common problems.
        any <- FALSE
        files <- dir(file.path(pkgdir, "inst", "doc"))
        already <- c("jss.cls", "jss.bst", "Rd.sty", "Sweave.sty")
        bad <- files[files %in% already]
        if (length(bad)) {
            noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files are already in R: ",
                      paste(sQuote(bad), collapse = ", "), "\n",
                      "Please remove them from your package.\n")
        }
        files2 <- dir(file.path(pkgdir, "inst", "doc"), recursive = TRUE,
                      pattern = "[.](cls|sty|drv)$", full.names = TRUE)
        ## Skip Rnews.sty and RJournal.sty for now
        files2 <- files2[basename(files2) %notin%
                         c("jss.cls", "jss.drv", "Rnews.sty", "RJournal.sty")]
        bad <- character()
        for(f in files2) {
            ## All three lines of this license notice must be present
            ## for the file to count as requiring source distribution.
            pat <- "%% (This generated file may be distributed as long as the|original source files, as listed above, are part of the|same distribution.)"
            if(length(grep(pat, readLines(f, warn = FALSE), useBytes = TRUE))
               == 3L) bad <- c(bad, basename(f))
        }
        if (length(bad)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files contain a license that requires\n",
                      "distribution of original sources:\n",
                      "  ", paste(sQuote(bad), collapse = ", "), "\n",
                      "Please ensure that you have complied with it.\n")
        }
        ## Now look for TeX leftovers (and soiltexture, Amelia ...).
        bad <- grepl("[.](log|aux|bbl|blg|dvi|toc|out|Rd|Rout|dbj|drv|ins)$",
                     files, ignore.case = TRUE)
        if (any(bad)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files look like leftovers/mistakes:\n",
                      paste(strwrap(paste(sQuote(files[bad]), collapse = ", "),
                                    indent = 2, exdent = 2), collapse = "\n"),
                      "\nPlease remove them from your package.\n")
        }
        files <- dir(doc_dir)
        files <- files %w/o% already
        bad <- grepl("[.](tex|lyx|png|jpg|jpeg|gif|ico|bst|cls|sty|ps|eps|img)$",
                     files, ignore.case = TRUE)
        bad <- bad | grepl("(Makefile|~$)", files)
        ## How about any pdf files which look like figures files from vignettes?
        vigns <- pkgVignettes(dir = pkgdir)
        if (!is.null(vigns) && length(vigns$docs)) {
            ## Match e.g. '<vignette>-12.pdf', the Sweave figure
            ## naming convention.
            vf <- vigns$names
            pat <- paste(vf, collapse="|")
            pat <- paste0("^(", pat, ")-[0-9]+[.]pdf")
            bad <- bad | grepl(pat, files)
        }
        bad <- bad | grepl("^fig.*[.]pdf$", files)
        badf <- files[bad]
        dirs <- basename(list.dirs(doc_dir, recursive = FALSE))
        ## Directory names seen in the wild that are vignette build
        ## inputs, not documentation to install.
        badd <- dirs[dirs %in% c("auto", "Bilder", "fig", "figs", "figures",
                                 "Figures", "img", "images", "JSSstyle",
                                 "jssStyle", "screenshots2", "src", "tex", "tmp")]
        if (length(c(badf, badd))) {
            if(!any) noteLog(Log)
            any <- TRUE
            if(length(badf))
                printLog0(Log,
                          "The following files should probably not be installed:\n",
                          paste(strwrap(paste(sQuote(badf), collapse = ", "),
                                        indent = 2, exdent = 2), collapse = "\n"),
                          "\n")
            if(length(badd))
                printLog0(Log,
                          "The following directories should probably not be installed:\n",
                          paste(strwrap(paste(sQuote(badd), collapse = ", "),
                                        indent = 2, exdent = 2), collapse = "\n"),
                          "\n")
            printLog0(Log, "\nConsider the use of a .Rinstignore file: see ",
                      sQuote("Writing R Extensions"), ",\n",
                      "or move the vignette sources from ",
                      sQuote("inst/doc"), " to ", sQuote("vignettes"), ".\n")
        }
        if (!any) resultLog(Log, "OK")
    }
    ## Check the package's 'vignettes' source directory for consistency
    ## with 'inst/doc' and for common mistakes: vignette sources present
    ## only in 'inst/doc', stale built vignettes (mtime comparisons),
    ## missing VignetteBuilder field, files already shipped with R,
    ## license-encumbered style files, TeX leftovers, and knitr cache/
    ## figure directories.  Reports WARNINGs/NOTEs to the check log.
    ##
    ## ignore_vignettes: if TRUE the whole check is skipped (logged as
    ## SKIPPED).
    check_vign_contents <- function(ignore_vignettes = FALSE)
    {
        checkingLog(Log, "files in 'vignettes'")
        if (ignore_vignettes) {
            resultLog(Log, "SKIPPED")
            return()
        }
        ## special case common problems.
        ## 'any' tracks whether a WARNING/NOTE has been emitted already.
        any <- FALSE
        pattern <- vignetteEngine("Sweave")$pattern
        vign_dir <- file.path(pkgdir, "vignettes")
        ## Sweave-style sources found in inst/doc but not in vignettes.
        sources <- setdiff(list.files(file.path(pkgdir, "inst", "doc"),
                                      pattern = pattern),
                           list.files(vign_dir, pattern = pattern))
        if(length(sources)) {
            warningLog(Log)
            any <- TRUE
            msg <- c("Vignette sources in 'inst/doc' missing from the 'vignettes' directory:",
                     strwrap(paste(sQuote(sources), collapse = ", "),
                             indent = 2L, exdent = 4L),
                     "")
            printLog0(Log, paste(msg, collapse = "\n"))
        }
        ## Did the vignettes get updated in inst/doc?
        inst_doc_files <- list.files(file.path(pkgdir, "inst", "doc"),
                                     recursive = TRUE)
        vignette_files <- list.files(vign_dir, recursive = TRUE)
        if (!is_base_pkg && length(vignette_files)) {
            if (!length(inst_doc_files)) {
                if (!any) warningLog(Log)
                any <- TRUE
                msg <- c("Files in the 'vignettes' directory but no files in 'inst/doc':",
                         strwrap(paste(sQuote(vignette_files), collapse = ", "),
                                 indent = 2L, exdent = 4L),
                         "")
                printLog0(Log, paste(msg, collapse = "\n"))
            } else {
                ## allow for some imprecision in file times (in secs)
                time_tol <- as.double(Sys.getenv("_R_CHECK_FILE_TIMES_TOL_", 10))
                vignette_times <- file.mtime(file.path(vign_dir, vignette_files))
                inst_doc_times <- file.mtime(file.path(pkgdir, "inst", "doc", inst_doc_files))
                ## First comparison: any vignette source newer than *all*
                ## installed docs (i.e. inst/doc was not rebuilt at all).
                if (sum(!is.na(vignette_times)) && sum(!is.na(inst_doc_times)) &&
                    max(vignette_times, na.rm = TRUE) > max(inst_doc_times, na.rm = TRUE) + time_tol) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    msg <- c("Files in the 'vignettes' directory newer than all files in 'inst/doc':",
                             strwrap(paste(sQuote(vignette_files[!is.na(vignette_times) & vignette_times > max(inst_doc_times, na.rm = TRUE)]),
                                           collapse = ", "),
                                     indent = 2L, exdent = 4L),
                             "")
                    ## Drop the already-reported files so the per-file
                    ## comparison below does not report them again.
                    keep <- is.na(vignette_times) |
                        vignette_times <= max(inst_doc_times, na.rm = TRUE) + time_tol
                    vignette_files <- vignette_files[keep]
                    vignette_times <- vignette_times[keep]
                    printLog0(Log, paste(msg, collapse = "\n"))
                }
                ## Second comparison: per-file, vignette source newer than
                ## its identically-named counterpart in inst/doc.
                matches <- match(vignette_files, inst_doc_files)
                newer <- vignette_times > inst_doc_times[matches] + time_tol
                newer <- !is.na(matches) & !is.na(newer) & newer
                if (any(newer)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    msg <- c("Files in the 'vignettes' directory newer than same file in 'inst/doc':",
                             strwrap(paste(sQuote(vignette_files[newer]),
                                           collapse = ", "),
                                     indent = 2L, exdent = 4L),
                             "")
                    printLog0(Log, paste(msg, collapse = "\n"))
                }
            }
        }
        ## Non-Sweave vignette sources require a VignetteBuilder field in
        ## DESCRIPTION ('desc' is the parsed DESCRIPTION from the closure).
        files <- dir(file.path(pkgdir, "vignettes"))
        if(length(files) &&
           !length(dir(file.path(pkgdir, "vignettes"),
                       pattern = pattern)) &&
           is.na(desc["VignetteBuilder"])) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "Package has no Sweave vignette sources and no VignetteBuilder field.\n")
        }
        ## Let pkgVignettes() itself report problems (e.g. duplicated
        ## vignette titles/output files).
        vigns <- pkgVignettes(dir = pkgdir, check = TRUE)
        if(length(msg <- vigns[["msg"]])) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log, paste(msg, collapse = "\n"), "\n")
        }
        ## Files that ship with R itself and must not be duplicated.
        already <- c("jss.cls", "jss.bst", "Rd.sty", "Sweave.sty")
        bad <- files[files %in% already]
        if (length(bad)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files are already in R: ",
                      paste(sQuote(bad), collapse = ", "), "\n",
                      "Please remove them from your package.\n")
        }
        ## Scan LaTeX class/style/driver files for the JSS-style license
        ## requiring distribution of original sources (same test as in
        ## the inst/doc contents check).
        files2 <- dir(file.path(pkgdir, "vignettes"), recursive = TRUE,
                      pattern = "[.](cls|sty|drv)$", full.names = TRUE)
        files2 <- files2[basename(files2) %notin%
                         c("jss.cls", "jss.drv", "Rnews.sty", "RJournal.sty")]
        bad <- character()
        for(f in files2) {
            ## All three alternatives must match (count == 3L) to flag.
            pat <- "%% (This generated file may be distributed as long as the|original source files, as listed above, are part of the|same distribution.)"
            if(length(grep(pat, readLines(f, warn = FALSE), useBytes = TRUE))
               == 3L) bad <- c(bad, basename(f))
        }
        if (length(bad)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files contain a license that requires\n",
                      "distribution of original sources:\n",
                      " ", paste(sQuote(bad), collapse = ", "), "\n",
                      "Please ensure that you have complied with it.\n")
        }
        ## Now look for TeX leftovers (and soiltexture, Amelia ...).
        bad <- grepl("[.](log|aux|bbl|blg|dvi|toc|out|Rd|Rout|dbj|drv|ins)$",
                     files, ignore.case = TRUE)
        bad <- bad | (files %in% c("Rplots.ps", "Rplots.pdf"))
        if (any(bad)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      "The following files look like leftovers/mistakes:\n",
                      paste(strwrap(paste(sQuote(files[bad]), collapse = ", "),
                                    indent = 2, exdent = 2), collapse = "\n"),
                      "\nPlease remove them from your package.\n")
        }
        ## Probable leftovers from knitr
        dirs <- file.path(pkgdir, "vignettes", c("cache", "figure"))
        dirs <- basename(dirs[dir.exists(dirs)])
        if(length(dirs)) {
            if(!any) noteLog(Log)
            any <- TRUE
            printLog0(Log,
                      if(length(dirs)> 1L) "The following directories look like leftovers from 'knitr':\n"
                      else "The following directory looks like a leftover from 'knitr':\n",
                      paste(strwrap(paste(sQuote(dirs), collapse = ", "),
                                    indent = 2, exdent = 2), collapse = "\n"),
                      "\nPlease remove from your package.\n")
        }
        if (!any) resultLog(Log, "OK")
    }
check_doc_size <- function()
{
## Have already checked that inst/doc exists and qpdf can be found
pdfs <- dir('inst/doc', pattern="\\.pdf",
recursive = TRUE, full.names = TRUE)
pdfs <- setdiff(pdfs, "inst/doc/Rplots.pdf")
if (length(pdfs)) {
checkingLog(Log, "sizes of PDF files under 'inst/doc'")
any <- FALSE
td <- tempfile('pdf')
dir.create(td)
file.copy(pdfs, td)
res <- compactPDF(td, gs_quality = "none") # use qpdf
res <- format(res, diff = 1e5)
if(length(res)) {
noteLog(Log)
any <- TRUE
printLog(Log,
" 'qpdf' made some significant size reductions:\n",
paste(" ", res, collapse = "\n"),
"\n",
" consider running tools::compactPDF() on these files\n")
}
if (R_check_doc_sizes2) {
gs_cmd <- find_gs_cmd()
if (nzchar(gs_cmd)) {
res <- compactPDF(td, gs_cmd = gs_cmd, gs_quality = "ebook")
res <- format(res, diff = 2.56e5) # 250 KB for now
if(length(res)) {
if (!any) warningLog(Log)
any <- TRUE
printLog(Log,
" 'gs+qpdf' made some significant size reductions:\n",
paste(" ", res, collapse = "\n"),
"\n",
' consider running tools::compactPDF(gs_quality = "ebook") on these files\n')
}
} else {
if (!any) noteLog(Log)
any <- TRUE
printLog(Log, "Unable to find GhostScript executable to run checks on size reduction\n")
}
}
if (!any) resultLog(Log, "OK")
}
}
    ## Run a battery of portability checks on the package's source
    ## directory (the current working directory): line endings of shell
    ## scripts, C/C++/Fortran sources and Makefiles; Makevars compilation
    ## flags; GNU make extensions; correct ordering of $(BLAS_LIBS)/
    ## $(LAPACK_LIBS)/$(FLIBS); and Makefile include directives that
    ## embed R_HOME.  Each sub-check logs its own WARNING/NOTE/OK.
    ##
    ## desc: the parsed DESCRIPTION metadata (named character vector);
    ##       only the SystemRequirements field is consulted here.
    check_src_dir <- function(desc)
    {
        ## Added in R 3.4.2: check line endings for shell scripts:
        ## for Unix CRLF line endings are fatal but these are not used
        ## on Windows and hence this is not detected.
        ## Packages could have arbitrary scripts, so we could
        ## extend this to look for scripts at top level or elsewhere.
        scripts <- dir(".", pattern = "^(configure|configure.in|configure.ac|cleanup)$")
        if(length(scripts)) {
            checkingLog(Log, "line endings in shell scripts")
            bad_files <- character()
            for(f in scripts) {
                ## Read raw bytes: a single CR anywhere flags the file.
                contents <- readChar(f, file.size(f), useBytes = TRUE)
                if (grepl("\r", contents, fixed = TRUE, useBytes = TRUE))
                    bad_files <- c(bad_files, f)
            }
            if (length(bad_files)) {
                warningLog(Log, "Found the following shell script(s) with CR or CRLF line endings:")
                printLog0(Log, .format_lines_with_indent(bad_files), "\n")
                printLog(Log, "Non-Windows OSes require LF line endings.\n")
            } else resultLog(Log, "OK")
        }
        ## Check C/C++/Fortran sources/headers for CRLF line endings.
        ## <FIXME>
        ## Does ISO C really require LF line endings? (Reference?)
        ## We know that some versions of Solaris cc and f77/f95
        ## will not accept CRLF or CR line endings.
        ## (Sun Studio 12 definitely objects to CR in both C and Fortran).
        ## </FIXME>
        if(dir.exists("src")) {
            checkingLog(Log, "line endings in C/C++/Fortran sources/headers")
            ## pattern is "([cfh]|cc|cpp)"
            files <- dir("src", pattern = "\\.([cfh]|cc|cpp)$",
                         full.names = TRUE, recursive = TRUE)
            ## exclude dirs starting src/win, e.g for tiff
            files <- filtergrep("^src/[Ww]in", files)
            bad_files <- character()
            for(f in files) {
                contents <- readChar(f, file.size(f), useBytes = TRUE)
                if (grepl("\r", contents, fixed = TRUE, useBytes = TRUE))
                    bad_files <- c(bad_files, f)
            }
            if (length(bad_files)) {
                warningLog(Log, "Found the following sources/headers with CR or CRLF line endings:")
                printLog0(Log, .format_lines_with_indent(bad_files), "\n")
                printLog(Log, "Some Unix compilers require LF line endings.\n")
            } else resultLog(Log, "OK")
        }
        ## Check src/Make* for LF line endings, as Sun make does not accept CRLF
        ## .win files are not checked, as CR/CRLF work there
        all_files <-
            dir("src",
                pattern = "^(Makevars|Makevars.in|Makefile|Makefile.in)$",
                full.names = TRUE, recursive = TRUE)
        all_files <- c(all_files,
                       dir(".", pattern = "^Makefile$",
                           full.names = TRUE, recursive = TRUE))
        all_files <- sub("^[.]/", "", all_files)
        all_files <- unique(sort(all_files))
        if(length(all_files)) {
            checkingLog(Log, "line endings in Makefiles")
            ## Also record Makefiles lacking a trailing newline, which
            ## some 'make' implementations silently ignore.
            bad_files <- noEOL<- character()
            for(f in all_files) {
                if (!file.exists(f)) next
                contents <- readChar(f, file.size(f), useBytes = TRUE)
                if (grepl("\r", contents, fixed = TRUE, useBytes = TRUE))
                    bad_files <- c(bad_files, f)
                if (!grepl("\n$", contents, useBytes = TRUE))
                    noEOL <- c(noEOL, f)
            }
            if (length(bad_files)) {
                warningLog(Log, "Found the following Makefile(s) with CR or CRLF line endings:")
                printLog0(Log, .format_lines_with_indent(bad_files), "\n")
                printLog(Log, "Some Unix 'make' programs require LF line endings.\n")
            } else if (length(noEOL)) {
                noteLog(Log, "Found the following Makefile(s) without a final LF:")
                printLog0(Log, .format_lines_with_indent(noEOL), "\n")
                printLog(Log, "Some 'make' programs ignore lines not ending in LF.\n")
            } else resultLog(Log, "OK")
        }
        ## Check src/Makevars[.in] compilation flags.
        ## Delegates to tools:::.check_make_vars in a child R process;
        ## 'makevars' is the list of Makevars files found earlier.
        if (length(makevars)) {
            checkingLog(Log, "compilation flags in Makevars")
            Rcmd <- sprintf("tools:::.check_make_vars(\"src\", %s)\n",
                            deparse(makevars))
            out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                if(any(grepl("^(Non-portable flags|Variables overriding)", out)))
                    warningLog(Log) else noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        ## Check GNUisms
        if (length(all_files)) {
            checkingLog(Log, "for GNU extensions in Makefiles")
            bad_files <- character()
            for(f in all_files) {
                contents <- readLines(f, warn = FALSE)
                ## Ignore comment lines before testing for GNU constructs.
                contents <- filtergrep("^ *#", contents)
                ## Things like $(SUBDIRS:=.a)
                contents <- filtergrep("[$][(].+:=.+[)]", contents)
                if (any(grepl("([+]=|:=|[$][(]wildcard|[$][(]shell|[$][(]eval|[$][(]call|[$][(]patsubst|^ifeq|^ifneq|^ifdef|^ifndef|^endif)", contents)))
                    bad_files <- c(bad_files, f)
            }
            SysReq <- desc["SystemRequirements"]
            if (length(bad_files)) {
                ## Declaring "GNU make" in SystemRequirements downgrades
                ## this from a WARNING to a NOTE.
                if(!is.na(SysReq) && grepl("GNU [Mm]ake", SysReq)) {
                    noteLog(Log, "GNU make is a SystemRequirements.")
                } else {
                    warningLog(Log, "Found the following file(s) containing GNU extensions:")
                    printLog0(Log, .format_lines_with_indent(bad_files), "\n")
                    wrapLog("Portable Makefiles do not use GNU extensions",
                            "such as +=, :=, $(shell), $(wildcard),",
                            "ifeq ... endif.",
                            "See section 'Writing portable packages'",
                            "in the 'Writing R Extensions' manual.\n")
                }
            } else resultLog(Log, "OK")
        }
        ## check src/Makevar*, src/Makefile* for correct use of BLAS_LIBS
        ## FLIBS is not needed on Windows, at least currently (as it is
        ## statically linked).
        makefiles <- Sys.glob(file.path("src",
                                        c("Makevars", "Makevars.in",
                                          "Makefile", "Makefile.win")))
        if(length(makefiles)) {
            checkingLog(Log, "for portable use of $(BLAS_LIBS) and $(LAPACK_LIBS)")
            any <- FALSE
            for (f in makefiles) {
                lines <- readLines(f, warn = FALSE)
                ## Combine lines ending in escaped newlines.
                if(any(ind <- grepl("[\\]$", lines, useBytes = TRUE))) {
                    ## Eliminate escape.
                    lines[ind] <-
                        sub("[\\]$", "", lines[ind], useBytes = TRUE)
                    ## Determine ids of blocks that need to be joined.
                    ind <- seq_along(ind) - c(0, cumsum(ind)[-length(ind)])
                    ## And join.
                    lines <- unlist(lapply(split(lines, ind), paste,
                                           collapse = " "))
                }
                ## Truncate at first comment char
                lines <- sub("#.*", "", lines)
                ## c1: PKG_LIBS assignment lines; c2*: which make variables
                ## appear and in which relative order on those lines.
                c1 <- grepl("^[[:space:]]*PKG_LIBS", lines, useBytes = TRUE)
                c2l <- grepl("\\$[{(]{0,1}LAPACK_LIBS", lines, useBytes = TRUE)
                c2b <- grepl("\\$[{(]{0,1}BLAS_LIBS", lines, useBytes = TRUE)
                c2lb <- grepl("\\$[{(]{0,1}LAPACK_LIBS.*\\$[{(]{0,1}BLAS_LIBS",
                              lines, useBytes = TRUE)
                c2bf <- grepl("\\$[{(]{0,1}BLAS_LIBS.*\\$[{(]{0,1}FLIBS",
                              lines, useBytes = TRUE)
                if (any(c1 & c2l & !c2lb)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    printLog(Log,
                             " apparently using $(LAPACK_LIBS) without following $(BLAS_LIBS) in ",
                             sQuote(f), "\n")
                }
                if (any(c1 & c2b & !c2bf)) {
                    if (!any) warningLog(Log)
                    any <- TRUE
                    printLog(Log,
                             " apparently using $(BLAS_LIBS) without following $(FLIBS) in ",
                             sQuote(f), "\n")
                }
            }
            if (!any) resultLog(Log, "OK")
        }
        ## Check include directives for use of R_HOME which may contain
        ## spaces for which there is no portable way to quote/escape.
        all_files <-
            dir(".",
                pattern = "^(Makefile|Makefile.in|Makefile.win|makefile|GNUmakefile)$",
                recursive = TRUE)
        all_files <- unique(sort(all_files))
        if(length(all_files)) {
            checkingLog(Log, "include directives in Makefiles")
            bad_lines <-
                lapply(all_files,
                       function(f) {
                           s <- readLines(f, warn = FALSE)
                           grep("^include .*R_HOME", s, value = TRUE)
                       })
            bad_files <- all_files[lengths(bad_lines) > 0L]
            if(length(bad_files)) {
                noteLog(Log,
                        "Found the following Makefile(s) with an include directive with a pathname using R_HOME:")
                printLog0(Log, .format_lines_with_indent(bad_files),
                          "\n")
                msg <-
                    c("Even though not recommended, variable R_HOME may contain spaces.",
                      "Makefile directives use space as a separator and there is no portable",
                      "way to quote/escape the space in Make rules and directives. However,",
                      "one can and should quote pathnames when passed from Makefile to the",
                      "shell, and this can be done specifically when invoking Make recursively.",
                      "It is therefore recommended to use the Make '-f' option to include files",
                      "in directories specified using R_HOME. This option can be specified",
                      "multiple times to include multiple Makefiles. Note that 'Makeconf' is",
                      "included automatically into top-level makefile of a package.",
                      "More information can be found in 'Writing R Extensions'.")
                printLog0(Log, paste(msg, collapse = "\n"), "\n")
            } else resultLog(Log, "OK")
        }
    }
    ## Optional source-level checks, both gated by environment variables:
    ## * _R_CHECK_PRAGMAS_: scan C/C++ headers/sources for compiler
    ##   pragmas that suppress diagnostics (via tools:::.check_pragmas).
    ## * _R_CHECK_COMPILATION_FLAGS_: scan the installation log for
    ##   non-portable or warning-suppressing compiler flags.
    ## Results are logged as WARNING/NOTE/OK; nothing is returned.
    check_src <- function() {
        Check_pragmas <- Sys.getenv("_R_CHECK_PRAGMAS_", "FALSE")
        if(config_val_to_logical(Check_pragmas) &&
           any(dir.exists(c("src", "inst/include")))) {
            checkingLog(Log, "pragmas in C/C++ headers and code")
            ans <- .check_pragmas('.')
            if(length(ans)) {
                ## The "warn" attribute holds the subset of files whose
                ## pragmas suppress *important* diagnostics => WARNING;
                ## anything else is only a NOTE.
                if(length(warn <- attr(ans, "warn")))
                {
                    warningLog(Log)
                    msg <- if(length(warn) == 1L)
                        "File which contains pragma(s) suppressing important diagnostics:"
                    else
                        "Files which contain pragma(s) suppressing important diagnostics:"
                    msg <- c(msg, .pretty_format(warn))
                    rest <- setdiff(ans, warn)
                    if(length(rest)) {
                        msg <- c(msg, if(length(rest) == 1L)
                            "File which contains pragma(s) suppressing diagnostics:"
                        else
                            "Files which contain pragma(s) suppressing diagnostics:")
                        msg <- c(msg, .pretty_format(rest))
                    }
                } else {
                    noteLog(Log)
                    msg <- if(length(ans) == 1L)
                        "File which contains pragma(s) suppressing diagnostics:"
                    else
                        "Files which contain pragma(s) suppressing diagnostics:"
                    msg <- c(msg, .pretty_format(ans))
                }
                printLog0(Log, paste(c(msg,""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        Check_flags <- Sys.getenv("_R_CHECK_COMPILATION_FLAGS_", "FALSE")
        if(config_val_to_logical(Check_flags)) {
            ## Locate the installation log produced earlier in the check.
            instlog <- if (startsWith(install, "check"))
                install_log_path
            else
                file.path(pkgoutdir, "00install.out")
            if (file.exists(instlog) && dir.exists('src')) {
                checkingLog(Log, "compilation flags used")
                lines <- readLines(instlog, warn = FALSE)
                ## Tokenize compiler invocations and collect -W* flags.
                poss <- grep(" -W", lines, useBytes = TRUE, value = TRUE)
                tokens <- unique(unlist(strsplit(poss, " ", perl = TRUE,
                                                 useBytes = TRUE)))
                warns <- grep("^[-]W", tokens,
                              value = TRUE, perl = TRUE, useBytes = TRUE)
                ## Not sure -Wextra and -Weverything are portable, though
                ## -Werror is not compiler independent
                ## (as what is a warning is not)
                ## -Wno-dev is from qt, not a compiler flag.
                except <- Sys.getenv("_R_CHECK_COMPILATION_FLAGS_KNOWN_", "")
                except <- unlist(strsplit(except, "\\s", perl = TRUE))
                warns <- setdiff(warns,
                                 c(except, "-Wall", "-Wextra", "-Weverything",
                                   "-Wno-dev"))
                warns <- warns[!startsWith(warns, "-Wl,")] # linker flags
                diags <- grep(" -fno-diagnostics-show-option", tokens,
                              useBytes = TRUE, value = TRUE)
                ## next set are about unsafe optimizations
                opts <- grep("-f(fast-math|unsafe-math-optimizations|associative-math|reciprocal-math)",
                             tokens, useBytes = TRUE, value = TRUE)
                warns <- c(warns, diags, opts)
                ## -Wno-* (suppressing warnings) or diagnostic hiding is
                ## always a WARNING; other non-portable flags warn too.
                if(any(grepl("^-Wno-", warns)) || length(diags)) {
                    warningLog(Log)
                    msg <- c("Compilation used the following non-portable flag(s):",
                             .pretty_format(sort(warns)),
                             "including flag(s) suppressing warnings")
                    printLog0(Log, paste(c(msg,""), collapse = "\n"))
                } else if(length(warns)) {
                    warningLog(Log) # might consider NOTE instead
                    msg <- c("Compilation used the following non-portable flag(s):",
                             .pretty_format(sort(warns)))
                    printLog0(Log, paste(c(msg,""), collapse = "\n"))
                } else
                    resultLog(Log, "OK")
            }
        }
    }
    ## Check the package's compiled code (shared objects under the
    ## installed package) for calls to entry points that should not be
    ## used: R-terminating calls (abort/exit/...), stdout/stderr writes,
    ## Fortran I/O, system RNGs, and non-API R entry points.  The actual
    ## scan is done by tools:::check_compiled_code in a child R process;
    ## this function classifies its output as WARNING or NOTE and logs it.
    check_sos <- function() {
        checkingLog(Log, "compiled code")
        ## from sotools.R
        Rcmd <- paste("options(warn=1)\n",
                      sprintf("tools:::check_compiled_code(\"%s\")",
                              file.path(libdir, pkgname)))
        out <- R_runR0(Rcmd, R_opts2, "R_DEFAULT_PACKAGES=NULL")
        if(length(out) == 1L && startsWith(out, "Note:")) {
            ## This will be a note about symbols.rds not being available
            if(!is_base_pkg) {
                noteLog(Log)
                printLog0(Log, c(out, "\n"))
            } else resultLog(Log, "OK")
        } else if(length(out)) {
            ## If we have named objects then we have symbols.rds and
            ## will not be picking up symbols just in system libraries.
            haveObjs <- any(grepl("^ *Object", out))
            pat <- paste("possibly from",
                         sQuote("(abort|assert|exit|_exit|_Exit|stop)"))
            if(haveObjs && any(grepl(pat, out)) && pkgname %notin% "parallel")
                ## need _exit in forked child
                warningLog(Log)
            else {
                ## look for Fortran detritus
                pat1 <- paste("possibly from", sQuote("(open|close|rewind)"))
                pat2 <- paste("possibly from", sQuote("(read|write)"))
                pat3 <- paste("possibly from", sQuote("close"))
                pat4 <- paste("possibly from", sQuote("open"))
                ## NOTE(review): `&&` binds tighter than `||`, so haveObjs
                ## only guards the first (pat1/pat2) disjunct below; the
                ## pat3/pat4 clauses are evaluated regardless of haveObjs.
                ## Confirm whether that precedence is intended.
                if(haveObjs &&
                   (any(grepl(pat1, out)) && !any(grepl(pat2, out))) ||
                   (any(grepl(pat3, out)) && !any(grepl(pat4, out))) ||
                   (any(grepl(pat4, out)) && !any(grepl(pat3, out))))
                    warningLog(Log)
                else noteLog(Log)
            }
            printLog0(Log, paste(c(out, ""), collapse = "\n"))
            ## Classify the findings to assemble explanatory text.
            nAPIs <- length(grep("Found non-API", out))
            nRS <- length(grep("Found no call", out))
            nBad <- length(grep(", possibly from ", out))
            msg <- if (nBad) {
                if(haveObjs)
                    c("Compiled code should not call entry points which",
                      "might terminate R nor write to stdout/stderr instead of",
                      "to the console, nor use Fortran I/O nor system RNGs.\n")
                else
                    c("Compiled code should not call entry points which",
                      "might terminate R nor write to stdout/stderr instead of",
                      "to the console, nor use Fortran I/O nor system RNGs.",
                      "The detected symbols are linked",
                      "into the code but might come from libraries",
                      "and not actually be called.\n")
            } else character()
            if(nAPIs)
                msg <- c(msg,
                         "Compiled code should not call non-API entry points in R.\n")
            if(nRS)
                msg <- c(msg,
                         "It is good practice to register native routines and to disable symbol search.\n")
            wrapLog("\n", paste(msg, collapse = " "), "\n",
                    "See 'Writing portable packages'",
                    "in the 'Writing R Extensions' manual.\n")
        } else resultLog(Log, "OK")
    }
    ## Check that the installed package can be loaded, loaded with only
    ## its stated dependencies, unloaded cleanly, and (if it has a
    ## NAMESPACE) that the namespace alone can be loaded/unloaded.  Also
    ## checks loading when the package's library is not on .libPaths,
    ## and optionally that loading does not overwrite S3 method
    ## registrations from base/recommended packages.  Each sub-check is
    ## run in a child R process via R_runR0 and logged separately; a
    ## fatal load failure terminates the whole check run.
    ##
    ## arch: sub-architecture name ("" for the default); selects the
    ##       R options/environment used for the child processes.
    check_loading <- function(arch = "")
    {
        checkingLog(Log, "whether the package can be loaded")
        Rcmd <- sprintf("library(%s)", pkgname)
        opts <- if(nzchar(arch)) R_opts4 else R_opts2
        env <- "R_DEFAULT_PACKAGES=NULL"
        env1 <- if(nzchar(arch)) env0 else character()
        out <- R_runR0(Rcmd, opts, env1, arch = arch)
        if(length(st <- attr(out, "status"))) {
            ## Non-zero exit status: fatal, stop the whole check.
            errorLog(Log)
            wrapLog("Loading this package had a fatal error",
                    "status code ", st, "\n")
            if(length(out))
                printLog0(Log,
                          paste(c("Loading log:", out, ""),
                                collapse = "\n"))
            summaryLog(Log)
            do_exit(1L)
        }
        if (any(startsWith(out, "Error"))) {
            errorLog(Log)
            printLog0(Log, paste(c(out, ""), collapse = "\n"))
            wrapLog("\nIt looks like this package",
                    "has a loading problem: see the messages",
                    "for details.\n")
            maybe_exit(1L)
        } else resultLog(Log, "OK")
        ## Repeat the load with R_DEFAULT_PACKAGES=NULL so only declared
        ## dependencies are available.
        checkingLog(Log, "whether the package can be loaded with stated dependencies")
        out <- R_runR0(Rcmd, opts, c(env, env1), arch = arch)
        if (any(startsWith(out, "Error")) || length(attr(out, "status"))) {
            printLog0(Log, paste(c(out, ""), collapse = "\n"))
            wrapLog("\nIt looks like this package",
                    "(or one of its dependent packages)",
                    "has an unstated dependence on a standard",
                    "package. All dependencies must be",
                    "declared in DESCRIPTION.\n")
            wrapLog(msg_DESCRIPTION)
        } else resultLog(Log, "OK")
        checkingLog(Log, "whether the package can be unloaded cleanly")
        ## The '---- unloading' marker lets us trim the output below to
        ## just the detach phase.
        Rcmd <- sprintf("suppressMessages(library(%s)); cat('\n---- unloading\n'); detach(\"package:%s\")", pkgname, pkgname)
        out <- R_runR0(Rcmd, opts, c(env, env1), arch = arch)
        if (any(grepl("^(Error|\\.Last\\.lib failed)", out)) ||
            length(attr(out, "status"))) {
            warningLog(Log)
            ll <- grep("---- unloading", out)
            if(length(ll)) {
                ll <- ll[length(ll)]
                out <- out[ll:length(out)]
            }
            printLog0(Log, paste(c(out, ""), collapse = "\n"))
        } else resultLog(Log, "OK")
        ## and if it has a namespace, that we can load/unload just
        ## the namespace
        if (file.exists(file.path(pkgdir, "NAMESPACE"))) {
            checkingLog(Log, "whether the namespace can be loaded with stated dependencies")
            Rcmd <-
                sprintf("options(warn=1)\ntools:::.load_namespace_rather_quietly(\"%s\")",
                        pkgname)
            out <- R_runR0(Rcmd, opts, c(env, env1), arch = arch)
            any <- FALSE
            if (any(startsWith(out, "Error")) || length(attr(out, "status"))) {
                warningLog(Log)
                any <- TRUE
            } else {
                ## Drop tcltk warning if no DISPLAY variable
                if(pkgname == "tcltk")
                    out <- filtergrep("Warning: no DISPLAY variable so Tk is not available",
                                      out, fixed = TRUE)
                ## Drop warnings about replacing previous imports unless
                ## these were disabled for the installation check.
                check_imports_flag <-
                    Sys.getenv("_R_CHECK_REPLACING_IMPORTS_", "TRUE")
                if(config_val_to_logical(check_imports_flag))
                    out <- filtergrep("Warning: replacing previous import", out,
                                      fixed = TRUE)
                if(any(startsWith(out, "Warning"))) {
                    noteLog(Log)
                    any <- TRUE
                }
            }
            if(any) {
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("\nA namespace must be able to be loaded",
                        "with just the base namespace loaded:",
                        "otherwise if the namespace gets loaded by a",
                        "saved object, the session will be unable",
                        "to start.\n\n",
                        "Probably some imports need to be declared",
                        "in the NAMESPACE file.\n")
            } else resultLog(Log, "OK")
            checkingLog(Log,
                        "whether the namespace can be unloaded cleanly")
            Rcmd <- sprintf("invisible(suppressMessages(loadNamespace(\"%s\"))); cat('\n---- unloading\n'); unloadNamespace(\"%s\")",
                            pkgname, pkgname)
            out <- if (is_base_pkg && pkgname != "stats4")
                R_runR0(Rcmd, opts, "R_DEFAULT_PACKAGES=NULL", arch = arch)
            else R_runR0(Rcmd, opts, env1)
            if (any(grepl("^(Error|\\.onUnload failed)", out)) ||
                length(attr(out, "status"))) {
                warningLog(Log)
                ll <- grep("---- unloading", out)
                if(length(ll)) {
                    ll <- ll[length(ll)]
                    out <- out[ll:length(out)]
                }
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        ## No point in this test if already installed in .Library
        if (pkgname %notin% dir(.Library)) {
            checkingLog(Log, "loading without being on the library search path")
            Rcmd <- sprintf("library(%s, lib.loc = '%s')", pkgname, libdir)
            opts <- if(nzchar(arch)) R_opts4 else R_opts2
            env <- setRlibs(pkgdir = pkgdir, libdir = libdir,
                            self2 = FALSE, quote = TRUE)
            if(nzchar(arch)) env <- c(env, "R_DEFAULT_PACKAGES=NULL")
            out <- R_runR0(Rcmd, opts, env, arch = arch)
            if (any(startsWith(out, "Error"))) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                wrapLog("\nIt looks like this package",
                        "has a loading problem when not on .libPaths:",
                        "see the messages for details.\n")
            } else resultLog(Log, "OK")
        }
        ## Optionally check that loading the namespace does not overwrite
        ## S3 methods registered by base/recommended packages.
        if(!extra_arch && !is_base_pkg) {
            check_S3reg <-
                Sys.getenv("_R_CHECK_OVERWRITE_REGISTERED_S3_METHODS_", "NA")
            ## Default ("NA"): only run for incoming (e.g. CRAN) checks.
            check_S3reg <- if(check_S3reg == "NA") check_incoming else {
                config_val_to_logical(check_S3reg)
            }
            if(check_S3reg) {
                checkingLog(Log, "use of S3 registration")
                Rcmd <- sprintf("suppressWarnings(suppressPackageStartupMessages(loadNamespace('%s', lib.loc = '%s')))",
                                pkgname, libdir)
                opts <- if(nzchar(arch)) R_opts4 else R_opts2
                env <- Sys.getenv("_R_LOAD_CHECK_OVERWRITE_S3_METHODS_",
                                  "NA")
                env <- paste0("_R_LOAD_CHECK_OVERWRITE_S3_METHODS_=",
                              if(env == "all") env else pkgname)
                ## <FIXME>
                ## Oh dear. R-ints says that if env var
                ## '_R_CHECK_OVERWRITE_REGISTERED_S3_METHODS_' is set to
                ## something true,
                ## report already registered S3 methods in
                ## base/recommended packages which are overwritten
                ## when this package's namespace is loaded.
                ## As of 2017-12, to make this work as documented we
                ## really need to load all base and recommended packages
                ## which register S3 methods first, which takes *quite
                ## some time*. There really should be a better way ...
                ## Running with
                ## R_DEFAULT_PACKAGES=MASS,Matrix,boot,class,cluster,grDevices,graphics,grid,lattice,mgcv,nlme,nnet,parallel,rpart,spatial,splines,stats,survival,tcltk,tools,utils
                ## does not suppress package startup messages: so try to
                ## load the relevant base and recommended package
                ## namespaces quietly ...
                Rcmd <-
                    c(sprintf("suppressPackageStartupMessages(loadNamespace('%s', lib.loc = '%s'))",
                              ## Perhaps provide these sorted according
                              ## to dependency?
                              c("MASS", "Matrix", "boot", "class",
                                "cluster", "grDevices", "graphics",
                                "grid", "lattice", "mgcv", "nlme",
                                "nnet", "parallel", "rpart", "spatial",
                                "splines", "stats", "survival", "tcltk",
                                "tools", "utils"),
                              .Library),
                      Rcmd)
                env <- c(env, "R_DEFAULT_PACKAGES=NULL")
                out <- R_runR0(Rcmd, opts, env, arch = arch)
                ## </FIXME>
                if (any(grepl("^Registered S3 method.*overwritten", out))) {
                    out <- filtergrep("^<environment: namespace:", out)
                    warningLog(Log)
                    printLog0(Log, paste(out, collapse = "\n"), "\n")
                } else resultLog(Log, "OK")
            }
        }
    }
run_examples <- function()
{
run_one_arch <- function(exfile, exout, arch = "")
{
any <- FALSE
## moved here to avoid WARNING + OK
if (nzchar(enc) && is_ascii) {
warningLog(Log,
paste("checking a package with encoding ",
sQuote(e), " in an ASCII locale\n"))
any <- TRUE
}
Ropts <- if (nzchar(arch)) R_opts3 else R_opts
if (use_valgrind) Ropts <- paste(Ropts, "-d valgrind")
t1 <- proc.time()
tlim <- get_timeout(Sys.getenv("_R_CHECK_EXAMPLES_ELAPSED_TIMEOUT_",
Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
## might be diff-ing results against tests/Examples later
## so force LANGUAGE=en
status <- R_runR0(NULL, c(Ropts, enc),
c("LANGUAGE=en", "_R_CHECK_INTERNALS2_=1",
if(nzchar(arch)) env0, jitstr, elibs),
stdout = exout, stderr = exout,
stdin = exfile, arch = arch, timeout = tlim)
t2 <- proc.time()
if (status) {
errorLog(Log, "Running examples in ",
sQuote(basename(exfile)),
" failed")
## Try to spot the offending example right away.
txt <- paste(readLines(exout, warn = FALSE),
collapse = "\n")
## Look for the header section anchored by a
## subsequent call to flush(): needs to be kept in
## sync with the code in massageExamples (in
## testing.R). Should perhaps also be more
## defensive about the prompt ...
chunks <- strsplit(txt,
"> ### \\* [^\n]+\n> \n> flush[^\n]+\n> \n", useBytes = TRUE)[[1L]]
if((ll <- length(chunks)) >= 2) {
printLog(Log,
"The error most likely occurred in:\n\n")
printLog0(Log, chunks[ll], "\n")
} else {
## most likely error before the first example
## so show all the output.
printLog(Log, "The error occurred in:\n\n")
printLog0(Log, txt, "\n")
}
return(FALSE)
}
print_time(t1, t2, Log)
## Look at the output from running the examples. For
## the time being, report warnings about use of
## deprecated , as the next release will make
## them defunct and hence using them an error.
bad <- FALSE
lines <- readLines(exout, warn = FALSE)
bad_lines <- grep("^Warning: .*is deprecated.$",
lines, useBytes = TRUE, value = TRUE)
if(length(bad_lines)) {
bad <- TRUE
warningLog(Log, "Found the following significant warnings:\n")
printLog0(Log, .format_lines_with_indent(bad_lines), "\n")
wrapLog("Deprecated functions may be defunct as",
"soon as of the next release of R.\n",
"See ?Deprecated.\n")
}
bad_lines <- grep("^Warning.*screen devices should not be used in examples",
lines, useBytes = TRUE, value = TRUE)
if(length(bad_lines)) {
if(!bad) {
warningLog(Log,
"Found the following significant warnings:")
bad <- TRUE
}
printLog0(Log, .format_lines_with_indent(bad_lines), "\n")
wrapLog("dev.new() is the preferred way to open a new device,",
"in the unlikely event one is needed.")
}
bad_lines <- grep("^Warning: .*simultaneous processes spawned$",
lines, useBytes = TRUE, value = TRUE)
if(length(bad_lines)) {
if(!bad) {
warningLog(Log,
"Found the following significant warnings:")
bad <- TRUE
}
printLog0(Log, .format_lines_with_indent(bad_lines), "\n")
wrapLog("Note that CRAN packages must never use more than two",
"cores simultaneously during their checks.")
}
bad_lines <- grep("^Warning: working directory was changed to",
lines, useBytes = TRUE, value = TRUE)
if(length(bad_lines)) {
if(!bad) {
warningLog(Log,
"Found the following significant warnings:")
bad <- TRUE
}
printLog0(Log, .format_lines_with_indent(bad_lines), "\n")
}
bad_lines <- grep("^Warning: items .* were removed from the search path",
lines, useBytes = TRUE, value = TRUE)
if(length(bad_lines)) {
if(!bad) {
warningLog(Log,
"Found the following significant warnings:")
bad <- TRUE
}
printLog0(Log, .format_lines_with_indent(bad_lines), "\n")
}
any <- any || bad
if (!any && !(check_incoming && do_timings))
resultLog(Log, "OK")
if (do_timings) {
theta <-
as.numeric(Sys.getenv("_R_CHECK_EXAMPLE_TIMING_THRESHOLD_",
"5"))
tfile <- paste0(pkgname, "-Ex.timings")
times <-
utils::read.table(tfile, header = TRUE, row.names = 1L,
colClasses = c("character", rep("numeric", 3)))
o <- order(times[[1L]] + times[[2L]], decreasing = TRUE)
times <- times[o, ]
keep <- ((times[[1L]] + times[[2L]] > theta) |
(times[[3L]] > theta))
if(any(keep)) {
if(!any && check_incoming) {
noteLog(Log)
any <- TRUE
}
printLog(Log,
sprintf("Examples with CPU or elapsed time > %gs\n",
theta))
out <- utils::capture.output(format(times[keep, ]))
printLog0(Log, paste(out, collapse = "\n"), "\n")
}
theta <-
as.numeric(Sys.getenv("_R_CHECK_EXAMPLE_TIMING_CPU_TO_ELAPSED_THRESHOLD_",
NA_character_))
if(!is.na(theta)) {
keep <- ((times[[1L]] + times[[2L]]) >=
pmax(theta * times[[3L]], 1))
if(any(keep)) {
if(!any && check_incoming) {
noteLog(Log)
any <- TRUE
}
printLog(Log,
sprintf("Examples with CPU time > %g times elapsed time\n",
theta))
bad <- times[keep, ]
ratio <- (bad[[1L]] + bad[[2L]]) / bad[[3L]]
bad <- cbind(bad, ratio = round(ratio, 3L))
bad <- bad[order(bad$ratio, decreasing = TRUE), ]
out <- utils::capture.output(format(bad))
printLog0(Log, paste(out, collapse = "\n"), "\n")
}
}
if(!any && check_incoming)
resultLog(Log, "OK")
}
## Try to compare results from running the examples to
## a saved previous version.
exsave <- file.path(pkgdir, test_dir, "Examples",
paste0(pkgname, "-Ex.Rout.save"))
if (file.exists(exsave)) {
checkingLog(Log, "differences from ",
sQuote(basename(exout)),
" to ", sQuote(basename(exsave)))
cmd <- paste0("invisible(tools::Rdiff('",
exout, "', '", exsave, "',TRUE,TRUE))")
out <- R_runR0(cmd, R_opts2)
resultLog(Log, "OK")
if(length(out))
printLog0(Log, paste(c("", out, ""), collapse = "\n"))
}
TRUE
}
checkingLog(Log, "examples")
if (!do_examples) resultLog(Log, "SKIPPED")
else {
pkgtopdir <- file.path(libdir, pkgname)
cmd <- sprintf('tools:::.createExdotR("%s", "%s", silent = TRUE, use_gct = %s, addTiming = %s, commentDontrun = %s, commentDonttest = %s)',
pkgname, pkgtopdir, use_gct, do_timings,
!run_dontrun, !run_donttest)
Rout <- tempfile("Rout")
## any arch will do here
status <- R_runR0(cmd, R_opts2, "LC_ALL=C",
stdout = Rout, stderr = Rout)
exfile <- paste0(pkgname, "-Ex.R")
if (status) {
errorLog(Log,
paste("Running massageExamples to create",
sQuote(exfile), "failed"))
printLog0(Log,
paste(readLines(Rout, warn = FALSE),
collapse = "\n"),
"\n")
maybe_exit(1L)
}
## It ran, but did it create any examples?
if (file.exists(exfile)) {
enc <- if (!is.na(e <- desc["Encoding"])) {
paste0("--encoding=", e)
} else ""
if (!this_multiarch) {
exout <- paste0(pkgname, "-Ex.Rout")
if(!run_one_arch(exfile, exout)) maybe_exit(1L)
} else {
printLog(Log, "\n")
Log$stars <<- "**"
res <- TRUE
for (arch in inst_archs) {
printLog(Log, "** running examples for arch ",
sQuote(arch), " ...")
if (arch %in% R_check_skip_examples_arch) {
resultLog(Log, "SKIPPED")
} else {
tdir <- paste0("examples_", arch)
dir.create(tdir)
if (!dir.exists(tdir)) {
errorLog(Log,
"unable to create examples directory")
summaryLog(Log)
do_exit(1L)
}
od <- setwd(tdir)
exout <- paste0(pkgname, "-Ex_", arch, ".Rout")
res <- res & run_one_arch(file.path("..", exfile),
file.path("..", exout),
arch)
setwd(od)
}
}
Log$stars <<- "*"
if (!res) maybe_exit(1L)
}
cntFile <- paste0(exfile, "-cnt")
if (file.exists(cntFile)) {
unlink(cntFile)
if (as_cran)
printLog(Log, "** found \\donttest examples:",
" check also with --run-donttest\n")
}
} else {
resultLog(Log, "NONE")
no_examples <<- TRUE
}
}
}
## Run the package's test suite from `test_dir` ("tests" by default),
## after first checking the test scripts for undeclared package
## dependencies.  No parameters; relies on variables from the
## enclosing checker environment (Log, pkgdir, pkgoutdir, test_dir,
## do_install, do_tests, this_multiarch, inst_archs, use_gct,
## use_valgrind, stop_on_test_error, R_opts2/R_opts4, ...).
## Exits the check (maybe_exit) if any test run fails.
run_tests <- function()
{
        if (!extra_arch && !is_base_pkg) {
            ## Static scan: do the test scripts use packages that
            ## DESCRIPTION does not declare?
            checkingLog(Log, "for unstated dependencies in ", sQuote(test_dir))
            Rcmd <- paste("options(warn=1, showErrorCalls=FALSE)\n",
                          sprintf("tools:::.check_packages_used_in_tests(\"%s\", \"%s\")\n", pkgdir, test_dir))
            out <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                warningLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
                # wrapLog(msg_DESCRIPTION)
            } else resultLog(Log, "OK")
        }
        if (test_dir == "tests")
            checkingLog(Log, "tests")
        else
            checkingLog(Log, "tests in ", sQuote(test_dir))
        ## Copy the test sources into <pkg>.Rcheck/tests[_<arch>] and run
        ## them there via tools:::.runPackageTestsR.  Returns TRUE on
        ## success, FALSE if any test failed (after logging tails of the
        ## .Rout.fail files).
        run_one_arch <- function(arch = "")
        {
            testsrcdir <- file.path(pkgdir, test_dir)
            testdir <- file.path(pkgoutdir, "tests")
            if(nzchar(arch)) testdir <- paste(testdir, arch, sep = "_")
            if(!dir.exists(testdir)) dir.create(testdir, mode = "0755")
            if(!dir.exists(testdir)) {
                errorLog(Log,
                         sprintf("unable to create %s", sQuote(testdir)))
                summaryLog(Log)
                do_exit(1L)
            }
            file.copy(Sys.glob(paste0(testsrcdir, "/*")),
                      testdir, recursive = TRUE)
            setwd(testdir)
            ## Forward slashes so the path can be embedded verbatim in
            ## the R command constructed below (Windows tempfile paths
            ## contain backslashes).
            logf <- gsub("\\", "/", tempfile(), fixed=TRUE)
            extra <- c(if(use_gct) "use_gct = TRUE",
                       if(use_valgrind) "use_valgrind = TRUE",
                       if(!stop_on_test_error) "stop_on_error = FALSE",
                       paste0('Log="', logf, '"'))
            ## might be diff-ing results against tests/*.R.out.save
            ## so force LANGUAGE=en
            cmd <- paste0("tools:::.runPackageTestsR(",
                          paste(extra, collapse = ", "), ")")
            t1 <- proc.time()
            tlim <- get_timeout(Sys.getenv("_R_CHECK_TESTS_ELAPSED_TIMEOUT_",
                                           Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
            status <- R_runR0(cmd,
                              if(nzchar(arch)) R_opts4 else R_opts2,
                              env = c("LANGUAGE=en",
                                      "_R_CHECK_INTERNALS2_=1",
                                      if(nzchar(arch)) env0, jitstr, elibs_tests),
                              stdout = "", stderr = "", arch = arch,
                              timeout = tlim)
            t2 <- proc.time()
            if (status) {
                print_time(t1, t2, Log)
                errorLog(Log)
                if (Log$con > 0L && file.exists(logf)) {
                    ## write individual results only to 00check.log
                    cat(readLines(logf, warn = FALSE),
                        sep = "\n", file = Log$con)
                }
                ## Don't just fail: try to log where the problem occurred.
                ## First, find the test(s) which failed.
                ## (Maybe there was an error without a failing test.)
                bad_files <- dir(".", pattern="\\.Rout\\.fail$")
                if (length(bad_files)) {
                    ## Read in output from the failed test(s)
                    ## (As from R 3.4.0 there can be more than one
                    ## with option --no-stop-on-test-error.)
                    for(f in bad_files) {
                        lines <- readLines(f, warn = FALSE)
                        f <- file.path(test_dir, sub("out\\.fail$", "", f))
                        src_files <- dir(".", pattern = "\\.[rR]$")
                        if (basename(f) %notin% src_files) {
                            f <- sub("R$", "r", f) # This assumes only one of foo.r and foo.R exists.
                            if (basename(f) %notin% src_files)
                                f <- sub("r$", "[rR]", f) # Just in case the test script got deleted somehow, show the pattern.
                        }
                        keep <- as.integer(Sys.getenv("_R_CHECK_TESTS_NLINES_",
                                                      "13"))
                        ## keep = 0 means keep all of it, but we will
                        ## always omit the R preamble and start at the first
                        ## line with an R prompt.
                        ll <- length(lines)
                        st <- grep("^>", lines, useBytes = TRUE)
                        if (length(st)) {
                            lines <- lines[st[1L]:ll]
                            ll <- length(lines)
                        }
                        if (keep > 0L)
                            lines <- lines[max(1L, ll-keep-1L):ll]
                        if (R_check_suppress_RandR_message)
                            lines <- filtergrep('^Xlib: *extension "RANDR" missing on display',
                                                lines, useBytes = TRUE)
                        printLog(Log, sprintf("Running the tests in %s failed.\n",
                                              sQuote(f)))
                        printLog(Log, if(keep > 0L && keep < ll)
                                 sprintf("Last %i lines of output:\n", keep)
                                 else "Complete output:\n")
                        printLog0(Log, .format_lines_with_indent(lines), "\n")
                    }
                }
                return(FALSE)
            } else {
                print_time(t1, t2, Log)
                resultLog(Log, "OK")
                if (Log$con > 0L && file.exists(logf)) {
                    ## write results only to 00check.log
                    lines <- readLines(logf, warn = FALSE)
                    cat(lines, sep="\n", file = Log$con)
                    unlink(logf)
                }
            }
            setwd(pkgoutdir)
            TRUE
        }
        if (do_install && do_tests) {
            if (!this_multiarch) {
                res <- run_one_arch()
            } else {
                ## Run the tests once per installed sub-architecture,
                ## unless the arch is explicitly excluded.
                printLog(Log, "\n")
                res <- TRUE
                for (arch in inst_archs)
                    if (arch %notin% R_check_skip_tests_arch) {
                        printLog(Log, "** running tests for arch ",
                                 sQuote(arch), " ...")
                        res <- res & run_one_arch(arch)
                    }
            }
            if (!res) maybe_exit(1L)
        } else resultLog(Log, "SKIPPED")
    }
## Check the package vignettes: undeclared dependencies, presence of
## built PDF/HTML outputs, declared encodings, and 'inst/doc'/
## 'vignettes' Makefile pitfalls; then optionally run the vignette R
## code and re-build the vignette outputs.
##
## `desc` is the package DESCRIPTION metadata (named character vector);
## everything else (Log, pkgdir, pkgoutdir, do_install, do_vignettes,
## do_build_vignettes, as_cran, option flags, ...) comes from the
## enclosing checker environment.  Returns invisibly/early if the
## package has no vignettes.
run_vignettes <- function(desc)
{
        theta <-
            as.numeric(Sys.getenv("_R_CHECK_VIGNETTE_TIMING_CPU_TO_ELAPSED_THRESHOLD_",
                                  NA_character_))
        ## Look up vignettes with the package's own library prepended so
        ## vignette engines registered by dependencies can be found;
        ## restore the library path immediately afterwards.
        libpaths <- .libPaths()
        .libPaths(c(libdir, libpaths))
        vigns <- pkgVignettes(dir = pkgdir)
        .libPaths(libpaths)
        if (is.null(vigns) || !length(vigns$docs)) return()
        if(do_install && !spec_install && !is_base_pkg && !extra_arch) {
            ## fake installs don't install inst/doc
            checkingLog(Log, "for unstated dependencies in vignettes")
            Rcmd <- paste("options(warn=1, showErrorCalls=FALSE)\n",
                          sprintf("tools:::.check_packages_used_in_vignettes(package = \"%s\")\n", pkgname))
            out <- R_runR2(Rcmd, "R_DEFAULT_PACKAGES=NULL")
            if (length(out)) {
                noteLog(Log)
                printLog0(Log, paste(c(out, ""), collapse = "\n"))
            } else resultLog(Log, "OK")
        }
        checkingLog(Log, "package vignettes in ", sQuote("inst/doc"))
        ## NOTE: the local flag 'any' shadows base::any as a variable;
        ## calls such as any(...) below still resolve to the function.
        any <- FALSE
        ## Do PDFs or HTML files exist for all package vignettes?
        ## A base source package need not have PDFs to avoid
        ## frequently-changing binary files in the SVN archive.
        if (!is_base_pkg) {
            dir <- file.path(pkgdir, "inst", "doc")
            outputs <- character(length(vigns$docs))
            for (i in seq_along(vigns$docs)) {
                file <- vigns$docs[i]
                name <- vigns$names[i]
                engine <- vignetteEngine(vigns$engines[i])
                ## NA marks a vignette whose built output cannot be found.
                outputs[i] <- tryCatch({
                    find_vignette_product(name, what="weave", final=TRUE, dir=dir, engine = engine)
                }, error = function(ex) NA)
            }
            bad_vignettes <- vigns$docs[is.na(outputs)]
            if (nb <- length(bad_vignettes)) {
                any <- TRUE
                warningLog(Log)
                msg <- ngettext(nb,
                                "Package vignette without corresponding PDF/HTML:\n",
                                "Package vignettes without corresponding PDF/HTML:\n", domain = NA)
                printLog0(Log, msg)
                printLog0(Log,
                          paste(c(paste(" ",
                                        sQuote(basename(bad_vignettes))),
                                  "", ""), collapse = "\n"))
            }
            defaultEncoding <- .get_package_metadata(pkgdir)["Encoding"]
            encs <- vapply(vigns$docs, getVignetteEncoding, "", default = defaultEncoding)
            bad_vignettes <- vigns$docs[encs == "non-ASCII"]
            if(nb <- length(bad_vignettes)) {
                if(!any) warningLog(Log)
                any <- TRUE
                msg <- ngettext(nb,
                                "Non-ASCII package vignette without specified encoding:\n",
                                "Non-ASCII package vignettes without specified encoding:\n", domain = NA)
                printLog0(Log, " ", msg)
                printLog0(Log,
                          paste(c(paste(" ",
                                        sQuote(basename(bad_vignettes))),
                                  "", ""), collapse = "\n"))
            }
        }
        # FIXME: we should do this check in build, not here. Currently not doing it at all.
        # ## Do any of the .R files which will be generated
        # ## exist in inst/doc? If so the latter will be ignored,
        # sources <-
        #     basename(list_files_with_exts(file.path(pkgdir, "inst/doc"), "R"))
        # custom <- !is.na(desc["VignetteBuilder"])
        # if (length(sources) && !custom) {
        #     new_sources <- paste0(vigns$names, ".R")
        #     dups <- sources[sources %in% new_sources]
        #     if(nb <- length(dups)) {
        #         if(!any) warningLog(Log)
        #         any <- TRUE
        #         msg <- ngettext(nb,
        #                         "Unused file in 'inst/doc' which is pointless or misleading",
        #                         "Unused files in 'inst/doc' which are pointless or misleading", domain = NA)
        #         printLog(Log, "  ",
        #                  paste(msg,
        #                        " as they will be re-created from the vignettes:", "",
        #                        sep = "\n"))
        #         printLog(Log,
        #                  paste(c(paste(" ", dups), "", ""),
        #                        collapse = "\n"))
        #     }
        # }
        ## avoid case-insensitive matching
        if ("makefile" %in% dir(vigns$dir)) {
            if(!any) warningLog(Log)
            any <- TRUE
            printLog(Log,
                     " Found 'inst/doc/makefile': should be 'Makefile' and will be ignored\n")
        }
        if ("Makefile" %in% dir(vigns$dir)) {
            f <- file.path(vigns$dir, "Makefile")
            lines <- readLines(f, warn = FALSE)
            ## remove comment lines
            lines <- filtergrep("^[[:space:]]*#", lines)
            if(any(grepl("[^/]R +CMD", lines))) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog(Log,
                         " Found 'R CMD' in Makefile: should be '\"$(R_HOME)/bin/R\" CMD'\n")
            }
            contents <- readChar(f, file.size(f), useBytes = TRUE)
            if(any(grepl("\r", contents, fixed = TRUE, useBytes = TRUE))) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog(Log, "Found Makefile with CR or CRLF line endings:\n")
                printLog(Log, "some Unix 'make' programs require LF line endings.\n")
            }
            if(any(grepl("[^/]Rscript", lines))) {
                if(!any) warningLog(Log)
                any <- TRUE
                printLog(Log,
                         " Found 'Rscript' in Makefile: should be '\"$(R_HOME)/bin/Rscript\"'\n")
            }
        }
        ## If the vignettes declare an encoding, are they actually in it?
        ## (We don't check the .tex, though)
        bad_vignettes <- character()
        for (i in seq_along(vigns$docs)) {
            v <- vigns$docs[i]
            enc <- vigns$encodings[i]
            if (enc %in% c("", "non-ASCII", "unknown")) next
            lines <- readLines(v, warn = FALSE) # some miss final NA
            ## iconv() with toRaw = TRUE yields NULL elements for lines
            ## that cannot be converted from the declared encoding.
            lines2 <- iconv(lines, enc, "UTF-16LE", toRaw = TRUE)
            if(any(vapply(lines2, is.null, TRUE)))
                bad_vignettes <- c(bad_vignettes, v)
            ## NOTE(review): this reporting block sits *inside* the loop,
            ## so once one bad vignette is found the message is emitted
            ## again (with the growing list) on every later iteration --
            ## confirm this is intended; moving it after the loop would
            ## report exactly once.
            if(nb <- length(bad_vignettes)) {
                if(!any) warningLog(Log)
                any <- TRUE
                msg <- ngettext(nb,
                                "Package vignette which is not in its specified encoding:\n",
                                "Package vignettes which are not in their specified encoding:\n", domain = NA)
                printLog0(Log, " ", msg)
                printLog0(Log,
                          paste(c(paste(" ",
                                        sQuote(basename(bad_vignettes))),
                                  "", ""), collapse = "\n"))
            }
        }
        if (!any) resultLog(Log, "OK")
        if (do_install && do_vignettes) {
            ## Can we run the code in the vignettes?
            ## Should checking the vignettes assume the system default
            ## packages, or just base?
            ## FIXME: should we do this for multiple sub-archs?
            ## Re-building the vignette outputs also runs the code, so
            ## doing so as well creates no additional value unless the
            ## results are compared against saved results (which could
            ## perhaps also be integrated into buildVignettes().
            ## Hence, when re-building, skip running the code when there
            ## are no saved results.
            ## Could make this controllable via some env var ...
            build_vignettes <-
                parse_description_field(desc, "BuildVignettes", TRUE)
            if (!build_vignettes && as_cran) {
                ## FOSS packages must be able to rebuild their vignettes
                info <- analyze_license(desc["License"])
                build_vignettes <- info$is_verified
            }
            do_build_vignettes <- do_build_vignettes && build_vignettes
            skip_run_maybe <-
                R_check_vignettes_skip_run_maybe && do_build_vignettes
            vigns <- pkgVignettes(dir = pkgdir)
            savefiles <-
                file.path(dirname(vigns$docs),
                          paste0(vigns$names, ".Rout.save"))
            ran <- FALSE
            if(!skip_run_maybe || any(file.exists(savefiles))) {
                checkingLog(Log, "running R code from vignettes")
                ## 'res' accumulates failure output across vignettes.
                res <- character()
                cat("\n")
                def_enc <- desc["Encoding"]
                if( (is.na(def_enc))) def_enc <- ""
                t1 <- proc.time()
                iseq <- seq_along(savefiles)
                ## When re-building anyway, only run vignettes that have
                ## saved .Rout.save results to compare against.
                if(skip_run_maybe)
                    iseq <- iseq[file.exists(savefiles)]
                for (i in iseq) {
                    file <- vigns$docs[i]
                    name <- vigns$names[i]
                    enc <- vigns$encodings[i]
                    cat(" ", sQuote(basename(file)),
                        if(nzchar(enc)) paste("using", sQuote(enc)),
                        "...")
                    Rcmd <- paste0("options(warn=1)\ntools:::.run_one_vignette('",
                                   basename(file), "', '", vigns$dir, "'",
                                   if (nzchar(enc))
                                       paste0(", encoding = '", enc, "'"),
                                   ", pkgdir='", vigns$pkgdir, "')")
                    outfile <- paste0(basename(file), ".log")
                    tlim <- get_timeout(Sys.getenv("_R_CHECK_ONE_VIGNETTE_ELAPSED_TIMEOUT_",
                                                   Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
                    t1b <- proc.time()
                    status <- R_runR0(Rcmd,
                                      if (use_valgrind) paste(R_opts2, "-d valgrind") else R_opts2,
                                      ## add timing as footer, as BATCH does
                                      env = c(jitstr, "R_BATCH=1234", elibs,
                                              "_R_CHECK_INTERNALS2_=1"),
                                      stdout = outfile, stderr = outfile,
                                      timeout = tlim)
                    t2b <- proc.time()
                    out <- readLines(outfile, warn = FALSE)
                    ## 'ran' records whether any vignette produced real
                    ## output (or failed) before the success marker.
                    pos <- which(out == " *** Run successfully completed ***")
                    if(!length(pos) || any(nzchar(out[seq_len(pos[1L] - 1L)])))
                        ran <- TRUE
                    savefile <- savefiles[i]
                    if(length(grep("^ When (running|tangling|sourcing)", out,
                                   useBytes = TRUE))) {
                        cat(" failed\n")
                        keep <- as.numeric(Sys.getenv("_R_CHECK_VIGNETTES_NLINES_",
                                                      "10"))
                        res <- if (keep > 0)
                            c(res,
                              paste("when running code in", sQuote(basename(file))),
                              " ...",
                              utils::tail(out, keep))
                        else
                            c(res,
                              paste("when running code in", sQuote(basename(file))),
                              out)
                    } else if(status || " *** Run successfully completed ***" %notin% out) {
                        ## (Need not be the final line if running under valgrind)
                        keep <- as.numeric(Sys.getenv("_R_CHECK_VIGNETTES_NLINES_",
                                                      "10"))
                        cat(" failed to complete the test\n")
                        out <- c(out, "", "... incomplete output.  Crash?")
                        res <- if (keep > 0)
                            c(res,
                              paste("when running code in", sQuote(basename(file))),
                              " ...",
                              utils::tail(out, keep))
                        else
                            c(res,
                              paste("when running code in", sQuote(basename(file))),
                              out)
                    } else if (file.exists(savefile)) {
                        ## Compare the run's output to the saved results.
                        cmd <- paste0("invisible(tools::Rdiff('",
                                      outfile, "', '", savefile, "',TRUE,TRUE))")
                        out2 <- R_runR0(cmd, R_opts2)
                        if(length(out2)) {
                            print_time(t1b, t2b, NULL)
                            cat("\ndifferences from ", sQuote(basename(savefile)),
                                "\n", sep = "")
                            writeLines(c(out2, ""))
                        } else {
                            print_time(t1b, t2b, NULL)
                            cat(" OK\n")
                            if (!config_val_to_logical(Sys.getenv("_R_CHECK_ALWAYS_LOG_VIGNETTE_OUTPUT_", use_valgrind)))
                                unlink(outfile)
                        }
                    } else {
                        print_time(t1b, t2b, NULL)
                        cat(" OK\n")
                        if (!config_val_to_logical(Sys.getenv("_R_CHECK_ALWAYS_LOG_VIGNETTE_OUTPUT_", use_valgrind)))
                            unlink(outfile)
                    }
                    ## Per-vignette CPU/elapsed ratio check (threshold
                    ## 'theta' from the env var read at the top).
                    if(!WINDOWS && !is.na(theta)) {
                        td <- t2b - t1b
                        cpu <- sum(td[-3L])
                        if(cpu >= pmax(theta * td[3L], 1)) {
                            ratio <- round(cpu/td[3L], 1L)
                            cat(sprintf("Running R code from vignette %s had CPU time %g times elapsed time\n",
                                        sQuote((basename(file))), ratio))
                        }
                    }
                }
                t2 <- proc.time()
                if(!ran) {
                    resultLog(Log, "NONE")
                } else {
                    print_time(t1, t2, Log)
                    if(R_check_suppress_RandR_message)
                        res <- filtergrep('^Xlib: *extension "RANDR" missing on display',
                                          res, useBytes = TRUE)
                    if(length(res)) {
                        ## A missing suggested package only warrants a
                        ## warning; anything else is an error.
                        if(length(grep("there is no package called", res,
                                       useBytes = TRUE))) {
                            warningLog(Log, "Errors in running code in vignettes:")
                            printLog0(Log, paste(c(res, "", ""), collapse = "\n"))
                        } else {
                            errorLog(Log, "Errors in running code in vignettes:")
                            printLog0(Log, paste(c(res, "", ""), collapse = "\n"))
                            maybe_exit(1L)
                        }
                    } else resultLog(Log, "OK")
                    if(!WINDOWS && !is.na(theta)) {
                        td <- t2 - t1
                        cpu <- sum(td[-3L])
                        if(cpu >= pmax(theta * td[3L], 1)) {
                            ratio <- round(cpu/td[3L], 1L)
                            printLog(Log,
                                     sprintf("Running R code from vignettes had CPU time %g times elapsed time\n",
                                             ratio))
                        }
                    }
                }
            }
            if (do_build_vignettes) {
                checkingLog(Log, "re-building of vignette outputs")
                ## copy the whole pkg directory to check directory
                ## so we can work in place, and allow ../../foo references.
                dir.create(vd2 <- "vign_test")
                if (!dir.exists(vd2)) {
                    errorLog(Log, "unable to create 'vign_test'")
                    summaryLog(Log)
                    do_exit(1L)
                }
                file.copy(pkgdir, vd2, recursive = TRUE)
                ## since so many people use 'R CMD' in Makefiles,
                oPATH <- Sys.getenv("PATH")
                Sys.setenv(PATH = paste(R.home("bin"), oPATH,
                                        sep = .Platform$path.sep))
                on.exit(Sys.setenv(PATH = oPATH))
                ## And too many 'vignettes/Makefile's are not safe for
                ## parallel makes
                Sys.setenv(MAKEFLAGS="")
                ## we could use clean = FALSE, but that would not be
                ## testing what R CMD build uses.
                Rcmd <- "options(warn=1)\nlibrary(tools)\n"
                Rcmd <- paste0(Rcmd, "buildVignettes(dir = '",
                               file.path(pkgoutdir, "vign_test", pkgname0),
                               "')")
                tlim <- get_timeout(Sys.getenv("_R_CHECK_BUILD_VIGNETTES_ELAPSED_TIMEOUT_",
                                               Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
                t1 <- proc.time()
                outfile <- file.path(pkgoutdir, "build_vignettes.log")
                status <- R_runR0(Rcmd, R_opts2, c(jitstr, elibs),
                                  stdout = outfile, stderr = outfile,
                                  timeout = tlim)
                t2 <- proc.time()
                out <- readLines(outfile, warn = FALSE)
                if(R_check_suppress_RandR_message)
                    out <- filtergrep('^Xlib: *extension "RANDR" missing on display',
                                      out, useBytes = TRUE)
                warns <- grep("^Warning: file .* is not portable",
                              out, value = TRUE, useBytes = TRUE)
                print_time(t1, t2, Log)
                if (status) {
                    keep <- as.numeric(Sys.getenv("_R_CHECK_VIGNETTES_NLINES_",
                                                  "25"))
                    ## If the code was not (successfully) run above, a
                    ## re-build failure is a warning, otherwise a note.
                    if(skip_run_maybe || !ran) warningLog(Log) else noteLog(Log)
                    if(keep > 0) out <- utils::tail(out, keep)
                    printLog0(Log,
                              paste(c("Error in re-building vignettes:",
                                      " ...", out, "", ""), collapse = "\n"))
                } else if(nw <- length(warns)) {
                    if(skip_run_maybe || !ran) warningLog(Log) else noteLog(Log)
                    msg <- ngettext(nw,
                                    "Warning in re-building vignettes:\n",
                                    "Warnings in re-building vignettes:\n",
                                    domain = NA)
                    wrapLog(msg)
                    printLog0(Log, .format_lines_with_indent(warns), "\n")
                } else {
                    ## clean up
                    if (config_val_to_logical(Sys.getenv("_R_CHECK_CLEAN_VIGN_TEST_", "true")))
                        unlink(vd2, recursive = TRUE)
                    if (!config_val_to_logical(Sys.getenv("_R_CHECK_ALWAYS_LOG_VIGNETTE_OUTPUT_", "false")))
                        unlink(outfile)
                    resultLog(Log, "OK")
                }
                if(!WINDOWS && !is.na(theta)) {
                    td <- t2 - t1
                    cpu <- sum(td[-3L])
                    if(cpu >= pmax(theta * td[3L], 1)) {
                        ratio <- round(cpu/td[3L], 1L)
                        printLog(Log,
                                 sprintf("Re-building vignettes had CPU time %g times elapsed time\n",
                                         ratio))
                    }
                }
            } else {
                checkingLog(Log, "re-building of vignette outputs")
                resultLog(Log, "SKIPPED")
            }
        } else {
            checkingLog(Log, "running R code from vignettes")
            resultLog(Log, "SKIPPED")
            checkingLog(Log, "re-building of vignette outputs")
            resultLog(Log, "SKIPPED")
        }
    }
## Build the PDF reference manual via 'R CMD Rd2pdf' when the package
## has help pages ('help' dir when installed, 'man' dir in sources).
##
## pkgdir  - directory of the (installed or source) package
## pkgname - package name, used for the <pkgname>-manual.* file names
##
## On LaTeX failure the intermediate log/tex files are preserved as
## <pkgname>-manual.log/.tex and a second attempt is made without
## hyperrefs or index before giving up via maybe_exit().
check_pkg_manual <- function(pkgdir, pkgname)
{
        ## Run Rd2pdf on the manual, if there are man pages
        ## If it is installed there is a 'help' dir
        ## and for a source package, there is a 'man' dir
        if (dir.exists(file.path(pkgdir, "help")) ||
            dir.exists(file.path(pkgdir, "man"))) {
            tlim <- get_timeout(Sys.getenv("_R_CHECK_PKGMAN_ELAPSED_TIMEOUT_",
                                           Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
            topdir <- pkgdir
            Rd2pdf_opts <- "--batch --no-preview"
            checkingLog(Log, "PDF version of manual")
            ## Forward slashes so the path survives being passed through
            ## shQuote() on Windows.
            build_dir <- gsub("\\", "/", tempfile("Rd2pdf"), fixed = TRUE)
            man_file <- paste0(pkgname, "-manual.pdf ")
            ## precautionary remove in case some other attempt left it behind
            if(file.exists(man_file)) unlink(man_file)
            args <- c( "Rd2pdf ", Rd2pdf_opts,
                       paste0("--build-dir=", shQuote(build_dir)),
                       "--no-clean", "-o ", man_file , shQuote(topdir))
            res <- run_Rcmd(args, "Rdlatex.log", timeout = tlim)
            latex_log <- file.path(build_dir, "Rd2.log")
            if (file.exists(latex_log))
                file.copy(latex_log, paste0(pkgname, "-manual.log"))
            if (res == 11) { ## return code from Rd2pdf
                errorLog(Log, "Rd conversion errors:")
                lines <- readLines("Rdlatex.log", warn = FALSE)
                lines <- filtergrep("^(Hmm|Execution)", lines)
                printLog0(Log, paste(c(lines, ""), collapse = "\n"))
                unlink(build_dir, recursive = TRUE)
                maybe_exit(1L)
            } else if (res > 0) {
                ## Non-zero exit other than 11: LaTeX trouble.  Keep the
                ## generated .tex for inspection, then retry.
                latex_file <- file.path(build_dir, "Rd2.tex")
                if (file.exists(latex_file))
                    file.copy(latex_file, paste0(pkgname, "-manual.tex"))
                warningLog(Log)
                printLog0(Log,
                          paste0("LaTeX errors when creating PDF version.\n",
                                 "This typically indicates Rd problems.\n"))
                ## If possible, indicate the problems found.
                if (file.exists(latex_log)) {
                    lines <- .get_LaTeX_errors_from_log_file(latex_log)
                    printLog(Log, "LaTeX errors found:\n")
                    printLog0(Log, paste(c(lines, ""), collapse = "\n"))
                }
                unlink(build_dir, recursive = TRUE)
                ## for Windows' sake: errors can make it unwritable
                build_dir <- gsub("\\", "/", tempfile("Rd2pdf"), fixed = TRUE)
                checkingLog(Log, "PDF version of manual without hyperrefs or index")
                ## Also turn off hyperrefs.
                Sys.setenv(R_RD4PDF = "times")
                args <- c( "Rd2pdf ", Rd2pdf_opts,
                           paste0("--build-dir=", shQuote(build_dir)),
                           "--no-clean", "--no-index",
                           "-o ", man_file, topdir)
                if (run_Rcmd(args, "Rdlatex.log", timeout = tlim)) {
                    ## FIXME: the info is almost certainly in Rdlatex.log
                    errorLog(Log)
                    latex_log <- file.path(build_dir, "Rd2.log")
                    if (file.exists(latex_log))
                        file.copy(latex_log, paste0(pkgname, "-manual.log"))
                    else {
                        ## No log file and thus no chance to find out
                        ## what went wrong.  Hence, re-run without
                        ## redirecting stdout/stderr and hope that this
                        ## gives the same problem ...
                        # printLog(Log, "Error when running command:\n")
                        # cmd <- paste(c("R CMD", args), collapse = " ")
                        # printLog(Log, strwrap(cmd, indent = 2, exdent = 4), "\n")
                        printLog(Log, "Re-running with no redirection of stdout/stderr.\n")
                        unlink(build_dir, recursive = TRUE)
                        build_dir <- gsub("\\", "/", tempfile("Rd2pdf"), fixed = TRUE)
                        args <- c( "Rd2pdf ", Rd2pdf_opts,
                                   paste0("--build-dir=", shQuote(build_dir)),
                                   "--no-clean", "--no-index",
                                   "-o ", paste0(pkgname, "-manual.pdf "),
                                   topdir)
                        run_Rcmd(args, timeout = tlim)
                    }
                    unlink(build_dir, recursive = TRUE)
                    maybe_exit(1L)
                } else {
                    unlink(build_dir, recursive = TRUE)
                    resultLog(Log, "OK")
                }
            } else {
                unlink(build_dir, recursive = TRUE)
                resultLog(Log, "OK")
            }
        }
    }
## Warn about (undeclared) executable files in the package sources.
## Uses the 'file' utility when a usable free version is detected
## ('gfile' on OpenCSW is also tried), otherwise falls back to simply
## matching .exe/.dll extensions.  Files listed in a top-level
## 'BinaryFiles' file are excluded when the corresponding check option
## allows.  Operates on globals: pkgdir, pkgname, Log,
## R_check_executables_exclusions, plus the %w/o% helper.
check_executables <- function()
{
        owd <- setwd(pkgdir)
        allfiles <- dir(".", all.files = TRUE, full.names = TRUE,
                        recursive = TRUE)
        allfiles <- sub("^./","", allfiles)
        ## this is tailored to the FreeBSD/Linux 'file',
        ## see http://www.darwinsys.com/file/
        ## (Solaris has a different 'file' without --version)
        ## Most systems are now on >= 5.03, but macOS 10.5 had 4.17
        ## version 4.21 writes to stdout,
        ## 4.23 to stderr and sets an error status code
        FILE <- "file"
        lines <- suppressWarnings(tryCatch(system2(FILE, "--version", TRUE, TRUE), error = function(e) "error"))
        ## a reasonable check -- it does not identify itself well
        have_free_file <- any(grepl("^(file-[45]|magic file from)", lines))
        if (!have_free_file) {
            ## OpenCSW calls this 'gfile'
            FILE <- "gfile"
            lines <- suppressWarnings(tryCatch(system2(FILE, "--version", TRUE, TRUE), error = function(e) "error"))
            have_free_file <- any(grepl("magic file from", lines))
        }
        if (have_free_file) {
            checkingLog(Log, "for executable files")
            ## Watch out for spaces in file names here
            ## Do in parallel for speed on Windows, but in batches
            ## since there may be a line-length limit.
            execs <- character()
            files <- allfiles
            while(ll <- length(files)) {
                ## 100 files per 'file' invocation.
                chunk <- seq_len(min(100, ll))
                these <- files[chunk]
                files <- files[-chunk]
                lines <- suppressWarnings(system2(FILE, shQuote(these), TRUE, TRUE))
                ## avoid match to is_executable.Rd
                ex <- grepl(" executable", lines, useBytes=TRUE)
                ## text scripts are reported as executable but are fine
                ex2 <- grepl("script", lines, useBytes=TRUE) &
                       grepl("text", lines, useBytes=TRUE)
                execs <- c(execs, lines[ex & !ex2])
            }
            if(length(execs)) {
                ## 'file' output is "<path>: <description>"; keep the path.
                execs <- sub(":[[:space:]].*$", "", execs, useBytes = TRUE)
                known <- rep(FALSE, length(execs))
                pexecs <- file.path(pkgname, execs)
                ## known false positives
                for(fp in c("foreign/tests/datefactor.dta",
                            "msProcess/inst/data[12]/.*.txt",
                            "WMBrukerParser/inst/Examples/C3ValidationExtractSmall/RobotRun1/2-100kDa/0_B1/1/1SLin/fid",
                            "bayesLife/inst/ex-data/bayesLife.output/predictions/traj_country104.rda", # file 5.16
                            "alm/inst/vign/cache/signposts1_c96f55a749822dd089b636087766def2.rdb" # Sparc Solaris, file 5.16
                            ) )
                    known <- known | grepl(fp, pexecs)
                execs <- execs[!known]
            }
        } else {
            ## no 'file', so just check extensions
            checkingLog(Log, "for .dll and .exe files")
            execs <- grep("\\.(exe|dll)$", allfiles, value = TRUE)
        }
        if (R_check_executables_exclusions && file.exists("BinaryFiles")) {
            excludes <- readLines("BinaryFiles")
            execs <- execs %w/o% excludes
        }
        if (nb <- length(execs)) {
            msg <- ngettext(nb,
                            "Found the following executable file:",
                            "Found the following executable files:",
                            domain = NA)
            warningLog(Log, msg)
            printLog0(Log, .format_lines_with_indent(execs), "\n")
            wrapLog("Source packages should not contain undeclared executable files.\n",
                    "See section 'Package structure'",
                    "in the 'Writing R Extensions' manual.\n")
        } else resultLog(Log, "OK")
        setwd(owd)
    }
## Basenames of hidden files that CRAN-pack already recognises, so
## check_dot_files(cran = TRUE) can report which found dot files would
## be known there.  Order is preserved from the original listing.
.hidden_file_exclusions <- c(
    ## R session / IDE state
    ".Renviron", ".Rprofile", ".Rproj.user", ".Rhistory", ".Rapp.history",
    ## LaTeX and graphics leftovers
    ".tex", ".log", ".aux", ".pdf", ".png",
    ## editor, VCS and desktop metadata
    ".backups", ".cvsignore", ".cproject", ".directory",
    ".dropbox", ".exrc", ".gdb.history",
    ".gitattributes", ".gitignore", ".gitmodules",
    ".hgignore", ".hgtags",
    ".project", ".seed", ".settings", ".tm_properties"
)
## Report hidden (dot-prefixed) files and directories in the package
## sources: these are almost always included by mistake.  With
## cran = TRUE, additionally report which of them CRAN-pack would
## already recognise (via .hidden_file_exclusions and a few known
## path patterns).  Works in pkgdir; logging goes through Log.
check_dot_files <- function(cran = FALSE)
{
        checkingLog(Log, "for hidden files and directories")
        prev_wd <- setwd(pkgdir)
        ## Hidden regular files, paths made relative to the package root.
        hidden <- dir(".", all.files = TRUE, full.names = TRUE,
                      recursive = TRUE, pattern = "^[.]")
        hidden <- sub("^./", "", hidden)
        ## These dot files are legitimate parts of a source package.
        hidden <- hidden %w/o%
            c(".Rbuildignore", ".Rinstignore", "vignettes/.install_extras")
        ## Hidden directories; ".aspell" is allowed (spell-check data).
        all_dirs <- list.dirs(".", full.names = TRUE, recursive = TRUE)
        all_dirs <- sub("^./", "", all_dirs)
        all_dirs <- all_dirs[all_dirs != "."]
        dot_dirs <- all_dirs[startsWith(basename(all_dirs), ".")]
        hidden <- c(hidden, setdiff(dot_dirs, ".aspell"))
        if (length(hidden)) {
            noteLog(Log, "Found the following hidden files and directories:")
            printLog0(Log, .format_lines_with_indent(hidden), "\n")
            wrapLog("These were most likely included in error.",
                    "See section 'Package structure'",
                    "in the 'Writing R Extensions' manual.\n")
            if (cran) {
                recognized <- basename(hidden) %in% .hidden_file_exclusions |
                    grepl("^.Rbuildindex[.]", hidden) |
                    endsWith(hidden, "inst/doc/.Rinstignore") |
                    endsWith(hidden, "inst/doc/.build.timestamp") |
                    endsWith(hidden, "vignettes/.Rinstignore") |
                    grepl("^src.*/[.]deps$", hidden)
                if (all(recognized))
                    printLog(Log, "\nCRAN-pack knows about all of these\n")
                else if (any(!recognized)) {
                    printLog(Log, "\nCRAN-pack does not know about\n")
                    printLog0(Log, .format_lines_with_indent(hidden[!recognized]), "\n")
                }
            }
        } else resultLog(Log, "OK")
        setwd(prev_wd)
    }
check_install <- function()
{
## Option '--no-install' turns off installation and the tests
## which require the package to be installed. When testing
## recommended packages bundled with R we can skip
## installation, and do so if '--install=skip' was given. If
## command line option '--install' is of the form
## 'check:FILE', it is assumed that installation was already
## performed with stdout/stderr redirected to FILE, the
## contents of which need to be checked (without repeating the
## installation). In this case, one also needs to specify
## *where* the package was installed to using command line
## option '--library'.
if (install == "skip")
messageLog(Log, "skipping installation test")
else {
tlim <- get_timeout(Sys.getenv("_R_CHECK_INSTALL_ELAPSED_TIMEOUT_",
Sys.getenv("_R_CHECK_ELAPSED_TIMEOUT_")))
use_install_log <-
(startsWith(install, "check") || R_check_use_install_log
|| !isatty(stdout()))
INSTALL_opts <- install_args
## don't use HTML, checkRd goes over the same ground.
INSTALL_opts <- c(INSTALL_opts, "--no-html")
if (install == "fake")
INSTALL_opts <- c(INSTALL_opts, "--fake")
else if (!multiarch)
INSTALL_opts <- c(INSTALL_opts, "--no-multiarch")
INSTALL_opts <- paste(INSTALL_opts, collapse = " ")
args <- c("INSTALL", "-l", shQuote(libdir), INSTALL_opts,
shQuote(if (WINDOWS) utils::shortPathName(pkgdir) else pkgdir))
if (!use_install_log) {
## Case A: No redirection of stdout/stderr from installation.
## This is very rare: needs _R_CHECK_USE_INSTALL_LOG_ set
## to false.
message("")
## Rare use of R CMD INSTALL
if (run_Rcmd(args, timeout = tlim)) {
errorLog(Log, "Installation failed.")
summaryLog(Log)
do_exit(1L)
}
message("")
} else {
## Case B. All output from installation redirected,
## or already available in the log file.
checkingLog(Log,
"whether package ", sQuote(desc["Package"]),
" can be installed")
outfile <- file.path(pkgoutdir, "00install.out")
if (startsWith(install, "check")) {
if (!nzchar(arg_libdir))
printLog(Log, "\nWarning: --install=check... specified without --library\n")
thislog <- install_log_path
if(!nzchar(thislog)) {
errorLog(Log,
sprintf("install log %s does not exist", sQuote(thislog)))
summaryLog(Log)
do_exit(2L)
}
file.copy(thislog, outfile)
install <- "check"
lines <- readLines(outfile, warn = FALSE)
## <NOTE>
## We used to have
## $install_error = ($lines[$#lines] !~ /^\* DONE/);
## but what if there is output from do_cleanup
## in (Unix) R CMD INSTALL?
## </NOTE>
install_error <- !any(grepl("^\\* DONE", lines))
} else {
## record in the log what options were used
cat("* install options ", sQuote(INSTALL_opts),
"\n\n", sep = "", file = outfile)
## env <- ""
## Normal use of R CMD INSTALL
t1 <- proc.time()
install_error <-
run_Rcmd(args, outfile, timeout = tlim)
t2 <- proc.time()
print_time(t1, t2, Log)
lines <- readLines(outfile, warn = FALSE)
}
if (install_error) {
errorLog(Log, "Installation failed.")
printLog0(Log, "See ", sQuote(outfile),
" for details.\n")
summaryLog(Log)
do_exit(1L)
}
## There could still be some important warnings that
## we'd like to report. For the time being, start
## with compiler warnings about non ISO C code (or
## at least, what looks like it), and also include
## warnings resulting from the const char * CHAR()
## change in R 2.6.0. (In theory, we should only do
## this when using GCC ...)
if (install != "check")
lines <- readLines(outfile, warn = FALSE)
lines0 <- lines
warn_re <- c("^WARNING:",
## This fires on ODS 12.5 warnings like
## Warning: original hides icu_55::PtnSkeleton::original.
## so filter out later.
"^Warning:",
## <FIXME>
## New style Rd conversion
## which may even show errors:
"^Rd (warning|error): ",
## </FIXME>
": warning: .*ISO C",
": warning: .* discards qualifiers from pointer target type",
": warning: .* is used uninitialized",
": warning: .* set but not used",
": warning: unused",
": warning: .* makes pointer from integer", # gcc
": warning: .* pointer.* conversion", # clang
": warning: improper pointer", # Solaris
": warning: unknown escape sequence", # gcc
": warning: use of non-standard escape character", # clang
## clang warning about invalid returns.
"warning: void function",
"warning: control reaches end of non-void function",
"warning: no return statement in function returning non-void",
## gcc-only form
## ": #warning",
## gcc indents these, igraph has space after #
"^ *# *warning",
## Solaris cc has
"Warning: # *warning",
# these are from era of static HTML
"missing links?:",
## From the byte compiler's 'warn' methods
"^Note: possible error in",
"^Note: (break|next) used in wrong context: no loop is visible"
)
## Warnings spotted by gcc with
## '-Wimplicit-function-declaration'
## which is implied by '-Wall'.
## Currently only accessible via an internal environment
## variable.
check_src_flag <-
Sys.getenv("_R_CHECK_SRC_MINUS_W_IMPLICIT_", "FALSE")
## (Not quite perfect, as the name should really
## include 'IMPLICIT_FUNCTION_DECLARATION'.)
if (config_val_to_logical(check_src_flag)) {
warn_re <- c(warn_re,
": warning: implicit declaration of function",
": warning: incompatible implicit declaration of built-in function")
}
## Warnings spotted by clang with
## '-Wreturn-type-c-linkage':
warn_re <- c(warn_re,
": warning: .* \\[-Wreturn-type-c-linkage\\]")
## gcc and clang warnings about sequencing
## gcc warnings
warn_re <- c(warn_re,
": warning: pointer of type .* used in arithmetic",
": warning: .* \\[-Wformat-contains-nul\\]",
": warning: .* \\[-Wformat-zero-length\\]",
": warning: .* \\[-Wpointer-to-int-cast\\]",
": warning: .* \\[-Wsequence-point\\]",
": warning: .* \\[-Wformat-overflow=\\]",
": warning: .* \\[-Wformat-truncation=\\]",
": warning: .* \\[-Wnonull",
": warning: .* \\[-Walloc-size-larger-than=\\]",
": warning: .* \\[-Wterminate\\]",
## Solaris warns on this next one. Also clang
": warning: .* \\[-Wint-conversion\\]",
## clang calls these 'a GNU extension'
": warning: .* GCC extension",
": warning: .* \\[-Wsizeof-pointer-memaccess\\]",
## usually | for ||, = for == (etc)
": warning: suggest parentheses around (comparison|assignment)",
": warning: .* \\[-Wstringop", # mainly gcc8
": warning: .* \\[-Wclass-memaccess\\]", # gcc8
## Fatal on clang and Solaris ODS
": warning: .* with a value, in function returning void"
)
## clang warnings
warn_re <- c(warn_re,
": warning: .* GNU extension",
": warning: .* \\[-Wdeprecated-register\\]",
": warning: .* \\[-Wformat-extra-args\\]", # also gcc
": warning: .* \\[-Wformat-security\\]",
": warning: .* \\[-Wheader-guard\\]",
": warning: .* \\[-Wpointer-arith\\]",
": warning: .* \\[-Wunsequenced\\]",
": warning: .* \\[-Wvla-extension\\]",
": warning: .* \\[-Wmismatched-new-delete\\]",
": warning: .* \\[-Wabsolute-value\\]",
": warning: .* \\[-Wreorder\\]", # also gcc
": warning: .* \\[-Wself-assign",
": warning: .* \\[-Wtautological", # also gcc
": warning: .* \\[-Wincompatible-pointer-types\\]",
": warning: format string contains '[\\]0'",
": warning: .* \\[-Wc[+][+]11-long-long\\]",
": warning: empty macro arguments are a C99 feature",
## for non-portable flags (seen in sub-Makefiles)
"warning: .* \\[-Wunknown-warning-option\\]"
)
warn_re <- paste0("(", paste(warn_re, collapse = "|"), ")")
lines <- grep(warn_re, lines, value = TRUE, useBytes = TRUE)
## gcc seems not to know the size of pointers, so skip
## some from -Walloc-size-larger-than= and -Wstringop-overflow=
lines <- grep("exceeds maximum object size.*-W(alloc-size-larger-than|stringop-overflow)", lines,
value = TRUE, useBytes = TRUE, invert = TRUE)
## skip for now some c++11-long-long warnings.
ex_re <- "(/BH/include/boost/|/RcppParallel/include/|/usr/include/|/usr/local/include/|/opt/X11/include/|/usr/X11/include/).*\\[-Wc[+][+]11-long-long\\]"
lines <- filtergrep(ex_re, lines, useBytes = TRUE)
## and GNU extensions in system headers
ex_re <- "^ *(/usr/|/opt/).*GNU extension"
lines <- filtergrep(ex_re, lines, useBytes = TRUE)
## and ODS 12.5 warnings
ex_re <- "^Warning: [[:alnum:]]+ hides"
lines <- filtergrep(ex_re, lines, useBytes = TRUE)
## Ignore install-time readLines() warnings about
## files with incomplete final lines. Most of these
## come from .install_package_indices(), and should be
## safe to ignore ...
lines <- filtergrep("Warning: incomplete final line found by readLines",
lines, useBytes = TRUE)
check_Stangle <- Sys.getenv("_R_CHECK_STANGLE_WARNINGS_", "TRUE")
if (!config_val_to_logical(check_Stangle))
lines <- filtergrep("Warning: value of .* option should be lowercase",
lines, useBytes = TRUE)
## Package writers cannot really do anything about
## non ISO C code in *system* headers. Also,
## GCC >= 3.4 warns about function pointers
## casts which are "needed" for dlsym(), but it
## seems that all systems which have dlsym() also
## support the cast. Hence, try to ignore these by
## default, but make it possible to get all ISO C
## warnings via an environment variable.
if (!R_check_all_non_ISO_C) {
lines <- filtergrep("^ */.*: warning: .*ISO C",
lines, useBytes = TRUE)
lines <- filtergrep("warning: *ISO C forbids.*function pointer",
lines, useBytes = TRUE)
if(WINDOWS) lines <- filtergrep(
"warning: *ISO C does not support.*ms_printf length modifier",
lines, useBytes = TRUE)
}
## Warnings spotted by gcc with
## '-Wunused'
## which is implied by '-Wall'.
## Currently only accessible via an internal environment
## variable.
check_src_flag <-
Sys.getenv("_R_CHECK_SRC_MINUS_W_UNUSED_", "FALSE")
if (!config_val_to_logical(check_src_flag)) {
lines <- filtergrep("warning: unused", lines,
ignore.case = TRUE, useBytes = TRUE)
lines <- filtergrep("warning: .* set but not used", lines,
ignore.case = TRUE, useBytes = TRUE)
}
## (gfortran seems to use upper case.)
## Warnings spotted by clang with
## '-Wsometimes-uninitialized'
## which is implied by '-Wall'.
## Currently only accessible via an internal environment
## variable.
check_src_flag <-
Sys.getenv("_R_CHECK_SRC_MINUS_W_SOMETIMES_UNINITIALIZED_",
"FALSE")
if (!config_val_to_logical(check_src_flag)) {
lines <- filtergrep("warning: .* is used uninitialized whenever",
lines, useBytes = TRUE)
}
## Warnings spotted by gfortran >= 4.0 with '-Wall'.
## Justified in principle, it seems.
## Let's filter them for the time being, and maybe
## revert this later on ... but make it possible to
## suppress filtering out by setting the internal
## environment variable _R_CHECK_WALL_FORTRAN_ to
## something "true".
## All gfortran -Wall warnings start Warning: so have been
## included. We exclude some now.
check_src_flag <- Sys.getenv("_R_CHECK_WALL_FORTRAN_", "FALSE")
if (!config_val_to_logical(check_src_flag)) {
warn_re <-
c("Label .* at \\(1\\) defined but not used",
"Line truncated at \\(1\\)",
"ASSIGN statement at \\(1\\)",
"Assigned GOTO statement at \\(1\\)",
"arithmetic IF statement at \\(1\\)",
"Nonconforming tab character (in|at)",
"Obsolescent feature:")
warn_re <- c(warn_re,
"Warning: .*\\[-Wconversion]",
## We retain [-Wuninitialized]
"Warning: .*\\[-Wmaybe-uninitialized]",
"Warning: .*\\[-Wintrinsic-shadow]",
## R itself uses these, the latter in LAPACK
"Warning: GNU Extension: DOUBLE COMPLEX",
"Warning: GNU Extension: .*COMPLEX[*]16"
)
check_src_flag <-
Sys.getenv("_R_CHECK_SRC_MINUS_W_UNUSED_", "FALSE")
if (!config_val_to_logical(check_src_flag))
warn_re <- c(warn_re,
"Warning: .*\\[-Wunused-function]",
"Warning: .*\\[-Wunused-dummy-argument]")
warn_re <- paste0("(", paste(warn_re, collapse = "|"), ")")
lines <- filtergrep(warn_re, lines)
}
if (WINDOWS) {
## Warning on Windows with some packages that
## cannot transparently be installed bi-arch.
lines <- filtergrep("Warning: this package has a non-empty 'configure.win' file",
lines)
## Warning on x64 Windows gcc 4.5.1 that
## seems to be spurious
lines <- filtergrep("Warning: .drectve .* unrecognized", lines)
}
check_imports_flag <-
Sys.getenv("_R_CHECK_REPLACING_IMPORTS_", "TRUE")
if (!config_val_to_logical(check_imports_flag))
lines <- filtergrep("Warning: replacing previous import", lines,
fixed = TRUE)
else {
this <- unique(grep("Warning: replacing previous import",
lines, fixed = TRUE, value = TRUE))
this <- grep(paste0(sQuote(pkgname), "$"), this,
value = TRUE)
lines <- filtergrep("Warning: replacing previous import", lines,
fixed = TRUE)
lines <- c(lines, this)
}
check_FirstLib_flag <-
Sys.getenv("_R_CHECK_DOT_FIRSTLIB_", "FALSE")
if (!config_val_to_logical(check_FirstLib_flag))
lines <- filtergrep("Warning: ignoring .First.lib()", lines,
fixed = TRUE)
lines <- unique(lines)
## Can get reports like
## Warning: No generic function ‘as.vector’ found corresponding to requested imported methods from package ‘Matrix’ when loading ‘MatrixModels’ (malformed exports?)
## Exclude these unless they are about the current package.
load_re <- "Warning: No generic function.*corresponding to requested imported methods"
ex <- grepl(load_re, lines, useBytes = TRUE) &
!grepl(pkgname, lines, fixed = TRUE, useBytes = TRUE)
lines <- lines[!ex]
note_re <-
"warning: control may reach end of non-void function"
notes <- grep(note_re, lines0, value = TRUE, useBytes = TRUE)
notes <- unique(notes)
if (length(lines)) {
warningLog(Log, "Found the following significant warnings:")
printLog0(Log, .format_lines_with_indent(lines), "\n")
if(length(notes)) {
printLog(Log,
"Found the following additional warnings:\n")
printLog0(Log, .format_lines_with_indent(notes),
"\n")
}
printLog0(Log, sprintf("See %s for details.\n",
sQuote(outfile)))
if(any(grepl("^Note:", lines, useBytes = TRUE)))
wrapLog("Information on the location(s)",
"of code generating the",
paste0(sQuote("Note"), "s"),
"can be obtained by re-running with",
"environment variable R_KEEP_PKG_SOURCE",
"set to 'yes'.\n")
} else if(length(notes)) {
noteLog(Log, "Found the following warnings:")
printLog0(Log, .format_lines_with_indent(notes), "\n")
printLog0(Log, sprintf("See %s for details.\n",
sQuote(outfile)))
} else resultLog(Log, "OK")
} ## end of case B
}
}
    ## Check the on-disk size of the installed package, giving a NOTE
    ## when it exceeds 5Mb and listing top-level subdirectories of 1Mb
    ## or more. Reads 'libdir'/'pkgname'/'Log' from the enclosing scope.
    ## This requires a GNU-like 'du' with 1k block sizes,
    ## so use -k (which POSIX requires).
    ## It also depends on the total being last.
    check_install_sizes <- function()
    {
        pd <- file.path(libdir, pkgname)
        ## if we used a log, the installation would not need to remain.
        if (!dir.exists(pd)) return()
        checkingLog(Log, "installed package size")
        owd <- setwd(pd)
        ## 'du -k' output lines are "<size-in-1k-blocks> <path>", with
        ## the grand total for "." as the final line.
        res <- system2("du", "-k", TRUE, TRUE)
        sizes <- as.integer(sub("\\D.*", "", res))
        dirs <- sub("^\\d*\\s*", "", res)
        res2 <- data.frame(size = sizes, dir = I(dirs))
        total <- res2[nrow(res2), 1L]
        if(!is.na(total) && total > 1024*5 && # report at 5Mb
           pkgname != "Matrix") { # <- large recommended package
            noteLog(Log)
            printLog(Log, sprintf(" installed size is %4.1fMb\n", total/1024))
            rest <- res2[-nrow(res2), ]
            rest[, 2L] <- sub("./", "", rest[, 2L])
            ## keep only top-level directories
            rest <- rest[!grepl("/", rest[, 2L]), ]
            rest <- rest[rest[, 1L] > 1024, ] # > 1Mb
            if(nrow(rest)) {
                o <- sort.list(rest[, 2L])
                printLog(Log, " sub-directories of 1Mb or more:\n")
                size <- sprintf('%4.1fMb', rest[, 1L]/1024)
                printLog0(Log,
                          paste0(" ", format(rest[o, 2L], justify = "left"),
                                 " ", format(size[o], justify = "right"), "\n"))
            }
        } else resultLog(Log, "OK")
        setwd(owd)
    }
    ## Read and validate the package DESCRIPTION file: it must exist
    ## (with exactly that name), parse correctly, and contain all
    ## mandatory fields; the package name must be portable. Any hard
    ## failure exits the check run via do_exit(); on success the parsed
    ## description (a named character vector) is returned.
    check_description <- function()
    {
        checkingLog(Log, "for file ",
                    sQuote(file.path(pkgname0, "DESCRIPTION")))
        if ("DESCRIPTION" %in% dir(pkgdir)) {
            f <- file.path(pkgdir, "DESCRIPTION")
            desc <- tryCatch(.read_description(f), error = identity)
            if(inherits(desc, "error")) {
                errorLog(Log, conditionMessage(desc))
                summaryLog(Log)
                do_exit(1L)
            } else if(!length(desc)) {
                errorLog(Log, "File DESCRIPTION exists but is not in correct format")
                summaryLog(Log)
                do_exit(1L)
            }
            mandatory <- c("Package", "Version", "License", "Description",
                           "Title", "Author", "Maintainer")
            OK <- sapply(desc[mandatory], function(x) !is.na(x) && nzchar(x))
            if(!all(OK)) {
                fail <- mandatory[!OK]
                msg <- ngettext(length(fail),
                                "Required field missing or empty:",
                                "Required fields missing or empty:")
                msg <- paste0(msg, "\n", .pretty_format(fail))
                errorLog(Log, msg)
                summaryLog(Log)
                do_exit(1L)
            }
            ## Portable names: start with a letter, then letters, digits
            ## or '.', at least 2 characters, and not ending in '.'.
            if(!grepl("^[[:alpha:]][[:alnum:].]*[[:alnum:]]$", desc["Package"])
               || grepl("[.]$", desc["Package"])) {
                warningLog(Log)
                printLog(Log," Package name is not portable:\n",
                         " It must start with a letter, contain letters, digits or dot\n",
                         " have at least 2 characters and not end with a dot.\n")
            } else resultLog(Log, "OK")
            encoding <- desc["Encoding"]
        } else if (file.exists(f <- file.path(pkgdir, "DESCRIPTION"))) {
            ## file.exists() matched although dir() did not list the
            ## exact name: a case-insensitive filesystem match.
            errorLog(Log,
                     "File DESCRIPTION does not exist but there is a case-insensitive match.")
            summaryLog(Log)
            do_exit(1L)
        } else {
            errorLog(Log,
                     "File DESCRIPTION does not exist")
            summaryLog(Log)
            do_exit(1L)
        }
        if(!is.na(desc["Type"])) { # standard packages do not have this
            checkingLog(Log, "extension type")
            if(desc["Type"] != "Package") {
                errorLog(Log,
                         sprintf("Extensions with Type %s cannot be checked.",
                                 sQuote(desc["Type"])))
                summaryLog(Log)
                do_exit(0L)
            } else resultLog(Log, desc["Type"])
        }
        if(!is.na(desc["Bundle"])) {
            checkingLog(Log, "package bundle")
            errorLog(Log,
                     sprintf("Looks like %s is a package bundle -- they are defunct",
                             sQuote(pkgname0)))
            summaryLog(Log)
            do_exit(1L)
        }
        messageLog(Log,
                   sprintf("this is package %s version %s",
                           sQuote(desc["Package"]),
                           sQuote(desc["Version"])))
        if (!is.na(encoding))
            messageLog(Log, "package encoding: ", encoding)
        desc
    }
    ## Run the CRAN incoming feasibility checks (used for --as-cran).
    ## 'localOnly' is passed through to .check_package_CRAN_incoming to
    ## suppress checks needing remote access. The branch order below
    ## determines severity: maintainer-only note < ERROR (bad package)
    ## < WARNING (version/dependency/maintainer issues) < NOTE.
    check_CRAN_incoming <- function(localOnly)
    {
        checkingLog(Log, "CRAN incoming feasibility")
        res <- .check_package_CRAN_incoming(pkgdir, localOnly)
        bad <- FALSE
        if(length(res)) {
            out <- format(res)
            if(length(out) == 1L && startsWith(out, "Maintainer: ")) {
                ## Special-case when there is only the maintainer
                ## address to note (if at all).
                maintainer <- res$Maintainer
                if(nzchar(maintainer) &&
                   identical(maintainer,
                             Sys.getenv("_R_CHECK_MAINTAINER_ADDRESS_"))) {
                    resultLog(Log, "OK")
                    out <- character()
                }
                else resultLog(Log, "Note_to_CRAN_maintainers")
            } else if(length(res$bad_package)) {
                errorLog(Log)
                bad <- TRUE
            } else if(length(res$bad_version) ||
                      length(res$strong_dependencies_not_in_mainstream_repositories) ||
                      isTRUE(res$foss_with_BuildVignettes) ||
                      res$Maintainer_invalid_or_multi_person ||
                      res$empty_Maintainer_name ||
                      res$Maintainer_needs_quotes)
                warningLog(Log)
            else if(length(res) > 1L) noteLog(Log)
            else resultLog(Log, "OK")
            printLog0(Log, c(paste(out, collapse = "\n\n"), "\n"))
            ## Only a bad package aborts, and only when exit-on-first-error
            ## is enabled (see maybe_exit).
            if(bad) maybe_exit(1L)
        } else resultLog(Log, "OK")
    }
check_dependencies <- function()
{
## Try figuring out whether the package dependencies can be
## resolved at run time. Ideally, the installation
## mechanism would do this, and we also do not check
## versions ... also see whether vignette and namespace
## package dependencies are recorded in DESCRIPTION.
## <NOTE>
## We are not checking base packages here, so all packages do
## have a description file.
## </NOTE>
## <NOTE>
## If a package has a namespace, checking dependencies will
## try making use of it without the NAMESPACE file ever
## being validated.
## Uncaught errors can lead to messages like
## * checking package dependencies ... ERROR
## Error in e[[1]] : object is not subsettable
## Execution halted
## which are not too helpful :-(
## Hence, we try to intercept this here.
if (!extra_arch &&
file.exists(file.path(pkgdir, "NAMESPACE"))) {
checkingLog(Log, "package namespace information")
ns <- tryCatch(parseNamespaceFile(basename(pkgdir),
dirname(pkgdir)),
error = function(e) {
errorLog(Log)
printLog0(Log,
"Invalid NAMESPACE file, parsing gives:",
"\n", as.character(e), "\n")
msg_NAMESPACE <-
c("See section 'Package namespaces'",
" in the 'Writing R Extensions' manual.\n")
wrapLog(msg_NAMESPACE)
summaryLog(Log)
do_exit(1L)
})
OK <- TRUE
## Look for empty importFrom
imp <- ns$imports
lens <- lengths(imp)
imp <- imp[lens == 2L]
nm <- sapply(imp, "[[", 1)
lens <- sapply(imp, function(x) length(x[[2]]))
bad <- nm[lens == 0L]
if(length(bad)) {
OK <- FALSE
msg <- if(length(bad) == 1L)
sprintf(" Namespace with empty importFrom: %s", sQuote(bad))
else
paste0(" Namespaces with empty importFrom:\n",
.pretty_format(sort(bad)))
noteLog(Log, msg)
}
nS3methods <- nrow(ns$S3methods)
if (nS3methods > 500L) {
## check that this is installable in R 3.0.1
meta <- .read_description(file.path(pkgdir, "DESCRIPTION"))
deps <- .split_description(meta, verbose = TRUE)$Rdepends2
status <- 0L
current <- as.numeric_version("3.0.1")
for(depends in deps) {
## .check_package_description will insist on these operators
if(depends$op %notin% c("<=", ">=", "<", ">", "==", "!="))
next
status <- if(inherits(depends$version, "numeric_version"))
!do.call(depends$op, list(current, depends$version))
else {
ver <- R.version
if (ver$status %in% c("", "Patched")) FALSE
else !do.call(depends$op,
list(ver[["svn rev"]],
as.numeric(sub("^r", "", depends$version))))
}
if(status != 0L) break
}
if (status == 0L) {
OK <- FALSE
msg <- sprintf("R < 3.0.2 had a limit of 500 registered S3 methods: found %d",
nS3methods)
noteLog(Log, msg)
}
}
if(OK) resultLog(Log, "OK")
}
checkingLog(Log, "package dependencies")
## Everything listed in Depends or Suggests or Imports
## should be available for successfully running R CMD check.
## \VignetteDepends{} entries not "required" by the package code
## must be in Suggests. Note also that some of us think that a
## package vignette must require its own package, which OTOH is
## not required in the package DESCRIPTION file.
## Namespace imports must really be in Depends.
res <- .check_package_depends(pkgdir, R_check_force_suggests,
check_incoming, ignore_vignettes)
if(any(lengths(res) > 0L)) {
out <- format(res)
allowed <- c("suggests_but_not_installed",
"enhances_but_not_installed",
"many_depends",
"skipped",
"hdOnly",
if(!check_incoming) "bad_engine")
if(!all(names(res) %in% allowed)) {
errorLog(Log)
printLog0(Log, paste(out, collapse = "\n"), "\n")
if(length(res$suggested_but_not_installed))
wrapLog("The suggested packages are required for",
"a complete check.\n",
"Checking can be attempted without them",
"by setting the environment variable",
"_R_CHECK_FORCE_SUGGESTS_",
"to a false value.\n\n")
wrapLog(msg_DESCRIPTION)
summaryLog(Log)
do_exit(1L)
} else {
noteLog(Log)
printLog0(Log, paste(out, collapse = "\n"))
}
} else resultLog(Log, "OK")
}
    ## Verify that we are checking a *source* package: reject installed
    ## packages (those with a 'Built' field), then warn/note about
    ## object files in 'src', leftover multi-arch build directories,
    ## implausible 'src' contents, stray object files elsewhere, and an
    ## installed copy of the package shipped in a subdirectory.
    check_sources <- function()
    {
        checkingLog(Log, "if this is a source package")
        ## <NOTE>
        ## This check should be adequate, but would not catch a manually
        ## installed package, nor one installed prior to 1.4.0.
        ## </NOTE>
        if (!is.na(desc["Built"])) {
            errorLog(Log)
            printLog(Log, "Only *source* packages can be checked.\n")
            summaryLog(Log)
            do_exit(1L)
        } else if (!startsWith(install, "check")) {
            ini <- character()
            ## Check for package 'src' subdirectories with object
            ## files (but not if installation was already performed).
            pat <- "(a|o|[ls][ao]|sl|obj|dll)" # Object file/library extensions.
            ## 'any' tracks whether anything was reported yet (the
            ## base function any() is still found for the calls below).
            any <- FALSE
            srcd <- file.path(pkgdir, "src")
            if (dir.exists(srcd) &&
                length(of <- list_files_with_exts(srcd, pat))) {
                if (!any) warningLog(Log)
                any <- TRUE
                of <- sub(paste0(".*/", file.path(pkgname, "src"), "/"),
                          "", of)
                printLog0(Log,
                          sprintf("Subdirectory %s contains apparent object files/libraries\n",
                                  sQuote(file.path(pkgname, "src"))),
                          paste(strwrap(paste(of, collapse = " "),
                                        indent = 2L, exdent = 2L),
                                collapse = "\n"),
                          "\nObject files/libraries should not be included in a source package.\n")
                ini <- ""
            }
            ## A submission had src-i386 etc from multi-arch builds
            ad <- list.dirs(pkgdir, recursive = FALSE)
            if(thispkg_src_subdirs != "no" &&
               any(ind <- grepl("/src-(i386|x64|x86_64|ppc)$", ad))) {
                if(!any) warningLog(Log)
                any <- TRUE
                msg <- ngettext(sum(ind),
                                "Found the following directory with a name of a multi-arch build directory:\n",
                                "Found the following directories with names of multi-arch build directories:\n",
                                domain = NA)
                printLog0(Log,
                          ini,
                          msg,
                          .format_lines_with_indent(basename(ad[ind])),
                          "\n",
                          "Most likely, these were included erroneously.\n")
                ini <- ""
            }
            if (thispkg_src_subdirs != "no" && dir.exists(srcd)) {
                setwd(srcd)
                ## With a Makefile the contents of 'src' can be anything,
                ## so only check packages using the default mechanism.
                if (!file.exists("Makefile") &&
                    !file.exists("Makefile.win") &&
                    !(file.exists("Makefile.in") && spec_install)) {
                    ## Recognized extensions for sources or headers.
                    srcfiles <- dir(".", all.files = TRUE)
                    srcfiles <- srcfiles[!dir.exists(srcfiles)]
                    srcfiles <- filtergrep(
                        "(\\.([cfmCM]|cc|cpp|f90|f95|mm|h|o|so)$|^Makevars|-win\\.def|^install\\.libs\\.R$)",
                        srcfiles)
                    if (length(srcfiles)) {
                        if (!any) warningLog(Log)
                        any <- TRUE
                        msg <- c(ini,
                                 paste("Subdirectory",
                                       sQuote("src"),
                                       "contains:"),
                                 strwrap(paste(srcfiles, collapse = " "),
                                         indent = 2, exdent = 2),
                                 strwrap("These are unlikely file names for src files."),
                                 "")
                        printLog0(Log, paste(msg, collapse = "\n"))
                        ini <- ""
                    }
                }
                setwd(startdir)
            }
            ## All remaining checks give notes and not warnings.
            if(length(ini))
                ini <- c("",
                         "In addition to the above warning(s), found the following notes:",
                         "")
            files <- list.files(pkgdir, recursive = TRUE)
            ## Check for object files not directly in src.
            ## (Note that the above does not look for object files in
            ## subdirs of src.)
            bad <- files[grepl(sprintf("\\.%s$", pat), basename(files))]
            bad <- bad[dirname(bad) != "src" |
                       dirname(dirname(bad)) != "."]
            if(length(bad)) {
                if(!any) noteLog(Log)
                any <- TRUE
                msg <- c(ini,
                         "Found the following apparent object files/libraries:",
                         strwrap(paste(bad, collapse = " "),
                                 indent = 2L, exdent = 2L),
                         "Object files/libraries should not be included in a source package.\n")
                printLog0(Log, paste(msg, collapse = "\n"))
                ini <- ""
            }
            ## Check for installed copies of the package in some subdir:
            ## a 'Meta' directory with package.rds and hsearch.rds is
            ## the signature of an installed package.
            files <- files[basename(dirname(files)) == "Meta"]
            if(length(files) &&
               all(!is.na(match(c("package.rds", "hsearch.rds"),
                                basename(files))))) {
                if(!any) noteLog(Log)
                any <- TRUE
                msg <- c(ini,
                         sprintf("Subdirectory %s seems to contain an installed version of the package.\n",
                                 sQuote(dirname(dirname(files[1L])))))
                printLog0(Log, paste(msg, collapse = "\n"))
            }
            if (!any) resultLog(Log, "OK")
        } else resultLog(Log, "OK")
    }
do_exit <-
if(no.q)
function(status) (if(status) stop else message)(
".check_packages() exit status ", status)
else
function(status) q("no", status = status, runLast = FALSE)
    ## Abort the whole check run with the given status when the
    ## 'R_check_exit_on_first_error' option (set elsewhere from
    ## the corresponding _R_CHECK_* environment variable) is enabled;
    ## otherwise do nothing so checking continues.
    maybe_exit <- function(status = 1L) {
        if (R_check_exit_on_first_error) {
            printLog(Log, "NOTE: Quitting check on first error.\n")
            summaryLog(Log)
            do_exit(status)
        }
    }
Usage <- function() {
cat("Usage: R CMD check [options] pkgs",
"",
"Check R packages from package sources, which can be directories or",
"package 'tar' archives with extension '.tar.gz', '.tar.bz2',",
"'.tar.xz' or '.tgz'.",
"",
"A variety of diagnostic checks on directory structure, index and",
"control files are performed. The package is installed into the log",
"directory and production of the package PDF manual is tested.",
"All examples and tests provided by the package are tested to see if",
"they run successfully. By default code in the vignettes is tested,",
"as is re-building the vignette PDFs.",
"",
"Options:",
" -h, --help print short help message and exit",
" -v, --version print version info and exit",
" -l, --library=LIB library directory used for test installation",
" of packages (default is outdir)",
" -o, --output=DIR directory for output, default is current directory.",
" Logfiles, R output, etc. will be placed in 'pkg.Rcheck'",
" in this directory, where 'pkg' is the name of the",
" checked package",
" --no-clean do not clean 'outdir' before using it",
" --no-codoc do not check for code/documentation mismatches",
" --no-examples do not run the examples in the Rd files",
" --no-install skip installation and associated tests",
" --no-tests do not run code in 'tests' subdirectory",
" --no-manual do not produce the PDF manual",
" --no-vignettes do not run R code in vignettes nor build outputs",
" --no-build-vignettes do not build vignette outputs",
" --ignore-vignettes skip all tests on vignettes",
" --run-dontrun do run \\dontrun sections in the Rd files",
" --run-donttest do run \\donttest sections in the Rd files",
" --use-gct use 'gctorture(TRUE)' when running examples/tests",
" --use-valgrind use 'valgrind' when running examples/tests/vignettes",
" --timings record timings for examples",
" --install-args= command-line args to be passed to INSTALL",
" --test-dir= look in this subdirectory for test scripts (default tests)",
" --no-stop-on-test-error do not stop running tests after first error",
" --check-subdirs=default|yes|no",
" run checks on the package subdirectories",
" (default is yes for a tarball, no otherwise)",
" --as-cran select customizations similar to those used",
" for CRAN incoming checking",
"",
"The following options apply where sub-architectures are in use:",
" --extra-arch do only runtime tests needed for an additional",
" sub-architecture.",
" --multiarch do runtime tests on all installed sub-archs",
" --no-multiarch do runtime tests only on the main sub-architecture",
" --force-multiarch run tests on all sub-archs even for packages",
" with no compiled code",
"",
"By default, all test sections are turned on.",
"",
"Report bugs at <https://bugs.R-project.org>.", sep="\n")
}
    ###--- begin{.check_packages()} "main" ---
    options(showErrorCalls=FALSE, warn = 1)
    ## Read in check environment file.
    Renv <- Sys.getenv("R_CHECK_ENVIRON", unset = NA_character_)
    if(!is.na(Renv)) {
        ## Do not read any check environment file if R_CHECK_ENVIRON is
        ## set to empty or something non-existent.
        if(nzchar(Renv) && file.exists(Renv)) readRenviron(Renv)
    } else {
        ## Read in ~/.R/check.Renviron[.rarch] (if it exists).
        rarch <- .Platform$r_arch
        if (nzchar(rarch) &&
            file.exists(Renv <- paste0("~/.R/check.Renviron.", rarch)))
            readRenviron(Renv)
        else if (file.exists(Renv <- "~/.R/check.Renviron"))
            readRenviron(Renv)
    }
    ## Timings threshold: unset or non-numeric _R_CHECK_TIMINGS_
    ## means no threshold (Inf).
    td0 <- as.numeric(Sys.getenv("_R_CHECK_TIMINGS_"))
    if (is.na(td0)) td0 <- Inf
    ## A user might have turned on JIT compilation. That does not
    ## work well, so mostly disable it, keeping a record of the
    ## original setting in 'jitstr'.
    jit <- Sys.getenv("R_ENABLE_JIT")
    jitstr <- if(nzchar(jit)) {
        Sys.setenv(R_ENABLE_JIT = "0")
        paste0("R_ENABLE_JIT=", jit)
    } else character()
    if (is.null(args)) {
        args <- commandArgs(TRUE)
        ## it seems that commandArgs() splits on spaces, so try harder:
        ## arguments arrive concatenated with 'nextArg' separators.
        args <- paste(args, collapse=" ")
        args <- strsplit(args,'nextArg', fixed = TRUE)[[1L]][-1L]
    }
    ## Defaults for the command-line options; possibly overridden by
    ## the option-processing loop below.
    clean <- TRUE
    do_codoc <- TRUE
    do_examples <- TRUE
    do_install_arg <- TRUE; install <- ""
    do_tests <- TRUE
    do_vignettes <- TRUE
    do_build_vignettes <- TRUE
    ignore_vignettes <- FALSE
    do_manual <- TRUE
    use_gct <- FALSE
    use_valgrind <- FALSE
    do_timings <- FALSE
    install_args <- NULL
    test_dir <- "tests"
    check_subdirs <- "" # defaults to R_check_subdirs_strict
    extra_arch <- FALSE
    spec_install <- FALSE
    multiarch <- NA # NA = detect usable sub-architectures below
    force_multiarch <- FALSE
    as_cran <- FALSE
    run_dontrun <- FALSE
    run_donttest <- FALSE
    stop_on_test_error <- TRUE
    libdir <- ""
    outdir <- ""
    pkgs <- character()
    ## Process the command-line arguments one at a time. Options come
    ## as '-x value' pairs, '--opt=value' or bare flags; anything not
    ## starting with '-' is collected into 'pkgs'.
    while(length(args)) {
        a <- args[1L]
        if (a %in% c("-h", "--help")) {
            Usage()
            do_exit(0L)
        }
        else if (a %in% c("-v", "--version")) {
            cat("R add-on package check: ",
                R.version[["major"]], ".", R.version[["minor"]],
                " (r", R.version[["svn rev"]], ")\n", sep = "")
            cat("",
                "Copyright (C) 1997-2017 The R Core Team.",
                "This is free software; see the GNU General Public License version 2",
                "or later for copying conditions. There is NO warranty.",
                sep="\n")
            do_exit(0L)
        } else if (a == "-o") {
            ## '-o DIR': consume the following argument as the value.
            if (length(args) >= 2L) {outdir <- args[2L]; args <- args[-1L]}
            else stop("-o option without value", call. = FALSE)
        } else if (substr(a, 1, 9) == "--output=") {
            outdir <- substr(a, 10, 1000)
        } else if (a == "-l") {
            if (length(args) >= 2L) {libdir <- args[2L]; args <- args[-1L]}
            else stop("-l option without value", call. = FALSE)
        } else if (substr(a, 1, 10) == "--library=") {
            libdir <- substr(a, 11, 1000)
        } else if (a == "--no-clean") {
            clean <- FALSE
        } else if (a == "--no-codoc") {
            do_codoc <- FALSE
        } else if (a == "--no-examples") {
            do_examples <- FALSE
        } else if (a == "--no-install") {
            do_install_arg <- FALSE
        } else if (substr(a, 1, 10) == "--install=") {
            install <- substr(a, 11, 1000)
        } else if (a == "--no-tests") {
            do_tests <- FALSE
        } else if (a == "--no-build-vignettes") {
            do_build_vignettes <- FALSE
        } else if (a == "--no-rebuild-vignettes") { # pre-3.0.0 version
            stop("'--no-rebuild-vignettes' is defunct: use '--no-build-vignettes' instead",
                 call. = FALSE, domain = NA)
        } else if (a == "--no-vignettes") {
            do_vignettes <- FALSE
        } else if (a == "--ignore-vignettes") {
            ignore_vignettes <- TRUE
            do_vignettes <- FALSE
            do_build_vignettes <- FALSE
        } else if (a == "--no-manual") {
            do_manual <- FALSE
        } else if (a == "--no-latex") {
            stop("'--no-latex' is defunct: use '--no-manual' instead",
                 call. = FALSE, domain = NA)
        } else if (a == "--run-dontrun") {
            run_dontrun <- TRUE
        } else if (a == "--run-donttest") {
            run_donttest <- TRUE
        } else if (a == "--use-gct") {
            use_gct <- TRUE
        } else if (a == "--use-valgrind") {
            use_valgrind <- TRUE
        } else if (a == "--timings") {
            do_timings <- TRUE
        } else if (substr(a, 1, 15) == "--install-args=") {
            install_args <- substr(a, 16, 1000)
        } else if (substr(a, 1, 11) == "--test-dir=") {
            test_dir <- substr(a, 12, 1000)
        } else if (substr(a, 1, 16) == "--check-subdirs=") {
            check_subdirs <- substr(a, 17, 1000)
        } else if (a == "--extra-arch") {
            extra_arch <- TRUE
        } else if (a == "--multiarch") {
            multiarch <- TRUE
        } else if (a == "--no-multiarch") {
            multiarch <- FALSE
        } else if (a == "--force-multiarch") {
            force_multiarch <- TRUE
        } else if (a == "--as-cran") {
            as_cran <- TRUE
        } else if (a == "--no-stop-on-test-error") {
            stop_on_test_error <- FALSE
        } else if (substr(a, 1, 9) == "--rcfile=") {
            warning("configuration files are not supported as from R 2.12.0")
        } else if (substr(a, 1, 1) == "-") {
            ## Unknown option: warn but keep going.
            message("Warning: unknown option ", sQuote(a))
        } else pkgs <- c(pkgs, a)
        args <- args[-1L]
    }
    ## record some of the options used (for reporting in the log).
    opts <- character()
    if (install == "fake") opts <- c(opts, "--install=fake")
    if (!do_install_arg) opts <- c(opts, "--no-install")
    if (install == "no") {
        opts <- c(opts, "--install=no")
        do_install_arg <- FALSE
    }
    if (run_dontrun) opts <- c(opts, "--run-dontrun")
    if (run_donttest) opts <- c(opts, "--run-donttest")
    opts0 <- opts # other options are added later.
    if (install == "fake") {
        ## If we fake installation, then we cannot *run* any code.
        do_examples <- do_tests <- do_vignettes <- do_build_vignettes <- 0
        spec_install <- TRUE
        multiarch <- FALSE
    }
    install_log_path <- ""
    if(startsWith(install, "check")) {
        ## Expand relative to absolute if possible.
        ## The path follows 'check:' in the --install= value.
        install_log_path <-
            tryCatch(file_path_as_absolute(substr(install, 7L, 1000L)),
                     error = function(e) "")
    }
    if (!isFALSE(multiarch)) {
        ## see if there are multiple installed architectures, and if they work
        if (WINDOWS) {
            ## always has sub-archs as from R 2.12.0.
            ## usually if two are installed, it was done on a 64-bit OS,
            ## but the filesystem might be shared between OSes.
            f <- dir(file.path(R.home(), "bin"))
            archs <- f[f %in% c("i386", "x64")]
            ## if we have x64, can only run it on a 64-bit OS
            if (length(archs) > 1L && !grepl("x64", utils::win.version()))
                archs <- "i386"
        } else {
            ## Sub-arch executables live in R_HOME/bin/exec; keep only
            ## those that actually run ('R --arch=... --version').
            wd2 <- setwd(file.path(R.home("bin"), "exec"))
            archs <- Sys.glob("*")
            setwd(wd2)
            if (length(archs) > 1L)
                for (arch in archs) {
                    if (arch == rarch) next
                    cmd <- paste0(file.path(R.home(), "bin", "R"),
                                  " --arch=", arch,
                                  " --version > /dev/null")
                    if (system(cmd)) archs <- archs[archs != arch]
                }
        }
        if (length(archs) <= 1L && isTRUE(multiarch))
            warning("'--multiarch' specified with only one usable sub-architecture",
                    call.=FALSE, immediate. = TRUE)
        multiarch <- length(archs) > 1L
    }
    ## Use system default unless explicitly specified otherwise.
    Sys.setenv(R_DEFAULT_PACKAGES="")
## Use system default unless explicitly specified otherwise.
Sys.setenv(R_DEFAULT_PACKAGES="")
## Configurable variables
R_check_use_install_log <-
config_val_to_logical(Sys.getenv("_R_CHECK_USE_INSTALL_LOG_", "TRUE"))
R_check_subdirs_nocase <-
config_val_to_logical(Sys.getenv("_R_CHECK_SUBDIRS_NOCASE_", "TRUE"))
R_check_all_non_ISO_C <-
config_val_to_logical(Sys.getenv("_R_CHECK_ALL_NON_ISO_C_", "FALSE"))
R_check_subdirs_strict <-
Sys.getenv("_R_CHECK_SUBDIRS_STRICT_", "default")
R_check_Rd_contents <-
config_val_to_logical(Sys.getenv("_R_CHECK_RD_CONTENTS_", "TRUE"))
R_check_Rd_line_widths <-
config_val_to_logical(Sys.getenv("_R_CHECK_RD_LINE_WIDTHS_", "FALSE"))
R_check_Rd_style <-
config_val_to_logical(Sys.getenv("_R_CHECK_RD_STYLE_", "TRUE"))
R_check_Rd_xrefs <-
config_val_to_logical(Sys.getenv("_R_CHECK_RD_XREFS_", "TRUE"))
R_check_use_codetools <-
config_val_to_logical(Sys.getenv("_R_CHECK_USE_CODETOOLS_", "TRUE"))
## However, we cannot use this if we did not install the recommended
## packages.
if(R_check_use_codetools) {
tmp <- tryCatch(find.package('codetools'), error = identity)
if(inherits(tmp, "error")) R_check_use_codetools <- FALSE
}
R_check_executables <-
config_val_to_logical(Sys.getenv("_R_CHECK_EXECUTABLES_", "TRUE"))
R_check_executables_exclusions <-
config_val_to_logical(Sys.getenv("_R_CHECK_EXECUTABLES_EXCLUSIONS_", "TRUE"))
R_check_permissions <-
config_val_to_logical(Sys.getenv("_R_CHECK_PERMISSIONS_",
as.character(.Platform$OS.type == "unix")))
R_check_dot_internal <-
config_val_to_logical(Sys.getenv("_R_CHECK_DOT_INTERNAL_", "TRUE"))
R_check_depr_def <-
config_val_to_logical(Sys.getenv("_R_CHECK_DEPRECATED_DEFUNCT_", "FALSE"))
R_check_ascii_code <-
config_val_to_logical(Sys.getenv("_R_CHECK_ASCII_CODE_", "TRUE"))
R_check_ascii_data <-
config_val_to_logical(Sys.getenv("_R_CHECK_ASCII_DATA_", "TRUE"))
R_check_compact_data <-
config_val_to_logical(Sys.getenv("_R_CHECK_COMPACT_DATA_", "TRUE"))
R_check_vc_dirs <-
config_val_to_logical(Sys.getenv("_R_CHECK_VC_DIRS_", "FALSE"))
R_check_pkg_sizes <-
config_val_to_logical(Sys.getenv("_R_CHECK_PKG_SIZES_", "TRUE")) &&
nzchar(Sys.which("du"))
R_check_doc_sizes <-
config_val_to_logical(Sys.getenv("_R_CHECK_DOC_SIZES_", "TRUE")) &&
nzchar(Sys.which(Sys.getenv("R_QPDF", "qpdf")))
R_check_doc_sizes2 <-
config_val_to_logical(Sys.getenv("_R_CHECK_DOC_SIZES2_", "FALSE"))
R_check_code_assign_to_globalenv <-
config_val_to_logical(Sys.getenv("_R_CHECK_CODE_ASSIGN_TO_GLOBALENV_",
"FALSE"))
R_check_code_attach <-
config_val_to_logical(Sys.getenv("_R_CHECK_CODE_ATTACH_", "FALSE"))
R_check_code_data_into_globalenv <-
config_val_to_logical(Sys.getenv("_R_CHECK_CODE_DATA_INTO_GLOBALENV_",
"FALSE"))
## Only relevant when the package is loaded, thus installed.
R_check_suppress_RandR_message <-
do_install_arg && config_val_to_logical(Sys.getenv("_R_CHECK_SUPPRESS_RANDR_MESSAGE_", "TRUE"))
R_check_force_suggests <-
config_val_to_logical(Sys.getenv("_R_CHECK_FORCE_SUGGESTS_", "TRUE"))
R_check_skip_tests_arch <-
unlist(strsplit(Sys.getenv("_R_CHECK_SKIP_TESTS_ARCH_"), ",")[[1]])
R_check_skip_examples_arch <-
unlist(strsplit(Sys.getenv("_R_CHECK_SKIP_EXAMPLES_ARCH_"), ",")[[1]])
R_check_skip_arch <-
unlist(strsplit(Sys.getenv("_R_CHECK_SKIP_ARCH_"), ",")[[1]])
R_check_unsafe_calls <-
config_val_to_logical(Sys.getenv("_R_CHECK_UNSAFE_CALLS_", "TRUE"))
R_check_depends_only <-
config_val_to_logical(Sys.getenv("_R_CHECK_DEPENDS_ONLY_", "FALSE"))
R_check_suggests_only <-
config_val_to_logical(Sys.getenv("_R_CHECK_SUGGESTS_ONLY_", "FALSE"))
R_check_FF <- Sys.getenv("_R_CHECK_FF_CALLS_", "true")
R_check_FF_DUP <-
config_val_to_logical(Sys.getenv("_R_CHECK_FF_DUP_", "TRUE"))
R_check_toplevel_files <-
config_val_to_logical(Sys.getenv("_R_CHECK_TOPLEVEL_FILES_", "FALSE"))
R_check_exit_on_first_error <-
config_val_to_logical(Sys.getenv("_R_CHECK_EXIT_ON_FIRST_ERROR_", "FALSE"))
R_check_vignettes_skip_run_maybe <-
config_val_to_logical(Sys.getenv("_R_CHECK_VIGNETTES_SKIP_RUN_MAYBE_",
"FALSE"))
R_check_serialization <-
config_val_to_logical(Sys.getenv("_R_CHECK_SERIALIZATION_", "FALSE"))
if (!nzchar(check_subdirs)) check_subdirs <- R_check_subdirs_strict
if (as_cran) {
if (extra_arch) {
message("'--as-cran' turns off '--extra-arch'")
extra_arch <- FALSE
}
Sys.setenv("_R_CHECK_TIMINGS_" = "10")
Sys.setenv("_R_CHECK_INSTALL_DEPENDS_" = "TRUE")
Sys.setenv("_R_CHECK_NO_RECOMMENDED_" = "TRUE")
Sys.setenv("_R_SHLIB_BUILD_OBJECTS_SYMBOL_TABLES_" = "TRUE")
Sys.setenv("_R_CHECK_DOT_FIRSTLIB_" = "TRUE")
Sys.setenv("_R_CHECK_PACKAGES_USED_CRAN_INCOMING_NOTES_" = "TRUE")
prev <- Sys.getenv("_R_CHECK_LIMIT_CORES_", NA_character_)
if(is.na(prev)) Sys.setenv("_R_CHECK_LIMIT_CORES_" = "TRUE")
prev <- Sys.getenv("_R_CHECK_SCREEN_DEVICE_", NA_character_)
if(is.na(prev)) Sys.setenv("_R_CHECK_SCREEN_DEVICE_" = "stop")
Sys.setenv("_R_CHECK_CODE_USAGE_VIA_NAMESPACES_" = "TRUE")
Sys.setenv("_R_CHECK_CODE_USAGE_WITH_ONLY_BASE_ATTACHED_" = "TRUE")
Sys.setenv("_R_CHECK_S3_METHODS_NOT_REGISTERED_" = "TRUE")
Sys.setenv("_R_CHECK_PACKAGE_DATASETS_SUPPRESS_NOTES_" = "TRUE")
Sys.setenv("_R_CHECK_PACKAGES_USED_IGNORE_UNUSED_IMPORTS_" = "TRUE")
Sys.setenv("_R_CHECK_NATIVE_ROUTINE_REGISTRATION_" = "TRUE")
Sys.setenv("_R_CHECK_NO_STOP_ON_TEST_ERROR_" = "TRUE")
Sys.setenv("_R_CHECK_PRAGMAS_" = "TRUE")
Sys.setenv("_R_CHECK_COMPILATION_FLAGS_" = "TRUE")
if(!nzchar(Sys.getenv("_R_CHECK_R_DEPENDS_")))
Sys.setenv("_R_CHECK_R_DEPENDS_" = "warn")
## until this is tested on Windows
Sys.setenv("_R_CHECK_R_ON_PATH_" = ifelse(WINDOWS, "FALSE", "TRUE"))
R_check_vc_dirs <- TRUE
R_check_executables_exclusions <- FALSE
R_check_doc_sizes2 <- TRUE
R_check_suggests_only <- TRUE
R_check_code_assign_to_globalenv <- TRUE
R_check_code_attach <- TRUE
R_check_code_data_into_globalenv <- TRUE
R_check_depr_def <- TRUE
R_check_Rd_line_widths <- TRUE
R_check_FF <- "registration"
do_timings <- TRUE
R_check_toplevel_files <- TRUE
R_check_vignettes_skip_run_maybe <- TRUE
R_check_serialization <- TRUE
} else {
## do it this way so that INSTALL produces symbols.rds
## when called from check but not in general.
if(is.na(Sys.getenv("_R_SHLIB_BUILD_OBJECTS_SYMBOL_TABLES_",
NA_character_)))
Sys.setenv("_R_SHLIB_BUILD_OBJECTS_SYMBOL_TABLES_" = "TRUE")
}
if (extra_arch) {
R_check_Rd_contents <- R_check_all_non_ISO_C <-
R_check_Rd_xrefs <- R_check_use_codetools <- R_check_Rd_style <-
R_check_executables <- R_check_permissions <-
R_check_dot_internal <- R_check_ascii_code <-
R_check_ascii_data <- R_check_compact_data <-
R_check_pkg_sizes <- R_check_doc_sizes <-
R_check_doc_sizes2 <-
R_check_unsafe_calls <-
R_check_toplevel_files <- FALSE
R_check_Rd_line_widths <- FALSE
}
startdir <- getwd()
if (is.null(startdir))
stop("current working directory cannot be ascertained")
if (!nzchar(outdir)) outdir <- startdir
setwd(outdir)
outdir <- getwd()
setwd(startdir)
R_LIBS <- Sys.getenv("R_LIBS")
arg_libdir <- libdir
if (nzchar(libdir)) {
setwd(libdir)
libdir <- getwd()
Sys.setenv(R_LIBS = path_and_libPath(libdir, R_LIBS))
setwd(startdir)
}
## all the analysis code is run with --slave
## examples and tests are not.
R_opts <- "--vanilla"
R_opts2 <- "--vanilla --slave"
## do run Renviron.site for some multiarch runs
## We set R_ENVIRON_USER to skip .Renviron files.
R_opts3 <- "--no-site-file --no-init-file --no-save --no-restore"
R_opts4 <- "--no-site-file --no-init-file --no-save --no-restore --slave"
env0 <- if(WINDOWS) "R_ENVIRON_USER='no_such_file'" else "R_ENVIRON_USER=''"
msg_DESCRIPTION <-
c("See section 'The DESCRIPTION file' in the 'Writing R Extensions' manual.\n")
if (!length(pkgs)) {
message("Error: no packages were specified")
do_exit(1L)
}
## This is the main loop over all packages to be checked.
for (pkg in pkgs) {
## pkg should be the path to the package root source
## directory, either absolute or relative to startdir.
## As from 2.1.0 it can also be a tarball
## The previous package may have set do_install to FALSE
do_install <- do_install_arg
no_examples <- FALSE
## $pkgdir is the corresponding absolute path.
## pkgname0 is the name of the top-level directory
## (and often the name of the package).
setwd(startdir)
pkg <- sub("/$", "", pkg) # strip any trailing '/'
pkgname0 <- basename(pkg)
is_ascii <- FALSE
thispkg_subdirs <- check_subdirs
## is this a tar archive?
if (dir.exists(pkg)) {
istar <- FALSE
if (thispkg_subdirs == "default") thispkg_subdirs <- "no"
} else if (file.exists(pkg)) {
istar <- TRUE
if (thispkg_subdirs == "default") thispkg_subdirs <- "yes-maybe"
pkgname0 <- sub("\\.(tar\\.gz|tgz|tar\\.bz2|tar\\.xz)$", "", pkgname0)
pkgname0 <- sub("_[0-9.-]*$", "", pkgname0)
} else {
warning(sQuote(pkg), " is neither a file nor directory, skipping\n",
domain = NA, call. = FALSE, immediate. = TRUE)
next
}
pkgoutdir <- file.path(outdir, paste0(pkgname0, ".Rcheck"))
if (clean && dir.exists(pkgoutdir)) {
unlink(pkgoutdir, recursive = TRUE)
if(WINDOWS) Sys.sleep(0.5) # allow for antivirus interference
}
dir.create(pkgoutdir, mode = "0755")
if (!dir.exists(pkgoutdir)) {
message(sprintf("ERROR: cannot create check dir %s", sQuote(pkgoutdir)))
do_exit(1L)
}
Log <- newLog(file.path(pkgoutdir, "00check.log"))
messageLog(Log, "using log directory ", sQuote(pkgoutdir))
messageLog(Log, "using ", R.version.string)
messageLog(Log, "using platform: ", R.version$platform,
" (", 8*.Machine$sizeof.pointer, "-bit)")
charset <-
if (l10n_info()[["UTF-8"]]) "UTF-8" else utils::localeToCharset()
messageLog(Log, "using session charset: ", charset)
is_ascii <- charset == "ASCII"
if(config_val_to_logical(Sys.getenv("_R_CHECK_R_ON_PATH_", "FALSE")))
add_dummies(file_path_as_absolute(pkgoutdir), Log)
if (istar) {
dir <- file.path(pkgoutdir, "00_pkg_src")
dir.create(dir, mode = "0755")
if (!dir.exists(dir)) {
checkingLog(Log, "whether tarball can be unpacked")
errorLog(Log, sprintf("cannot create %s", sQuote(dir)))
summaryLog(Log)
do_exit(1L)
}
## force the use of internal untar unless over-ridden
## so e.g. .tar.xz works everywhere
if (utils::untar(pkg, exdir = dir,
tar = Sys.getenv("R_INSTALL_TAR", "internal"))) {
checkingLog(Log, "whether tarball can be unpacked")
errorLog(Log, sprintf("cannot unpack %s", sQuote(pkg)))
summaryLog(Log)
do_exit(1L)
}
size <- file.info(pkg)$size
Sys.setenv("_R_CHECK_SIZE_OF_TARBALL_" = size)
## this assumes foo_x.y.tar.gz unpacks to foo, but we are about
## to test that.
pkg <- file.path(dir, pkgname0)
}
if (!dir.exists(pkg)) {
checkingLog(Log, "package directory")
errorLog(Log,
gettextf("package directory %s does not exist",
sQuote(pkg)))
summaryLog(Log)
do_exit(1L)
}
setwd(pkg)
pkgdir <- getwd()
thispkg_src_subdirs <- thispkg_subdirs
if (thispkg_src_subdirs == "yes-maybe") {
## now see if there is a 'configure' file
## configure files are only used if executable, but
## -x is always false on Windows.
if (WINDOWS) {
if (file_test("-f", "configure")) thispkg_src_subdirs <- "no"
} else {
if (file_test("-x", "configure")) thispkg_src_subdirs <- "no"
}
}
setwd(startdir)
.unpack.time <- Sys.time()
## report options used
opts <- opts0
if (!do_codoc) opts <- c(opts, "--no-codoc")
if (!do_examples && !spec_install) opts <- c(opts, "--no-examples")
if (!do_tests && !spec_install) opts <- c(opts, "--no-tests")
if (!do_manual && !spec_install) opts <- c(opts, "--no-manual")
if (ignore_vignettes) opts <- c(opts, "--ignore-vignettes")
else {
if (!do_vignettes && !spec_install)
opts <- c(opts, "--no-vignettes")
if (!do_build_vignettes && !spec_install)
opts <- c(opts, "--no-build-vignettes")
}
if (use_gct) opts <- c(opts, "--use-gct")
if (use_valgrind) opts <- c(opts, "--use-valgrind")
if (!stop_on_test_error) opts <- c(opts, "--no-stop-on-test-error")
if (as_cran) opts <- c(opts, "--as-cran")
if (length(opts) > 1L)
messageLog(Log, "using options ", sQuote(paste(opts, collapse=" ")))
else if (length(opts) == 1L)
messageLog(Log, "using option ", sQuote(opts))
if(identical(config_val_to_logical(Sys.getenv("_R_CHECK_NO_STOP_ON_TEST_ERROR_",
"FALSE")),
TRUE)) {
stop_on_test_error <- FALSE
}
if (!nzchar(libdir)) { # otherwise have set R_LIBS above
libdir <- pkgoutdir
Sys.setenv(R_LIBS = path_and_libPath(libdir, R_LIBS))
}
if (WINDOWS && grepl(" ", libdir)) # need to avoid spaces in libdir
libdir <- gsub("\\", "/", utils::shortPathName(libdir), fixed = TRUE)
## Package sources from the R distribution are special. They
## have a 'DESCRIPTION.in' file (instead of 'DESCRIPTION'),
## with Version and License fields containing '@VERSION@' for
## substitution by configure. Earlier bundles had packages
## containing DESCRIPTIION.in, hence the extra check for
## Makefile.in.
is_base_pkg <- is_rec_pkg <- FALSE
if (file.exists(f <- file.path(pkgdir, "DESCRIPTION.in")) &&
file.exists(file.path(pkgdir, "Makefile.in"))) {
desc <- try(read.dcf(f))
if (inherits(desc, "try-error") || !length(desc)) {
errorLog(Log, "File DESCRIPTION exists but is not in correct format")
summaryLog(Log)
do_exit(1L)
}
desc <- desc[1L, ]
if (identical(desc["Priority"], c(Priority = "base"))) { # Priority might be missing
messageLog(Log, "looks like ", sQuote(pkgname0),
" is a base package")
messageLog(Log, "skipping installation test")
is_base_pkg <- TRUE
pkgname <- desc["Package"] # should be same as pkgname0
}
}
this_multiarch <- multiarch
if (!is_base_pkg) {
desc <- check_description()
pkgname <- desc["Package"]
is_rec_pkg <- identical(desc["Priority"], c(Priority = "recommended"))
## Check if we have any hope of installing
OS_type <- desc["OS_type"]
if (do_install && !is.na(OS_type)) {
if (WINDOWS && OS_type != "windows") {
messageLog(Log, "will not attempt to install this package on Windows")
do_install <- FALSE
}
if (!WINDOWS && OS_type == "windows") {
messageLog(Log, "this is a Windows-only package, skipping installation")
do_install <- FALSE
}
} else OS_type <- NA
check_incoming <- Sys.getenv("_R_CHECK_CRAN_INCOMING_", "NA")
check_incoming <- if(check_incoming == "NA") as_cran else {
config_val_to_logical(check_incoming)
}
check_incoming_remote <- Sys.getenv("_R_CHECK_CRAN_INCOMING_REMOTE_", "NA")
check_incoming_remote <- if(check_incoming_remote == "NA") as_cran else {
config_val_to_logical(check_incoming_remote)
}
if (check_incoming) check_CRAN_incoming(!check_incoming_remote)
## <NOTE>
## We want to check for dependencies early, since missing
## dependencies may make installation fail, and in any case we
## give up if they are missing. But we don't check them if
## we are not going to install and hence not run any code.
## </NOTE>
if (do_install) {
topfiles0 <- dir(pkgdir)
check_dependencies()
} else topfiles0 <- NULL
check_sources()
checkingLog(Log, "if there is a namespace")
## careful: we need a case-sensitive match
if ("NAMESPACE" %in% dir(pkgdir))
resultLog(Log, "OK")
else if (file.exists(file.path(pkgdir, "NAMESPACE"))) {
errorLog(Log,
"File NAMESPACE does not exist but there is a case-insenstiive match.")
summaryLog(Log)
do_exit(1L)
} else if (dir.exists(file.path(pkgdir, "R"))) {
errorLog(Log)
wrapLog("All packages need a namespace as from R 3.0.0.\n",
"R CMD build will produce a suitable starting point,",
"but it is better to handcraft a NAMESPACE file.")
maybe_exit(1L)
} else {
noteLog(Log)
wrapLog("Packages without R code can be installed without",
"a NAMESPACE file, but it is cleaner to add",
"an empty one.")
}
## we need to do this before installation
if (R_check_executables) check_executables()
## (Alternatively, could use .unpack.time.)
check_dot_files(check_incoming)
setwd(pkgdir)
allfiles <- check_file_names()
if (R_check_permissions) check_permissions(allfiles)
if (!is_base_pkg && R_check_serialization) {
## We should not not do this if there is a dependence
## on R >= 3.5.0, and we have to check that on the sources.
db <- .read_description("DESCRIPTION")
Rver <-.split_description(db, verbose = TRUE)$Rdepends2
if(length(Rver) && Rver[[1L]]$op == ">="
&& Rver[[1L]]$version >= "3.5.0") {
## skip
} else check_serialization(allfiles)
}
setwd(startdir)
## record this before installation.
## <NOTE>
## Could also teach the code to check 'src/Makevars[.in]'
## files to use .unpack.time.
## (But we want to know if the sources contain
## 'src/Makevars' and INSTALL re-creates this.)
## </NOTE>
makevars <-
Sys.glob(file.path(pkgdir, "src",
c("Makevars.in", "Makevars")))
makevars <- basename(makevars)
if (do_install) {
check_install()
if(R_check_pkg_sizes) check_install_sizes()
}
if (multiarch) {
if (force_multiarch) inst_archs <- archs
else {
## check which architectures this package is installed for
if (dir.exists(dd <- file.path(libdir, pkgname, "libs"))) {
inst_archs <- dir(dd)
## xlsReadWrite has spurious subdir 'template'
inst_archs <- inst_archs[inst_archs %in% archs]
if (!identical(inst_archs, archs)) {
if (length(inst_archs) > 1)
printLog0(Log,
"NB: this package is only installed for sub-architectures ",
paste(sQuote(inst_archs), collapse=", "), "\n")
else {
printLog0(Log,
"NB: this package is only installed for sub-architecture ",
sQuote(inst_archs), "\n")
if(inst_archs == .Platform$r_arch)
this_multiarch <- FALSE
}
}
} else this_multiarch <- FALSE # no compiled code
}
if (this_multiarch && length(R_check_skip_arch))
inst_archs <- inst_archs %w/o% R_check_skip_arch
}
} else check_incoming <- FALSE ## end of if (!is_base_pkg)
elibs <- if(is_base_pkg) character()
else if(R_check_depends_only)
setRlibs(pkgdir = pkgdir, libdir = libdir)
else if(R_check_suggests_only)
setRlibs(pkgdir = pkgdir, libdir = libdir, suggests = TRUE)
else character()
elibs_tests <- if(is_base_pkg) character()
else if(R_check_depends_only)
setRlibs(pkgdir = pkgdir, libdir = libdir, tests = TRUE)
else if(R_check_suggests_only)
setRlibs(pkgdir = pkgdir, libdir = libdir, suggests = TRUE)
else character()
setwd(startdir)
check_pkg(pkgdir, pkgname, pkgoutdir, startdir, libdir, desc,
is_base_pkg, is_rec_pkg, thispkg_subdirs, extra_arch)
if (!extra_arch && do_manual) {
setwd(pkgoutdir)
instdir <- file.path(libdir, pkgname)
if (dir.exists(file.path(instdir, "help")))
check_pkg_manual(instdir, desc["Package"])
else
check_pkg_manual(pkgdir, desc["Package"])
}
if (!is_base_pkg && check_incoming && no_examples &&
dir.exists(file.path(pkgdir, "R"))) {
tests_dir <- file.path(pkgdir, test_dir)
if (dir.exists(tests_dir) &&
length(dir(tests_dir, pattern = "\\.(r|R|Rin)$")))
no_examples <- FALSE
vigns <- pkgVignettes(dir = pkgdir)
if (!is.null(vigns) && length(vigns$docs)) no_examples <- FALSE
if (no_examples) {
## figure out if the R code exercises anything
ns <- parseNamespaceFile(basename(pkgdir), dirname(pkgdir))
if(length(ns$exports) || length(ns$exportPatterns) ||
length(ns$exportMethods) || length(ns$S3methods)) {
checkingLog(Log, "for code which exercises the package")
warningLog(Log, "No examples, no tests, no vignettes")
}
}
}
summaryLog(Log)
if(config_val_to_logical(Sys.getenv("_R_CHECK_CRAN_STATUS_SUMMARY_",
"FALSE"))) {
s <- summarize_CRAN_check_status(pkgname)
if(nzchar(s)) {
writeLines(c("", s), Log$con)
}
}
if(Log$errors > 0L)
do_exit(1L)
closeLog(Log)
message("")
} ## end for (pkg in pkgs)
}
###--- end{ .check_packages }
## Indent each element of a character vector and join into one string.
##
## x      : character vector of lines to format.
## indent : number of spaces to prepend to every line.  Defaults to 2L,
##          matching the previously hard-wired behaviour, so existing
##          callers are unaffected.
## Returns a single character string with each element of 'x' prefixed
## by the indent and elements separated by newlines.
.format_lines_with_indent <-
function(x, indent = 2L)
    paste0(strrep(" ", indent), x, collapse = "\n")
## Indent width is now configurable (default 2).
### Local variables:
### mode: R
### page-delimiter: "^###[#-]"
### End:
|
116a7b89f41ef073e0a1836aecfd17b0b4f881ea
|
8218c5e8a362673882278bbf58e42ebcd937dc86
|
/rfp4/S00-env.R
|
6905019c40fb57b66ab8feec324da6f172988994
|
[] |
no_license
|
Sjan1/r-for-proteomics-tsl
|
276fb78c84d82f89f1e5f17080c19c04bd2fa196
|
24a8e08ca3e00f8e73089755aaa00694d5d83a01
|
refs/heads/master
| 2021-05-04T13:58:42.792222
| 2020-05-11T18:40:21
| 2020-05-11T18:40:21
| 120,327,192
| 1
| 2
| null | 2018-02-05T16:00:33
| 2018-02-05T16:00:33
| null |
UTF-8
|
R
| false
| false
| 454
|
r
|
S00-env.R
|
# Environment setup script: loads the packages used by the analysis
# scripts.  The commented-out blocks below record the one-time
# installation commands and are kept for reference only.
#
# --- one-time installation (BiocManager route, Bioconductor >= 3.8) ---
#install.packages("BiocManager")
#BiocManager::install()
#install.packages("MSnbase")
#if(!requireNamespace("BiocManager",quietly = TRUE))
#  install.packages("BiocManager")
#BiocManager::install("MSnbase", version = "3.8")
# --- legacy installation route (biocLite, Bioconductor < 3.8) ---------
#source("http://www.bioconductor.org/biocLite.R")
#library("BiocInstaller")
#biocLite("MSnbase")
#biocLite("limma")
# --- packages attached for every session ------------------------------
library("MSnbase")   # mass-spectrometry data containers and processing
library("ggplot2")   # plotting
library("dplyr")     # data manipulation verbs
library("magrittr")  # %>% pipe operator
library("readr")     # fast delimited-text import
library("MSnID")     # MS/MS identification data handling
|
c1790a5600a3a7661dea943825de88504c009d84
|
03c99906a94c70e9a13e7714aad996f461f339c1
|
/man/rare_Rao.Rd
|
67d9383e0c4e7cdc097830704a5f03986091313e
|
[] |
no_license
|
cran/adiv
|
6a111f6a1ef39fe302a2f882b9a9d04e7d652c04
|
d65d6e0301e4611a94a91933299bff1fdc06d96b
|
refs/heads/master
| 2022-10-28T08:07:33.352817
| 2022-10-06T12:40:04
| 2022-10-06T12:40:04
| 97,764,074
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,384
|
rd
|
rare_Rao.Rd
|
\name{rare_Rao}
\alias{rare_Rao}
\title{
Functional Rarefaction for Species Abundance Data
}
\description{
The function \code{rare_Rao} computes distance-based rarefaction curves using species abundance data. It finds the expected functional diversity (if functional distances between species are used) as a function of the sampling effort. Two approaches are available: an analytical solution and a resampling approach.}
\usage{
rare_Rao(comm, dis, sim = TRUE, resampling = 999, formula = c("QE", "EDI"))
}
\arguments{
\item{comm}{
a data frame or a matrix with samples as rows, species as columns, and abundance or frequency as entries. If presences/absences (1/0) are given, the relative abundance of a given species in a community of S species will be considered equal to 1/S.
}
\item{dis}{
an object of class \code{dist} containing pairwise distances among species. The distance matrix should be squared Euclidean or simply Euclidean (see definition and details in Appendix S1 of Ricotta et al. (2012)). Alternatively, if \code{dis} is \code{NULL}, the Gini-Simpson index is used.
}
\item{sim}{
a logical; if \code{TRUE}, the resampling approach is used; if \code{FALSE}, the analytical solution is given.
}
\item{resampling}{
a numeric; number of times data are resampled to calculate the mean functional rarefaction curve (used if \code{sim=TRUE}).
}
\item{formula}{
either \code{"QE"} (default) or \code{"EDI"}. See details.
}
}
\details{
If \code{formula = "QE"}, the definition of the quadratic entropy is:
\deqn{QE(\mathbf{p}_i,\mathbf{D})=\sum_{k=1}^S\sum_{l=1}^S p_{k|i}p_{l|i}d_{kl}}{QE(p_i,D)=sum_k,l p_k|i p_l|i d_kl}
where \eqn{\mathbf{p}_i=(p_{1|i}, ..., p_{k|i}, ..., p_{S|i})}{p_i=(p_1|i, ..., p_k|i, ..., p_S|i)} is the vector of relative species abundance within sample \emph{i}; \emph{S} is the number of species; \eqn{\mathbf{D}=(d_{kl})}{D=(d_kl)} is the matrix of (phylogenetic or functional) dissimilarities among species, and \eqn{d_{kl}}{d_kl} is the (phylogenetic or functional) dissimilarity between species
\emph{k} and \emph{l}.
If \code{formula = "EDI"}, the definition of the quadratic entropy is:
\deqn{EDI(\mathbf{p}_i,\mathbf{D})=\sum_{k=1}^S\sum_{l=1}^S p_{k|i}p_{l|i}\frac{d_{kl}^2}{2}}{EDI(p_i,D)=sum_k,l p_k|i p_l|i (d_kl^2)/2}
EDI stands for the Euclidean Diversity Index of Champely and Chessel (2002) (equation 3 in Pavoine et al. 2004).
In both cases, if \code{dis = NULL}, the quadratic entropy is equal to Gini-Simpson entropy:
\deqn{H_{GS}(\mathbf{p}_i)=1 - \sum_{k=1}^S p_{k|i}^2}{H_GS(p_i)=1 - sum_k (p_k|i)^2}
}
\value{
If \code{sim = TRUE}, the function returns a data frame containing the Expected Rao Quadratic Entropy (column 'ExpRao'), the limits of the 95\% Confidence Interval (columns 'LeftIC' and 'RightIC') for each subsample dimension (M) out of the total set of samples (N). If \code{sim = FALSE}, the function returns a data frame containing the analytical solution for the Expected Rao Quadratic Entropy (column 'ExpRao') for each subsample dimension (M) out of the total set of samples (N).
}
\references{
Ricotta, C., Pavoine, S., Bacaro, G., Acosta, A. (2012) Functional rarefaction for species abundance data. \emph{Methods in Ecology and Evolution}, \bold{3}, 519--525.
Champely, S. and Chessel, D. (2002) Measuring biological diversity using Euclidean metrics. \emph{Environmental and Ecological Statistics}, \bold{9}, 167--177.
Pavoine, S., Dufour, A.B., Chessel, D. (2004) From dissimilarities among species to dissimilarities among communities: a double principal coordinate analysis. \emph{Journal of Theoretical Biology}, \bold{228}, 523--537.
}
\author{
Giovanni Bacaro
and
Sandrine Pavoine \email{sandrine.pavoine@mnhn.fr}
}
\seealso{
\code{\link{QE}}
}
\examples{
\dontrun{
if(require(ade4)){
data(aviurba, package="ade4")
# Trait-based distances between bird species:
distances<-dist.ktab(ktab.list.df(list(aviurba$traits)), type = "N")
# The distances should be squared Euclidean;
# note that Euclidean distances can be used
# as they also are squared Euclidean.
# Species abundances in sites
abundances<- aviurba$fau
# Rarefaction of functional diversity
rare_Rao(abundances, distances, sim = TRUE, resampling = 100)
rare_Rao(abundances, distances, sim = FALSE)
}
}
}
\keyword{models}
|
bf8478b8804e5cbacfd3b00bfcd19c734133316b
|
c84970ea7163ace7b97839d529b0e171348fc35a
|
/plot1.R
|
f699e3114d45d5e16114dc868ad8df64e3db2354
|
[] |
no_license
|
JIsernhagen/ex_res_assgn_1
|
1e07b5a0bd44e960039ef60186739d8d671bdc47
|
0100a8bf36266c181badfb3f96c611e6ae55c23b
|
refs/heads/master
| 2020-08-06T16:57:35.331912
| 2015-09-12T03:12:01
| 2015-09-12T03:12:01
| 42,342,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 925
|
r
|
plot1.R
|
# "household_power_consumption.txt" must be unzipped in the working directory in order for this to function.
#read in the data, subset to the relevant dates, and transform/combine date and time into useful timestamp.
# Read the full dataset: ';'-separated, '?' marks missing values; Date and
# Time are read as character so they can be combined into a timestamp below.
consumption <- read.table("household_power_consumption.txt", header = TRUE, sep=";", na.strings = "?", colClasses = c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
# Keep only the two days of interest (dates are in d/m/Y text format).
consumption <- subset(consumption, (Date == "1/2/2007" | Date == "2/2/2007"))
# Combine Date and Time strings into a single POSIXlt timestamp column.
consumption <- within(consumption, Datetime <- as.POSIXlt(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S"))
#construct the plot
library(datasets)  # NOTE(review): not actually needed for hist(); kept as-is
hist(consumption$Global_active_power, col="red", breaks = 12, main = "Global Active Power", xlab="Global Active Power (kilowatts)")
#save the plot
dev.copy(png, file= "plot1.png") ##Copy the screen plot to a png file (default 480x480 px)
dev.off() ## don't forget to close the png device.
|
5e99105b30a94a81f84d1a1faee400cad33535b8
|
bd238017a55ea6dc2a1674d8af58a781a7958094
|
/cachematrix.R
|
fdec9289b27692ec39c5bbe0d0796e4fb9cb5e7a
|
[] |
no_license
|
synchunt/ProgrammingAssignment2
|
65aa24445841d845299e09659fa6b3c0791d45da
|
30b9f70e213ba7cbcc8f5085312e3e6dc24e6105
|
refs/heads/master
| 2022-07-08T11:18:34.472911
| 2020-05-10T20:17:25
| 2020-05-10T20:17:25
| 262,659,501
| 0
| 0
| null | 2020-05-09T21:09:53
| 2020-05-09T21:09:52
| null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
cachematrix.R
|
## Create a special "matrix" object that can cache its inverse.
##
## x : the matrix to wrap (defaults to an empty 1x1 NA matrix).
## Returns a list of accessor closures sharing this environment:
##   set(o)         : replace the stored matrix and invalidate the cache
##   get()          : return the stored matrix
##   setInv(inverse): store a precomputed inverse in the cache
##   getInv()       : return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  inver <- NULL
  setMat <- function(o){
    x <<- o
    inver <<- NULL  # new matrix => any cached inverse is stale
  }
  get <- function(){x}
  ## BUG FIX: the original ignored the 'inverse' argument and recomputed
  ## solve(x) here, defeating the whole purpose of caching the value
  ## supplied by the caller.
  setInv <- function(inverse) {inver <<- inverse}
  getInv <- function() {inver}
  list(set = setMat,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), the cached value is returned instead of being recomputed,
## since matrix inversion can be expensive.
##
## x   : a cache object as returned by makeCacheMatrix()
## ... : further arguments passed on to solve()
## Returns a matrix that is the inverse of the matrix stored in 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inver <- x$getInv()
  if(!is.null(inver)){
    message("getting cached matrix")
    return(inver)
  }
  ## BUG FIX: the original had 'mat <- x$get' (the function object itself,
  ## not its result), which made solve() fail; the accessor must be called.
  mat <- x$get()
  inver <- solve(mat, ...)
  x$setInv(inver)
  inver
}
|
93bc688653ab807c9913587039743b2a723b6b8b
|
1bfe87671e72dcdfa187d9df747a3b4f8eae913a
|
/R/loadCrimedata.R
|
1531bda247c4953b25992febb74ce8b630f625d6
|
[] |
no_license
|
ian-flores/mapPR
|
592b5e925adf2a0a7ca25caaa6aa4f1827c0c430
|
8492574424790251b385933933eef9082cfe7f27
|
refs/heads/master
| 2020-04-05T15:57:05.666966
| 2018-11-14T02:40:09
| 2018-11-14T02:40:09
| 156,990,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 483
|
r
|
loadCrimedata.R
|
library(tidyverse)
library(lubridate)
library(rio)
## Download Puerto Rico crime data from the OpenDataPR GitHub repository
## and restrict it to a date window on incident_date.
##
## .data_format : basename of the feather file to fetch (default 'all_data').
## .intial_date : lower bound (exclusive) for incident_date, "YYYY-MM-DD".
##                NOTE: the parameter keeps the original's misspelling
##                ("intial") so existing callers using the named argument
##                keep working.
## .final_date  : upper bound (exclusive) for incident_date, "YYYY-MM-DD".
## Returns a data frame of incidents strictly between the two dates.
mapPR_crime <- function(.data_format = 'all_data',
                        .intial_date = '2017-01-01',
                        .final_date = '2017-12-01') {
    .base_url <- 'https://github.com/OpenDataPR/PR_Crime_Data/raw/master/data/'
    .url <- paste0(.base_url, .data_format, '.feather')
    .data <- rio::import(.url)
    ## BUG FIX: the original filtered on '.initial_date', which does not
    ## exist (the parameter is spelled '.intial_date'), so every call
    ## failed with an "object not found" error.
    .data <- .data %>%
        filter(incident_date > ymd(.intial_date),
               incident_date < ymd(.final_date))
    return(.data)
}
|
0f0b29aba3fd69e2a5c5283746d5c4e5ee1cf9b3
|
c507b15b53be5ddfae81bee94b5332ddd9ef5886
|
/code/site_setup/setup_my17_LOS_abundance.R
|
23dbc398508aaea315b4de6a9a65f5148829afff
|
[] |
no_license
|
ppgibson/ELH_AnnualReport
|
3dfe7bbd685553dc5cd2e22014b4af16742048e9
|
704255af1404e4594933f179d18d6e34fab96f10
|
refs/heads/master
| 2022-11-05T23:31:04.906269
| 2020-06-24T23:52:44
| 2020-06-24T23:52:44
| 274,791,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
setup_my17_LOS_abundance.R
|
## MY 17 LOSTINE
## ANALYSIS OPTIONS FOR ABUNDANCE CALCULATION
## Site/year configuration script: it sets the option variables that the
## shared setup code (0_Setup.R, sourced at the bottom) reads.  The
## variable names form that interface -- do not rename them here.
# Current analysis run
my.cur <- 2017
my.txt <- "my17"
site.cur <- "LOS"
# Analysis options
include.INT.catch <- TRUE  #Default = TRUE. Std ELH policy is to include catch numbers from INT days in simple abundance estimates.
use.prv.strata <- FALSE    #Default = FALSE. Use standard, algorithm-based stratification (FALSE); or use BG's, previous stratification (TRUE).
sub.sample <- TRUE         #Was there any subsampling at this site/year?
abund.est <- "Petersen"    #Petersen: Nhat = catch * (mark/rec) Simplest abundance estimate, as described in annual report and Thedinga etal 1994.
                           #Chapman: Nhat = catch * (mark+1)/(rec+1) Apparently corrects for bias in small sample size, esp of rec.
med.date.est <- "exp.catch" #Determines which method for calculating median date to report. {catch, exp.catch, gam}
# Custom mods
# Usable INT days
INT.mods <- c("2016-10-24") #Vector of INT dates determined, based on subjective assessment, to be usable for mark-recap anl.
                            # NOTE(review): date falls in calendar 2016 because migration year 2017 spans fall 2016 -- confirm against site records.
# Adjustments to stratification
strat.mods <- list(chs.c.wks = c(39, 40, 41), #J.wks manually assigned to stratum "c" for chs
                   chs.e.wks = c(42, 43, 44) )
# Adjustments to CAL day assignment
# none for this data set
# Run standard Setup and functions
source("code//0_Setup.R")               # shared setup; consumes the variables above
source("code//func_CustomFunctions.R")  # shared helper functions
## END ##
|
8a4749574bc23a52dc6266f39bcbed922599ead7
|
251f6a072167f3de27c185dd766cbb5a8125d0ec
|
/Code/old/SF.Gibbs.Rerun.r
|
b98992f7a40adde74cda92ad63d90cf2d5def59f
|
[] |
no_license
|
bellland/StaCC-Model
|
6e62571268c7322d84f1b45d91aa553d92b04dd8
|
87e88339568bd0e084ee7cec505fd06ef60a30b2
|
refs/heads/master
| 2021-03-12T23:39:05.008912
| 2015-02-05T16:50:59
| 2015-02-05T16:50:59
| 17,839,579
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,882
|
r
|
SF.Gibbs.Rerun.r
|
# Run Gibbs Sampler for Sap Flux Model
#
# Loads a previously-saved workspace (the 17th file matching 'TC_1_') and runs
# ng further Gibbs iterations. Each iteration updates process parameters,
# conductance/flux states and variance terms, records a row in the *gibbs
# trace matrices, accumulates running sums (and sums of squares) of the state
# matrices after burn-in, periodically adapts the Metropolis proposal
# covariances, and periodically saves the whole workspace.
#
# NOTE(review): every object and update_* function used below is assumed to
# come from the loaded .RData workspace -- confirm against the project's
# setup scripts before modifying.
setwd('/Volumes/dbell9$/Sap Flux')
files <- list.files(pattern='TC_1_')
load(files[17])
for(g in 1:ng){
	# Re-scale species-level variances unless running the SECTION variant.
	if(!SECTION){
		tmp <- rescale()
		specvars <- tmp$sv2
	}
	ptmp2 <- update_processpars()	#process parameters
	aspec <- ptmp2$a
	blitespec <- ptmp2$bl
	bmoistspec <- ptmp2$bm
	gspec <- ptmp2$gsr
	# ul/um/ug count Metropolis acceptances since the last adaptation step.
	ul <- ul + ptmp2$ul
	um <- um + ptmp2$um
	ug <- ug + ptmp2$ug
	#priorgt.hi[Q[,1] < 1,] <- .2*gspec[site.all[,'specindex']] #nighttime conductance can be 20% of maximum
	Gsmat <- gssMat(gspec,aspec,blitespec,bmoistspec) #steady state conductance
	# Clamp the steady-state conductance to its prior bounds.
	Gsmat[Gsmat>priorgt.hi] <- priorgt.hi[Gsmat>priorgt.hi]
	Gsmat[Gsmat<priorgt.lo] <- priorgt.lo[Gsmat<priorgt.lo]
	aig <- update_rint()
	va <- update_va()
	# Conductance state update: capacitance (CAP) and non-capacitance variants.
	if(CAP) {
		gtmp <- update_gt_CAP()
	}
	if(!CAP) gtmp <- update_gt_NOCAP()
	Gtmat <- gtmp$Gtmat
	Gtmat[Gtmat>priorgt.hi] <- priorgt.hi[Gtmat>priorgt.hi]
	Gtmat[Gtmat<priorgt.lo] <- priorgt.lo[Gtmat<priorgt.lo]
	if(CAP) {
		Jtmat <- gtmp$Jtmat
		Wtmat <- gtmp$Wtmat
	}
	if(!CAP) Jtmat <- Gtmat*e.qt
	# Predicted sap flux at each probe given the current states.
	Jpred <- matrix(0,nt,nprobe)
	pj <- pred_jt(aig,bag,bstat,SECTION)
	Jpred <- Jtmat[,site.sensor]*matrix(pj,nt,nprobe,byrow=T)
	colnames(Jpred) <- probe
	pt1 <- update_datapars()
	bag <- pt1$bag
	if(BS) {
		bstat <- pt1$bstat
		ubs <- ubs + pt1$ubs
	}
	if(!BS) bstat <- rep(1,nspec)
	ub <- ub + pt1$ub
	# Variance / error terms.
	sigma <- update_sigma()
	verror <- update_verror()
	tau <- update_tau()
	if(CAP){
		tmp <- update_alpha()
		alpha <- tmp$alpha
		uk <- tmp$uk + uk
	}
	#
	# Record this iteration in the MCMC trace matrices.
	dgibbs[g,] <- c(as.vector(bag),bstat)
	ggibbs[g,] <- as.vector(gspec)
	agibbs[g,] <- as.vector(aspec)
	lgibbs[g,] <- as.vector(blitespec)
	mgibbs[g,] <- as.vector(bmoistspec)
	vgibbs[g,] <- c(sigma,verror,va,tau,alpha)
	rgibbs[g,] <- aig
	# At burn-in, (re)initialize running sums / sums-of-squares accumulators.
	if(g == gburnin) {
		Ggibbs <- matrix(0,nt,dim(site.all)[1])
		colnames(Ggibbs) <- paste(species[site.all[,'specindex']],
			SITE[site.all[,'SITE']],sep='.')
		Ggibbs2 <- Ggibbs
		Jgibbs <- matrix(0,nt,dim(site.all)[1])
		colnames(Jgibbs) <- paste(species[site.all[,'specindex']],
			SITE[site.all[,'SITE']],sep='.')
		Jgibbs2 <- Jgibbs
		Wgibbs <- matrix(0,nt,dim(site.all)[1])
		colnames(Wgibbs) <- paste(species[site.all[,'specindex']],
			SITE[site.all[,'SITE']],sep='.')
		Wgibbs2 <- Wgibbs
	}
	Ggibbs <- Ggibbs + Gtmat
	Ggibbs2 <- Ggibbs2 + Gtmat^2
	Jgibbs <- Jgibbs + Jtmat
	Jgibbs2 <- Jgibbs2 + Jtmat^2
	if(CAP){
		Wgibbs <- Wgibbs + Wtmat
		Wgibbs2 <- Wgibbs2 + Wtmat^2
	}
	# Console progress report for the current iteration.
	# NOTE(review): paste(ug, sep=" ") on a single vector is a no-op --
	# possibly collapse=" " was intended; confirm with the author.
	print(g)
	print('data model')
	print(c(bag,bstat))
	print(aig)
	print('process model')
	print(rbind(gspec,aspec))
	print(blitespec)
	print(bmoistspec)
	print('errors and algorithms')
	print(c(sigma,verror,va,tau,alpha))
	print(c("ga ",paste(ug,sep=" ")),quote=F)
	print(c("light ",paste(ul,sep=" ")),quote=F)
	print(c("moist ",paste(um,sep=" ")),quote=F)
	print(c("data ",paste(ub,sep=" ")),quote=F)
	print(c("alpha ",paste(uk,sep=" ")),quote=F)
	# Adaptive Metropolis step: at iterations in kg, widen or shrink proposal
	# covariances based on acceptance counts over the last 40 iterations,
	# then reset the acceptance counters.
	if(g %in% kg){
		if(min(ug) > 15) pcovga <- 2*var(cbind(ggibbs,agibbs)[(g-40):(g-1),])#*Ig
		if(min(ug) < 5) pcovga <- pcovga*.2
		if(min(ul) > 15) pcovQ <- 2*var(lgibbs[(g-40):(g-1),])#*Ik
		if(min(ul) < 5) pcovQ <- pcovQ*.2
		if(min(um) > 15) pcovM <- 2*var(mgibbs[(g-40):(g-1),])#*Ik
		if(min(um) < 5) pcovM <- pcovM*.2
		if(min(ub) > 12) pcovba <- 2*var(dgibbs[(g-40):(g-1),-grep('bstat',colnames(dgibbs))])*Ib
		if(min(ub) < 5) pcovba <- pcovba*.2
		if(CAP & priormat['loalpha',1]!=priormat['hialpha',1]){
			tK <- pcovK
			if(min(uk) > 12)
				tK <- 2*var(vgibbs[(g-40):(g-1),grep('alpha',colnames(vgibbs))])
			if(min(uk) < 5)
				tK <- pcovK*.5
			if(nspec==1)
				if(tK>0) pcovK <- tK
			if(nspec>1)
				if(min(diag(tK))>0) pcovK <- tK
		}
		if(BS){
			if(min(ubs) > 12) pcovbs <- apply(dgibbs[(g-40):(g-1),grep('bstat',colnames(dgibbs))],2,sd)
			if(min(ubs) < 5) pcovbs <- pcovbs*.1
		}
		ul <- ul*0
		um <- um*0
		ub <- ub*0
		ubs <- ubs*0
		ug <- ug*0
		uk <- uk*0
		#############
		if(!REMOTE) loop.conv()
		#############
	}
	# Periodic checkpoint: save the entire workspace under a descriptive name.
	if(g %in% saveg)
		save.image(paste(TITLE,'_',ng,'_',yrtrunc,'_TC_',TC,
			'_SP_',SP,
			'_flat_',FLAT,
			'_rand_',RAND,
			'_bstat_',BS,
			'_DEF_',DEF,
			'_REW_',RELM,
			'.RData',sep=''))
}
|
d19443c88532265b2873cc8b2150a17b06efdf60
|
a286b67e469be262bfe0b40ca484ae270da6a1e5
|
/TEMP/Time Series Attempts/time_series_plots.R
|
b890c6a959947fee8ebf44840c818cdd11ca3725
|
[] |
no_license
|
alexazhu/PHIGHT-COVID
|
da9c213c3d9b4a01f8327bf9e7615fa3025d71ec
|
3258e9559335b4d144a0945d583cde57226ea309
|
refs/heads/main
| 2023-07-09T08:18:04.490684
| 2021-08-09T07:31:17
| 2021-08-09T07:31:17
| 338,371,542
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,311
|
r
|
time_series_plots.R
|
# Time-series, boxplot, and map graphics for county-level COVID-19 deaths,
# stratified by the majority K-12 teaching method (On Premises / Hybrid /
# Online Only).
# Relies on objects created by step2_data_wrangle.R: county_policy_wide,
# cases, OH_K12, case_mobility, wide_teaching_enroll, ohio_map, centroids2.
library(tidyverse)
require(scales)
source("step2_data_wrangle.R")
# Fix the factor order so legends/colors are consistent across all plots.
county_policy_wide$major_teaching <- factor(county_policy_wide$major_teaching,
                                            levels = c("On Premises",
                                                       "Hybrid",
                                                       "Online Only"))
# Date at which the cumulative-death curves intersect (marked on the plot).
date.intercept <- as.Date("2020-11-24")
# z value for the confidence bands.
# NOTE(review): qnorm(0.95) is the one-sided z (~1.64); a two-sided 95%
# band would use qnorm(0.975) (~1.96) -- confirm which was intended.
confidence_level <- .95
z_cl <- qnorm(confidence_level)
# Join teaching-method policy onto the county case series and compute the
# per-county cumulative death proportion.
case_policy_wide <- cases %>%
  left_join(county_policy_wide[,c("county","major_teaching","Online_Only","Hybrid","On_Premises")],
            by = c("COUNTY" = "county")) %>%
  mutate(death_prop = CUMDEATHS/POPULATION)
# Plot cumulative death proportion over time by the majority teaching method;
# the orange rectangle marks the fall semester.
case_policy_wide%>%
  group_by(DATE, major_teaching) %>%
  drop_na(major_teaching)%>%
  summarise(total_deaths = sum(CUMDEATHS),
            total_pop = sum(POPULATION),
            death_prop = total_deaths/total_pop,
            death_prop_upper = death_prop + z_cl*sqrt(death_prop*(1 - death_prop)/total_pop),
            death_prop_lower = death_prop - z_cl*sqrt(death_prop*(1 - death_prop)/total_pop),
            .groups = "drop") %>%
  ggplot(aes(x = DATE, y = death_prop*1000, group = major_teaching))+
  geom_rect(data=case_policy_wide[1,],
            aes(xmin=as.Date("2020/08/18"), xmax=as.Date("2020/12/12"),
                ymin=-Inf,ymax=Inf),
            color = NA,alpha=0.2, show.legend = F, fill = "orange") +
  geom_line(aes(color = major_teaching),size = 1, alpha = .8) +
  geom_ribbon(aes(ymin = 1000*death_prop_lower, ymax = 1000*death_prop_upper,
                  fill= major_teaching),
              alpha = .3, show.legend = F)+
  geom_vline(xintercept = date.intercept, linetype = "dashed") +
  annotate("text",x = date.intercept,y = 1.5,
           label = date.intercept,
           hjust = 1.1,size=5) +
  theme_bw() +
  labs(x = "Date", y = "Cumulative Death Incidence / 1,000 people",
       color = "Majority Teaching Method") +
  theme(legend.position = "")+
  theme(axis.title = element_text(size=16),axis.text = element_text(size=15))
#theme(legend.position = "bottom")+
#theme(legend.title = element_text(size=13),legend.text = element_text(size=13),legend.background = element_rect(fill = alpha("orange",0.0)),legend.key.size = unit(1.4,"lines"),title = element_text(size=12.9))
#ggsave("totaldeath.jpg", width = 5,height = 5)
######################### Boxplots ############
# Determine a school reopen date for each county.
district_policies <- OH_K12 %>%
  distinct(county,county_enroll,leaid,district_enroll,schooltemporaryshutdown,opendategrouped,teachingmethod)
# For each county, pick the reopen date covering the largest share of
# enrolled students.
major_opendate <- district_policies%>%
  filter(!schooltemporaryshutdown %in% c('Closed indefinitely','Pending','Unknown'))%>%
  group_by(county,county_enroll,opendategrouped)%>%
  summarise(n_opendate = sum(district_enroll))%>% # number of students under certain date for each county
  mutate(prop_opendate = round(n_opendate/county_enroll,2))%>% # proportion
  group_by(county)%>%
  #filter(prop_opendate>0.6)%>%
  slice(which.max(prop_opendate))%>% # filter large proportions of students with same reopen dates #can be replaced with # slice(which.max(prop_opendate))#
  mutate(reopen_3w_after = opendategrouped + 21)%>%
  select(-n_opendate)
# Join the most common open date for each county onto the case data.
opendate_cases <- case_policy_wide%>%
  inner_join(major_opendate[,c("county","opendategrouped")],by=c('COUNTY'='county'))
# Box plots over the fall semester (reopen date through 2020-12-15).
library(PMCMRplus)
require(DescTools)
# NOTE(review): diff(CUMDEATHS) inside mutate() relies on exactly two rows
# (first and last date) remaining per county after the row_number() filter.
fall_cases <- opendate_cases %>%
  filter(DATE >= opendategrouped & DATE <= as.Date("2020/12/15")) %>%
  group_by(COUNTY) %>%
  arrange(DATE) %>%
  filter(row_number()==1 | row_number()==n()) %>%
  mutate(death_incidence = diff(CUMDEATHS),
         death_incidence_per_1000 = death_incidence*1000/POPULATION) %>%
  distinct(COUNTY,POPULATION,major_teaching,
           death_incidence,death_incidence_per_1000)
# One-way ANOVA of fall death incidence by teaching method, then Duncan
# post-hoc pairwise comparisons formatted for stat_pvalue_manual().
fall_major_teaching.aov <- aov(death_incidence_per_1000 ~ major_teaching,data = fall_cases)
summary(fall_major_teaching.aov) # p-value of .012
stat.test <- PostHocTest(fall_major_teaching.aov, method = "duncan")$major_teaching %>%
  as.data.frame()%>%
  rownames_to_column("group") %>%
  separate(group,"-", into = c("group1","group2")) %>%
  mutate(pval = round(pval,3),
         p = case_when(pval <= .01~ "**",
                       pval <= .05 ~ "*",
                       TRUE ~ "NS"))%>%
  select(group1, group2, pval, p)
library(ggpubr)
ggplot(fall_cases,aes(y = death_incidence_per_1000, x = major_teaching)) +
  geom_boxplot(aes(fill = major_teaching))+
  stat_compare_means(method = "anova")+
  stat_pvalue_manual(stat.test, label = "p",y.position = 2.5, step.increase = 0.15)+
  ylim(c(0,3.5))+
  theme_bw()+
  labs(y = "Death Incidence / 1,000 people",
       fill = "Majority Teaching Method",
       title = "Death Incidence in the Fall Semester",
       caption = "Pairwise p-values come from Duncan pairwise comparison test") +
  theme(legend.position = "bottom",
        axis.text.x=element_blank())
#ggsave("fall_boxplots.jpg",width = 8.5, height = 5)
# Assign each observation to one of 3 key windows (other dates become NA and
# are dropped below).
interval_cases <- opendate_cases %>%
  mutate(interval = case_when(DATE >= opendategrouped &
                                DATE <= opendategrouped + 21 ~ "Start of School - 3 weeks after Start of School",
                              DATE >= as.Date("2020/11/24") - 10 &
                                DATE<= as.Date("2020/11/24") + 11 ~ "11/14/2020 - 12/05/2020",
                              DATE >= as.Date("2020/12/15") + 21 &
                                DATE <= as.Date("2020/12/15") + 42 ~ "01/05/2021 - 01/26/2020"))%>%
  group_by(COUNTY, interval) %>%
  arrange(DATE) %>%
  filter(row_number()==1 | row_number()==n())%>%
  mutate(death_incidence = diff(CUMDEATHS),
         death_incidence_per_1000 = death_incidence*1000/POPULATION) %>%
  distinct(COUNTY,POPULATION,major_teaching,
           death_incidence,death_incidence_per_1000,interval) %>%
  filter(!is.na(interval))
interval_cases$interval <- factor(interval_cases$interval,
                                  levels = c("Start of School - 3 weeks after Start of School",
                                             "11/14/2020 - 12/05/2020",
                                             "01/05/2021 - 01/26/2020"))
# Box plots of death incidence in the 3 key windows.
ggplot(interval_cases, aes(y = death_incidence_per_1000,
                           x = major_teaching,fill = major_teaching)) +
  geom_boxplot() +
  stat_compare_means(method = "anova")+
  facet_wrap(~interval) +
  theme_bw() +
  labs(y = "Death Incidence / 1,000 people",
       fill = "Majority Teaching Method",
       title = "Death Incidence in 3 Key Windows",
       subtitle = "3 Week Windows",
       caption = "The first window looks at the 3 weeks after the start of school, \nthe second is the widndow around the point of intersection, \nand the last window is 3 weeks after the end of the fall semester.") +
  theme(legend.position = "bottom",
        axis.text.x=element_blank())
#ggsave("int_boxplots.jpg",width = 8.5, height = 5)
# "Derivative" plot: deaths added over a trailing 3-week window, by method.
lag_cases <- case_mobility %>%
  left_join(county_policy_wide[,c("county","major_teaching")],
            by = c("COUNTY" = "county")) %>%
  drop_na(major_teaching)%>%
  select(COUNTY,DATE,CUMDEATHS,POPULATION,major_teaching)%>%
  group_by(COUNTY) %>%
  mutate(lag_total_deaths = lag(CUMDEATHS,21)) %>%
  ungroup()%>%
  group_by(DATE,major_teaching) %>%
  summarise(total_deaths = sum(CUMDEATHS),
            total_deaths_lag = sum(lag_total_deaths),
            total_pop = sum(POPULATION),
            death_prop = total_deaths/total_pop,
            lag_death_prop = total_deaths_lag/total_pop,
            death_prop_inc = (total_deaths-total_deaths_lag)/total_pop,
            .groups = "drop")
peak.date <- as.Date("2020-12-23")
ggplot(lag_cases,aes(x = DATE, y = death_prop_inc*1000,
                     group = major_teaching)) +
  geom_line(na.rm = T, aes(color = major_teaching)) +
  geom_rect(data = lag_cases[1,],
            aes(xmin=as.Date("2020/08/26"), xmax=as.Date("2020/12/12"),
                ymin=-Inf,ymax=Inf),
            color = NA,alpha=0.2, show.legend = F, fill = "orange") +
  geom_vline(xintercept = peak.date, linetype = "dashed")+
  annotate("text",x = peak.date,y = .0005,
           label = peak.date,
           hjust = 1.2) +
  theme_bw() +
  labs(x = "Date",
       y = "Death Proportion / 1,000 people",
       title = "Death Proportion Increase by Teaching Method",
       subtitle = "Yellow area represents Fall Semester",
       caption = "Increase compared to 3 Week Lag",
       color = "Majority Teaching Method") +
  scale_y_continuous(labels = comma) +
  theme(legend.position = "bottom")
# Choropleth map: percent of students Online Only per county.
wide_teaching_enroll%>%
  left_join(ohio_map,by='county')%>%
  mutate(Online_Only= Online_Only*100)%>%
  ggplot() + geom_polygon(aes(x = long, y = lat, group = group, fill = Online_Only), color = "gray") + coord_fixed(1.3) + theme_map() +
  scale_fill_distiller(palette = "Spectral")+labs(fill='% Online Only')+
  geom_text_repel(data=centroids2,aes(x = clong, y = clat,label=county), color = "black",size = 3)
|
7e161e29df3f2f41434b807025d996df16ec1ffb
|
4b8781be7f6063044b8d5431521e374879ede23b
|
/R/handle_mash_results.R
|
7fd8977384f47a86765b786a0bd26c287fdbbdcd
|
[] |
no_license
|
salarshaaf/CDBNgenomics
|
009561eeb5dcf1682265621c2c1fcd203c9814fe
|
e44ebb73f2a1f5868b23124f27f9b0334f269625
|
refs/heads/master
| 2022-04-11T23:46:30.901140
| 2020-04-09T16:04:21
| 2020-04-09T16:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,750
|
r
|
handle_mash_results.R
|
# --- Get Results from Mash Object ---------
#' @title Get column names from a mash object
#'
#' @description Extracts the column names of the local false sign rate (lfsr)
#' table stored in a mash object's results. These correspond to the condition
#' or phenotype names used when the mash object was fit, which is useful when
#' you want to work with a named subset of conditions.
#'
#' @param m An object of type mash
#'
#' @return A character vector of condition/phenotype names
#'
#' @examples
#' \dontrun{get_colnames(m = mash_obj)}
#'
get_colnames <- function(m){
  # The lfsr matrix carries one column per condition; its names are the
  # condition names.
  colnames(m$result$lfsr)
}
#' Return the log10 Bayes Factor for each effect
#'
#' @param m the mash result (from joint or 1by1 analysis); must have been
#'   computed using usepointmass=TRUE
#'
#' @return If m was fitted using usepointmass=TRUE, a vector whose jth
#'   element is log10(Pr(Bj | g=ghat-nonnull)/Pr(Bj | g = 0)), where
#'   ghat-nonnull is the non-null part of ghat. Otherwise returns NULL.
#'
get_log10bf = function(m) {
  # Without a point-mass fit there is no null log-likelihood to compare to.
  if (is.null(m$null_loglik)) {
    return(NULL)
  }
  # Convert the natural-log likelihood ratio to base 10.
  (m$alt_loglik - m$null_loglik) / log(10)
}
#' @title Get mash marker_df
#'
#' @description Builds a two-column tibble mapping each SNP marker name in
#' the mash object to its effect index, sorted by index. Uses thresh = 1 so
#' that every marker is included regardless of significance.
#'
#' @param m An object of type mash
#'
#' @importFrom magrittr %>%
#' @importFrom rlang .data
#' @importFrom tibble enframe
#' @importFrom dplyr arrange
#'
get_marker_df <- function(m){
  # thresh = 1 returns every effect, named by marker.
  all_markers <- get_significant_results(m, thresh = 1)
  marker_df <- enframe(all_markers, name = "Marker")
  arrange(marker_df, .data$value)
}
#' Get number of conditions
#'
#' @param m The mash result
#'
#' @return The number of conditions, i.e. the number of columns of the
#'   posterior mean matrix.
#'
#' @importFrom ashr get_pm
#'
get_ncond = function(m){
  ncol(get_pm(m))
}
#' Count number of conditions each effect is significant in
#'
#' @param m the mash result (from joint or 1by1 analysis)
#' @param thresh threshold below which a signal is called significant
#' @param conditions which conditions to include in the check (defaults to
#'   all of them)
#' @param sig_fn function used to extract significance measures from the mash
#'   object; e.g. ashr::get_lfsr or ashr::get_lfdr
#'
#' @return a vector giving, for each effect, the number of conditions in
#'   which it is significant
#'
get_n_significant_conditions = function(m, thresh = 0.05, conditions = NULL,
                                        sig_fn = get_lfsr){
  if (is.null(conditions)) {
    conditions = seq_len(get_ncond(m))
  }
  # Logical matrix: TRUE where the effect is significant in that condition.
  is_sig = sig_fn(m)[, conditions, drop = FALSE] < thresh
  # Row sums give the per-effect count of significant conditions.
  apply(is_sig, 1, sum)
}
#' Compute the proportion of (significant) signals shared by magnitude in each pair of conditions, based on the poterior mean
#'
#' @param m the mash fit
#' @param factor a number between 0 and 1 - the factor within which effects are
#' considered to be shared.
#' @param lfsr_thresh the lfsr threshold for including an effect in the
#' assessment
#' @param FUN a function to be applied to the estimated effect sizes before
#' assessing sharing. The most obvious choice beside the default
#' 'FUN=identity' would be 'FUN=abs' if you want to ignore the sign of the
#' effects when assesing sharing.
#' @details For each pair of tissues, first identify the effects that are
#' significant (by lfsr<lfsr_thresh) in at least one of the two tissues.
#' Then compute what fraction of these have an estimated (posterior mean)
#' effect size within a factor `factor` of one another. The results are
#' returned as an R by R matrix.
#'
#' @examples
#' \dontrun{
#' get_pairwise_sharing(m) # sharing by magnitude (same sign)
#' get_pairwise_sharing(m, factor=0) # sharing by sign
#' get_pairwise_sharing(m, FUN=abs) # sharing by magnitude when sign is ignored
#' }
#'
#' @export
get_pairwise_sharing = function(m, factor=0.5, lfsr_thresh=0.05, FUN= identity){
  R = get_ncond(m)
  # NOTE(review): `lfsr` is computed but never used below -- candidate for
  # removal.
  lfsr = get_lfsr(m)
  # Sharing matrix; only the upper triangle is filled, then mirrored.
  S=matrix(NA,nrow = R, ncol=R)
  for(i in 1:R){
    for(j in i:R){
      # Effects significant in at least one of the two conditions.
      sig_i=get_significant_results(m,thresh=lfsr_thresh,conditions = i)
      sig_j=get_significant_results(m,thresh=lfsr_thresh,conditions = j)
      a=union(sig_i,sig_j)
      ratio=FUN(get_pm(m)[a,i])/FUN(get_pm(m)[a,j])##divide effect sizes
      # Shared when the ratio lies in (factor, 1/factor). With the default
      # FUN=identity, opposite-sign effects give a negative ratio and are
      # never counted as shared; use FUN=abs to ignore sign.
      S[i,j]=mean(ratio>factor & ratio<(1/factor))
    }
  }
  # Mirror the upper triangle into the lower triangle (S is symmetric).
  S[lower.tri(S, diag = FALSE)] = t(S)[lower.tri(S, diag = FALSE)]
  colnames(S) = row.names(S) = colnames(m$result$PosteriorMean)
  return(S)
}
#' From a mash result, get effects that are significant in at least one condition
#'
#' @param m the mash result (from joint or 1by1 analysis)
#' @param thresh indicates the threshold below which to call signals significant
#' @param conditions which conditions to include in check (default to all)
#' @param sig_fn the significance function used to extract significance from
#'   the mash object; e.g. could be ashr::get_lfsr or ashr::get_lfdr. (Small
#'   values must indicate significant.)
#'
#' @return a vector containing the indices of the significant effects,
#'   ordered from most significant to least
#'
#' @importFrom ashr get_lfsr
#'
#' @export
get_significant_results = function(m, thresh = 0.05, conditions = NULL,
                                   sig_fn = ashr::get_lfsr) {
  if (is.null(conditions)) {
    conditions = 1:get_ncond(m)
  }
  # Per effect, take the minimum significance value across the chosen
  # conditions (i.e. its best condition).
  top = apply(sig_fn(m)[, conditions, drop = FALSE], 1, min)
  hits = which(top < thresh)
  # Most significant first.
  hits[order(top[hits], decreasing = FALSE)]
}
# --- Plot & Save Plots ---------
#' ggplot of single mash effect
#'
#' @description Creates a plot with point estimates and standard errors for
#'     effects of a single SNP in multiple conditions.
#'
#' @param m An object of type mash
#' @param n Optional. Integer or integer vector. The result number to plot, in
#'     order of significance. 1 would be the top result, for example. Find
#'     these with \code{\link{get_significant_results}}.
#' @param i Optional. Integer or integer vector. The result number to plot, in
#'     the order of the mash object. 1 would be the first marker in the mash
#'     object, for example. Find these with \code{\link{get_marker_df}}.
#' @param saveoutput Logical. Should the output be saved to the path?
#'
#' @note Specify only one of n or i.
#'
#' @importFrom ashr get_psd
#' @importFrom cowplot save_plot
#' @importFrom tibble enframe
#' @importFrom dplyr mutate
#' @import ggplot2
#' @importFrom purrr as_vector
#'
#' @export
mash_plot_effects <- function(m, n = NA, i = NA, saveoutput = FALSE){
  # At least one of n (significance rank) or i (raw index) must be given.
  stopifnot((!is.na(n[1]) | !is.na(i[1])))
  # Translate a significance rank into the raw effect index.
  if(is.na(i[1])){
    i <- get_significant_results(m)[n]
  }
  # One row per condition: posterior mean (mn) and posterior sd (se).
  effectplot <- get_colnames(m) %>%
    enframe(name = "Conditions") %>%
    mutate(mn = get_pm(m)[i,],
           se = get_psd(m)[i,])
  # NOTE(review): `.data$value` inside scale_x_discrete() is outside an aes()
  # data mask, where the .data pronoun is not defined -- verify this line
  # actually works; labels = effectplot$value may be what was intended.
  ggobject <- ggplot(data = effectplot) +
    geom_point(mapping = aes(x = as.factor(.data$value), y = .data$mn)) +
    geom_errorbar(mapping = aes(ymin = .data$mn - .data$se,
                                ymax = .data$mn + .data$se,
                                x = .data$Conditions), width = 0.3) +
    geom_hline(yintercept = 0, lty = 2) +
    labs(x = "Conditions", y = "Effect Size") +
    scale_x_discrete(labels = as_vector(.data$value)) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
  if(saveoutput == TRUE){
    # NOTE(review): this branch uses `n` when is.na(n[1]) is TRUE, which
    # indexes with NA -- the condition looks inverted relative to the one at
    # the top of the function; confirm intended behavior before relying on
    # saved filenames.
    if(is.na(n[1])){
      save_plot(filename = paste0("Effect_plot_",
                                  names(get_significant_results(m))[n], ".png"),
                plot = ggobject, base_aspect_ratio = 0.8, base_height = 4.5)
    } else {
      plotname <- get_marker_df(m)[i]
      save_plot(filename = paste0("Effect_plot_", plotname$Marker, ".png"),
                plot = ggobject, base_aspect_ratio = 0.8, base_height = 4.5)
    }
  }
  return(list(marker = i, effect_df = effectplot, ggobject = ggobject))
}
#' @title Manhattan plot in ggplot colored by significant conditions
#'
#' @description Takes a mash object and, for some vector of phenotypes, returns
#'     a Manhattan plot ggplot object (and its dataframe). Each SNP in the plot
#'     is colored by the number of phenotypes it is significant for. Even and
#'     odd chromosomes have different shapes for their SNPs, so that
#'     chromosome identity can be determined.
#'
#' @param m A mash object (outputted by mash).
#' @param cond A vector of phenotypes. Defaults to the names of each
#'     column in the mash object.
#' @param saveoutput Logical. Should the output be saved to the path?
#' @param thresh Numeric. The threshold used for the local false sign rate to
#'     call significance in a condition.
#'
#' @return A \code{tbl_df()} of the data used to make the Manhattan plot, and a
#'     ggplot object containing the Manhattan.
#'
#' @importFrom cowplot save_plot
#' @importFrom dplyr rename select arrange mutate left_join
#' @import ggplot2
#' @importFrom tibble as_tibble rownames_to_column enframe
#' @importFrom tidyr separate
#' @import viridis
#' @importFrom stringr str_replace_all
#'
#' @examples
#' \dontrun{manhattan_out <- mash_ggman_by_condition(m = m, saveoutput = TRUE)}
#'
#' @export
mash_plot_manhattan_by_condition <- function(m, cond = NA,
                                             saveoutput = FALSE, thresh = 0.05){
  # Default to every condition in the mash object.
  # (Removed an unused local, num_sig_in_cond, that was never read.)
  if(is.na(cond)[1]){
    cond <- get_colnames(m = m)
  }
  # One row per effect: its log10 Bayes factor keyed by row index ("value"),
  # then labelled with its marker name via get_marker_df(). The join key is
  # spelled out to avoid relying on dplyr's implicit common-column join.
  log10bf_df <- get_log10bf(m = m) %>%
    as.data.frame() %>%
    rownames_to_column(var = "value") %>%
    mutate(value = as.integer(.data$value)) %>%
    as_tibble() %>%
    left_join(get_marker_df(m = m), by = "value") %>%
    dplyr::rename(log10BayesFactor = .data$V1) %>%
    dplyr::select(-.data$value)
  # Marker names look like "Chr_Pos[_extra]"; split them for plotting and
  # attach the per-SNP count of significant conditions.
  ggman_df <- get_n_significant_conditions(m = m, thresh = thresh,
                                           conditions = cond) %>%
    enframe(name = "Marker") %>%
    rename(Num_Sig_Conditions = .data$value) %>%
    separate(.data$Marker, into = c("Chr", "Pos"), remove = FALSE, sep = "_",
             extra = "merge") %>%
    mutate(Pos = as.numeric(.data$Pos)) %>%
    left_join(log10bf_df, by = "Marker") %>%
    arrange(.data$Chr, .data$Pos)
  log10BF <- expression(paste("log"[10], plain("(Bayes Factor)")))
  # Alternate point shapes (21/22) by chromosome so adjacent facets differ.
  ggmanobject <- ggplot(data = ggman_df, aes(x = .data$Pos, y = .data$log10BayesFactor)) +
    geom_point(aes(color = .data$Num_Sig_Conditions, fill = .data$Num_Sig_Conditions,
                   shape = as.factor(.data$Chr))) +
    facet_wrap(~ .data$Chr, nrow = 1, scales = "free_x", strip.position = "bottom") +
    scale_color_viridis(option = "B") + scale_fill_viridis(option = "B") +
    theme(axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          panel.background = element_rect(fill=NA)) +
    labs(x = "Chromosome", y = log10BF) +
    scale_x_continuous(expand = c(0.3, 0.3)) +
    scale_shape_manual(values = rep(c(21,22),9), guide = FALSE)
  if(saveoutput == TRUE){
    save_plot(paste0("Manhattan_mash_", str_replace_all(Sys.time(), ":", "."),
                     ".png"), plot = ggmanobject, base_aspect_ratio = 2.5,
              base_height = 4)
  }
  return(list(ggman_df = ggman_df, ggmanobject = ggmanobject))
}
#' @title Create a ggplot of pairwise sharing of mash effects
#'
#' @description Given a correlation matrix, an RDS with a correlation matrix, or
#'     a mash object, create a ggplot of pairwise sharing of mash effects using
#'     \code{\link{get_pairwise_sharing}} and \code{\link{ggcorr}}.
#'
#' @param m An object of type mash
#' @param effectRDS An RDS containing a correlation matrix.
#' @param corrmatrix A correlation matrix
#' @param reorder Logical. Should the columns be reordered by similarity?
#' @param saveoutput Logical. Should the output be saved to the path?
#' @param filename Character string with an output filename. Optional.
#' @param ... Other arguments to \code{\link{get_pairwise_sharing}} or
#'     \code{\link{ggcorr}}.
#'
#' @importFrom GGally ggcorr
#' @import viridis
#'
#' @return A list containing a dataframe containing the correlations and a
#'     ggplot2 object containing the correlation plot.
#'
#' @export
mash_plot_pairwise_sharing <- function(m = NULL, effectRDS = NULL,
                                       corrmatrix = NULL, reorder = TRUE,
                                       saveoutput = FALSE, filename = NA, ...){
  # Additional arguments for get_pairwise_sharing, ggcorr, and save_plot.
  # dots::dots() pulls each named argument from ... or falls back to the
  # given default.
  requireNamespace("dots")
  factor <- dots::dots(name = 'factor', value = 0.5, ...)
  lfsr_thresh <- dots::dots(name = 'lfsr_thresh', value = 0.05, ...)
  FUN <- dots::dots(name = 'FUN', value = identity, ...)
  geom <- dots::dots(name = 'geom', value = 'circle', ...)
  label <- dots::dots(name = 'label', value = FALSE, ...)
  label_alpha <- dots::dots(name = 'label_alpha', value = TRUE, ...)
  label_size <- dots::dots(name = 'label_size', value = 3, ...)
  hjust <- dots::dots(name = 'hjust', value = 0.95, ...)
  vjust <- dots::dots(name = 'vjust', value = 0.3, ...)
  layout.exp <- dots::dots(name = 'layout.exp', value = 9, ...)
  min_size <- dots::dots(name = 'min_size', value = 0, ...)
  max_size <- dots::dots(name = 'max_size', value = 3.5, ...)
  option <- dots::dots(name = 'option', value = 'B', ...)
  dpi <- dots::dots(name = 'dpi', value = 500, ...)
  base_aspect_ratio <- dots::dots(name = 'base_aspect_ratio', value = 1.1, ...)
  # Default filename is timestamped (':' replaced so it is path-safe).
  if(is.na(filename)[1]){
    filename <- paste0("Mash_pairwise_shared_effects_",
                       str_replace_all(Sys.time(), ":", "."), ".png")
  }
  # Resolve the sharing matrix from exactly one of the three inputs:
  # an RDS file, a matrix passed directly, or a mash object (computed here).
  if(!is.null(effectRDS) && is.null(m) && is.null(corrmatrix)){
    shared_effects <- readRDS(effectRDS)
  } else if(!is.null(corrmatrix) && is.null(effectRDS) && is.null(m)){
    shared_effects <- corrmatrix
  } else if(!is.null(m)){
    shared_effects <- get_pairwise_sharing(m = m, factor = factor,
                                           lfsr_thresh = lfsr_thresh, FUN = FUN)
  } else {
    stop(paste0("Please specify one of these: ",
                "1. a mash output object (m), ",
                "2. the path to a effect rds file (mashRDS), ",
                "3. a correlation matrix (corrmatrix)."))
  }
  # Default plot height scales with the number of conditions.
  base_height <- dots::dots(name = 'base_height',
                            value = nrow(shared_effects)*0.33+1, ...)
  # Optionally cluster rows/columns by similarity before plotting.
  if(reorder == TRUE){
    corrdf <- reorder_cormat(cormat = shared_effects)
    corrplot <- ggcorr(data = NULL, cor_matrix = corrdf, geom = geom,
                       label = label, label_alpha = label_alpha,
                       label_size = label_size, hjust = hjust, vjust = vjust,
                       layout.exp = layout.exp, min_size = min_size,
                       max_size = max_size) +
      scale_color_viridis(option = option)
  } else {
    corrplot <- ggcorr(data = NULL, cor_matrix = shared_effects, geom = geom,
                       label = label, label_alpha = label_alpha,
                       label_size = label_size, hjust = hjust, vjust = vjust,
                       layout.exp = layout.exp, min_size = min_size,
                       max_size = max_size) +
      scale_color_viridis(option = option)
  }
  if(saveoutput == TRUE){
    save_plot(filename = filename, corrplot,
              base_aspect_ratio = base_aspect_ratio, base_height = base_height,
              dpi = dpi)
  }
  return(list(corr_matrix = shared_effects, gg_corr = corrplot))
}
#' @title Significant SNPs per number of conditions
#'
#' @description For some number of columns in a mash object that correspond to
#'     conditions, find the number of SNPs that are significant for that number
#'     of conditions.
#'
#' @param m An object of type mash
#' @param conditions A vector of conditions. Defaults to all columns of the
#'     mash object.
#' @param saveoutput Logical. Save plot output to a file? Default is FALSE.
#' @param thresh What is the threshold to call an effect significant? Default is
#'     0.05.
#'
#' @return A list containing a dataframe of the number of SNPs significant per
#'     number of conditions, and a ggplot object using that dataframe.
#'
#' @import ggplot2
#' @importFrom tibble enframe
#' @importFrom dplyr rename summarise filter group_by n
#'
#' @examples
#' \dontrun{mash_plot_sig_by_condition(m = mash_obj, saveoutput = TRUE)}
#'
#' @export
mash_plot_sig_by_condition <- function(m, conditions = NA, saveoutput = FALSE,
                                       thresh = 0.05){
  thresh <- as.numeric(thresh)
  # Bug fix: previously `cond` was only assigned when `conditions` was NA,
  # so passing an explicit `conditions` vector raised
  # "object 'cond' not found". (Also removed an unused local.)
  if(is.na(conditions)[1]){
    cond <- get_colnames(m = m)
  } else {
    cond <- conditions
  }
  # Count significant conditions per SNP, then tally SNPs per count,
  # dropping SNPs that are significant in no condition.
  SigHist <- get_n_significant_conditions(m = m, thresh = thresh,
                                          conditions = cond) %>%
    enframe(name = "Marker") %>%
    rename(Number_of_Conditions = .data$value) %>%
    group_by(.data$Number_of_Conditions) %>%
    summarise(Significant_SNPs = n()) %>%
    filter(.data$Number_of_Conditions != 0)
  vis <- ggplot(SigHist, aes(x = .data$Number_of_Conditions, y = .data$Significant_SNPs)) +
    geom_line() +
    geom_point() +
    geom_hline(yintercept = 0, lty = 2) +
    xlab(label = "Number of Conditions") +
    ylab(label = "Number of Significant SNPs")
  if(saveoutput == TRUE){
    # Bug fix: pass the plot explicitly. ggsave() otherwise saves the last
    # *displayed* plot, and `vis` is never printed inside this function.
    ggsave(paste0("SNPs with significant effects in n conditions ",
                  str_replace_all(Sys.time(), ":", "."),
                  ".bmp"), plot = vis, width = 5, height = 3, units = "in",
           dpi = 400)
  }
  return(list(sighist = SigHist, ggobject = vis))
}
#' @title Reorder correlation matrix
#'
#' @description Reorder the rows and columns of a matrix of correlation
#'     coefficients (which may contain NA's) by hierarchically clustering
#'     them, so similar conditions end up adjacent in plots.
#'
#' @param cormat A correlation matrix
#'
#' @return The correlation matrix with rows and columns reordered by cluster
#'     order.
#'
#' @importFrom cluster daisy
#' @importFrom stats hclust
#'
reorder_cormat <- function(cormat){
  # Gower dissimilarity is used because it tolerates NA entries.
  dd <- daisy(cormat, metric = "gower")
  hc <- hclust(dd)
  # Bug fix: return the reordered matrix as a visible value. The previous
  # version ended in an assignment (`cormat <- ...`), whose value a function
  # returns invisibly, so the result did not auto-print for callers.
  cormat[hc$order, hc$order]
}
|
039d75d84fe1362969367bdedf8e3b3efb8e3129
|
ec82f12dede5a701549578c41e963bd4cfc79617
|
/man/assign-method.Rd
|
4d03bd5ff0b630f69f2379263ec6a39c56819a06
|
[] |
no_license
|
impromptuRong/microbr
|
43f7631d38e21802dc4a54ad9a9c28fb40da9bff
|
98698ff50246e3454acb8576fc3722b535bcb665
|
refs/heads/master
| 2021-01-23T04:13:29.363142
| 2015-04-28T03:12:22
| 2015-04-28T03:12:22
| 34,646,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,500
|
rd
|
assign-method.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/physet-class.R
\docType{methods}
\name{assign}
\alias{$<-,physet-method}
\alias{Snames<-}
\alias{Snames<-,physet-method}
\alias{Tnames<-}
\alias{Tnames<-,physet-method}
\alias{assign}
\alias{otu_table<-}
\alias{otu_table<-,physet-method}
\alias{phy_tree<-}
\alias{phy_tree<-,physet,phyloOrNULL-method}
\alias{sample_data<-}
\alias{sample_data<-,physet-method}
\alias{seqdep<-}
\alias{seqdep<-,physet,numeric-method}
\alias{tax_table<-}
\alias{tax_table<-,physet-method}
\title{Update slots or attributes in physet object.}
\arguments{
\item{x}{A physet object.}
\item{value}{A new object with supported class for \code{name}.}
\item{name}{Available slot name are: \code{otu_table}, \code{sample_data},
\code{tax_table}, \code{phy_tree}, \code{seqdep}. Any attributes in
\code{names} for \code{sample_data(x)} and \code{tax_table(x)} can be used.
If \code{name} is a new attributes for \code{x} object, specify the slot
by using suffix \dQuote{._AddToS_} for \code{sample_data} and suffix
\dQuote{._AddToT_} for \code{tax_table} (the suffix will be removed).}
}
\value{
A new \code{\link{physet-class}} object with updated information.
}
\description{
Assign slots or attributes with new information or object. The
function will automatically update related slots based on changes.
}
\details{
The assign methods for updating whole slots actually generate a
new \code{\link{physet}} object by passing the original object to the
\code{\link{physet-constructor}}. While assign methods for adding variables
or change names will keep the original one. \cr
New \code{sample_data} and \code{tax_table} are actually added to the same
slot in the original \code{physet} object. Only the conflicting attributes
are replaced. Use \dQuote{._AddToS_} and \dQuote{._AddToT_} suffix to add
extra attributes. \cr
Assign new values to \code{edge_com}, \code{edge_len} and \code{edge_mat}
manually are not recommended as they are linked with \code{otu_table} and
\code{phy_tree}. Provide a new \code{otu_table} or \code{phy_tree} or both
automatically trigger the \code{\link{physet-constructor}} to re-build
these information securely.
}
\section{Usage}{
name(x) <- value
x$name <- value
}
\examples{
data(oral)
data(oral_raw)
otu_table(oral) <- oral_raw$rawdata[, 1:4]
oral$newtaxaID._AddToT_ <- paste("ID", c(1:Ntaxa(oral)), sep="")
}
\seealso{
\code{\link{access}}, \code{\link{extract}},
\code{\link{physet-constructor}}
}
|
fb5ea924b833af8ade5216e9995bc1676beb844d
|
0b01338c904a2662bc925a5df0462ae36f2210c4
|
/10_graphics_MA.R
|
6ece2cb38b072be5b161bd9fbc5e742017b4f180
|
[] |
no_license
|
JMF47/recountNNLSpaper
|
946980021d82e1b73eafab1fb69534e2c8d962fa
|
d251efaae1a20f2d96bf2f103445ab8a68905fc2
|
refs/heads/master
| 2021-09-14T12:20:29.791693
| 2018-05-13T17:22:10
| 2018-05-13T17:22:10
| 117,008,135
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,467
|
r
|
10_graphics_MA.R
|
####################################################
### MA Plots (supplements)
####################################################
# For each simulation scenario (read length x single/paired end), draw a
# column of MA plots comparing each method's estimates against the truth,
# annotated with the mean absolute error (MAE) per method.
cs = 0.15
# Scenario ids encode "<read length>_<1|2>" where 2 means paired end.
ids = c("37_1", "37_2", "50_1", "50_2", "75_1", "75_2", "100_1", "100_2", "150_1", "150_2")
for(id in ids){
  scenario = str_split_fixed(id, "_", n=2)
  rl = scenario[1]
  paired = scenario[2]
  # Loads `info` (method estimates), `truth`, `recountNNLSse`, `recountNNLSscore`.
  load(paste0("/dcl01/leek/data/ta_poc/geuvadis/simulation/", id, "/", rl, "_", paired, "_info.rda"))
  ses = recountNNLSse
  scores = recountNNLSscore
  # Estimation error per method; MAE is the plotted summary metric.
  err = info-truth$reads
  mlab = "MAE"
  metric = round(apply(abs(err), 2, mean, na.rm=T), 3)
  png(paste0('/dcl01/leek/data/ta_poc/graphics/', id, "_MA.png"),
      width=400, height=800, units = "px", pointsize=18, type="cairo")
  # Layout: left spine for the title, centre column of 5 MA plots with small
  # header/footer cells, right column for method labels.
  layout(matrix(c(1, 1, 1, 1, 1, 1, 1, 2:8, 9:15), nrow=7, byrow=F),
         heights=c(1, 4, 4, 4, 4, 4, 1), widths=c(1, 4, 0.5))
  ### texts
  par(mar=rep(0,4))
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  end = "Single End"
  if(paired==2){end="Paired End"}
  text(x=0.4, y=0.5, srt=90, paste0(rl, "bp ", end, " Simulation Results"), cex=2, pos=3)
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.5, y=0.8, pos=1, "Tx Level", cex=2)
  bord = 12/2
  ### First set of MA plots
  # One panel per method (columns 1..5 of `info`).
  for(i in 1:5){
    # Standard MA transform on log2(counts + 1).
    A = (log(info[,i]+1, 2) + log(truth$reads+1, 2))/2
    M = log(info[,i]+1, 2) - log(truth$reads+1, 2)
    plot(M~A, ylim=c(-bord*2, bord*2), xlim=c(0.5, bord)*2, col=rgb(0,0,0,0.1),
         main = "", xlab="MA Plot: Truth - LM", xaxt="n", yaxt="n", pch=19, cex=cs)
    # abline(h=0, col=2)
    # abline(0, 1, lty=2, col=2); abline(0, -1, lty=2, col=2)
    axis(side=2, at = log(c(1/1000, 1/100, 1/10, 1, 10, 100, 1000),2),
         labels=parse(text = c('10^-3', '10^-2', '10^-1', '1', '10', '10^2', '10^3')) , las=2)
    window = par()$usr
    ywid = window[4]-window[3]
    # MAE annotation near the bottom of each panel.
    text((window[1]+window[2])/2, window[3]+ywid/10, labels=paste0(mlab, ": ", metric[i]), cex=1.5)
  }
  # Shared x axis drawn only under the last panel.
  axis(side=1, at = log(c(1, 10, 100, 1000, 10000, 100000, 1000000), 2),
       labels=parse(text = c(1, 10, 100, '10^3', '10^4', '10^5', '10^6')))
  par(xpd=NA)
  # Right-hand column: method name labels, one per panel.
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.6, y=0.5, srt=270, "recountNNLS", cex=1.4, pos=3)
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.6, y=0.5, srt=270, "Kallisto", cex=1.4, pos=3)
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.6, y=0.5, srt=270, "H2-Cufflinks", cex=1.4, pos=3)
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.6, y=0.5, srt=270, "RSEM", cex=1.4, pos=3)
  plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
  text(x=0.6, y=0.5, srt=270, "Salmon", cex=1.4, pos=3)
  par(xpd=F)
  dev.off()
}
####################################################
### MA Plots - RSEM-based
####################################################
# Same MA-plot figure as above, but for the RSEM-based simulation.
# Loads `out` (method estimates) and `count_mat` (truth in column 1).
load("/dcl01/leek/data/ta_poc/geuvadis/simulation/rsem_based/rsem_based_0.rda")
# Duplicate column 4 so the panel layout still has 5 columns; panel 4 is
# drawn fully transparent below (RSEM vs itself would be meaningless).
out = cbind(out,out[,4])
err = out-count_mat[,1]
mlab = "MAE"
metric = round(apply(abs(err), 2, mean, na.rm=T), 3)
png(paste0('/dcl01/leek/data/ta_poc/graphics/RSEM_MA.png'),
    width=400, height=800, units = "px", pointsize=18, type="cairo")
layout(matrix(c(1, 1, 1, 1, 1, 1, 1, 2:8, 9:15), nrow=7, byrow=F),
       heights=c(1, 4, 4, 4, 4, 4, 1), widths=c(1, 4, 0.5))
### texts
par(mar=rep(0,4))
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
# NOTE: the first assignment is dead code; it is overwritten immediately.
end = "Single End"
end="Paired End"
text(x=0.4, y=0.5, srt=90, paste0("RSEM-based Simulation Results"), cex=2, pos=3)
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.5, y=0.8, pos=1, "Tx Level", cex=2)
bord = 10
cs = 0.4
# One MA panel per method; panel 4 (the duplicated column) is hidden via an
# alpha-0 colour and gets no MAE annotation.
for(i in 1:5){
  color = rgb(0,0,0,0.1)
  if(i==4) color = rgb(0, 0, 0, 0)
  A = (log(out[,i]+1, 2) + log(count_mat[,1]+1, 2))/2
  M = log(out[,i]+1, 2) - log(count_mat[,1]+1, 2)
  plot(M~A, ylim=c(-10, 10), xlim=c(0, 18), col=color,
       main = "", xlab="MA Plot: Truth - LM", xaxt="n", yaxt="n", pch=19, cex=cs)
  axis(side=2, at = log(c(1/1000, 1/100, 1/10, 1, 10, 100, 1000),2),
       labels=parse(text = c('10^-3', '10^-2', '10^-1', '1', '10', '10^2', '10^3')) , las=2)
  window = par()$usr
  ywid = window[4]-window[3]
  if(i!=4){text((window[1]+window[2])/2, window[3]+ywid/10, labels=paste0(mlab, ": ", metric[i]), cex=1.5)}
}
# Shared x axis under the last panel.
axis(side=1, at = log(c(1, 100, 10000, 10^8, 10^16), 2),
     labels=parse(text = c(1, 100, '10^4', '10^8', '10^16')))
par(xpd=NA)
# Right-hand column: method name labels.
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.6, y=0.5, srt=270, "recountNNLS", cex=1.4, pos=3)
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.6, y=0.5, srt=270, "Kallisto", cex=1.4, pos=3)
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.6, y=0.5, srt=270, "H2-Cufflinks", cex=1.4, pos=3)
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.6, y=0.5, srt=270, "RSEM", cex=1.4, pos=3)
plot(c(0, 1), c(0,1), xlab="", ylab="", xaxt="n", yaxt="n", type="n", main="", xaxs="i", yaxs="i", bty="n")
text(x=0.6, y=0.5, srt=270, "Salmon", cex=1.4, pos=3)
par(xpd=F)
dev.off()
|
c36f62165b9822e435d58c663086ce5e52ae7fd2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/munsell/examples/mnsl2hvc.Rd.R
|
70025127c8eec87702d5abf31dc112e94c710e9a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 205
|
r
|
mnsl2hvc.Rd.R
|
library(munsell)
### Name: mnsl2hvc
### Title: Converts a Munsell colour to a hue, chroma and value triplet
### Aliases: mnsl2hvc
### ** Examples
# Convert a Munsell colour specification to its hue/value/chroma triplet.
mnsl2hvc("5PB 5/10")
# Round trip: converting the triplet back should recover the original spec.
hvc2mnsl(mnsl2hvc("5PB 5/10"))
|
74ff004f33c80e2b74bedf4b5e74b7b81ef88945
|
bca18488d27f397819fc39434dd2209a71485ab5
|
/Elasticsearch/estudio errores 3.R
|
035c338c9d4393727ed57f0847182a87ccceba2a
|
[] |
no_license
|
Xmael/R
|
c5819e8b7be9b3706b78de33494c376146c9248f
|
88eba6349bdb71b26162785226e7e51ae6e3cc6f
|
refs/heads/master
| 2021-01-19T13:29:50.465856
| 2016-03-10T08:30:48
| 2016-03-10T08:30:48
| 34,176,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,153
|
r
|
estudio errores 3.R
|
# Study of "GTA Internal Communication Error" occurrences in Elasticsearch
# logstash indices: for each of the last 7 days, count errors per one-minute
# window, summarise inter-error gaps, and plot counts for a 12:00-15:00 slice.
library(elastic)
library(RJSONIO)
library(XML)
# lubridate (hms, interval, %within%, dseconds, as.duration) and ggplot2 are
# used below but were never attached in the original script.
library(lubridate)
library(ggplot2)
connect(es_base="http://52.28.187.57")
today=Sys.Date()-1
periodo=seq(today, length.out=7, by="-1 day") # dates from yesterday back 7 days
periodo=format(periodo,"%Y.%m.%d")            # logstash index date format
library(data.table)
load("~/Trivago/hoteles(20.1.16).RData")
# Parse an XML message: return the number of '#'-separated fields (minus one)
# together with the last field.
parsea=function(mensaje){
  r=xmlTreeParse(mensaje)
  cadena=xmlValue(xmlRoot(r)[[2]])
  cadena=strsplit(cadena,split = "#")
  return(c(length(cadena[[1]])-1,cadena[[1]][length(cadena[[1]])]))
}
resumen.1=NULL
for(j in 1:7){
  cat("#####",periodo[j],"#####\n")
  # All matching error documents for this day's index.
  db2=Search(index = paste("logstash-",periodo[j],sep=""),type = "has",size=100000,q="message: \"Unable To Process Request - GTA Internal Communication Error\" ")$hits$hits
  # The original passed the undefined names `title` and `label` positionally;
  # they were never evaluated (lazy evaluation) and are dropped here.
  pb=txtProgressBar(min = 0, max = length(db2), initial = 0, char = "=",width = NA, style = 3)
  # Collect (date, time) pairs for each error document.
  errores=NULL
  for(i in seq_along(db2)){
    setTxtProgressBar(pb, i)
    if(!is.null(db2[[i]]$`_source`$`@message`)){
      errores=rbind(errores,cbind(gsub("T.*","",db2[[i]]$`_source`$'@timestamp'),gsub(".*T","",db2[[i]]$`_source`$'@timestamp')))
    }
  }
  errores=data.frame(errores,stringsAsFactors = F)
  errores[,1]=as.POSIXct(paste(errores[,1],errores[,2],sep=" "),tz = "UTC")
  errores=errores[order(errores[,1],decreasing = F),]
  errores=errores[order(hms(errores[,2])),]
  # One-minute windows covering the whole day.
  periodos=c(1:(24*60*60/60))
  duracion=as.duration(60)
  resumen=as.data.frame(matrix(NA,nrow=length(periodos),ncol=5,byrow = T))
  colnames(resumen)=c("Periodo","N.errores","Intervalo medio entre errores","Desviacion","Intervalo mínimo")
  pb=txtProgressBar(min = 0, max = nrow(resumen), initial = 0, char = "=",width = NA, style = 3)
  for(i in seq_along(periodos)){
    setTxtProgressBar(pb, i)
    intervalo=interval(as.POSIXct(paste(gsub("\\.","-",periodo[j])," 00:00:00",sep=""),tz = "UTC")+(periodos[i]-1)*duracion,
                       as.POSIXct(paste(gsub("\\.","-",periodo[j])," 00:00:00",sep=""),tz = "UTC")+periodos[i]*duracion-1)
    indices=which(errores[,1] %within% intervalo)
    #indices=which(errores[,1] == (as.POSIXct("2016-01-20 00:00:00",tz = "UTC")+i))
    resumen$Periodo[i]=as.character(intervalo)
    resumen$N.errores[i]=length(indices)
    # Gaps (in seconds) between consecutive errors inside the window. The
    # original looped 2:length(indices), which misbehaves when fewer than two
    # errors fall in the window.
    cuenta=NULL
    if(length(indices) >= 2){
      for(k in 2:length(indices)){
        cuenta=c(cuenta,dseconds(interval(errores[indices[k-1],1],errores[indices[k],1])))
      }
    }
    if(is.null(cuenta)) cuenta <- NA_real_  # keeps mean/sd/min NA -> zeroed below
    resumen$`Intervalo medio entre errores`[i]=mean(cuenta)
    resumen$Desviacion[i]=sd(cuenta)
    resumen$`Intervalo mínimo`[i]=min(cuenta)
  }
  resumen[is.na(resumen)]=0
  resumen.1=rbind(resumen.1,resumen)
}
resumen=resumen.1
# Reshape for plotting: window start time, date, and error count.
resumen.2=data.frame(hora=resumen[,1],fecha=as.Date(resumen[,1]),recuento=resumen[,c(2)])
resumen.2[,1]=gsub(" ","",gsub(" --.*","",gsub("UTC","",gsub("2016-01-..","",resumen.2[,1]))))
resumen.2[which(resumen.2[,1]==""),1]="00:00:00"
labels=as.character(resumen.2[seq(1,nrow(resumen.2),360),1])
resumen.2$fecha=as.character(resumen.2$fecha)
# Error counts per minute between 12:00 and 15:00, one line per day.
ggplot(resumen.2[which(resumen.2[,1]>="12:00:00" & resumen.2[,1]<="15:00:00"),],
       aes(x=hora,y=recuento,group=fecha,colour=fecha))+geom_line()+scale_x_discrete(breaks=labels)
|
e6771d106a4ca46541021c4cb846db1a4fcf8fb5
|
278c4c50164dec711aa191b0b3dda4e8f3667112
|
/R/utils.r
|
cdd51cd164c7937632b968281e66fc5a10cd5e6d
|
[] |
no_license
|
tonyelhabr/tetext
|
ce3768c56d6093a7dc0a9e3aae2655b34d89ccc0
|
ed55c5be8a7e2fb72dc58d186ae553864e203e31
|
refs/heads/master
| 2021-04-29T23:55:15.076607
| 2018-10-08T12:58:48
| 2018-10-08T12:58:48
| 121,565,058
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,967
|
r
|
utils.r
|
# Return the hex code of the RGB inverse of `color` (e.g. "white" -> "#000000").
get_color_hex_inverse <- function(color) {
  channels <- grDevices::col2rgb(color)
  grDevices::rgb(t(255 - channels), max = 255)
}
# Build a two-colour palette: a main colour plus its RGB inverse.
#
# When both `color_main` and `lab_main` are supplied, returns the named pair
# c(lab_main = color_main, lab_other = inverse(color_main)); otherwise an
# unnamed pair derived from `color_default`.
generate_named_dual_colors <-
  function(color_main,
           lab_main,
           lab_other,
           color_default = "grey50") {
    # `&&` (scalar, short-circuiting) is the correct operator in an `if`
    # condition; the original used the vectorised `&`.
    if (!is.null(color_main) && !is.null(lab_main)) {
      out <- stats::setNames(c(color_main, get_color_hex_inverse(color_main)),
                             c(lab_main, lab_other))
    } else {
      out <- c(color_default, get_color_hex_inverse(color_default))
    }
    out
  }
# Reflect a proportion below 0.5 to its complement, so the result is >= 0.5.
invert_pct <- function(num) {
  # message(sprintf("Inverting %f to %f.", num, 1 - num))
  if (num >= 0.5) num else 1 - num
}
# Assert that `x` lies within [min, max]; return `x` unchanged on success,
# abort with an informative error otherwise.
validate_range <- function(x, max = 1, min = 0) {
  if (x > max) stop("`x` must not be greater than ", max, ".", call. = FALSE)
  if (x < min) stop("`x` must not be less than ", min, ".", call. = FALSE)
  x
}
# Keep the "top" rows of `data` ranked by column `col`.
#
# When `num_top` >= 1 it is a row count: rows are ranked by `col`
# (descending if `desc`; optionally by absolute value if `abs`) and the first
# `num_top` ranks are kept. When `num_top` is a fraction below 1 it is
# treated as a quantile cutoff on `col` instead. The helper ".rank" column is
# dropped unless `keep_rank_col` is TRUE.
filter_num_top_at <-
  function(data = NULL,
           col = NULL,
           num_top = NULL,
           max = nrow(data),
           min = 0,
           desc = TRUE,
           abs = FALSE,
           keep_rank_col = FALSE) {
    stopifnot(!is.null(data), is.data.frame(data))
    stopifnot(!is.null(col))
    stopifnot(!is.null(num_top))
    # Abort early when num_top falls outside [min, max].
    num_top <- validate_range(x = num_top, max = max, min = min)
    rank <- NULL
    rank <- ".rank"
    rank_quo <- rlang::sym(".rank")
    col_quo <- rlang::sym(col)
    out <- data
    # Rank rows according to direction / absolute-value options.
    if (desc) {
      if (abs) {
        out <-
          out %>%
          dplyr::mutate(!!rank_quo := dplyr::row_number(dplyr::desc(abs(!!col_quo))))
      } else {
        out <-
          out %>%
          dplyr::mutate(!!rank_quo := dplyr::row_number(dplyr::desc(!!col_quo)))
      }
    } else {
      if (abs) {
        out <-
          out %>%
          dplyr::mutate(!!rank_quo := dplyr::row_number((abs(!!col_quo))))
      } else {
        out <-
          out %>%
          dplyr::mutate(!!rank_quo := dplyr::row_number(!!col_quo))
      }
    }
    if (num_top >= 1) {
      # Count interpretation: keep the first num_top ranks.
      out <-
        out %>%
        dplyr::filter(!!rank_quo <= num_top)
    } else {
      # num_top <- (num_top) * (nrow(data))
      # Fractional interpretation: keep rows at or above this quantile of col.
      num_top <- invert_pct(num_top)
      out <-
        out %>%
        # dplyr::arrange(dplyr::desc(!!col_quo)) %>%
        dplyr::filter(!!col_quo >= stats::quantile(!!col_quo, num_top, na.rm = TRUE))
    }
    if (!keep_rank_col) {
      # NOTE(review): matches(".rank") is a regex where "." matches any
      # character, so a column such as "xrank" would also be dropped — confirm.
      out <- out %>% dplyr::select(-dplyr::matches(rank))
    }
    out
  }
# Abort with an installation hint when the suggested package `pkg` is not
# available; no-op (invisible NULL) when it is.
require_ns <- function(pkg) {
  if (requireNamespace(pkg, quietly = TRUE)) {
    return(invisible(NULL))
  }
  stop(
    sprintf(
      'Package "%s" needed for this function to work. Please install it.',
      pkg
    ),
    call. = FALSE
  )
}
# Return the last entry of `class()` for column `col` of `data` — for columns
# carrying several classes this is the most general one.
get_class <- function(data, col) {
  all_classes <- sapply(data, class)
  # classes[names(classes) == col]
  col_class <- all_classes[[col]]
  col_class[length(col_class)]
}
# Coerce column `col` of `data` to a factor when it is not one already.
#
# NOTE(review): the selection uses dplyr::contains(col), so any column whose
# name merely *contains* `col` as a substring is also coerced — confirm this
# is intended rather than an exact-name match.
coerce_col_to_factor <- function(data, col) {
  class_i <- get_class(data, col)
  if (class_i != "factor") {
    data <-
      data %>% dplyr::mutate_at(dplyr::vars(dplyr::contains(col)), dplyr::funs(factor))
    # message(sprintf("Coercing %s to a factor.", nm_i))
  }
  data
}
# NOTE: These could change in the future.
# Prepare a colour-mapping column for plotting: currently this just coerces
# the column to a factor.
wrangle_color_col <- function(data, col) coerce_col_to_factor(data, col)
# wrangle_facet_col <-
# function(data, col) {
# coerce_col_to_factor(data, col)
# }
# Filter `data` to rows where column `col` is in `value` (or, with
# `invert = TRUE`, is NOT in `value`). When `col` or `value` is NULL, the
# data is returned untouched — this makes the function safe to chain with
# optional filter parameters.
filter_if_not_null_at <-
  function(data = NULL,
           col = NULL,
           value = NULL,
           invert = FALSE) {
    stopifnot(!is.null(data), is.data.frame(data))
    if (is.null(col))
      return(data)
    if (is.null(value))
      return(data)
    stopifnot((col %in% names(data)))
    col_quo <- rlang::sym(col)
    if (!invert) {
      out <-
        data %>%
        dplyr::filter(!!col_quo %in% value)
    } else {
      out <-
        data %>%
        dplyr::filter(!(!!col_quo %in% value))
    }
    out
  }
# Return the sorted unique values of column `col` of `data` as a vector.
pull_distinctly_at <- function(data = NULL, col = NULL) {
  stopifnot(!is.null(data), is.data.frame(data))
  stopifnot((col %in% names(data)))
  col_quo <- rlang::sym(col)
  distinct_rows <- dplyr::distinct(data, !!col_quo)
  sorted_rows <- dplyr::arrange(distinct_rows, !!col_quo)
  dplyr::pull(sorted_rows, !!col_quo)
}
# Optionally restrict `data` to the facet values of interest before plotting,
# filtering the "name_x"/"name_y" columns using the facet_main, x_include,
# y_include, x_exclude and y_exclude entries of `params`.
#
# NOTE(review): the formal arguments x_include, y_include, x_exclude,
# y_exclude and facet_main are never used — only the corresponding entries of
# `params` are read. Confirm whether they should be wired in or removed.
filter_data_facet_at <-
  function(data = NULL,
           filter_facet = NULL,
           params = NULL,
           x_include = NULL,
           y_include = NULL,
           x_exclude = NULL,
           y_exclude = NULL,
           facet_main = NULL) {
    stopifnot(!is.null(data), is.data.frame(data))
    stopifnot(!is.null(filter_facet), is.logical(filter_facet))
    if (filter_facet) {
      stopifnot(!is.null(params), is.list(params))
      # Keep the main facet and any explicitly included names...
      data <-
        data %>% filter_if_not_null_at("name_x", params$facet_main, invert = FALSE)
      data <-
        data %>% filter_if_not_null_at("name_x", params$x_include, invert = FALSE)
      data <-
        data %>% filter_if_not_null_at("name_y", params$y_include, invert = FALSE)
      # ...then drop any explicitly excluded names.
      data <-
        data %>% filter_if_not_null_at("name_x", params$x_exclude, invert = TRUE)
      data <-
        data %>% filter_if_not_null_at("name_y", params$y_exclude, invert = TRUE)
    } else {
      message("It's recommended to set `filter_facet = TRUE`.")
    }
    data
  }
# Validate the choice of a "main" x value before filtering.
#
# When `filter_x` is TRUE, checks that `x`, `xs` and `x_main` are supplied,
# that `x_main` is one of `xs`, and that it is a single value. Returns `data`
# unchanged on success; aborts with an informative error otherwise.
validate_x_main <-
  function(data = NULL,
           filter_x = NULL,
           x = NULL,
           xs = NULL,
           x_main = NULL) {
    stopifnot(!is.null(data), is.data.frame(data))
    if (is.null(filter_x))
      stop("`filter_x` cannot be NULL.", call. = FALSE)
    if (filter_x) {
      stopifnot(!is.null(x))
      stopifnot(!is.null(xs))
      stopifnot(!is.null(x_main))
      # `stop()` already aborts the call, so the original `return(stop(...))`
      # wrappers were redundant and are removed.
      if (!(x_main %in% xs))
        stop(sprintf("`x_main` is not in %s.", paste(xs, collapse = ",")),
             call. = FALSE)
      if (length(x_main) > 1)
        stop("`x_main` should be a singular value.", call. = FALSE)
    }
    data
  }
# Add a "name_xy" facet-label column of the form "<name_x> vs. <name_y>".
create_name_xy_facet_lab <- function(data = NULL) {
  # Null out the NSE column names to silence R CMD check notes.
  name_xy <- name_x <- name_y <- NULL
  dplyr::mutate(data, name_xy = paste0(name_x, " vs. ", name_y))
}
# Flag the "direction" of each log-ratio (which of the name_x/name_y pair it
# favours, normalised by alphabetical order) and keep the top `num_top` rows
# by absolute log-ratio within each group defined by `cols_group`.
create_logratio_dir_at <-
  function(data = NULL,
           cols_group = NULL,
           num_top = NULL) {
    stopifnot(!is.null(data), is.data.frame(data))
    stopifnot(!is.null(cols_group), is.list(cols_group))
    # Null out NSE column names to silence R CMD check notes.
    logratio_dir <- logratio <- name_x <- name_y <- NULL
    cols_group_quo <- rlang::syms(cols_group)
    # NOTE(review): the function's value is that of this assignment, which is
    # returned invisibly — confirm callers do not rely on a visible return.
    data_proc <-
      data %>%
      dplyr::mutate(logratio_dir =
                      ifelse(
                        logratio > 0,
                        ifelse(name_x < name_y, TRUE, FALSE),
                        ifelse(name_x < name_y, FALSE, TRUE)
                      )) %>%
      dplyr::group_by(!!!cols_group_quo) %>%
      # dplyr::group_by_at(cols_group) %>%
      filter_num_top_at("logratio", num_top, abs = TRUE) %>%
      dplyr::ungroup()
  }
# Derive each row's colour label from its log-ratio direction (main facet vs.
# `lab_other`), fix the factor level order, and reorder the token column
# within each "name_xy" facet for plotting.
process_logratio_dir_at <-
  function(data = NULL,
           token = NULL,
           color = NULL,
           facet = NULL,
           facet_main = NULL,
           lab_other = NULL) {
    stopifnot(!is.null(data), is.data.frame(data))
    stopifnot(!is.null(token), is.character(token))
    stopifnot(!is.null(color), is.character(color))
    stopifnot(!is.null(facet), is.character(facet))
    stopifnot(!is.null(facet_main), is.character(facet_main))
    stopifnot(!is.null(lab_other), is.character(lab_other))
    token_quo <- rlang::sym(token)
    color_quo <- rlang::sym(color)
    facet_quo <- rlang::sym(facet)
    # Null out NSE column names to silence R CMD check notes.
    logratio_dir <- logratio <- name_x <- name_xy <- NULL
    # NOTE(review): `reorder_within` is defined elsewhere in the package; the
    # function's value is that of this assignment (returned invisibly).
    data_proc <-
      data %>%
      dplyr::mutate(!!color_quo :=
                      ifelse(
                        logratio_dir,
                        ifelse(name_x > lab_other, lab_other, name_x),
                        ifelse(name_x > lab_other, name_x, lab_other)
                      )) %>%
      dplyr::mutate(!!color_quo := factor(!!color_quo, levels = c(facet_main, lab_other))) %>%
      dplyr::mutate(!!token_quo := reorder_within(!!token_quo, dplyr::desc(logratio), name_xy))
  }
|
54e410360c0d37cccf88bf4361209053295fd346
|
85dea3294b6a57d2b0a052486d4a52334d6b63c8
|
/plot1.R
|
d644595ef40903507b825112e5da3b9efe72ff4c
|
[] |
no_license
|
yanyangai/ExData_Plotting1
|
ee79ec81c21e33600b3292aa5400193da094a97f
|
129cb881f95ed8c08fefd21a997f0a457585d83a
|
refs/heads/master
| 2020-12-31T06:22:13.873086
| 2015-11-08T22:07:58
| 2015-11-08T22:07:58
| 45,757,817
| 0
| 0
| null | 2015-11-07T22:42:01
| 2015-11-07T22:42:01
| null |
UTF-8
|
R
| false
| false
| 689
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01/02 of the household
# power consumption data set, saved to plot1.png.

# read in data #
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
dat2 <- dat
# combine date and time into a single datetime variable #
dat2$datetime <- strptime(paste(dat2$Date, dat2$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# subset data to the two target days #
dat2$Date <- as.Date(dat2$Date, format = "%d/%m/%Y")
df3 <- subset(dat2, Date == "2007-02-01"|Date == "2007-02-02")
# format global active power; the original referenced the undefined object
# `df` on the right-hand side instead of `df3` #
df3$Global_active_power <- as.numeric(df3$Global_active_power)
# first plot #
png("plot1.png", width = 480, height = 480)
hist(df3$Global_active_power, col = "red", main = "Global Active Power", xlab="Global Active Power(kilowatts)")
# save plot #
dev.off()
|
2a120f87134dce2041f79b82615cb98342e65c7c
|
fbd79e39470342d1bdd91a90ed94cfba25c91e31
|
/man/addValidation.Rd
|
0cdccea3109892e0e8a4c242b32038d1bd472711
|
[] |
no_license
|
judyheewonlee/Nh3D2.0
|
bcaa1b9e37d4dfbf389687a73efd2fc69e1a5aee
|
05add8a9216ca11f37a451185fe8de96d9df9d81
|
refs/heads/master
| 2020-03-31T11:21:07.916071
| 2019-04-15T05:11:01
| 2019-04-15T05:11:01
| 152,173,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 759
|
rd
|
addValidation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addValidation.R
\name{addValidation}
\alias{addValidation}
\title{addValidation.R}
\usage{
addValidation(xmlFile, pdbID, cathTable)
}
\arguments{
\item{xmlFile}{(vector) An XML file path that will be read}
\item{pdbID}{(string) The PDB ID of the protein entry}
\item{cathTable}{(data.frame) A dataframe containing domain entries
from the CATH database}
}
\value{
The \code{cathTable} with the validation scores added to
each protein entry
}
\description{
\code{addValidation} adds the stereochemical validation reports
into the \code{cathTable} by reading the given XML file.
This includes the \code{clashscore}, \code{Ramachandran Score},
\code{RSRZ score} and \code{percent outliers}.
}
|
305ae5dde71e32176385128bb2c33fa87d413e62
|
9cc49351c9649dd0b4a84eef9c3d5a98ab983b2d
|
/analyses/frequencies_vs_recovery/plot.r
|
ce5bdbff5007d0b229dbfc42602ce67ac13061bc
|
[] |
no_license
|
SimonGreenhill/northwind
|
8a926436d58711cd0b87c0d1ef48467d4e934d60
|
fa9e2e52bf3c2585a89a6c954eda183d6eb02812
|
refs/heads/master
| 2023-01-14T18:48:45.899610
| 2020-10-05T09:34:31
| 2020-10-05T09:34:31
| 197,758,170
| 0
| 0
| null | 2023-01-09T01:04:48
| 2019-07-19T11:04:09
|
Python
|
UTF-8
|
R
| false
| false
| 3,638
|
r
|
plot.r
|
# Plots for the frequencies-vs-recovery analysis: first-observation time of
# each phoneme in NWS texts against its cross-linguistic ranking (Phoible)
# and JIPA frequency, plus close-ups of rare phonemes.
library(ggplot2)
library(ggrepel)
library(viridis)
library(patchwork)
df <- read.delim('results.dat', header=TRUE)
# Column glossary:
# 1. Rank = how common it is cross-linguistically (= rank in Phoible)
# 2. Ni = number of illustrations listing the phoneme in inventory
# 3. Nt = number of illustrations attesting the phoneme in NWS text
# 4. C = Nt/Ni (i.e. type capture rate) = the proportion of languages with phoneme
# P that have P observed in the illustration.
# 5. R = rank in JIPA texts by phoneme frequency (summed over all languages).
# 6. AverageFirstObservationPercent = the average time to the first observation
# of Phoneme P
# Remove anything where we do not have the phoneme in the JIPA article
df <- df[df$Ni > 0, ]
# convert NI to Rank for consistency
# (the original relied on partial matching with `ties=`; the full argument
# name is `ties.method`)
df$RankNI <- rank(df$Ni, ties.method = "average")
# Spearman correlations between first-observation time and the two rankings.
cor.test(df$AverageFirstObservationPercent, df$Rank, method="spearman")
cor.test(df$AverageFirstObservationPercent, df$Ni, method="spearman")
#-----------------
# Panel a: global (Phoible) ranking vs. first observation.
p <- ggplot(df, aes(x=AverageFirstObservationPercent, y=Rank, color=Rank))
p <- p + geom_point()
p <- p + theme_classic()
#p <- p + xlab("Average percentage of NWS text elapsed before first observation of phoneme")
p <- p + ylab("Global Ranking of Phoneme (Phoible)")
p <- p + scale_color_continuous('Type Capture Rate', type = "viridis")
p <- p + guides(color="none")
# Time to first observation vs. x-ling freq (in JIPA Ni)
q <- ggplot(df, aes(x=AverageFirstObservationPercent, y=Ni, color=Rank))
q <- q + geom_point()
q <- q + theme_classic()
#q <- q + xlab("Average percentage of NWS text elapsed before first observation of phoneme")
q <- q + ylab("Number of languages with Phoneme (JIPA)")
q <- q + scale_color_continuous('Global Ranking', type = "viridis")
q <- q + theme(
  legend.direction = "horizontal",
  legend.position=c(0.95, 0.95),
  legend.justification=c(1, 1)
)
# Highlight phonemes never observed before the end of the text (100%).
p <- p + geom_point(data=df[df$AverageFirstObservationPercent==100,], color="tomato")
q <- q + geom_point(data=df[df$AverageFirstObservationPercent==100,], color="tomato")
p <- p + xlab("") + ggtitle("a. Global Ranking vs. First Observation")
q <- q + xlab("") + ggtitle("b. JIPA Frequency vs. First Observation")
#p <- p + geom_smooth(method="lm", color="steelblue")
#q <- q + geom_smooth(method="lm", color="steelblue")
# Combine the two panels with a shared x-axis caption at the bottom.
pq <- p + q + plot_annotation(tag_levels = 'a')
pq <- pq / grid::textGrob( # HACK
  'Average percentage of NWS text elapsed before first observation of phoneme',
  just = "centre"
)
pq <- pq + plot_layout(heights = unit(c(11, 0.5), c('null', 'cm')))
ggsave('firstobs.pdf', pq, height=6, width=10)
# Close-ups of rare phonemes (Phoible rank >= 200), with labelled points.
rare <- df[df$Rank >= 200, ]
p <- ggplot(rare, aes(x=AverageFirstObservationPercent, y=Rank, label=Phoneme, color=Rank))
p <- p + geom_point() + geom_text_repel()
p <- p + ylab("Number of languages with Phoneme (Phoible)")
p <- p + xlab("Average percentage of NWS text elapsed before first observation of phoneme")
p <- p + scale_color_continuous('', type = "viridis")
p <- p + theme_classic()
p <- p + guides(color="none")
ggsave("rare-1.png", p)
# One-dimensional strip of the rare phonemes by first-observation time.
p <- ggplot(rare, aes(x = AverageFirstObservationPercent, y = 1, label = Phoneme, color=Rank))
p <- p + geom_point()
p <- p + geom_text_repel(
  nudge_y = 0.05,
  direction = "x",
  vjust = 0,
  segment.size = 0.1
)
p <- p + scale_color_continuous('', type = "viridis")
p <- p + ylab("Average percentage of NWS text elapsed before first observation of phoneme")
p <- p + theme_classic()
p <- p + theme(
  axis.line.y = element_blank(),
  axis.ticks.y = element_blank(),
  axis.text.y = element_blank(),
  axis.title.y = element_blank()
)
ggsave("rare-2.png", p)
|
e4e88f07944805b7100d1ae68d3c512a933e09de
|
d2d215fd569ce28476a290f29d08d553c0996cdc
|
/rcodes/adhoc_analysis.R
|
bbaf779c1e097e9dca58eb4219769b083fefcf55
|
[] |
no_license
|
vikasFid/lda_pipeline
|
a079dde1101b74f0d02a7315e9ce584b6a602838
|
fc8c9f6511a19b3a7cb3507e4510160acb867577
|
refs/heads/master
| 2021-07-03T15:15:36.485624
| 2020-08-25T09:07:12
| 2020-08-25T09:07:12
| 146,553,537
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,356
|
r
|
adhoc_analysis.R
|
# Ad-hoc LDA topic-modeling pipeline, part 1: load the chunked text, clean
# it, optionally strip trivial-topic terms, and build the term-frequency
# document-term matrix used by the LDA model.
#to take command line arguments
args<-commandArgs(TRUE)
#free up memory from previous runs
gc()
library(tm)
library(dplyr)
library(topicmodels)
library(LDAvis)
library(tsne)
library(servr)
library(Matrix)
library(tidytext)
library(igraph)
library(sqldf)
#inputFile = args[1]
# Column 2 holds the merged phrase text for each document chunk.
txt_input = read.csv('adhoc_chunked_output.csv',
                     header = TRUE,sep = ',',colClasses = "character")
txt_data = as.character(txt_input[,2])
#writeLines(txt_data[4])
#txt_data = gsub("_"," ",txt_data)
# Replace '|' separators with spaces and collapse repeated whitespace.
txt_data = gsub("\\|"," ",txt_data)
txt_data = stripWhitespace(txt_data)
writeLines(txt_data[4])
textCorpus = VCorpus(VectorSource(txt_data))
#tokenizer = function(x)unlist(lapply(ngrams(strsplit(as.character(x),split="|",fixed=T), 1),paste, collapse = " "), use.names = FALSE)
# NOTE(review): commandArgs returns character; this comparison relies on
# "1" == 1-style coercion — confirm the expected argument format.
removeTrivialTopics = args[3]
if(removeTrivialTopics == 1){
  trivialWords = read.csv('trivialTopicTerms.txt',
                          header = TRUE,sep = ',',colClasses = "character")
  trivialWords = as.character(trivialWords$x)
  textCorpus <- tm_map(textCorpus, removeWords, trivialWords)
}
# Unigram tokenizer for the document-term matrix.
tokenizer = function(x)unlist(lapply(ngrams(words(x), 1),paste, collapse = " "), use.names = FALSE)
#dtm with tf, required for the LDA model
# Terms must appear in at least 2 documents (global bounds).
ctrl = list(tokenize=tokenizer,bounds = list(global=c(2,Inf)))
dtm = DocumentTermMatrix(textCorpus, control = ctrl)
dtm
# Part 2 (optional, args[2] == 1): zero out low-information terms in the tf
# document-term matrix using a tf-idf cutoff derived from a common word.
tf_idf_filter = args[2]
if(tf_idf_filter == 1){
  n_row = dtm$nrow
  n_col = dtm$ncol
  #dtm with normalized tf_idf, required for more accurate model
  ctrl_tfidf = list(tokenize=tokenizer,bounds = list(global=c(2,Inf)),
                    weighting = function(x) weightTfIdf(x, normalize = TRUE))
  dtm_tfidf = DocumentTermMatrix(textCorpus, control = ctrl_tfidf)
  #creating sparse matrix
  dtm = sparseMatrix(i=dtm$i, j=dtm$j, x=dtm$v,dimnames = dtm$dimnames,
                     dims=c(dtm$nrow, dtm$ncol))
  dtm_tfidf = sparseMatrix(i=dtm_tfidf$i, j=dtm_tfidf$j, x=dtm_tfidf$v,
                           dimnames = dtm_tfidf$dimnames,
                           dims=c(dtm_tfidf$nrow, dtm_tfidf$ncol))
  #get index and values of the sparse matrix
  sum_dtm_tfidf = summary(dtm_tfidf)
  #determine this cutoff using boxplot or the tf-idf score for a common word like account
  #cutoff = as.numeric(summary(sum_dtm_tfidf$x)[3])
  #other method to get cutoff is to get the mean score for a common word
  temp = as.matrix(dtm_tfidf[,"account"])
  #temp = temp[temp != 0,]
  #summary(temp)
  #need words to be at least as important as account, need to make it more precise as per zipf's law
  cutoff = max(temp[temp!= 0,])
  #below code sets dtm tf to 0 for a word that correspondingly in the dtm_tfidf
  #has a very low tf-idf score
  # Keep (doc, term) entries above the cutoff whose term is not in the
  # trivial-word list (when one was loaded). The original line used the
  # scalar `&&` on vectors, a misspelled variable name and the non-existent
  # `%not in%` operator, so it could never run. NOTE(review): confirm the
  # intent was to test the entry's *term* against trivialWords.
  trivial = if (exists("trivialWords")) trivialWords else character(0)
  keep_terms = !(colnames(dtm_tfidf)[sum_dtm_tfidf$j] %in% trivial)
  keep_index = sum_dtm_tfidf[sum_dtm_tfidf$x > cutoff & keep_terms, 1:2]
  #making replace matrix have same dimension as the dtm
  new_row = c(n_row,n_col)
  keep_index = rbind(keep_index,new_row)
  #adjacency matrix
  adjMat = get.adjacency(graph.edgelist(as.matrix(keep_index), directed=TRUE))
  #resize adjMat to the size of the dtm, no need to flip bits as it is very inefficient in sparseMatrix
  adjMat = adjMat[1:n_row,1:n_col]
  #element-wise product: zeroes out tf entries whose tf-idf fell below the cutoff
  dtm = dtm * adjMat
  #dtm is sparse matrix, converting it back to dtm here
  #dtm = tidy(dtm)
  #dtm = dtm %>%cast_dtm(row, column, value)
  #dtm
  dtm = as.DocumentTermMatrix(dtm,weighting = function(x) weightTf(x))
}
# Part 3: fit the LDA model, build the LDAVis visualisation, and write the
# document-topic assignments and topic proportions to adhocOutput/.
#removing any doc with no terms
rowTotals = slam::row_sums(dtm, na.rm = T)
colTotals = slam::col_sums(dtm, na.rm = T)
dtm = dtm[rowTotals != 0,]
#because we have removed rows that have zero rowTotals, calculating dimension totals again
rowTotals = slam::row_sums(dtm, na.rm = T)
colTotals = slam::col_sums(dtm, na.rm = T)
dtm
#Topic Modeling: LDA
#Gibbs sampling works better with limited RAM
print('Building Model Now!')
# NOTE(review): commandArgs values are character; confirm LDA() accepts k as
# a string here or coerce with as.integer(args[1]).
nTopic = args[1]
modelTrain = LDA(dtm,k = nTopic,method="Gibbs",
                 control = list(alpha = 0.1),
                 iter = 2000,
                 beta = 0.001) # "Gibbs" or "VEM"
#save the model in case you want to use it later to find more topic calls
save(modelTrain,file = "adhocOutput/ldaModel.RData")
#LDA Visualization
# Assemble the inputs LDAvis::createJSON expects.
myAPdata = list()
myAPdata$vocab = modelTrain@terms
myAPdata$term.frequency = as.integer(colTotals)
myAPdata$phi = exp(modelTrain@beta)
myAPdata$theta = modelTrain@gamma
myAPdata$doc.length = as.integer(rowTotals)
json = createJSON(phi=myAPdata$phi,
                  theta=myAPdata$theta,
                  vocab=myAPdata$vocab,
                  doc.length = myAPdata$doc.length,
                  term.frequency = myAPdata$term.frequency,
                  mds.method = function(x) tsne(svd(x)$u),
                  plot.opts = list(xlab="", ylab="")
)
#save(json, file = "C:/Users/a592407/Desktop/LDA_RDATA/LDAVis_361693.RData")
#load(file = "C:/Users/a592407/Desktop/LDA_RDATA/LDAVis_REDEEM_ALL.RData")
#serVis(json,out.dir="C:/Users/A592407/Desktop/LDA_VIS", open.browser = T)
print("Saving model to RData")
save(json, file = "adhocOutput/LDAVis_JSON.RData")
save(myAPdata, file = "adhocOutput/myAPdata.RData")
#load(file = "C:/Users/a592407/Desktop/LDA_RDATA/LDAVis_REDEEM_ALL.RData")
serVis(json,out.dir="adhoc_viz", open.browser = F)
#to make topic order same as LDAVis topic order
modelTrain@gamma = myAPdata$theta
#probabilities associated with each topic assignment
topicProbabilities = as.data.frame(modelTrain@gamma)
write.csv(topicProbabilities,file="adhocOutput/TOPIC_PROB.csv",
          row.names = F)
#alternative logic:
# Assign each document the topic with the highest probability.
# NOTE(review): ties in max(x) would yield multiple names per document —
# confirm ties cannot occur or handle them explicitly.
docTopic = as.data.frame(cbind(document = row.names(topicProbabilities),
                               topic = apply(topicProbabilities,1,function(x) names(topicProbabilities)[which(x==max(x))])))
docTopic = data.frame(lapply(docTopic,as.character),stringsAsFactors = T)
#document topic assignment(add logic for threshold!), use above alternate logic!
#docTopic = as.data.frame(topics(modelTrain))
#write.csv(table(docTopic),file = "C:/Users/a592407/Documents/LDA_FILES/REDEEM_DOC_PER_TOPIC_2.csv",
# row.names = F)
# Join topic assignments back onto the original input rows.
doc_index = rownames(docTopic)
doc_df = data.frame(doc_id = txt_input[doc_index,1],
                    mergedPhrase = txt_input[doc_index,2],
                    chunkedPhrase = txt_input[doc_index,3],
                    topic = docTopic[,2])
write.csv(doc_df,file = "adhocOutput/DOC_TOPIC.csv",row.names = F)
#topic proportions as per docTopic assignment:
#library(sqldf)
groupedData = sqldf('SELECT topic, count(*) AS numDocs FROM doc_df GROUP BY topic')
groupedData$topicProportion = (groupedData$numDocs/sum(groupedData$numDocs))
write.csv(groupedData[order(-groupedData$topicProportion),],
          'adhocOutput/DOC_TOPIC_PROP.csv',quote = F,
          row.names = F)
#write topic call_date to see if there is spike in doc-topic assignment
#output = c()
#for (i in as.character(unique(unlist(doc_df$topic)))){
#doc_df_topic = doc_df[doc_df$topic == i,]
#print(nrow(doc_df_topic))
#group_topic_date = sqldf('SELECT call_date, count(*) AS numDocs FROM doc_df_topic GROUP BY call_date')
#group_topic_date$topic = i
#group_topic_date = group_topic_date[order(as.Date(group_topic_date$call_date))]
#output = rbind(output,group_topic_date)
#}
#write it to a file
#write.table(output,'topic_date_prop.tsv',quote = F,sep = '\t',row.names = F)
#clean up
print('Cleaning objects from RAM and exiting')
rm(list=ls())
|
e0d4013c8f2effa2d6b5fbdcfcb349cfdf053944
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ROCR/examples/ROCR.hiv.Rd.R
|
13f5c789fbef70a05888a486d5e710356c38b0dc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
ROCR.hiv.Rd.R
|
# ROCR example: ROC curves for SVM and neural-network predictions of
# HIV-1 coreceptor usage, plus vertically-averaged curves with error bars.
library(ROCR)
data(ROCR.hiv)
# Access the list components directly rather than via attach(), which
# pollutes the search path and can silently mask other objects.
# NOTE(review): nn predictions are paired with hiv.svm$labels, exactly as in
# the original example — verify labels are shared across both models.
pred.svm <- prediction(ROCR.hiv$hiv.svm$predictions, ROCR.hiv$hiv.svm$labels)
perf.svm <- performance(pred.svm, 'tpr', 'fpr')
pred.nn <- prediction(ROCR.hiv$hiv.nn$predictions, ROCR.hiv$hiv.svm$labels)
perf.nn <- performance(pred.nn, 'tpr', 'fpr')
# Individual cross-validation runs drawn as thin dotted lines.
plot(perf.svm, lty=3, col="red",main="SVMs and NNs for prediction of
HIV-1 coreceptor usage")
plot(perf.nn, lty=3, col="blue",add=TRUE)
# Vertically-averaged curves with standard-error bars overlaid on top.
plot(perf.svm, avg="vertical", lwd=3, col="red",
     spread.estimate="stderror",plotCI.lwd=2,add=TRUE)
plot(perf.nn, avg="vertical", lwd=3, col="blue",
     spread.estimate="stderror",plotCI.lwd=2,add=TRUE)
legend(0.6,0.6,c('SVM','NN'),col=c('red','blue'),lwd=3)
|
efa013e16845a88132c384f98093b1034a281ee9
|
8509f3c69283492cb6f3866aafd4a508cd037441
|
/grade_analysis.R
|
c88f994833bc92d4c06a315eff256118d1daacfa
|
[] |
no_license
|
ollikiili/Text-mining-MSc-Thesis-project
|
7cf1dc7e8a1f45bf3ed8d5499a81c6c61497d5d6
|
25dd2b1f168e61ec220e6553396a2316db222b8e
|
refs/heads/master
| 2020-05-25T02:46:25.182930
| 2019-05-27T08:42:58
| 2019-05-27T08:42:58
| 187,586,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
grade_analysis.R
|
# Grade analysis: compare the mean review score of reviews mentioning
# selected unigrams/bigrams against the overall mean review score.
# NOTE(review): setwd() to an absolute path is fragile — kept for behavior.
setwd("Z:\\Documents")
options(stringsAsFactors=F)
Sys.setlocale('LC_ALL','C')
#apparently this file is not used so commented the load out
#load("TI_data_by_reviews_preprocessed")
load("tokenized_words")
load("tokenized_bigrams")
# Overall mean review score used as the comparison baseline (was a magic
# number repeated four times).
overall_mean_score <- 8.09
unigrams <- c("room", "staff", "check", "location", "casino", "pool", "bed", "food", "view",
              "restaurant", "show", "buffet")
bigrams <- c("resort fee", "front desk", "pool area",
             "customer service", "coffee maker", "room service", "coffee shop")
# One row per (review, word): drop sentence granularity and de-duplicate.
unigram_data <- filter(tokenized_words, word %in% unigrams)
unigram_data <- unigram_data %>% select(-sentence)
unigram_data <- unigram_data %>% distinct()
unigram_grade_means <- tibble(unigrams = unigrams, mean = 0, difference = 0)
# seq_along() is safe for empty vectors, unlike 1:length().
for (i in seq_along(unigrams)) {
  # Renamed locals: `data`/`mean` previously shadowed base functions.
  word_rows <- filter(unigram_data, word == unigrams[i])
  word_mean <- mean(word_rows$score)
  unigram_grade_means$mean[i] = word_mean
  unigram_grade_means$difference[i] = word_mean - overall_mean_score
}
unigram_grade_means
bigram_data <- filter(tokenized_bigrams, bigram %in% bigrams)
bigram_data <- bigram_data %>% select(-sentence)
bigram_data <- bigram_data %>% distinct()
bigram_grade_means <- tibble(bigrams = bigrams, mean = 0, difference = 0)
for (i in seq_along(bigrams)) {
  bigram_rows <- filter(bigram_data, bigram == bigrams[i])
  bigram_mean <- mean(bigram_rows$score)
  bigram_grade_means$mean[i] = bigram_mean
  bigram_grade_means$difference[i] = bigram_mean - overall_mean_score
}
bigram_grade_means
# Combine unigram and bigram results, sorted by mean score descending.
feature_grade_means <- tibble(feature = c(unigrams, bigrams),
                              mean = c(unigram_grade_means$mean, bigram_grade_means$mean),
                              difference = c(unigram_grade_means$difference, bigram_grade_means$difference))
feature_grade_means <- arrange(feature_grade_means, desc(mean))
#write.xlsx(feature_grade_means, "mean_grades_excelV2.xlsx")
# Horizontal bar chart of each feature's deviation from the overall mean.
grade_barchart <- ggplot(feature_grade_means, aes(x = reorder(feature, difference), y = difference,
                                                  label = round(difference, digits = 2))) +
  geom_bar(stat = "identity") +
  coord_flip() + labs(x = "feature", title = "Difference to mean of all reviews") +
  theme_economist() +
  geom_text(size = 3, position = position_stack(vjust = 0.5))
|
fed109e08f738c6398de179086cbc33c2eb503e5
|
c7d9640c5b23575db444ff2539187fbbb65e163f
|
/app/server.R
|
4054286de9abdc5229a8799e26f4483fd65806ad
|
[] |
no_license
|
datasketch/transparencia-2019
|
164e051fc77dd437dd8b11e84178ef1adda65243
|
86d4d876fa486928dcc8d0f0bf2769e35d5359e8
|
refs/heads/master
| 2020-05-18T19:49:29.301455
| 2019-07-03T20:52:30
| 2019-07-03T20:52:30
| 184,615,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,447
|
r
|
server.R
|
shinyServer(function(input, output, session) {
# --- Dataset selection and base reactives --------------------------------
# Checkbox picking which dataset(s) drive every downstream reactive.
output$base_opts <- renderUI({
  data_dis <- setNames(c("Territorios de concentración/consolidación", "informe II 2016-2018"), c("Territorios de paz", "Hechos 2016-2018"))
  checkboxGroupInput("base_cliente", HTML("<div style='font-weight: 700;'>BUSCAR POR</div>"), choices = data_dis, inline = T, selected = "informe II 2016-2018")
})
# Cases table filtered by the dataset(s) selected above.
casos_data <- reactive({
  tema <- input$base_cliente
  d <- casos %>% dplyr::filter(caso_emblematico %in% tema)
  d
})
# Actors table filtered the same way; keeps one row per actor id.
actores_data <- reactive({
  tema <- input$base_cliente
  d <- actores %>% dplyr::filter(caso_emblematico %in% tema)
  d <- d %>% distinct(id_actor, .keep_all = T)
  d
})
# Toggle between absolute counts and percentages.
output$operacion <- renderUI({
  checkboxInput("tipo_agg", "Valor Total/Porcentaje")
})
# Chart-type picker used by both basic and advanced options.
output$vizOptions <- renderUI({
  charts <- c("map", "barrash", "barras" ,"bubbles", "pie", "lines")
  buttonImage(id = "last_chart", charts, charts, file = "icons/", format = "svg", classImg = "imgStyle")
})
# --- Basic options panel and advanced variable choices -------------------
output$basicos <- renderUI({
  # One action button per predefined question in the `basicos` table.
  l <- purrr::map(1:nrow(basicos), function(z){
    actionButton(inputId = basicos[z,]$id, label = basicos[z,]$preguntas, class = "needed")
  })
  # Mark the first question as the active default via a raw class swap.
  l[[1]] <- gsub("needed", "needed basic_active", l[[1]])
  l[[1]] <- HTML(paste0(paste(l[[1]], collapse = '')))
  div(class = "opciones_basicas",
      tags$button(id = "Basico", class = "click_option", HTML("<div id='opts' class = 'active_opt'>></div> OPCIONES BÁSICAS")),
      div(id = "cont_basicas",
          HTML("<div style='font-size:15px;margin-top: 9px;'>Visualizar</div>"),
          div(class = "preguntas_basicas",
              l
          )))
})
# Variables available for the selected base (advanced options).
#variables_avanzadas <- reactiveValues(var = NULL)
outVar <- reactive({
  data_sel <- input$base_sel
  if (is.null(data_sel)) return()
  chart <- input$last_chart
  df <- avanzados %>%
    dplyr::filter(base == data_sel)
  # Line charts exclude the year variables.
  if(chart != "lines") {
    df <- df
  } else {
    df <- df %>%
      dplyr::filter(variables_id != c('ano_final_hecho', 'ano_hecho'))
  }
  setNames(df$variables_id, df$variables_label)
})
observe({
  updateSelectizeInput(session, "var_principal",
                       choices = outVar())
})
# Secondary (cross) variable choices depend on chart type and main variable.
outCross <- reactive({
  viz_sel <- input$last_chart
  if (is.null(viz_sel)) return()
  var_cho <- input$var_principal
  if (is.null(var_cho)) return()
  # Fallback cross variable when no explicit pairing exists.
  sometimes <- data.frame(org = c('departamento'),
                          var_label = c('Departamento'))
  if (viz_sel == "barras" | viz_sel == "barrash" | viz_sel == "bubbles"){
    df <- cruces %>% dplyr::filter(var_sel == var_cho)
    if (nrow(df) == 0) {
      l <- sometimes
    } else {
      l <- df %>% drop_na()
    }
  } else if (viz_sel == "map") {
    l <- sometimes
  } else if (viz_sel == "lines") {
    l <- cruces %>% dplyr::filter(plot == "lineas")
  } else {
    l <- NULL
  }
  setNames(as.character(l$org), as.character(l$var_label))
})
observe({
  updateSelectizeInput(session, "var_cruce",
                       choices = outCross())
  updateSelectizeInput(session, "var_principal",
                       selected = input$var_principal)
})
# --- Advanced options panel and network filter controls ------------------
output$avanzadas <- renderUI({
  div(class = "opciones_avanzadas",
      tags$button(id = "Avanzada", class = "click_option", HTML("<div id='opts_a'>></div> OPCIONES AVANZADAS")),
      div(id = "cont_avanzada", class = "hideOptions",
          radioButtons(inputId = "base_sel", label = "Información de", c('Hechos', 'Actores'), inline = T),
          selectizeInput(inputId = 'var_principal', label = "Visualizar", choices = NULL, options = list(
            #placeholder = 'Selecciona una variable para visualizar',
            #onInitialize = I('function() { this.setValue(""); }'),
            plugins= list('remove_button')
          )),
          selectizeInput(inputId = 'var_cruce', label = "organizado por", choices = NULL,options = list(
            placeholder = 'Selecciona una variable para cruzar',
            onInitialize = I('function() { this.setValue(""); }'),
            plugins= list('remove_button')
          ))
      )
  )
})
# Variable selector for filtering the corruption network.
output$red_var <- renderUI({
  varRed <- setNames(red$id, red$label)
  selectizeInput("var_red", "Selecciona variable para filtros", varRed,
                 options = list(
                   placeholder = 'Sin filtros',
                   onInitialize = I('function() { this.setValue(""); }'),
                   plugins= list('remove_button')))
})
# Category selector populated from the chosen filter variable's values.
output$red_cat <- renderUI({
  variable_red <- input$var_red
  if (variable_red != "") {
    dt <- actores_data()[[variable_red]]
    categorias_red <- as.character(unique(dt))
  } else {
    categorias_red <- NULL
  }
  selectizeInput("cat_red", "Selecciona categoria(s) para filtrar", choices = categorias_red,
                 #multiple = TRUE,
                 options = list(
                   placeholder = 'Todas',
                   onInitialize = I('function() { this.setValue(""); }'),
                   plugins= list('remove_button'))
  )
})
# Collapsible container for the two network filter controls above.
output$red <- renderUI({
  div(class = "opciones_red",
      tags$button(id = "Red", class = "click_option", HTML("<div id='opts_r'>></div> LA RED DE LA CORRUPCIÓN")),
      div(id = "cont_red", class = "hideOptions",
          uiOutput("red_var"),
          uiOutput("red_cat")
      ))
})
# --- Corruption network: data, edges, nodes and visNetwork output --------
data_red <- reactive({
  var_red_sel <- input$var_red
  var_cat_sel <- input$cat_red
  # No category chosen -> use all actors; otherwise filter by the category.
  if (var_cat_sel == "" | is.null(var_cat_sel)) {
    dt <- actores_data()
  } else {
    dt <- actores_data()[actores_data()[, var_red_sel] == c(var_cat_sel), ]
  }
  casosAll <- dt
  # Keep only actors with a disciplinary, penal or fiscal sanction.
  situacion <- c("Sancionado disciplinariamente","Condenado penalmente", "Sanción Fiscal")
  casosAll <- map_df(situacion, function(x) casosAll[grepl(x, casosAll$situacion_judicial),] )
  func_paste <- function(x) paste(unique(x), collapse = ', ')
  casosRed <- casosAll %>%
    group_by(id_caso) %>%
    dplyr::summarise_each(funs(func_paste))
  net <- casosAll[c("id_caso","nombre_publico","id_actor","nombre_actor")]
  # Prefix ids ("c"/"a") so case and actor node ids never collide.
  net$cid <- paste0("c",net$id_caso)
  net$aid <- paste0("a",net$id_actor)
  net
})
# Edge list: one edge per case-actor pair.
edges <- reactive({
  net <- data_red()
  edges <- net[c("cid","aid")]
  names(edges) <- c("from","to")
  edges
})
# Node list: case nodes (red, larger) and actor nodes (orange).
nodes <- reactive({
  net <- data_red()
  nodesCaso <- net[c("cid","nombre_publico")]
  names(nodesCaso) <- c("id","label")
  nodesCaso$type <- "Caso"
  nodesActor <- net[c("aid","nombre_actor")]
  names(nodesActor) <- c("id","label")
  nodesActor$type <- "Actor"
  nodes <- bind_rows(
    nodesActor,
    nodesCaso
  )
  nodes$color <- "#fa8223"
  nodes$color[nodes$type == "Caso"] <- "#f03a47"
  nodes$size <- 30
  nodes$size[nodes$type == "Caso"] <- 50
  nodes <- nodes %>% distinct(id,.keep_all = TRUE)
  nodes
})
# Render the network; node clicks are forwarded to Shiny as input$clickRed.
output$netViz <- renderVisNetwork({
  viz <- visNetwork(edges = edges(), nodes = nodes(), width = "100%") %>%
    visInteraction(navigationButtons = TRUE) %>%
    visEvents(
      click = "function(nodes) {
      console.info('click')
      console.info(nodes)
      Shiny.onInputChange('clickRed', {nodes : nodes.nodes[0]});
      ;}"
    ) %>%
    visPhysics(stabilization= FALSE)
  viz
})
# --- Data behind the main chart, for both basic and advanced modes -------
data_viz <- reactive({
  l_o <- input$last_option
  if (is.null(l_o)) l_o <- "Basico"
  if (l_o == "Basico") {
    # Basic mode: select the variable tied to the chosen question.
    q_sel <- input$last_click
    if (is.null(q_sel)) q_sel <- 'q1'
    dt_bs <- basicos %>% dplyr::filter(id == q_sel)
    var_sel <- dt_bs$variable
    var_sel <- c("id_caso", var_sel)
    if (dt_bs$base == 'notas') {
      dt <- notas
    } else if (dt_bs$base == 'casos') {
      dt <- casos_data()[var_sel]
      # 'delito' holds comma-separated values: expand to one row per offence.
      if (dt_bs$variable == 'delito') {
        dt <- separate_rows(dt, delito, convert = TRUE, sep = ",")
        dt$delito <- trimws(dt$delito)
      }
    } else {
      dt <- actores_data() %>% dplyr::filter(tipo_participacion == 'Actor involucrado')
      dt <- dt[var_sel]
    }
  } else if (l_o == "Avanzada"){
    # Advanced mode: user-chosen base, main variable and optional cross.
    base <- input$base_sel
    if (is.null(base)) return()
    var_prim <- input$var_principal
    if (is.null(var_prim)) return()
    var_cruce <- input$var_cruce
    if (is.null(var_cruce)) return()
    click_chart <- input$last_chart
    # Pie and map charts ignore the cross variable.
    if (click_chart == "pie" | click_chart == "map") var_cruce <- ""
    if (base == "Hechos") {
      if (var_cruce == ""){
        dt <- casos_data() %>% dplyr::select_('id_caso', var_prim)
      } else {
        dt <- casos_data() %>% dplyr::select_('id_caso', var_prim, var_cruce)
      }
    } else {
      if (var_cruce == ""){
        dt <- actores_data() %>% dplyr::select_('id_caso', var_prim)
      } else {
        dt <- actores_data() %>% dplyr::select_('id_caso', var_prim, var_cruce)
      }
    }
    if (var_prim == 'delito') {
      dt <- separate_rows(dt, delito, convert = TRUE, sep = ",")
      dt$delito <- trimws(dt$delito)
    }
  } else {
    # Network mode: hand back the full actors table.
    dt <- actores_data()
  }
  dt
})
# --- Chart title, built from the mode, dataset(s) and variables ----------
data_titles <- reactive({
  l_o <- input$last_option
  if (is.null(l_o)) l_o <- "Basico"
  base_sel <- input$base_cliente
  if (l_o == "Basico") {
    # Basic mode: pick the pre-written title variant for the question.
    q_sel <- input$last_click
    if (is.null(q_sel)) q_sel <- 'q1'
    if (length(base_sel) == 1) {
      if (base_sel == "Territorios de concentración/consolidación") {
        title <- basicos$titulos_dos[basicos$id == q_sel]
      } else {
        title <- basicos$titulos_uno[basicos$id == q_sel]
      }} else {
        title <- basicos$titulos_tres[basicos$id == q_sel]
      }
  } else if (l_o == "Avanzada") {
    # Advanced mode: compose the title from the variable label(s).
    var_prim <- input$var_principal
    var_cruce <- input$var_cruce
    if (!is.null(var_prim) & (is.null(var_cruce) | var_cruce == "")) {
      var_avz <- dic_casos$label[dic_casos$id == var_prim]
    } else {
      var_avz <- paste0(dic_casos$label[dic_casos$id == var_prim], ' y ', dic_casos$label[dic_casos$id == var_cruce])
    }
    base_avz <- input$base_sel
    if (base_avz == 'Hechos') {
      if (length(base_sel) == 1) {
        if (base_sel == "Territorios de concentración/consolidación") {
          title <- paste0('Hechos de corrupción investigados y reportados por la prensa en territorios de paz según ', var_avz, ' (2016-2018).')
        } else {
          title <- paste0('Hechos de corrupción investigados y reportados por la prensa en Colombia según ', var_avz, ' (2010-2016).')
        }} else {
          title <- paste0('Hechos de corrupción investigados y reportados por la prensa en Colombia según ', var_avz, ' (2010-2018).')
        }
    } else {
      if (length(base_sel) == 1) {
        if (base_sel == "Territorios de concentración/consolidación") {
          title <- paste0('Actores involucrados en hechos de corrupción investigados y reportados por la prensa en territorios de paz según ', var_avz, ' (2016-2018).')
        } else {
          title <- paste0('Actores involucrados en hechos de corrupción investigados y reportados por la prensa en Colombia según ', var_avz, ' (2010-2016).')
        }} else {
          title <- paste0('Actores involucrados en hechos de corrupción investigados y reportados por la prensa en Colombia según ', var_avz, ' (2010-2018).')
        }
    }
  } else {
    # Network mode has no chart title.
    title <- ' '
  }
  title
})
# Chart caption (data-source line); the year range depends on which
# dataset(s) are currently selected.
capt <- reactive({
  base_sel <- input$base_cliente
  # Bug fix: the original tested `length(base_sel == 1)`, i.e. the length of
  # a comparison vector, which is non-zero whenever anything is selected.
  # The intent — as in the sibling data_titles reactive — is a
  # single-selection check, so compare the length itself to 1.
  if (length(base_sel) == 1) {
    if (base_sel == "Territorios de concentración/consolidación") {
      cp <- "<span style='font-size: 11px;margin-top: 2px; color:#000000'>Fuente: Monitor Ciudadano de la Corrupción 2017</span>"
    } else {
      cp <- "<span style='font-size: 11px;margin-top: 2px; color:#000000'>Fuente: Monitor Ciudadano de la Corrupción 2019</span>"
    }
  } else {
    # Both datasets selected: combined year range.
    cp <- "<span style='font-size: 11px;margin-top: 2px; color:#000000'>Fuente: Monitor Ciudadano de la Corrupción 2017-2019</span>"
  }
  cp
})
# --- Noun used in axis labels: "hechos" or "actores", per the mode -------
nom_base <- reactive({
  l_o <- input$last_option
  if (is.null(l_o)) l_o <- "Basico"
  if (l_o == "Basico") {
    q_sel <- input$last_click
    if (is.null(q_sel)) q_sel <- 'q1'
    # The basicos table says "casos"; the UI wording is "hechos".
    tx <- gsub("casos", "hechos", basicos$base[basicos$id == q_sel])
  } else if (l_o == "Avanzada") {
    base_avz <- input$base_sel
    if (base_avz == 'Hechos') {
      tx <- "hechos"
    } else {
      tx <- "actores"
    }
  } else {
    return()
  }
  tx
})
# --- Main highcharter chart for basic and advanced modes -----------------
output$viz_hgch <- renderHighchart({
  click_chart <- input$last_chart
  if (is.null(click_chart)) return()
  click_chart <- gsub('lines', 'line', click_chart)
  type_agg <- input$tipo_agg
  add_p <- "Número"
  if (type_agg) add_p <- "Porcentaje"
  # Axis labels: horizontal bars ("barrash") swap the axes.
  orientacion <- 'ver'
  horLabel <- NULL
  verLabel <- paste0(add_p, " de ", nom_base())
  if (click_chart == 'barrash'){
    orientacion <- 'hor'
    horLabel <- paste0(add_p, " de ", nom_base())
    verLabel <- NULL
  }
  click_chart <- gsub('barras|barrash', 'bar', click_chart)
  df <- data_viz() %>% select(-id_caso)
  # "No Aplica" categories count as missing for actor-type variables.
  if (grepl("actor_individual",names(df))) {
    df$tipo_actor_individual[df$tipo_actor_individual == "No Aplica"] <- NA
    df <- df %>% drop_na()
  }
  if (grepl("actor_colectivo",names(df))) {
    df$tipo_actor_colectivo[df$tipo_actor_colectivo == "No Aplica"] <- NA
    df <- df %>% drop_na()
  }
  # Pick the hgchmagic plotting function and shape the data accordingly.
  if(click_chart == 'map' & length(names(df)) == 1 & grepl("departamento", names(df))) {
    viz <- 'hgch_map_choropleth_GnmNum'
    df <- df %>% dplyr::group_by_all() %>% dplyr::summarise(Total = n())
  } else if (click_chart == 'map') {
    # Bubble map: join case coordinates back onto the data.
    viz <- 'hgch_map_bubbles_CatGltGlnNum'
    dt_cor <- casos_data() %>% select(id_caso, latitud, longitud)
    #dt_cor$latitud <- dt_cor$latitud + runif(nrow(dt_cor), -1, 1)
    df <- left_join(data_viz(), dt_cor, by = "id_caso")
    df <- df[,c(-1)]
    df <- df %>% group_by_all() %>% dplyr::summarise(Total = n())
  } else {
    # Function name encodes the column types: Cat or CatCat.
    typeDt <- 'Cat'
    if (ncol(df) != 1) typeDt <- 'CatCat'
    viz <- paste0('hgch_', click_chart, '_', typeDt)
    df <- df
  }
  line_width <- 2
  if (click_chart == 'map' | click_chart == 'bubbles') {
    line_width <- 0
  }
  # Single color by default; discrete palette for pie and map charts.
  colors <- "#fdb731"
  colSc <- "no"
  if (click_chart == 'pie' | click_chart == 'map') {
    colSc <- "discrete"
    colors <- c("#fdb731","#0a446b", "#137fc0", "#c250c2", "#fa8223", "#64c6f2", "#f49bf9", "#fc7e5b", "#ffe566", "#64f4c8", "#137fc0", "#c250c2", "#f03a47", "#fdb731", "#36c16f", "#022f40", "#691f6b", "#931c4d", "#fa8223", "#2b6d46")
  }
  # Attach human-readable labels from the dictionary.
  dic <- data.frame(id = as.character(names(df)))
  dic <- dplyr::left_join(dic, dic_casos)
  show_text <- TRUE
  if (click_chart != "map") {
    df <- datafringe::fringe(df, dic)
  } else {
    df <- df
    show_text = FALSE
  }
  # JS click handlers forward the clicked category to input$hcClicked.
  myFunc <- JS("function(event) {Shiny.onInputChange('hcClicked', {id:event.point.name, timestamp: new Date().getTime()});}")
  if (click_chart == 'line') myFunc <- JS("function(event) {Shiny.onInputChange('hcClicked', {id:event.point.category.name, timestamp: new Date().getTime()});}")
  if (sum(grep('Cat', getCtypes(df))) >= 3 & click_chart != 'line') {
    colSc <- "discrete"
    colors <- c("#fdb731","#0a446b", "#137fc0", "#c250c2", "#fa8223", "#64c6f2", "#f49bf9", "#fc7e5b", "#ffe566", "#64f4c8", "#137fc0", "#c250c2", "#f03a47", "#fdb731", "#36c16f", "#022f40", "#691f6b", "#931c4d", "#fa8223", "#2b6d46")
    myFunc <- JS("function(event) {Shiny.onInputChange('hcClicked', {id:event.point.category.name, cat:this.name, timestamp: new Date().getTime()});}")
  }
  # Percentages get two decimal digits; counts get none.
  nDg <- 0
  if (type_agg) nDg <- 2
  opts_viz <- list(
    title = HTML(paste0(as.character(data_titles())), collapse = ''),
    subtitle = NULL,
    caption = capt(),
    horLabel = horLabel,
    verLabel = verLabel,
    orientation = orientacion,
    colors = colors,
    color_scale = colSc,
    percentage = type_agg,
    marks = c(".", ","),
    nDigits = nDg,
    dropNa = FALSE,
    sort = "desc",
    clickFunction = myFunc,
    labelWrap = 150,
    bubble_min = '1%',
    bubble_max = '2%',
    showText = show_text,
    allow_point = TRUE,
    cursor = 'pointer',
    color_hover = "#fa8223",
    color_click = "#fa8223",
    labelWrapV = c(30, 30),
    legend_position = "center",
    startAtZero = TRUE,
    spline = FALSE,
    fill_opacity = 0.9,
    agg_text = " ",
    export = TRUE,
    border_color = '#000000',
    theme = tma(custom = list(stylesX_lineWidth = 0,
                              height = 570,
                              showText = show_text,
                              colors = colors,
                              font_family = "Raleway",
                              font_size = '11px',
                              font_color = '#000000',
                              line_width = line_width,
                              stylesTitleY_fontWeight = 'bold',
                              stylesTitleX_fontWeight = 'bold'))
  )
  # Dispatch to the chosen hgchmagic function by name.
  do.call(viz, c(list(data = df, opts = opts_viz)))
})
# --- Rows backing the detail cards for the clicked chart/network element --
data_ficha <- reactive({
  df <- data_viz()
  if (is.null(df) | nrow(df) == 0) return()
  l_o <- input$last_option
  if (is.null(l_o)) l_o <- "Basico"
  if (l_o != "Red") {
    # Chart click: match the clicked category (and cross category, if any).
    var1 <- input$hcClicked$id
    if (is.null(var1)) return()
    var1 <- gsub('<br/>', ' ', var1)
    if (sum(grepl("departamento", names(df))) == 1) {
      # Map names arrive upper-cased/transliterated; restore NARIÑO.
      var1 <- toupper(iconv(input$hcClicked$id, to = "ASCII//TRANSLIT"))
      var1 <- ifelse(var1 == 'NARINO', 'NARIÑO', var1)
    }
    var2 <- input$hcClicked$cat
    if (is.null(var2)) {
      var_sel <- names(df)[2]
      dta <- df[df[var_sel] == var1,]
    } else {
      var2 <- gsub('<br/>', ' ', var2)
      var_sel_uno <- names(df)[2]
      var_sel_dos <- names(df)[3]
      dta <- df[df[var_sel_uno] == var2 & df[var_sel_dos] == var1,]
    }
    dt <- dta %>% drop_na() %>% distinct(id_caso)
    dt <- dt %>% left_join(casos)
  } else {
    # Network click: node ids are prefixed "c" (case) or "a" (actor).
    click_red <- input$clickRed
    if (is.null(click_red)) return()
    click_red_mod <- gsub('[a-z]', '', click_red)
    if(grepl('c', click_red)) {
      dt <- casos_data() %>% filter(id_caso %in% click_red_mod)
    } else {
      dt <- actores_data() %>% filter(id_actor %in% click_red_mod)
    }
  }
  dt
})
# Debug output of the current detail-card rows.
output$bla <- renderPrint({
  data_ficha()
})
# --- Mini detail cards: instructions before any click, cards afterwards ---
output$ficha_peque <- renderUI({
  info <- data_ficha()
  l_o <- input$last_option
  print(l_o)
  if (is.null(l_o)) l_o <- "Basico"
  # Default instructional text, different for chart vs network modes.
  if (l_o != "Red") {
    txt <- HTML(
      '<div class = "indicacion"><img src="click/click.svg" style="width: 50px; display:block;margin-left: 40%;"/>
<b>A.</b> En esta sección podrás acceder a diferentes tipos de visualización por medio de una búsqueda básica con preguntas o avanzada que permite el cruce de una o más variables de tu interés.</br>
<b>B.</b> Puedes descargar las gráficas que desees haciendo click en el botón de descarga </br>
<b>C.</b> Al hacer click en los datos de la visualización de tu interés también podrás acceder al listado de hechos de corrupción asociados a la variable. Puedes acceder a todo el contenido de la ficha del hecho haciendo click en "ver más".
</div>')
  } else {
    txt <- HTML(
      '<div class = "indicacion"><img src="click/click.svg" style="width: 50px; display:block;margin-left: 40%;"/>
<br/>
<b>¿Cómo explorar la red?</b>
<p>En esta red podrás ver el los actores involucrados
en los hechos de corrupción reportados por la prensa.
Al seleccionar los iconos fucsia podrás conocer los hechos de corrupción en el que se vio involucrado el actor.
Al seleccionar los iconos naranja podrás conocer el detalle de los actores involucrados sancionados por los hechos.</p>
<b>¿Qué puedo hacer en esta sección?</b>
<p>Conocer la red de actores involucrados en cada hecho
de corrupción registrado y su historial judicial.</p>
</div>'
    )
  }
  # No data -> show the instructional text instead of cards.
  if (is.null(info)) return(txt)
  if(nrow(info) == 0) txt <- txt
  filas <- nrow(info)
  if (filas == 0) return(txt)
  print('hola')
  print(info)
  if (l_o != 'Red') {
    # Chart mode: one compact card per matching case.
    tx_tit <- input$hcClicked$id
    if (is.null(tx_tit)) tx_tit <- input$hcClicked$cat
    if (is.null(tx_tit)) return()
    HTML(paste0('<span style="font-size:15px;">', tx_tit, '</span>'))
    txt <- div(
      tx_tit,
      purrr::map(1:filas, function(x){
        div(class = "ficha",
            div(class = "cont_info",
                HTML(paste0('<div class = "title_ficha">',info$nombre_publico[x], '</div>')),
                HTML(paste0('<div class = "sub_ficha">',info$subtitulo_publico[x], '</div>'))),
            tags$button(id = info$id_caso[x], class = "click_ficha", "Ver más")
        )
      })) } else {
        # Network mode: richer cards; node id prefix decides case vs actor.
        click_red <- input$clickRed
        print(click_red)
        if (is.null(click_red)) return()
        click_red_mod <- gsub('c|a', '', click_red)
        if(grepl('c', click_red)) {
          txt <- purrr::map(1:filas, function(x){
            div(class = "ficha",
                div(class = "cont_info",
                    HTML(paste0('<div class = "title_ficha">',info$nombre_publico[x], '</div>')),
                    HTML(paste0('<div class = "sub_ficha">',info$subtitulo_publico[x], '</div>')),
                    HTML(paste0('<div class = "info_red"><b>Lugar del hecho:</b> ', info$departamento[x],
                                '</br><b>Año del hecho:</b> ', info$ano_hecho[x],
                                '</br><b>Tipo de corrupción:</b> ', info$tipo_corrupcion[x],
                                '</br><b>Sector afectado:</b> ', info$sector_afectado[x],
                                '</div>'))),
                tags$button(id = info$id_caso[x], class = "click_ficha", "Ver más")
            )
          })} else {
            txt <- purrr::map(1:filas, function(x){
              div(class = "ficha",
                  div(class = "cont_info",
                      HTML(paste0('<div class = "title_ficha">', info$nombre_actor[x], '</div>')),
                      HTML(paste0('<div class = "info_red"><b>Tipo de investigación:</b> ', info$tipo_investigacion[x],
                                  '</br><b>Institución:</b> ', info$institucion[x],
                                  '</br><b>Situación judicial:</b> ', info$situacion_judicial[x],
                                  '</div>'))),
                  tags$button(id = info$id_caso[x], class = "click_ficha", "Ver más")
              )
            })
          }
      }
  # div(
  #tx_tit,
  txt
  #)
})
# --- Full detail card (modal) and its PDF download ------------------------
fichaInfo <- reactive({
  id <- input$last_case
  if(is.null(id)) return()
  getFicha(id)
})
output$ficha <- renderUI({
  #list(
  fichaInfo()#,
  #br(),
  #downloadButton('descarga_ficha', 'Descargar')
  #)
})
# Open the detail modal whenever a "Ver más" button sets input$last_case.
observeEvent(input$last_case, {
  showModal(modalDialog(
    title = '',
    size = 'l',
    easyClose = TRUE,
    footer = modalButton("Cerrar"),
    uiOutput('ficha'),
    br()
  )
  )
})
# Small map for the case shown in the modal.
output$map_d <- renderHighchart({
  id <- input$last_case
  if(is.null(id)) return()
  map_c(id)
})
# Render the case ficha to PDF through an RMarkdown template.
output$descarga_ficha <-
  downloadHandler(
    "results_from_shiny.pdf",
    content =
      function(file)
      {
        id <- input$last_case
        caso_i <- caso(id)
        # Escape embedded double quotes; fall back to placeholders for NAs.
        params <- list(
          id = id,
          title = gsub("\"","'",caso_i$nombre_publico),
          subtitle = gsub("\"","'",caso_i$subtitulo_publico),
          mapc = map_c(id),
          abstract = gsub("\"","'",caso_i$hecho),
          lugar = toupper(caso_i$departamento),
          inicio = ifelse(is.na(caso_i$ano_hecho), 'No disponible', caso_i$ano_hecho),
          actor = ifelse(is.na(caso_i$nombre_actor), 'No disponible', caso_i$nombre_actor),
          tipo = ifelse(is.na(caso_i$tipo_corrupcion), 'No disponible', caso_i$tipo_corrupcion),
          delito = ifelse(is.na(caso_i$derecho_vulnerado), '', caso_i$derecho_vulnerado),
          sector = toupper(caso_i$sector_afectado),
          # # dinero = as.character(ifelse(is.na(caso_i$dinero_juego), 'No disponible', paste0(' $', format(caso_i$dinero_juego, nsmall= 0, big.mark=",")))),
          entidad = ifelse(is.na(caso_i$institucion), '', caso_i$institucion),
          estado = ifelse(is.na(caso_i$situacion_judicial), '', caso_i$situacion_judicial),
          fecha = ifelse(is.na(caso_i$fecha_informacion_actualizada), '', as.character(caso_i$fecha_informacion_actualizada))
        )
        rmarkdown::render("temp_latex/untitle.Rmd",
                          #output_format = pdf_document(template="default.tex"),
                          params = params,
                          output_file = "built_report.pdf")
        # Stream the generated PDF back to the browser.
        readBin(con = "temp_latex/built_report.pdf",
                what = "raw",
                n = file.info("temp_latex/built_report.pdf")[, "size"]) %>%
          writeBin(con = file)
      },
    contentType = 'temp_latex/built_report.pdf'
  )
# --- Layout panels ---------------------------------------------------------
# Visualization container: visNetwork in "Red" mode, highcharter otherwise.
output$viz_res <- renderUI({
  l_o <- input$last_option
  if (is.null(l_o)) l_o <- 'Basico'
  if (l_o != 'Red') {
    v <- highchartOutput("viz_hgch", height = 'auto')
  } else {
    v <- visNetworkOutput("netViz", height = 550)
  }
  v
})
# Panel one: all controls (dataset, basic, advanced, chart type, network).
output$panel_1 <- renderUI({
  div(
    uiOutput('base_opts'),
    uiOutput('basicos'),
    uiOutput('avanzadas'),
    div(id = "type_viz",
        HTML("<div style = 'margin-top:15px; font-size:15px;font-weight: 700;'>Tipo de visualización </div>"),
        uiOutput('operacion'),
        uiOutput('vizOptions')
    ),
    uiOutput('red')
  )
})
# Panel two: the visualization itself, with a loading spinner.
output$panel_2 <- renderUI({
  div(class = "viz_out", style = "height: 100%; margin-top: 11px;",
      HTML("<div class = 'title_panel'>VISUALIZACIÓN</div>"),
      uiOutput('title_viz'),
      withLoader(uiOutput('viz_res'), type = "html", loader = "loader10")
  )
})
# Panel three: the list of detail cards for the clicked element.
output$panel_3 <- renderUI({
  div(class = "tj_out",
      HTML("<div class = 'title_panel'>HECHOS</div>"),
      div(class = "content_ficha_mini",
          #verbatimTextOutput('bla')
          uiOutput("ficha_peque")
      )
  )
})
})
|
02e1566216e8b23de5fbed797e5765e6719a2906
|
95299589f967b19ce37cf98d33455b182e43c5b0
|
/scripts/util/__Util_TaskStateFunctions.R
|
4511e01990b3f8a10c7345f2b69ed7ebfe63ba9e
|
[] |
no_license
|
christokita/mixing-model
|
baec1217eacb98a5ba786211544888e3b9e2403b
|
bc291ee00db5edaf80d6fe09a2ee2e6abb7362d2
|
refs/heads/master
| 2021-07-14T09:56:21.520775
| 2020-06-11T19:16:15
| 2020-06-11T19:16:15
| 154,517,060
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,639
|
r
|
__Util_TaskStateFunctions.R
|
##################################################
#
# Task performance/state functions
#
##################################################
####################
# Set initial probabilities of performance
####################
initiateProbMatrix <- function(n, m) {
  # Build an n x m matrix of uniform task probabilities (each entry 1/m,
  # so every row sums to 1).
  #
  # Args:
  #   n: number of individuals (rows)
  #   m: number of tasks (columns)
  # Returns:
  #   Matrix with rownames "v-1".."v-n" and colnames "Task1".."Taskm".
  p_g <- matrix(1 / m, nrow = n, ncol = m)
  rownames(p_g) <- paste0("v-", seq_len(n))
  colnames(p_g) <- paste0("Task", seq_len(m))
  p_g
}
####################
# Choose task probabilistically
####################
updateTaskPerformance <- function(P_sub_g, TaskMat, QuitProb) {
  # Stochastically update each individual's task state for one time step.
  #
  # Inactive individuals (all-zero row of TaskMat) visit the tasks in a
  # random order and commit to the first task whose Bernoulli draw (with
  # probability P_sub_g[row, task]) succeeds. Active individuals quit all
  # tasks with probability QuitProb.
  #
  # Args:
  #   P_sub_g:  n x m matrix of per-individual task probabilities
  #   TaskMat:  n x m 0/1 matrix of current task states
  #   QuitProb: scalar probability that an active individual quits
  # Returns:
  #   Updated TaskMat with dimnames "v-..." / "Task...".
  tasks <- seq_len(ncol(P_sub_g))  # was seq(1:ncol(...)): same values, safer idiom
  # Loop through individuals
  for (row in seq_len(nrow(TaskMat))) {
    if (sum(TaskMat[row, ]) == 0) {
      # Inactive: random task order, start the first one that "fires"
      tasks_order <- sample(x = tasks, size = length(tasks), replace = FALSE)
      for (task in tasks_order) {
        prob <- P_sub_g[row, task]
        activity <- sample(x = c(0, 1), size = 1, prob = c(1 - prob, prob))
        if (activity == 1) {
          TaskMat[row, task] <- activity
          break
        }
      }
    } else { # active workers quit with probability QuitProb
      quitNow <- sample(x = c("yes", "no"), size = 1, prob = c(QuitProb, (1 - QuitProb)))
      if (quitNow == "yes") {
        TaskMat[row, ] <- 0
      }
    }
  }
  # Relabel rows/columns to match the probability matrix convention
  colnames(TaskMat) <- paste0("Task", seq_len(ncol(P_sub_g)))
  rownames(TaskMat) <- paste0("v-", seq_len(nrow(P_sub_g)))
  return(TaskMat)
}
####################
# Choose task with most demand (conceptual)
####################
updateTaskPerformance_Determ <- function(P_sub_g, TaskMat, QuitProb, TimeStep, StimulusMatrix) {
  # Deterministic-order variant: inactive individuals try tasks in order of
  # descending personal probability; ties (assumes exactly 2 tasks, see the
  # [row, 1] == [row, 2] comparison) are broken by descending stimulus level
  # at the current time step. Activation draws and quitting remain stochastic.
  #
  # Args:
  #   P_sub_g:        n x m matrix of per-individual task probabilities
  #   TaskMat:        n x m 0/1 matrix of current task states
  #   QuitProb:       scalar probability that an active individual quits
  #   TimeStep:       row of StimulusMatrix to read stimulus levels from
  #   StimulusMatrix: matrix whose first two columns are task stimuli
  # Returns:
  #   Updated TaskMat with dimnames "v-..." / "Task...".
  tasks <- seq_len(ncol(P_sub_g))
  # Stimulus levels for the two tasks at this time step
  stim_levels <- StimulusMatrix[TimeStep, 1:2]
  for (row in seq_len(nrow(TaskMat))) {
    if (sum(TaskMat[row, ]) == 0) {
      # Try tasks in descending preference; break ties by stimulus demand
      if (P_sub_g[row, 1] == P_sub_g[row, 2]) {
        tasks_order <- order(stim_levels, decreasing = TRUE)
      } else {
        tasks_order <- order(P_sub_g[row, ], decreasing = TRUE)
      }
      for (task in tasks_order) {
        prob <- P_sub_g[row, task]
        activity <- sample(x = c(0, 1), size = 1, prob = c(1 - prob, prob))
        if (activity == 1) {
          TaskMat[row, task] <- activity
          break
        }
      }
    } else { # active workers quit with probability QuitProb
      quitNow <- sample(x = c("yes", "no"), size = 1, prob = c(QuitProb, (1 - QuitProb)))
      if (quitNow == "yes") {
        TaskMat[row, ] <- 0
      }
    }
  }
  colnames(TaskMat) <- paste0("Task", seq_len(ncol(P_sub_g)))
  rownames(TaskMat) <- paste0("v-", seq_len(nrow(P_sub_g)))
  return(TaskMat)
}
####################
# Choose task with most demand and have fixed work time (conceptual)
####################
updateTaskPerformance_DetermTimed <- function(P_sub_g, TaskMat, QuitProb, TimeStep, StimulusMatrix, TaskMemory, QuitRate) {
  # Deterministic-order task choice (as in updateTaskPerformance_Determ)
  # combined with a fixed work duration: an active individual quits once it
  # has worked for at least QuitRate steps (TaskMemory[row] >= QuitRate).
  # QuitProb is accepted for signature compatibility but not used here.
  #
  # Args:
  #   P_sub_g:        n x m matrix of per-individual task probabilities
  #   TaskMat:        n x m 0/1 matrix of current task states
  #   QuitProb:       unused (kept for interface compatibility)
  #   TimeStep:       row of StimulusMatrix to read stimulus levels from
  #   StimulusMatrix: matrix whose first two columns are task stimuli
  #   TaskMemory:     per-individual count of steps spent on current task
  #   QuitRate:       number of steps after which an active worker quits
  # Returns:
  #   Updated TaskMat with dimnames "v-..." / "Task...".
  tasks <- seq_len(ncol(P_sub_g))
  stim_levels <- StimulusMatrix[TimeStep, 1:2]
  for (row in seq_len(nrow(TaskMat))) {
    if (sum(TaskMat[row, ]) == 0) {
      # Try tasks in descending preference; break ties by stimulus demand
      if (P_sub_g[row, 1] == P_sub_g[row, 2]) {
        tasks_order <- order(stim_levels, decreasing = TRUE)
      } else {
        tasks_order <- order(P_sub_g[row, ], decreasing = TRUE)
      }
      for (task in tasks_order) {
        prob <- P_sub_g[row, task]
        activity <- sample(x = c(0, 1), size = 1, prob = c(1 - prob, prob))
        if (activity == 1) {
          TaskMat[row, task] <- activity
          break
        }
      }
    } else { # active workers quit after a fixed number of steps
      if (TaskMemory[row] >= QuitRate) {
        TaskMat[row, ] <- 0
      }
    }
  }
  colnames(TaskMat) <- paste0("Task", seq_len(ncol(P_sub_g)))
  rownames(TaskMat) <- paste0("v-", seq_len(nrow(P_sub_g)))
  return(TaskMat)
}
####################
# Tasks have fixed work time (conceptual)
####################
updateTaskPerformance_Timed <- function(P_sub_g, TaskMat, QuitProb, TimeStep, StimulusMatrix, TaskMemory, QuitRate) {
  # Random-order task choice (as in updateTaskPerformance) combined with a
  # fixed work duration: an active individual quits once it has worked for
  # at least QuitRate steps (TaskMemory[row] >= QuitRate).
  # QuitProb, TimeStep and StimulusMatrix are accepted for signature
  # compatibility; the original read StimulusMatrix[TimeStep, 1:2] into an
  # unused local, which has been dropped.
  #
  # Args:
  #   P_sub_g:    n x m matrix of per-individual task probabilities
  #   TaskMat:    n x m 0/1 matrix of current task states
  #   TaskMemory: per-individual count of steps spent on current task
  #   QuitRate:   number of steps after which an active worker quits
  # Returns:
  #   Updated TaskMat with dimnames "v-..." / "Task...".
  tasks <- seq_len(ncol(P_sub_g))
  for (row in seq_len(nrow(TaskMat))) {
    if (sum(TaskMat[row, ]) == 0) {
      # Inactive: random task order, start the first one that "fires"
      tasks_order <- sample(x = tasks, size = length(tasks), replace = FALSE)
      for (task in tasks_order) {
        prob <- P_sub_g[row, task]
        activity <- sample(x = c(0, 1), size = 1, prob = c(1 - prob, prob))
        if (activity == 1) {
          TaskMat[row, task] <- activity
          break
        }
      }
    } else { # active workers quit after a fixed number of steps
      if (TaskMemory[row] >= QuitRate) {
        TaskMat[row, ] <- 0
      }
    }
  }
  colnames(TaskMat) <- paste0("Task", seq_len(ncol(P_sub_g)))
  rownames(TaskMat) <- paste0("v-", seq_len(nrow(P_sub_g)))
  return(TaskMat)
}
|
4ac8a38e90da28113e1a1b66be3377ad9541f712
|
c57488528ae6e1cefb608b2b84b4797e634771df
|
/Assignment 2/corr.R
|
43ae35b7574a39d375483885b532c373875f630d
|
[] |
no_license
|
riteshjjw/datasciencecoursera
|
76820ae410832117bec623b89c338b7f2558f0a6
|
d5cf27813c4570f27361751dfd40759f2afaa94d
|
refs/heads/master
| 2022-11-22T00:14:48.757530
| 2020-07-17T13:38:00
| 2020-07-17T13:38:00
| 278,347,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
corr.R
|
corr <- function(directory, threshold = 0){
    # Compute the sulfate/nitrate correlation for every monitor whose number
    # of complete observations exceeds `threshold`.
    #
    # Args:
    #   directory: folder containing the per-monitor CSV files (one file per
    #              monitor ID, in ID order — assumed by files[monitor] below)
    #   threshold: minimum number of complete cases required (default 0)
    # Returns:
    #   Numeric vector of correlations (possibly empty). The original
    #   returned this value invisibly via a trailing assignment; it is now
    #   returned visibly.
    source("complete.R")  # provides complete(); assumed to be in the wd
    files <- list.files(path = directory, full.names = TRUE)
    completecases <- complete(directory)
    abovethreshold <- completecases[completecases$nobs > threshold, 1]
    # Preallocate one slot per qualifying monitor and index by position —
    # the original indexed by monitor ID, writing past the preallocation.
    correlation <- rep(NA_real_, length(abovethreshold))
    for (idx in seq_along(abovethreshold)) {
        monitor <- abovethreshold[idx]
        mtrdata <- read.csv(files[monitor])
        ok <- complete.cases(mtrdata)
        sulfatedata <- mtrdata[ok, 2]
        nitratedata <- mtrdata[ok, 3]
        correlation[idx] <- cor(x = sulfatedata, y = nitratedata)
    }
    # Drop NAs from degenerate data (e.g. zero-variance columns)
    correlation[complete.cases(correlation)]
}
|
6c39f32428ad7c56a49509ca527e159bbfe78e6c
|
9e67c36a97d61e672e655ec20466da58bcad016f
|
/inst/examples/shiny-app/server.R
|
2cad4dcdb47861a96b0de82452e8580955af3a1f
|
[] |
no_license
|
arturochian/shinyjs
|
82a7dc6475574798f444acdf621c3b0a3b7c31d8
|
207a60f1317e3d05dc266c787e29d1820fae08a9
|
refs/heads/master
| 2021-01-18T08:00:31.620561
| 2015-03-25T09:43:56
| 2015-03-25T09:43:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 454
|
r
|
server.R
|
library(shiny)
library(shinyjs)
# Shiny server for the shinyjs demo app: evaluates an R expression typed by
# the user and surfaces any evaluation error in the UI.
shinyServer(function(input, output, session) {
  # Hand the session to shinyjs so its JS helpers can message the client
  shinyjs::setShinyjsSession(session)
  observe({
    # Do nothing until the submit button has been clicked at least once
    if (input$submitExpr == 0) {
      return()
    }
    # Clear any error message left over from a previous evaluation
    shinyjs::hide("error")
    tryCatch(
      # isolate() so editing the expression text does not retrigger this
      # observer; NOTE(review): eval(parse(...)) runs arbitrary user code —
      # intentional in this demo, never expose it to untrusted input
      isolate(
        eval(parse(text = input$expr))
      ),
      error = function(err) {
        # Put the error text into #errorMsg and fade the error panel in
        innerHTML("errorMsg", err$message)
        shinyjs::show(id = "error", anim = TRUE, animType = "fade")
      }
    )
  })
})
|
aaf708dab7d0ed01c758fb85004c2d9a1fbff909
|
19d3d74116db27808ef80693cf06cdbe484e3fa0
|
/man/rwa_read.Rd
|
1b4a0d887ee92524bad4d31b467ddb7a1258c95f
|
[] |
no_license
|
JBGruber/rwhatsapp
|
b9c4f37b870d8b126df4aace09c21fb2ac1ea945
|
6c12c6bc36d346ae8feadb12dfe193fcdd057d5e
|
refs/heads/master
| 2022-10-19T15:12:51.843400
| 2022-10-04T09:40:34
| 2022-10-04T09:40:34
| 147,067,905
| 99
| 18
| null | 2020-04-24T11:35:20
| 2018-09-02T08:26:21
|
R
|
UTF-8
|
R
| false
| true
| 1,461
|
rd
|
rwa_read.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rwhatsapp.R
\name{rwa_read}
\alias{rwa_read}
\title{Read WhatsApp history into R}
\usage{
rwa_read(x, tz = NULL, format = NULL, verbose = FALSE, encoding = "UTF-8", ...)
}
\arguments{
\item{x}{Path to a txt or zip file of a WhatsApp history or the history
itself as character object.}
\item{tz}{A time zone for date conversion. Set NULL or "" for the default
time zone or a single string with a timezone identifier, see
\link[stringi]{stri_timezone_list}.}
\item{format}{Most formats are automatically detected. If you encounter
problems you can provide a custom format here. Refer to
\link[stringi]{stri_datetime_parse} for guidance.}
\item{verbose}{A logical flag indicating whether information should be
printed to the screen.}
\item{encoding}{Input encoding. Should usually be "UTF-8" if files haven't
changed since export from WhatsApp.}
\item{...}{Further arguments passed to \link[base]{readLines}.}
}
\value{
A tibble with the information parsed from the history file.
}
\description{
Takes a history file from the ``WhatsApp'' messenger app (txt or zip) and
returns a formatted tibble (data frame) with descriptions of the used emojis.
}
\details{
The history can be obtained going to the menu in a chat on the
``WhatsApp'' app, choosing "more", then "Export chat".
}
\examples{
history <- system.file("extdata", "sample.txt", package = "rwhatsapp")
df <- rwa_read(history)
}
|
732329cf24e52b4cb110431bde5e2956d6e5e0dc
|
ebb09f52b1ee12d8ae8d4c493e6f1079ee57868c
|
/ExploratoryDataAnalysis/Project2/plot2.R
|
7cd4e9c1bd033a5ed0478fb211d1d8c81923898a
|
[] |
no_license
|
r6brian/datasciencecoursera
|
a1723f812a34eee7094dfaa0bfde6c618b349d6c
|
548944d3ba68d302160f05158fb90859bc4c8bae
|
refs/heads/master
| 2021-01-19T10:29:54.605308
| 2015-08-23T20:00:04
| 2015-08-23T20:00:04
| 26,268,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 807
|
r
|
plot2.R
|
# 2. Have total emissions from PM2.5 decreased in Baltimore City, Maryland from 1999 to 2008?
# Read data files (SCC is loaded for completeness but not used in this plot)
NEI <- readRDS("data/exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("data/exdata-data-NEI_data/Source_Classification_Code.rds")
# Subset NEI data: keep only rows for Baltimore City (fips == "24510")
baltimoreEmissions <- NEI[NEI$fips == "24510", ]
# Sum the Baltimore emissions by year
baltimoreEmissionsByYear <- aggregate(Emissions ~ year, baltimoreEmissions, sum)
# Plot a bar graph of the yearly totals to plot2.png
png('plot2.png')
barplot(height = baltimoreEmissionsByYear$Emissions,
        names.arg = baltimoreEmissionsByYear$year,
        xlab = "years",
        # fixed: subscript was [2], but the data are PM2.5 emissions
        ylab = expression('total PM'[2.5]*' emission'),
        main = expression('Total PM'[2.5]*' emissions in the city of Baltimore, by year'))
dev.off()
|
f253460c0e363aba005becd560c06613af9e691a
|
d4f6682af4f1189bed66bb21fe39d71f1382d555
|
/tests/testthat/test-front-page-scaling.R
|
9bfdf6736102beb0a22d28dabbf4dbfe6dd02e9b
|
[] |
no_license
|
thechrelsres/sfthemes
|
dfa87760169a24a0ebfedf3fddec33f301197dbf
|
fcfa15dbed8dee69a45f9c7a4d831c136240945f
|
refs/heads/main
| 2023-02-25T12:17:35.634990
| 2021-01-17T16:04:04
| 2021-01-17T16:04:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,667
|
r
|
test-front-page-scaling.R
|
context("pkgdown-front-page")
test_that("front page figures showcasing the scaling have been generated...", {
  testthat::skip_on_cran()
  testthat::skip_on_appveyor()
  testthat::skip_on_travis()
  testthat::skip_on_os("windows")
  library(ggplot2)
  # One light/dark figure pair per SF dynamic-type size class
  classes <- c("xSmall", "Small", "Medium", "Large", "xLarge", "xxLarge", "xxxLarge")
  for (i in seq_along(classes)) {
    # Build each plot explicitly and pass it to ggsave(): relying on the
    # implicit last_plot() inside a loop is unreliable because the plot
    # objects are never printed here.
    p_light <- ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color=Species)) +
      geom_point(aes(shape=Species)) +
      xlab("Sepal Length") + ylab("Sepal Width") +
      labs(title = "SF Light/Dark Theme",
           subtitle = paste0("Size Class: ", classes[[i]])) +
      geom_smooth(method="lm", aes(fill = Species)) +
      theme_sf_light(size_class = classes[[i]]) +
      scale_colour_ios_light(accessible = F) +
      scale_fill_ios_light(accessible = F) +
      theme(legend.position = "bottom")
    ggsave(paste0("figures/front_page/iris_linear_fit_light_", i, "_",
                  classes[[i]], ".png"),
           plot = p_light,
           width = 20, height = 15, bg = "transparent", dpi = 72)
    p_dark <- ggplot(data=iris, aes(x=Sepal.Length, y=Sepal.Width, color=Species)) +
      geom_point(aes(shape=Species)) +
      xlab("Sepal Length") + ylab("Sepal Width") +
      labs(title = "SF Light/Dark Theme",
           subtitle = paste0("Size Class: ", classes[[i]])) +
      geom_smooth(method="lm", aes(fill = Species)) +
      theme_sf_dark(size_class = classes[[i]]) +
      scale_colour_ios_dark(accessible = F) +
      scale_fill_ios_dark(accessible = F) +
      theme(legend.position = "bottom")
    ggsave(paste0("figures/front_page/iris_linear_fit_dark_", i, "_",
                  classes[[i]], ".png"),
           plot = p_dark,
           width = 20, height = 15, bg = "transparent", dpi = 72)
    # Stitch the light and dark variants side by side
    system(paste0("../../misc/stitch_two_45.sh",
                  " figures/front_page/iris_linear_fit_light_", i, "_", classes[[i]], ".png",
                  " figures/front_page/iris_linear_fit_dark_", i, "_", classes[[i]], ".png",
                  " ../../tests/testthat/figures/front_page/iris_linear_fit_light_dark_", i, "_", classes[[i]], ".png"))
  }
  # system("convert -delay 30 figures/front_page/iris_linear_fit_light_*.png ../../man/figures/front-page-scaling-animation-light.gif")
  # system("convert -delay 30 figures/front_page/iris_linear_fit_dark_*.png ../../man/figures/front-page-scaling-animation-dark.gif")
  system("convert -delay 60 figures/front_page/iris_linear_fit_light_dark_*.png ../../man/figures/front-page-scaling-animation-light-dark.gif")
})
|
0ddb7e20da8199f2e4e966e766ca0debdc7da6b2
|
056a9308784b7ba57b6c4685d298489b2529fb7e
|
/Chapter02/cluster_inc.R
|
5965aa4d36adec755764e27b535e2563ef7c67fa
|
[
"MIT"
] |
permissive
|
taecjee/R-Deep-Learning-Essentials-Second-Edition
|
7227ae70aea25fca481c2ce137930d734e476866
|
ae6edf846f6ed40e003894de2f2e92e5beb38171
|
refs/heads/master
| 2020-11-24T17:36:50.136884
| 2020-02-11T08:47:14
| 2020-02-11T08:47:14
| 228,276,353
| 0
| 0
|
MIT
| 2019-12-16T01:05:28
| 2019-12-16T01:05:28
| null |
UTF-8
|
R
| false
| false
| 30
|
r
|
cluster_inc.R
|
library(caret)
library(RSNNS)
|
b1571dffb3dedaa6b5394fd45e2db2a0453f8366
|
00c16d1609fea324f4cf31459d805215f6b654ee
|
/R/source_fatbloom.R
|
b42ff7ae0aff467c8bd35139b5b28d2a7fc6b44c
|
[] |
no_license
|
dan2cil/tecTools
|
73b1ecead88f6101cf060df9d5f7b296ddf3c400
|
d38b47f6147b10e3b6b16dba414d3201112a1c6a
|
refs/heads/main
| 2023-07-06T16:41:44.593467
| 2021-08-03T19:37:24
| 2021-08-03T19:37:24
| 392,432,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,947
|
r
|
source_fatbloom.R
|
#' mosaicFB
#'
#' Draw a mosaic plot of product fat-bloom scores by cluster and product age
#' (months). Aggregated L1..L6 counts are optionally expanded to one row per
#' piece via depivot() before plotting.
#'
#' @param dati data frame of data
#' @param cluster field to use to clusterize data; "a+b" combines two fields
#' @param filter data filter, an R expression given as a string
#' @param depivot boolean, if TRUE depivot (expand) the data first
#' @param main main title
#' @param sub subtitle
#'
#' @return the (filtered/expanded) data frame, invisibly
#'
#' @examples
#' #tempering(BC=0.42)
#'
#' @export
mosaicFB <- function(dati, cluster='plant', filter=NULL, depivot=T, main='Product fat-bloom', sub=''){
  ## ----------------------------------------------------------------------
  ## History
  ## v. 1.0 - 10 aug 2012 initial version
  ## ToDo:
  ## ----------------------------------------------------------------------
  ## Author: Danilo Cillario, Date: 3 June 2017
  ## Filter example:
  ## filter="plant=='Alba'"
  ## filter="plant=='Alba' & linea %in% c('L1','L4')"
  # option
  options(encoding="utf-8")
  # constants: one colour per fat-bloom level (green shades -> reds)
  colore <- c(colours()[c(257, 254, 259)], rgb(1,.5,.5), rgb(1,.25,.25), rgb(1,0,0))
  # expand aggregated counts into one row per piece, if requested
  if (depivot){
    dati <- depivot(dati)
  }
  # apply the row filter if provided
  # NOTE(review): eval(parse(...)) executes the filter string as R code by
  # design here — never pass untrusted input
  if (!is.null(filter)){
    dati <- subset(dati, eval(parse(text=filter)))
  }
  # build a composite cluster when 'cluster' has the form "a+b"
  if (grepl('+', cluster, fixed=T)){
    # NOTE(review): the result of this gsub is discarded, so blanks in
    # 'cluster' are NOT actually removed — likely an oversight
    gsub(" ", "", cluster, fixed = TRUE)
    cluster2 <- unlist(strsplit(cluster, split='+', fixed=T))
    cluster3 <- interaction(dati[, cluster2], drop=T)
    dati <- cbind(dati, cluster3)
    cluster <- 'cluster3'
  }
  # drop unused levels of the cluster factor
  dati[, cluster] <- dati[,cluster][drop=T]
  # draw the mosaic plot (needs at least two observations)
  if (length(dati[,cluster]) > 1){
    mosaicplot(table(dati[,c(cluster,'month', 'fat_bloom')]),
               color = colore,
               xlab  = cluster,
               ylab  = 'Età prodotto',
               main  = main,
               sub   = sub)
  } else {
    print(paste('Non ci sono abbastanza dati del cluster:', length(dati[cluster])))
  }
  invisible(dati)
}
#' stackFB
#'
#' Draw stacked bar charts of fat-bloom scores (L1..L6) per cluster, one
#' panel per level of `new`, optionally with a table of pairwise
#' Kruskal-Wallis p-values under each panel.
#'
#' @param dati data frame of data (columns: cluster field, `new` field,
#'   L1..L6 counts)
#' @param cluster field used to clusterize data
#' @param filter data filter, an R expression given as a string
#' @param main main title
#' @param new field used to split the data into separate panels
#' @param tabOnGraph boolean, if TRUE print the kruskal test result below
#'   each panel
#'
#' @return nothing useful (called for its plotting side effects)
#'
#' @examples
#' #tempering(BC=0.42)
#'
#' @export
stackFB <- function(dati, cluster='plant', filter=NULL, main='Product fat-bloom', new='month', tabOnGraph=T){
  ## data: product, month, L1, L2, L3, L4, L5, L6
  ## ----------------------------------------------------------------------
  ## History
  ## v. 1.0 - 10 aug 2012 initial version
  ## ToDo:
  ## 1. add a paired t-test alongside the kruskal test
  ## ----------------------------------------------------------------------
  ## Author: Danilo Cillario, Date: 3 June 2017
  # data frame check (disabled)
  #call <- match.call()
  #if (!"new" %in% names(call)) {new <- "mese"}
  # base data: one colour per fat-bloom level (green shades -> reds)
  colori <- c(colours()[c(257, 254, 259)], rgb(1,.5,.5), rgb(1,.25,.25), rgb(1,0,0))
  # apply the row filter if provided
  # NOTE(review): eval(parse(...)) executes the filter string as R code
  if (!is.null(filter)){
    dati <- subset(dati, eval(parse(text=filter)))
  }
  # aggregate the data if there is more than one row per cluster x `new` cell
  ncluster <- nlevels(dati[,cluster])
  nnew <- nlevels(dati[,new])
  if (length(dati[,1]) > ncluster * nnew){
    print('richiede aggregazione dei dati')
    dati <- aggregate(dati[,c('L1','L2','L3','L4','L5','L6')], by=list(dati[,cluster],dati[,new]), FUN=sum, na.rm=T)
    names(dati)[1:2] <- c(cluster, new)
    print(dati)
  } else {
    print('NON richiede aggregazione dei dati')
  }
  # rows and columns of graphics count
  nlivelli <- nlevels(as.factor(dati[,new]))   # number of panels
  #livelli <- sort(unique(dati[,new]))         # panel labels
  livelli <- levels(as.factor(dati[,new]))
  n.prod <- nlevels(as.factor(dati[,cluster])) # number of bars per panel
  righe <- trunc(sqrt(nlivelli))               # number of rows
  colonne <- ceiling(nlivelli/righe)           # number of columns
  quadri <- colonne * righe + 1                # number of frames
  # define layout for multiple plot
  if (Sys.info()[1]=='Linux') X11.options(width=7/righe*colonne, height=7) else windows.options(width=7/righe*colonne, height=7)
  par(mai=c(0, 0, 0, 0))
  if (tabOnGraph){
    # Interleave chart frames with p-value-table frames, row by row
    se <- 2:((((righe*colonne)*2))+1)
    if (righe == 1){
      aa <- split(se, rep(c(1,2), times=colonne))
      bb <- c(aa[[1]], aa[[2]])}
    else if (righe == 2){
      aa <- split(se, c(rep(c(1,2), times=colonne), rep(c(3,4), times=colonne)))
      bb <- c(aa[[1]], aa[[2]], aa[[3]], aa[[4]])}
    else if (righe == 3){
      aa <- split(se, c(rep(c(1,2), times=colonne), rep(c(3,4), times=colonne), rep(c(5,6), times=colonne)))
      bb <- c(aa[[1]], aa[[2]], aa[[3]], aa[[4]], aa[[5]], aa[[6]])}
    else if (righe == 4){
      aa <- split(se, c(rep(c(1,2), times=colonne), rep(c(3,4), times=colonne), rep(c(5,6), times=colonne), rep(c(7,8), times=colonne)))
      bb <- c(aa[[1]], aa[[2]], aa[[3]], aa[[4]], aa[[5]], aa[[6]], aa[[7]], aa[[8]])}
    layout(matrix(c(rep(1,colonne), bb),byrow=T,nrow=(righe*2)+1), heights=c(1,rep(c(5,2), times=righe)))
    xpad <- c(.8,rep(1.25, times=n.prod))
  } else {
    layout(matrix(c(rep(1,colonne), 2:quadri),byrow=T,nrow=righe+1), heights=c(righe*0.75, rep(9, righe)))
  }
  # write main title on page
  plot(c(0,0), c(1,1), type="n", xlab="", ylab="", axes=FALSE, asp=1)
  mtext(main, side=3, line=-3, cex=1.5, font=4, col='red')
  if (tabOnGraph)
    par(mai=c(0.2, 0.3, 0.3, 0.1))
  else
    par(mai=c(0.4, 0.3, 0.3, 0.1))
  # Graphics plot: one stacked barplot (and optional p-value table) per level
  for (graph in livelli){
    caso <- subset(dati, dati[,new]==graph, select=c('L1','L2','L3','L4','L5','L6'))
    nomi <- levels(subset(dati, dati[,new]==graph, select=cluster)[,cluster][, drop=T])
    print(caso)
    print(nomi)
    row.names(caso) <- NULL
    # Transpose so each column is a cluster and each row a fat-bloom level
    caso <- data.frame(t(caso))
    names(caso) <- nomi
    x <- as.matrix(caso)
    # put the data in the correct order (disabled)
    #peso <- rep(0, ncol(x))
    #for (i in 1:ncol(x)){
    #  peso <- peso + x[,i]*i}
    #x1 <- x[order(peso),]
    y <- .pairkruskal(caso)
    #par( mai = c(0.6, 0.6, 0.8, 0.2))
    par( mai = c(0.4, 0.6, 0.4, 0.2))
    # Plot the graph
    barplot(x, col=colori, names.arg=nomi)
    # Write the panel subtitle
    sub <- paste(new, ': ', graph)
    mtext(sub, side=3, line=+.5, cex=.8, font=4, col='blue')
    # If required plot the p-value table on graph
    if (tabOnGraph){
      old_parmai <- par('mai')
      par( mai = c(0.1,0.0,0.1,0.1) )
      plot.new()
      .scrivi(0, 0, y, bty="o", lwd=0, display.rownames=TRUE, display.colnames=FALSE, hlines=FALSE, vlines=FALSE, xpad=xpad, ypad=1.2, title="Kruskal-test")
      par(mai = old_parmai)
    }
  }
  #return
}
## ---------------------------------------------------------------------
## Funzioni generiche (esterne alle classi)
## ---------------------------------------------------------------------
# pairkruskal pair comparison
.pairkruskal <- function(dati, print = FALSE) {
  ## Purpose: compare the products pairwise with kruskal.test
  ## INTERNAL FUNCTION NOT EXPORTED
  ## -----------------------------------------------------------------
  ## Arguments:
  ##   dati : data frame, one column per product
  ##   print: if TRUE, also print the p-value table to the console
  ## -----------------------------------------------------------------
  ## Result:
  ##   character matrix of pairwise Kruskal-Wallis p-values formatted
  ##   with "%.3f"; NA on the diagonal. The matrix is symmetric because
  ##   the test is invariant to group order (the original computed the
  ##   test twice per pair for identical results).
  ## -----------------------------------------------------------------
  ## Author: Danilo Cillario, Date : 3 november 2012
  nomi <- names(dati)
  colonne <- length(dati)
  risultato <- matrix(nrow=colonne, ncol=colonne)
  for (i in 1:(colonne-1)){
    for (ii in (i+1):colonne){
      # kruskal.test is symmetric in its groups: one call fills both cells
      p_val <- kruskal.test(list(dati[,i], dati[,ii]))$p.value
      risultato[i,ii] <- sprintf("%.3f", p_val)
      risultato[ii,i] <- sprintf("%.3f", p_val)}}
  colnames(risultato) <- nomi
  rownames(risultato) <- nomi
  if(print){
    cat("\n*********************************\n Kruskal pairs preferences table\n*********************************\n")
    print(risultato)
    cat("\n")}
  invisible(risultato)
}
#' depivot
#'
#' Expand aggregated fat-bloom counts (columns L1..L6) into one row per
#' counted piece, attaching the piece's fat-bloom level (1..6) as a new
#' 'fat_bloom' column.
#'
#' @param dati data frame with descriptive columns followed by L1..L6 counts
#'
#' @return expanded data frame (descriptive columns + 'fat_bloom')
#'
#' @examples
#' #
#'
#' @export
depivot <- function(dati){
  dt <- na.omit(dati)
  provv <- NULL
  # NOTE(review): dt2 is grown with rbind inside a nested loop — O(n^2);
  # acceptable for small tables, consider preallocating for large ones.
  # Also note the whole function errors if dt has zero rows (dt2 undefined).
  for (i in 1:length(dt[,1])){
    # Total number of pieces recorded on this aggregated row
    pezzi <- sum(dt[i, c('L1','L2','L3','L4','L5','L6')])
    dati_inizio <- which(names(dt)=='L1')
    range_dati <- dati_inizio:which(names(dt)=='L6')
    # only for use when the data are percentages (disabled)
    #provv <- rep(1:6, ro[i,range_dati]*pezzi/100)
    # Fat-bloom level j repeated count[j] times, e.g. c(1, 1, 4) for L1=2, L4=1
    provv <- rep(1:6, dt[i, range_dati])
    for (ii in 1:pezzi) {
      # One output row per piece: descriptive columns + its fat-bloom level
      riga <- c(dt[i, 1:(dati_inizio-1)], provv[ii])
      names(riga)[dati_inizio] <- 'fat_bloom'
      if (i==1 & ii==1)
        dt2 <- data.frame(riga, stringsAsFactors=F)
      else
        dt2 <- rbind(dt2, riga)
    }
  }
  return(dt2)
}
.depivot2 <- function(dati){
  # Vectorized counterpart of depivot(): expand aggregated L1..L6 counts into
  # one row per piece with its fat-bloom level (1..6) in a 'fat_bloom' column.
  # NOTE(review): the original body was an unfinished stub that referenced an
  # undefined object `b`; this implements the intended expansion.
  #
  # Args:
  #   dati: data frame with descriptive columns followed by L1..L6 counts
  # Returns:
  #   Data frame of descriptive columns (those before L1) plus 'fat_bloom'.
  dt <- na.omit(dati)
  inizio <- which(names(dt)=='L1')
  fine <- which(names(dt)=='L6')
  counts <- as.matrix(dt[, inizio:fine])
  pezzi <- rowSums(counts)
  # Repeat each descriptive row once per counted piece
  out <- dt[rep(seq_len(nrow(dt)), pezzi), seq_len(inizio - 1), drop = FALSE]
  # Fat-bloom level j is repeated counts[row, j] times, row by row
  out$fat_bloom <- unlist(lapply(seq_len(nrow(dt)),
                                 function(r) rep(1:6, counts[r, ])))
  rownames(out) <- NULL
  out
}
# Write a table onto the current graphics device
.scrivi <- function (x, y = NULL, table, lwd = par("lwd"), bty = "n", bg = par("bg"),
    cex = 1, xjust = 0, yjust = 1, xpad = 0.1, ypad = 0.5, box.col = par("fg"),
    text.col = par("fg"), display.colnames = TRUE, display.rownames = FALSE,
    hlines = FALSE, vlines = FALSE, title = NULL){
  ## Purpose: Write a table into a graphics window
  ## INTERNAL FUNCTION NOT EXPORTED
  ## -----------------------------------------------------------------
  ## Arguments:
  ##   x,y              : Either x and y coordinates to locate the table or an 'xy.coords' object
  ##   table            : A data frame, matrix or similar object that will be displayed
  ##   lwd              : The line width for the box and horizontal dividers
  ##   bty              : Whether to draw a box around the table ("o") or not ("n")
  ##   bg               : The background color for the table
  ##   cex              : Character expansion for the table
  ##   xjust,yjust      : Positioning for the table relative to 'x,y'
  ##   xpad,ypad        : The amount of padding around text in the cells as a proportion of the maximum
  ##                      width and height of the strings in each column
  ##   box.col          : The color for the box and lines
  ##   text.col         : The color for the text
  ##   display.colnames : Whether to display the column names in the table
  ##   display.rownames : Whether to display the row names in the table
  ##   hlines           : Whether to draw horizontal lines between each row of the table
  ##   vlines           : Whether to draw vertical lines between each column of the table
  ##   title            : Optional title placed over the table
  ## -----------------------------------------------------------------
  ## Author: Modified from the Plotrix package (addtable2plot); the local
  ## change is per-column xpad recycling (see "Addition" below) and the
  ## red highlighting of extreme p-values in the cell loop.
  if (dev.cur() == 1)
    stop("Cannot add table unless a graphics device is open")
  if (is.null(y)) {
    if (is.null(x$y))
      stop("both x and y coordinates must be given")
    y <- x$y
    x <- x$x
  }
  tabdim <- dim(table)
  # Addition (vs. plotrix): recycle a scalar xpad to one value per column
  if (length(xpad)==1)
    xpad <- rep(xpad,(tabdim[2]+1))
  # end of addition
  if (is.null(dim(bg)))
    bg <- matrix(bg, nrow = tabdim[1], ncol = tabdim[2])
  column.names <- colnames(table)
  if (is.null(column.names) && display.colnames)
    column.names <- 1:tabdim[2]
  row.names <- rownames(table)
  if (is.null(row.names) && display.rownames)
    row.names <- 1:tabdim[1]
  if (par("xlog"))
    x <- log10(x)
  cellwidth <- rep(0, tabdim[2])
  ## Compute per-column cell widths from the widest string in each column
  if (display.colnames) {
    for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(c(column.names[column],
      format(table[, column])), cex = cex)) * (1 + xpad[column+1])
    nvcells <- tabdim[2] + 1 }
  else {
    nvcells <- tabdim[2]
    for (column in 1:tabdim[2]) cellwidth[column] <- max(strwidth(format(table[,
      column]), cex = cex)) * (1 + xpad[column+1]) }
  ## Width reserved for the row-name column (0 when not displayed)
  if (display.rownames) {
    nhcells <- tabdim[2] + 1
    rowname.width <- max(strwidth(row.names, cex = cex)) *
      (1 + xpad[1]) }
  else {
    nhcells <- tabdim[2]
    rowname.width <- 0 }
  if (par("ylog"))
    y <- log10(y)
  cellheight <- max(strheight(c(column.names, row.names, as.vector(unlist(table))),
    cex = cex)) * (1 + ypad)
  ytop <- y + yjust * nvcells * cellheight
  oldpar <- par(xlog = FALSE, ylog = FALSE, xpd = TRUE)
  ## Write the cell contents
  for (row in 1:tabdim[1]) {
    xleft <- x - xjust * (sum(cellwidth) + rowname.width)
    if (row <= nvcells - 1 && hlines)
      segments(xleft + rowname.width, ytop - row * cellheight,
        xleft + sum(cellwidth) + rowname.width, ytop -
          row * cellheight, lwd = lwd, col = box.col)
    if (display.rownames) {
      text(xleft + 0.5 * rowname.width, ytop - (row + display.colnames -
        0.5) * cellheight, row.names[row], cex = cex,
        col = text.col)
      xleft <- xleft + rowname.width
    }
    for (column in 1:tabdim[2]) {
      rect(xleft, ytop - (row + display.colnames - 1) *
        cellheight, xleft + cellwidth[column], ytop -
        (row + display.colnames) * cellheight, col = bg[row,
        column])
      # Extreme p-values (<= 0.05 or >= 0.95) are drawn in red.
      # NOTE(review): table cells are "%.3f" strings here, so <=/>= do a
      # character comparison after coercion — works for this fixed format
      # but would misbehave for general numbers; confirm if format changes.
      text(xleft + 0.5 * cellwidth[column], ytop - (row +
        display.colnames - 0.5) * cellheight, table[row,
        column], cex = cex, col = ifelse(table[row,column]<=0.05 | table[row,column]>=0.95,'red',text.col))
      if (vlines)
        segments(xleft, ytop - (row + display.colnames) *
          cellheight, xleft + cellwidth[column], ytop -
          (row + display.colnames) * cellheight, col = box.col)
      xleft <- xleft + cellwidth[column]
    }
  }
  ## Print the column names
  if (display.colnames) {
    xleft <- x - xjust * (sum(cellwidth) + rowname.width)
    for (column in 1:tabdim[2]) {
      text(xleft + display.rownames * rowname.width + cellwidth[column] *
        0.5, ytop - 0.5 * cellheight, column.names[column],
        cex = cex, col = text.col)
      if (!hlines)
        segments(xleft + rowname.width, ytop - cellheight,
          xleft + cellwidth[column], ytop - cellheight,
          lwd = lwd, col = box.col)
      xleft <- xleft + cellwidth[column]}}
  ## Print the title
  if (!is.null(title)) {
    xleft <- x - xjust * (sum(cellwidth) + rowname.width)
    text(xleft + rowname.width + (sum(cellwidth))/2, ytop +
      cellheight/2, title, cex = cex, col = text.col)
    if (bty == "n")
      segments(xleft, ytop, xleft + sum(cellwidth) + rowname.width,
        ytop, lwd = lwd, col = box.col)
  }
  par(oldpar)
}
|
09eb2081e7ac0a42591c865025cecc42d15eeea4
|
3b3168707c67aefbd85934bae572b6f686b479e0
|
/R/plot_contacts.R
|
e99e17cae4a967fb7f3aa9abe8528f94352e3488
|
[] |
no_license
|
qenvio/hicvidere
|
e114869c279da21e5de178b7961432fbded5a191
|
65df8c6a2d5a9ba9d7e138da776afd3b47ab7f6e
|
refs/heads/master
| 2021-01-10T04:44:26.737390
| 2016-01-25T15:56:53
| 2016-01-25T15:56:53
| 43,735,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,378
|
r
|
plot_contacts.R
|
#' Plot a HiC map from a contact matrix.
#'
#' This function takes a contact matrix and plots a HiC map
#' @import magrittr
#' @import RColorBrewer
#' @param contacts Input contact matrix. Usually the output of \code{\link{get_contacts}}
#' @param rotate Do you want matrix to be rotated so the diagonal is from top-left to bottom-right? Defaults to TRUE
#' @param transformation Transformation of the data to ease the visualization. Defaults to log10
#' @param col.palette Palette of colors used in the representation
#' @return A nice plot
#' @seealso \code{\link{read_tabix}} and \code{\link{get_contacts}} for data retrieval
#' @export
#' @examples
#' plot(0)
plot_contacts <- function(contacts, rotate = TRUE, transformation = log10, col.palette = brewer.pal(9, "Blues")){
  # Axis tick positions derived from the numeric row names (genomic coords)
  guides <- pretty(x = rownames(contacts) %>% as.numeric)
  # Flip columns so the diagonal runs top-left -> bottom-right if requested
  if(rotate) contacts[] <- contacts[,ncol(contacts):1]
  # Shrink margins and force a square region; restore par() on exit so the
  # caller's graphics state is not clobbered (the original leaked it)
  oldpar <- par(mar = c(4, 0, 0, 0), pty = "s")
  on.exit(par(oldpar), add = TRUE)
  # Draw the heat map
  image(x = rownames(contacts) %>% as.numeric,
        y = colnames(contacts) %>% as.numeric,
        z = transformation(contacts),
        col = col.palette,
        axes = FALSE, xlab = "Genomic Position / Mbp", ylab = "", cex.lab = 1.5)
  box()
  # Bottom axis labelled in megabase pairs
  axis(1,
       at = guides,
       labels = guides / 1e6,
       cex.axis = 1.5)
  invisible()
}
|
45bb34f8f304085fa967e711241c532f6796a4a9
|
bbfcc35c6394e5cb99b3164c0c5b16ad6821ddec
|
/R/bt.tag.R
|
d2ae4274c8ada88c949663d4ef7d06b523a61657
|
[
"MIT"
] |
permissive
|
PhanstielLab/bedtoolsr
|
41ec321d3c16a2f893a56182c2d01254bf386de9
|
cce152f1ce653771d8d41431e0d12e4ef9c42193
|
refs/heads/master
| 2022-11-15T13:02:56.285838
| 2022-11-08T22:20:19
| 2022-11-08T22:20:19
| 151,143,796
| 31
| 5
|
NOASSERTION
| 2019-12-05T18:11:09
| 2018-10-01T19:08:56
|
R
|
UTF-8
|
R
| false
| false
| 2,553
|
r
|
bt.tag.R
|
#' Annotates a BAM file based on overlaps with multiple BED/GFF/VCF files
#' on the intervals in -i.
#'
#' @param i <BAM>
#' @param files FILE1 .. FILEn
#' @param s Require overlaps on the same strand. That is, only tag alignments that have the same
#' strand as a feature in the annotation file(s).
#'
#' @param S Require overlaps on the opposite strand. That is, only tag alignments that have the opposite
#' strand as a feature in the annotation file(s).
#'
#' @param f Minimum overlap required as a fraction of the alignment.
#' - Default is 1E-9 (i.e., 1bp).
#' - FLOAT (e.g. 0.50)
#'
#' @param tag Dictate what the tag should be. Default is YB.
#' - STRING (two characters, e.g., YK)
#'
#' @param names Use the name field from the annotation files to populate tags.
#' By default, the -labels values are used.
#'
#' @param scores Use the score field from the annotation files to populate tags.
#' By default, the -labels values are used.
#'
#' @param intervals Use the full interval (including name, score, and strand) to populate tags.
#' Requires the -labels option to identify from which file the interval came.
#'
#' @param labels LAB1 .. LABn
#' @param output Output filepath instead of returning output in R.
#'
bt.tag <- function(i, files, s = NULL, S = NULL, f = NULL, tag = NULL, names = NULL, scores = NULL, intervals = NULL, labels = NULL, output = NULL)
{
  # Resolve the required inputs to on-disk paths (R objects get temp files)
  i <- establishPaths(input=i, name="i", allowRobjects=TRUE)
  files <- establishPaths(input=files, name="files", allowRobjects=TRUE)
  # Translate the optional arguments into bedtools command-line flags
  opts <- createOptions(names=c("s", "S", "f", "tag", "names", "scores", "intervals", "labels"), values=list(s, S, f, tag, names, scores, intervals, labels))
  # Capture bedtools output in a temp file (forward slashes for Windows)
  tmp_out <- gsub("\\", "/", tempfile("bedtoolsr", fileext=".txt"), fixed=TRUE)
  # Honor a user-configured bedtools install location, if any
  bt_path <- getOption("bedtools.path")
  if(!is.null(bt_path)) bt_path <- paste0(bt_path, "/")
  command <- paste0(bt_path, "bedtools tag ", opts, " -i ", i[[1]], " -files ", files[[1]], " > ", tmp_out)
  if(.Platform$OS.type == "windows") shell(command) else system(command)
  # Either hand the raw file to the caller or parse it into a data frame
  has_output <- file.info(tmp_out)$size > 0
  if(is.null(output)) {
    parsed <- if(has_output) utils::read.table(tmp_out, header=FALSE, sep="\t", quote='') else data.frame()
  } else if(has_output) {
    file.copy(tmp_out, output)
  }
  # Remove the scratch files created above
  deleteTempFiles(c(tmp_out, i[[2]], files[[2]]))
  if(is.null(output))
    return(parsed)
}
|
4681848836b54882578d077711a088b0812d81b3
|
e66b402e019db06907a31c4c4efe04f498c9b22e
|
/cachematrix.R
|
9efcb3f472062ef971549994402d90412cb881a0
|
[] |
no_license
|
codeforcalifornia/rprog-hw2
|
5c38b838d7491ec4e25cc9dc8d36d1c838305a41
|
2e46f38cf388d74aae2782f3b1d0ac613ee63213
|
refs/heads/master
| 2021-01-18T17:44:46.968994
| 2014-06-21T17:43:19
| 2014-06-21T17:43:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 991
|
r
|
cachematrix.R
|
## These functions create a special object that can cache the value of the
## inverse of a matrix if necessary
## Create an object that can hold a matrix value and optionally set a
## computed inverse. If the matrix value changes, invalidate the computed
## inverse.
makeCacheMatrix <- function(x = matrix()) {
derivedval <- NULL
set <- function(y) {
x <<- y
derivedval <<- NULL
}
get <- function() x
setInv <- function(invVal) derivedval <<- invVal
getInv <- function() derivedval
list(set = set, get = get,
setInv = setInv,
getInv = getInv)
}
## Return a pre-calculated inverse if present, otherwise compute the value
## of the inverse of the given matrix.
cacheSolve <- function(x, ...) {
derivedVal <- x$getInv()
if (!is.null(derivedVal)) {
message("getting cached data")
return(derivedVal)
}
data <- x$get()
derivedVal <- solve(data, ...)
x$setInv(derivedVal)
derivedVal
}
|
bc7a4900d4b954b9d4eb531c3d5acb799ffea296
|
7d125cf7b30e9be0ef1f02e24ad13495b4481f4e
|
/src/Library/compareGeneExpDiffSubset.R
|
6ef7d726be1c8e50fdc595326bec0be0c845b4ed
|
[] |
no_license
|
DToxS/Differential-Comparison
|
0616004e275cfa17d605505cecc6842a0baa4b2a
|
d6b3d4cc7c4ef2bdb21527655fb927c146453942
|
refs/heads/master
| 2022-04-06T22:12:37.767298
| 2020-02-27T22:19:56
| 2020-02-27T22:19:56
| 105,199,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,917
|
r
|
compareGeneExpDiffSubset.R
|
# Perform a DEG comparison for a subset of samples between a pair of conditions.
# Append experiment info to a title text.
appendExprtInfo <- function(title_text, exprt_info)
{
if(!is.null(title_text) && !is.null(exprt_info))
{
if(!is.na(exprt_info["cell"])) title_text <- paste(title_text, "in", exprt_info["cell"])
if(!is.na(exprt_info["time"])) title_text <- paste(title_text, "at", exprt_info["time"])
}
return(title_text)
}
# Perform a DEG comparison for a subset of samples between a pair of conditions.
compareGeneExpDiffSubset <- function(read_counts, group_names, group_pair, subset_name=NULL, dispersion="auto", min_samples=3, min_samples_req="all", fdr=0.05, deg_only=TRUE, normalize=TRUE, norm_base=1e+6, remove_small=TRUE, exprt_info=NULL, deg_counts_stats_file=NULL, deg_plots_file=NULL, deg_image_width=NULL, deg_image_height=NULL, single_deg_image_file=FALSE, plot_bcv=FALSE, plot_smear=FALSE, pt_chars=c(16,19), r_compat=TRUE, verbose=FALSE, func_dir=NULL)
{
# read_counts: A matrix of read counts and sequence length of genes at different sample
# conditions.
# group_names: A vector of group names assigned to each sample used for DEG comparison.
# e.g. c("Wildtype", "Wildtype", "Knockout", "Knockout", "Mixed", "Mixed")
# group_pair: A pair of group names used for a comparison of the change of gene expression
# level from group_pair[1] to group_pair[2].
# e.g. c("Wildtype","Knockout")
# subset_name: The name of a subset of samples for DEG comparison.
# Default: NULL
# dispersion: The dispersion argument passed to exactTest function, which can be a keyword,
# a single number, or a vector of numbers with a length of the number of genes
# included in read_counts matrix.
# Allowed: auto, common, trended, tagwise
# Default: auto
# (can be a single number).
# min_samples: The minimum number of sample replicates in each sample group needed for
# outlier removal and DEG comparison.
# Default: 3
# min_samples_req: The requirement for min_samples to be met by sample groups for DEG
# comparison.
# Allowed: all, any
# Default: all
# fdr: false discovery rate.
# Default: 0.05
# deg_only: Output only the genes whose FDR is smaller than specified threshold.
# Default: TRUE
# normalize: Whether to normalize the read counts of each sample before DEG comparison.
# Default: TRUE
# remove_small: Whether to remove the genes with small read counts before DEG comparison.
# Default: TRUE
# exprt_info: The experiment information to be included in plots.
# e.g. drug group, cell line, time point, etc.
# Default: NULL
# deg_counts_stats_file: The name of output file containing detailed DEG comparison results.
# e.g. Human.A-Hour.48-Plate.1-Calc-CTRL.TRS+LOP.tsv
# deg_plots_file: The name of output DEG plot image file whose extension name can be one of
# png, jpeg/jpg, svg, pdf, eps.
# e.g. Human.A-Hour.48-Plate.1-DEG.0.1-Plots-CTRL.TRS+LOP.pdf
# Default: NULL
# deg_image_width: The width of output plot image file.
# e.g. 1600 for raster image and 8.5 for vector image.
# Default: NULL
# deg_image_height: The height of output plot image file.
# e.g. 1600 for raster image and 8.5 for vector image.
# Default: NULL
# single_deg_image_file: Whether to output multiple plots into a single image file.
# Default: FALSE
# plot_bcv: Whether to output the plot of biological coefficient of variation (BCV).
# Default: FALSE
# plot_smear: Whether to output Smear plot for mean and average (MA plot).
# Default: FALSE
# pt_chars: The character symbols to use in BCV plot and Smear plot.
# Default: c(16,19)
# r_compat: Whether to output text table file compatible with R.
# Default: TRUE
# verbose: Whether to print detailed information on screen.
# Default: FALSE
# func_dir: The directory where custom functions are loaded.
# e.g. /Users/granville/Documents/works/academia/MSSM/LINCS/Programs/
# Default: NULL
# Load required library
require(tools)
require(stats)
require(edgeR)
# Load user-defined functions.
if(is.null(func_dir)) func_dir <- getwd()
source(file.path(func_dir, "openPlotDevice.R"), local=TRUE)
source(file.path(func_dir, "closePlotDevice.R"), local=TRUE)
source(file.path(func_dir, "formatDecimal.R"), local=TRUE)
source(file.path(func_dir, "createDGEList.R"), local=TRUE)
source(file.path(func_dir, "cpb.R"), local=TRUE)
source(file.path(func_dir, "cpb.DGEList.R"), local=TRUE)
source(file.path(func_dir, "cpb.default.R"), local=TRUE)
# Check input arguments.
# Check the read-counts matrix.
if(!(is.numeric(read_counts) && is.matrix(read_counts) && nrow(read_counts)>0 && ncol(read_counts)>1))
{
warning("read_counts must be a non-empty numerical matrix with two sample columns at least!")
return(NULL)
}
# Check read_counts and group_names.
if(ncol(read_counts) != length(group_names))
{
warning("The column number of read_counts must be equal to the length of group_names!")
return(NULL)
}
# Check group pair.
if(!(is.character(group_pair) && is.vector(group_pair) && length(group_pair)==2))
{
warning("group_pair must be a character vector of length 2!")
return(NULL)
}
if(length(unique(group_pair)) < length(group_pair))
{
warning("group_pair must contain different group names!")
return(NULL)
}
# Check the dispersion parameter.
if(is.character(dispersion))
{
dispersion_types <- c("common", "trended", "tagwise", "auto")
if(!any(dispersion %in% dispersion_types))
{
warning(paste0("dispersion must be a character string equal to one of ", paste0(dispersion_types,collapse=", "), "!"))
return(NULL)
}
}
else if(is.numeric(dispersion))
{
if(length(dispersion) > 1)
{
warning("dispersion must be a single numerical value!")
return(NULL)
}
}
else
{
warning("dispersion must be either a character string or a numerical value!")
return(NULL)
}
# Check the minimum number of samples: min_samples
if(!(is.numeric(min_samples) && is.vector(min_samples) && length(min_samples)==1 && min_samples>=1 && min_samples<=ncol(read_counts)))
{
warning("min_samples must be a single numerical value no less than one and no more than the number of columns of read-counts matrix!")
return(NULL)
}
# Check the requirement for the minimum number of samples: min_samples_req
min_samples_req <- tolower(min_samples_req)
if(min_samples_req!="all" && min_samples_req!="any")
{
warning("min_samples_req must be either \"all\" or \"any\"!")
return(NULL)
}
# Check the false discovery rate: fdr
if(!(is.numeric(fdr) && is.vector(fdr) && length(fdr)==1 && fdr>=0 && fdr<=1))
{
warning("fdr must be a single numerical value between zero and one!")
return(NULL)
}
# Extract required samples for DEG comparison.
# Determine if sample groups contain the minimum number of sample replicates.
# any: requires replicate number for each group must be greater than the threshold.
# all: requires replicate number for at least one group must be greater than the threshold.
min_samples_req_error <- table(group_names) < min_samples
if(if(min_samples_req=="all") any(min_samples_req_error) else all(min_samples_req_error))
{
warning(paste0(paste("A minimum of", min_samples, "sample replicates are required for", (if(min_samples_req=="all") "all sample groups" else "at least one sample group")), "!"))
return(NULL)
}
# Prepare the names of output files.
# Prepare output result files.
group_pair_file_name <- paste0(group_pair, collapse="-")
if(!is.null(subset_name)) group_pair_file_name <- paste(group_pair_file_name, subset_name, sep=".")
# Extract the main and extension names from output file of DEG read counts.
if(!is.null(deg_counts_stats_file))
{
deg_counts_stats_file_base <- basename(deg_counts_stats_file)
deg_counts_stats_file_dir <- dirname(deg_counts_stats_file)
deg_counts_stats_file_main_name <- file_path_sans_ext(deg_counts_stats_file_base)
deg_counts_stats_file_ext_name <- file_ext(deg_counts_stats_file_base)
}
else
{
deg_counts_stats_file_main_name <- NULL
deg_counts_stats_file_ext_name <- NULL
}
# Generate the file name for the dataset of current DEG read counts.
if(!is.null(deg_counts_stats_file_main_name))
{
# Customize file name.
deg_counts_stats_file_name <- paste(deg_counts_stats_file_main_name, group_pair_file_name, sep=".")
deg_counts_stats_file_name <- paste(deg_counts_stats_file_name, deg_counts_stats_file_ext_name, sep=".")
# Add directory path.
deg_counts_stats_file_name <- file.path(deg_counts_stats_file_dir, deg_counts_stats_file_name)
}
else deg_counts_stats_file_name <- NULL
# Extract the main and extension names from output file of DEG plots.
if(!is.null(deg_plots_file))
{
deg_plots_file_base <- basename(deg_plots_file)
deg_plots_file_dir <- dirname(deg_plots_file)
deg_plots_file_main_name <- file_path_sans_ext(deg_plots_file_base)
deg_plots_file_ext_name <- file_ext(deg_plots_file_base)
}
else
{
deg_plots_file_main_name <- NULL
deg_plots_file_ext_name <- NULL
}
# Generate the file name for the plots of current read counts and DEGs.
if(!is.null(deg_plots_file_main_name))
{
# Customize file name.
deg_plots_file_name <- paste(deg_plots_file_main_name, group_pair_file_name, sep=".")
}
else deg_plots_file_name <- NULL
# Perform DEG comparison analysis.
# Generate the output plot PDF file.
if(!is.null(deg_plots_file_name) && nchar(deg_plots_file_name)>0 && (plot_bcv || plot_smear))
{
# Open a PDF file in letter size to plot
dev_info <- openPlotDevice(dev_type=deg_plots_file_ext_name, file_name=deg_plots_file_name, dir_name=deg_plots_file_dir, width=deg_image_width, height=deg_image_height, single_file=single_deg_image_file)
orig_par <- par(no.readonly = TRUE)
plot_deg_flag <- TRUE
}
else plot_deg_flag <- FALSE
# Construct DGEList from raw read counts and comparing group names.
dge_profile <- createDGEList(read_counts=read_counts, group_names=group_names, normalize=normalize, remove_small=remove_small, norm_base=norm_base, verbose=verbose, func_dir=func_dir)
group_pair_title_text <- paste0(group_pair, collapse="/")
if(!is.null(subset_name)) group_pair_title_text <- paste(group_pair_title_text, subset_name, sep="-")
# Estimate common, trended, and tagwise negative Binomial dispersions for entire dataset containing all combinations of interested factors.
if(is.character(dispersion))
{
dge_profile <- estimateDisp(dge_profile)
# Only calculate and plot BCV when dispersion is available.
if(!is.na((dge_profile$common.dispersion)))
{
# Calculate common BCV when there are more than one sample.
if(verbose) print(paste("BCV =", formatDecimal(sqrt(dge_profile$common.dispersion))))
# Plot biological coefficient of variation
if(plot_bcv)
{
title_text <- paste("Common BCV of", group_pair_title_text)
title_text <- appendExprtInfo(title_text, exprt_info)
title_text <- paste(title_text, "is", formatDecimal(sqrt(dge_profile$common.dispersion)))
plotBCV(dge_profile, xlab="Average logCPM", ylab="Biological Coefficient of Variation", main=title_text, pch=pt_chars[1], cex.main=1.75)
}
}
else
{
warning("Dispersion cannot be calculated due to a lack of replicates!")
if(plot_bcv) warning("Cannot plot BCV because dispersion cannot be calculated due to a lack of replicates!")
}
}
# Print comparison information to console.
if(verbose) print(paste0(group_pair[1], " vs ", group_pair[2], ":"))
# Perform exact test only when dispersion is available.
dispersion_avail <- !(is.na(dge_profile$common.dispersion) && any(is.na(dge_profile$trended.dispersion)) && any(is.na(dge_profile$tagwise.dispersion)) && !is.numeric(dispersion))
if(dispersion_avail)
{
# Perform exact tests for gene-wise differences between two groups
# of negative-binomially distributed counts, and obtain multiple
# statistical measures for each gene, e.g. logFC, logCPM, and p-value.
dge_diffs <- exactTest(dge_profile, pair=group_pair, dispersion=dispersion)
# Extract statistic measures and calculate adjusted p-values for each genes.
# Do not sort top_degs, so that it can be spliced with read counts table.
top_degs <- topTags(dge_diffs, n=nrow(dge_diffs$table), sort.by="none")
# Print top 10 DEGs ranked by their p-value or absolute log-fold change.
if(verbose) print((top_degs$table[sort.list(top_degs$table[,"FDR"], decreasing=FALSE),,drop=FALSE])[1:10,,drop=FALSE])
# Classify DEG statistics as up- or down-regulation, or not significant.
dge_pattern <- decideTestsDGE(dge_diffs, p.value=fdr)
if(verbose) print(summary(dge_pattern))
# Extract DEG names and flags
deg_names <- rownames(dge_profile)[as.logical(dge_pattern)]
degs_quant <- sum(abs(dge_pattern))
# Plots log-Fold Change versus log-Concentration.
if(plot_smear)
{
plotSmear(dge_diffs, de.tags=deg_names, pch=pt_chars[2])
abline(h=c(-1, 1), col="blue")
title_text <- paste(degs_quant, "DEGs")
title_text <- paste(title_text, "in", group_pair_title_text)
title_text <- appendExprtInfo(title_text, exprt_info)
title_text <- paste(title_text, "at FDR", fdr)
title(main=title_text, cex.main=1.75)
}
}
else warning("DEGs cannot be calculated because dispersion is unavailable due to a lack of replicates!")
# Merge raw and normalized read counts.
read_counts_norm <- cpb(x=dge_profile,norm.base=norm_base)
colnames(read_counts_norm) <- paste(colnames(read_counts_norm), "Norm", sep=".")
dge_counts_stats <- cbind(dge_profile$counts, read_counts_norm)
# Calculate DEGs if dispersion is available, or calculate log fold change.
if(dispersion_avail)
{
# Combine read counts and DEG statistics of all genes into one matrix.
deg_counts_stats <- cbind(dge_counts_stats, top_degs$table, Regulation=as.vector(dge_pattern))
# If deg_only is specified, then only select DEGs for output.
if(deg_only) deg_counts_stats <- deg_counts_stats[deg_names,,drop=FALSE]
}
else
{
warning("log-fold changes are calculated due to a lack of replicates!")
# Log fold change may contain Inf and NaN.
logfc <- rowMeans(dge_profile$counts[,group_names==group_pair[2],drop=FALSE]) / rowMeans(dge_profile$counts[,group_names==group_pair[1],drop=FALSE])
# Combine read counts and log fold changes of all genes into one matrix.
deg_counts_stats <- cbind(dge_counts_stats, logFC=logfc)
}
# Close plot device
if(plot_deg_flag)
{
par(orig_par)
closePlotDevice(dev_info)
plot_deg_flag = FALSE
}
# Export result data files.
# Save DEGs sorted by their range of expression level to data file
if(!is.null(deg_counts_stats_file_name) && nchar(deg_counts_stats_file_name)>0)
{
# Generate R-compatible or view-compatible tab-delimited data file.
if(r_compat) write.table(deg_counts_stats, deg_counts_stats_file_name, sep="\t", quote=FALSE)
else write.table(cbind(Gene=rownames(deg_counts_stats), deg_counts_stats), deg_counts_stats_file_name, sep="\t", quote=FALSE, row.names=FALSE)
}
# Return a data.frame table of sample read counts and DEG comparison statistics.
return(as.data.frame(deg_counts_stats))
}
|
83995e2f5a662f6b541c129eb266dfb740d37ed7
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/sBIC/R/MixtureModels.R
|
ee756004723dd1b887e85fc0e88d302e6579051e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,964
|
r
|
MixtureModels.R
|
#' @include ModelPoset.R
NULL
#' Linear collections of mixture models.
#'
#' An abstract class representing a collection of mixture models that are
#' linearly ordered by the number of components in the mixture. This class
#' should not be instantiated, just extended.
#'
#' @name MixtureModels
#' @usage MixtureModels(phi = "default")
#' @export MixtureModels
#'
#' @param phi parameter controlling the strength of the sBIC penalty.
#'
#' @return An object representing the collection.
#'
#' @seealso \code{\link{GaussianMixtures}}, \code{\link{BinomialMixtures}}, \code{\link{LCAs}}
NULL
setConstructorS3("MixtureModels",
function(phi = "default") { extend(ModelPoset(), "MixtureModels", .phi = phi) },
abstract = T)
#' @rdname parents
#' @name parents.MixtureModels
#' @S3method parents MixtureModels
#' @usage \method{parents}{MixtureModels}(this, model)
#' @export parents.MixtureModels
setMethodS3("parents", "MixtureModels", function(this, model) {
if (model > this$getNumModels() ||
model < 1 || length(model) != 1) {
throw("Invalid input model.")
}
if (model == 1) {
return(numeric(0))
} else {
return(model - 1)
}
}, appendVarArgs = F)
#' @rdname learnCoef
#' @name learnCoef.MixtureModels
#' @usage \method{learnCoef}{MixtureModels}(this, superModel, subModel)
#' @S3method learnCoef MixtureModels
#' @export learnCoef.MixtureModels
setMethodS3("learnCoef", "MixtureModels", function(this, superModel, subModel) {
i = superModel
j = subModel
r = this$getDimension(1) # Dimension of a single component
lambda = 1 / 2 * min(j * r + j - 1 + this$getPhi() * (i - j),
r * i + j - 1)
return(list(lambda = lambda, m = 1))
}, appendVarArgs = F)
#' Get the phi parameter.
#'
#' Gets the phi parameter controlling the strength of the sBIC penalty.
#'
#' @name getPhi
#' @export getPhi
#'
#' @param this the MixtureModels object.
#' @param phi the new phi value.
getPhi <- function(this, phi) {
UseMethod("getPhi")
}
#' @rdname getPhi
#' @name getPhi.MixtureModels
#' @usage \method{getPhi}{MixtureModels}(this, phi)
#' @S3method getPhi MixtureModels
#' @export getPhi.MixtureModels
setMethodS3("getPhi", "MixtureModels", function(this, phi) {
return(this$.phi)
}, appendVarArgs = F)
#' Set phi parameter.
#'
#' Set the phi parameter in a mixture model object to a different value.
#'
#' @name setPhi
#' @export setPhi
#'
#' @param this the MixtureModels object.
#' @param phi the new phi value.
setPhi <- function(this, phi) {
UseMethod("setPhi")
}
#' @rdname setPhi
#' @name setPhi.MixtureModels
#' @S3method setPhi MixtureModels
#' @usage \method{setPhi}{MixtureModels}(this, phi)
#' @export setPhi.MixtureModels
setMethodS3("setPhi", "MixtureModels", function(this, phi) {
if (!is.numeric(phi) || length(phi) != 1) {
throw("Invalid phi value.")
}
this$.phi = phi
}, appendVarArgs = F)
|
644ae5524d8da183a67a5ce26695e650238297de
|
139e93dc5ad1f30938195671caf4aefce99f188d
|
/tests/testthat/test-map-if-at.R
|
48307d99a5efd148a857e7bdbef1cdfdbafc0665
|
[
"MIT"
] |
permissive
|
tidyverse/purrr
|
7b94592b1eb6f4e6db8d83fc307465ce7b65b520
|
ac4f5a9b9ff2b5b36770c4c5e064547264544fd2
|
refs/heads/main
| 2023-08-28T01:39:40.614443
| 2023-08-10T14:13:52
| 2023-08-10T14:13:52
| 27,309,729
| 901
| 265
|
NOASSERTION
| 2023-09-03T11:49:30
| 2014-11-29T17:33:40
|
R
|
UTF-8
|
R
| false
| false
| 1,087
|
r
|
test-map-if-at.R
|
test_that("map_if() and map_at() always return a list", {
skip_if_not_installed("tibble")
df <- tibble::tibble(x = 1, y = "a")
expect_identical(map_if(df, is.character, ~"out"), list(x = 1, y = "out"))
expect_identical(map_at(df, 1, ~"out"), list(x = "out", y = "a"))
})
test_that("map_at() works with tidyselect", {
skip_if_not_installed("tidyselect")
local_options(lifecycle_verbosity = "quiet")
x <- list(a = "b", b = "c", aa = "bb")
one <- map_at(x, vars(a), toupper)
expect_identical(one$a, "B")
expect_identical(one$aa, "bb")
two <- map_at(x, vars(tidyselect::contains("a")), toupper)
expect_identical(two$a, "B")
expect_identical(two$aa, "BB")
})
test_that("negative .at omits locations", {
x <- c(1, 2, 3)
out <- map_at(x, -1, ~ .x * 2)
expect_equal(out, list(1, 4, 6))
})
test_that("map_if requires predicate functions", {
expect_snapshot(map_if(1:3, ~ NA, ~ "foo"), error = TRUE)
})
test_that("`.else` maps false elements", {
expect_identical(map_if(-1:1, ~ .x > 0, paste, .else = ~ "bar", "suffix"), list("bar", "bar", "1 suffix"))
})
|
8e55677eff254603c8923e967e25be4ddcce35f7
|
cd081c730b15c35eecb48d8524de182cb348162a
|
/app/ui/tab2_ui.R
|
06efb7428f8ba7df32bfe43ac4b3848562355123
|
[] |
no_license
|
tonyk7440/shiny_template
|
6a83099725b4dbb98922a79b8f34cfe2c7923e32
|
d0eda95f7da9f19663c68a4bd28587842ce7a1bd
|
refs/heads/master
| 2021-01-12T10:31:36.523649
| 2016-12-14T18:18:02
| 2016-12-14T18:18:02
| 76,471,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
tab2_ui.R
|
tabItem("DataTwo",
titlePanel("Tab 2")
)
|
5f535f4efe02c8d4acc9f117aeedb0fa9acec746
|
a9003002f4c54b1da0395aa68ad933a086d3973e
|
/FlashX-release/Rpkg/man/mean.Rd
|
2c87e0c4b8e500ae0643f4e16d4b9b21a6c17a45
|
[
"Apache-2.0"
] |
permissive
|
rwang067/HistorySystem
|
ca45a31658c1881bdc5586a4779add981a818ba6
|
ee06919c022d7d45b47d675d053fb261601bb310
|
refs/heads/master
| 2020-03-23T05:01:02.619671
| 2018-07-16T10:40:44
| 2018-07-16T10:40:44
| 141,119,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 486
|
rd
|
mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FlashR_base.R
\docType{methods}
\name{mean}
\alias{mean}
\alias{mean,fm-method}
\alias{mean,fmV-method}
\title{Arithmetic Mean}
\usage{
\S4method{mean}{fm}(x, ...)
\S4method{mean}{fmV}(x, ...)
}
\arguments{
\item{x}{A FlashR vector or matrix.}
\item{...}{further arguments passed to or from other methods.}
}
\description{
Compute arithmetic mean.
}
\examples{
mat <- fm.runif.matrix(100, 10)
mean(mat)
}
|
f5f94e6bf29c8d29131d98b46f1f228c5a33e979
|
6034d565642a30876b7b7a025b74a31580c44613
|
/R/p_value_kenward.R
|
4650bdfb59156d82f310264b3a6879b5705e94a3
|
[] |
no_license
|
cran/parameters
|
a95beba8c8bd820a88b74ca407609cc08a62fcab
|
f19575ccdbbd303a1896a13d8b4b8210563cabfa
|
refs/heads/master
| 2023-06-08T08:58:24.080762
| 2023-05-26T09:20:02
| 2023-05-26T09:20:02
| 211,083,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,982
|
r
|
p_value_kenward.R
|
#' @title Kenward-Roger approximation for SEs, CIs and p-values
#' @name p_value_kenward
#'
#' @description An approximate F-test based on the Kenward-Roger (1997) approach.
#'
#' @param model A statistical model.
#' @param dof Degrees of Freedom.
#' @inheritParams ci.default
#'
#' @details Inferential statistics (like p-values, confidence intervals and
#' standard errors) may be biased in mixed models when the number of clusters
#' is small (even if the sample size of level-1 units is high). In such cases
#' it is recommended to approximate a more accurate number of degrees of freedom
#' for such inferential statistics. Unlike simpler approximation heuristics
#' like the "m-l-1" rule (`dof_ml1`), the Kenward-Roger approximation is
#' also applicable in more complex multilevel designs, e.g. with cross-classified
#' clusters. However, the "m-l-1" heuristic also applies to generalized
#' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited
#' to linear mixed models only.
#'
#' @seealso `dof_kenward()` and `se_kenward()` are small helper-functions
#' to calculate approximated degrees of freedom and standard errors for model
#' parameters, based on the Kenward-Roger (1997) approach.
#'
#' [`dof_satterthwaite()`] and [`dof_ml1()`] approximate degrees of freedom
#' based on Satterthwaite's method or the "m-l-1" rule.
#'
#' @examples
#' \donttest{
#' if (require("lme4", quietly = TRUE)) {
#' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)
#' p_value_kenward(model)
#' }
#' }
#' @return A data frame.
#' @references Kenward, M. G., & Roger, J. H. (1997). Small sample inference for
#' fixed effects from restricted maximum likelihood. Biometrics, 983-997.
#' @export
p_value_kenward <- function(model, dof = NULL) {
UseMethod("p_value_kenward")
}
#' @export
p_value_kenward.lmerMod <- function(model, dof = NULL) {
if (is.null(dof)) {
dof <- dof_kenward(model)
}
.p_value_dof(model, dof, method = "kenward")
}
# helper ------------------------------
.p_value_dof <- function(model,
dof,
method = NULL,
statistic = NULL,
se = NULL,
component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "precision", "scale", "smooth_terms", "full", "marginal"),
effects = c("fixed", "random", "all"),
verbose = TRUE,
vcov = NULL,
vcov_args = NULL,
...) {
component <- match.arg(component)
effects <- match.arg(effects)
if (is.null(.check_component(model, component, verbose = verbose))) {
return(NULL)
}
params <- insight::get_parameters(model, component = component)
# check if all estimates are non-NA
params <- .check_rank_deficiency(params, verbose = FALSE)
if (is.null(statistic)) {
statistic <- insight::get_statistic(model, component = component)
params <- merge(params, statistic, sort = FALSE)
statistic <- params$Statistic
}
# different SE for kenward and robust
if (identical(method, "kenward") || identical(method, "kr")) {
if (is.null(se)) {
se <- se_kenward(model)$SE
}
} else if (!is.null(vcov) || isTRUE(list(...)[["robust"]])) {
se <- standard_error(model,
vcov = vcov,
vcov_args = vcov_args,
component = component,
...
)$SE
}
# overwrite statistic, based on robust or kenward standard errors
if (identical(method, "kenward") || identical(method, "kr") || !is.null(vcov)) {
estimate <- if ("Coefficient" %in% colnames(params)) {
params$Coefficient
} else {
params$Estimate
}
statistic <- estimate / se
}
p <- 2 * stats::pt(abs(statistic), df = dof, lower.tail = FALSE)
out <- .data_frame(
Parameter = params$Parameter,
p = unname(p)
)
if ("Component" %in% names(params)) out$Component <- params$Component
if ("Effects" %in% names(params) && effects != "fixed") out$Effects <- params$Effects
if ("Response" %in% names(params)) out$Response <- params$Response
out
}
.p_value_dof_kr <- function(model, params, dof) {
if ("SE" %in% colnames(params) && "SE" %in% colnames(dof)) {
params$SE <- NULL
}
params <- merge(params, dof, by = "Parameter")
p <- 2 * stats::pt(abs(params$Estimate / params$SE), df = params$df_error, lower.tail = FALSE)
.data_frame(
Parameter = params$Parameter,
p = unname(p)
)
}
# helper -------------------------
.check_REML_fit <- function(model) {
insight::check_if_installed("lme4")
if (!(lme4::getME(model, "is_REML"))) {
insight::format_warning("Model was not fitted by REML. Re-fitting model now, but p-values, df, etc. still might be unreliable.")
}
}
|
23e527d2713080d80611503b8aaab9180cb87d98
|
54a1d94786a6e8083c6d4d85b741a39a688c22d8
|
/tests/testthat/test-UserMetrics.R
|
6a03adbb1d73177bf4f5c97f4c16965da0fda172
|
[
"Apache-2.0"
] |
permissive
|
dmpe/urlshorteneR
|
dd75db112224bd4c0ab62bd7c0e364ce5d7d50b9
|
c9276e932e76c4f12b6a51bd6348b58399ee9442
|
refs/heads/master
| 2022-09-08T16:37:50.278295
| 2022-08-20T10:41:04
| 2022-08-20T10:41:04
| 36,286,688
| 16
| 8
|
Apache-2.0
| 2020-12-02T20:14:08
| 2015-05-26T09:46:00
|
R
|
UTF-8
|
R
| false
| false
| 2,230
|
r
|
test-UserMetrics.R
|
library(testthat)
library(httr)
library(jsonlite)
library(stringr)
library(urlshorteneR)
library(lubridate)
context("User Metrics")
test_that("will rollup the click counts to a referrer about a single Bitlink.", {
expect_gte(bitly_user_metrics_referring_domains(bitlink = "bit.ly/2EUGovW", size = 100)$units, -1)
umrd2 <- bitly_user_metrics_referring_domains(bitlink = "bit.ly/2EUGovW", size = 100)
expect_gte(umrd2$metrics[umrd2$metrics$value == "direct", ]$clicks, 1)
})
context("Link Metrics")
test_that("Returns the number of clicks on a single Bitlink.", {
lmc <- bitly_retrieve_clicks(bitlink = "bit.ly/2EUGovW", unit = "month", units = -1, size = 100)
expect_equal(length(lmc), 4)
lmcs <- bitly_retrieve_clicks_summary(bitlink = "bit.ly/DPetrov", unit = "day", units = -1, size = 100)
expect_named(lmcs, c("unit_reference", "total_clicks", "units", "unit"))
})
test_that("Returns metrics about the countries referring click traffic to a single Bitlink.", {
lmcc <- bitly_retrieve_metrics_by_countries(bitlink = "bit.ly/DPetrov", unit = "day", units = -1, size = 100)
expect_named(lmcc, c("unit_reference", "metrics", "units", "unit", "facet"))
})
test_that("Returns Bitlinks for Group.", {
user_info <- bitly_user_info()
lmrd <- bitly_retrieve_bitlinks_by_groups(group_guid = user_info$default_group_guid[1])
expect_equal(length(lmrd), 2)
})
test_that("Returns Sorted Bitlinks for Group.", {
user_info <- bitly_user_info()
rsbbg <- bitly_retrieve_sorted_bitlinks_by_groups(group_guid = user_info$default_group_guid[1])
expect_equal(dim(rsbbg$sorted_links)[[2]], 2)
expect_equal(dim(rsbbg$links)[[2]], 12)
})
test_that("Returns metrics about the pages referring click traffic to a single Bitlink.", {
lmr <- bitly_retrieve_metrics_by_referrers(bitlink = "bit.ly/DPetrov", unit = "day", units = -1, size = 100)
expect_named(lmr, c("unit_reference", "metrics", "units", "unit", "facet"))
})
test_that("Returns metrics for a Bitlink by referrers, by domain", {
lmrbd <- bitly_retrieve_metrics_by_referrers_by_domain(bitlink = "bit.ly/DPetrov", unit = "day", units = -1, size = 100)
expect_named(lmrbd, c("unit_reference", "referrers_by_domain", "units", "unit", "facet"))
})
|
ecf9c88d6359095b9df7620fcf43e3573ce8e18a
|
7ca6b681c96e2445ca2e9d1bf9aa7b3bfd99e362
|
/server.R
|
f051a57b3b545dbc2da293c0c70710698a0ce42a
|
[] |
no_license
|
atishaysehgal/DataDashboardSI
|
b9898a7cf355d388571233f6c10c06da0625e24c
|
9b195a64addae4f34f6f98cbbb6e474c14742701
|
refs/heads/master
| 2020-04-04T09:00:43.443578
| 2018-11-02T16:23:22
| 2018-11-02T16:23:22
| 155,803,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
server.R
|
library(shiny)
library(ggplot2) # load ggplot
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output) {
output$yearPlot <- renderPlot({
df <- data.frame(year=as.factor(insurance$year), County = as.factor(insurance$County), variable = insurance[[input$variable]])
ggplot(df, aes_string(df$year, y=df$variable, fill = df$County)) + geom_bar(stat = "Identity") + facet_wrap(~df$County)
})
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.