blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a38962a5098f04f986a82460f868b73a16fa0638 | 6bfbccc3e73a9b8edb10ce91731746dde79858c5 | /SOTU_processingfunctions.R | 9a70eef539cf7d9ff24885c842de39dea8d19b9d | [] | no_license | JoePatrick94/presidential_SOU | 637fdb7914a90129b88b778f99bfad9c65d2bb6c | a517357d367fd11bc1bf64cb1d6f47f92cb276d2 | refs/heads/main | 2023-06-26T06:09:42.242308 | 2021-07-27T17:51:35 | 2021-07-27T17:51:35 | 379,385,690 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,327 | r | SOTU_processingfunctions.R | #Function to get the years active of a president
#' Scrape the span of years a president was active from an SOTU index page.
#'
#' @param SOTU_pagelinks URL of the president's page on the SOTU site.
#' @return Character vector of date-range strings (".dates" nodes) with all
#'   whitespace stripped.
get_years_active <- function(SOTU_pagelinks) {
  page <- read_html(SOTU_pagelinks)
  date_text <- html_text(html_nodes(page, ".dates"))
  gsub("\\s+", "", date_text)
}
#Function to get the political party of president
#' Scrape the political party from a presidential biography page.
#'
#' @param presidential_biolinks URL of the president's biography page.
#' @return Character vector with the text of the 9th child node inside
#'   "#block-system-main" (where the party is listed on the bio page).
get_party <- function(presidential_biolinks) {
  biopage <- read_html(presidential_biolinks)
  party_node <- html_nodes(biopage, "#block-system-main :nth-child(9)")
  html_text(party_node)
}
#Function to get the SOTU text for each president
#' Scrape and clean the State of the Union speech text from a speech page.
#'
#' BUG FIX: the original chain ended with "%>%" immediately before
#' return(text), which piped the cleaned string INTO return() as
#' return(<lhs>, text) while `text` was still undefined -- the function
#' errored at runtime. The chain now assigns to `text` and returns it.
#'
#' @param SOTU_pagelinks URL of the speech page.
#' @return Character vector with the cleaned speech text.
get_speech <- function(SOTU_pagelinks) {
  sotulink <- read_html(SOTU_pagelinks)
  text <- sotulink %>%
    html_nodes(".field-docs-content") %>%
    html_text() %>%
    str_replace_all("\\s*\\[[^\\]]+\\]", "") %>% # remove bracket text, such as ...[laughter]...
    str_replace_all("\n", "") %>%                # remove instances of \n
    str_replace_all("Audience Members.*The President\\.", "") %>% # Take out text from The Audience. --> The President.
    str_replace_all("The President\\.", "") %>%  # Take out text indicating the president is speaking -> The President.
    trimws(which = "both") %>%
    str_replace_all("\\.(?=\\w)", " ") %>%       # if a period does not have a space after it, replace it with a space
    str_replace_all("â", "")                     # strip mis-encoded characters (WINDOWS-1252 artifact)
  return(text)
}
|
a8680ac049d7894569facfc270e2c567d23af2b5 | cfbbe807739f933e5e3c59970bd7197713c891f0 | /Semt5/P. Statistik Induktif/P1/Coba-coba/luassegitiga.R | c02ac00d669ac8d0fa0189f8462e766bb23b7695 | [] | no_license | bangqae/Akakom | c144b564763106346b9f60c7233faccc936cd735 | dffa676d77d1e29a4b173eb98c07169013278bae | refs/heads/master | 2020-09-05T07:17:04.833383 | 2020-07-08T10:40:13 | 2020-07-08T10:40:13 | 220,023,545 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 86 | r | luassegitiga.R | luassegitiga -> function(a, t) {
luas = 0.5*a*t
return(luas)
}
luassegitiga(4, 8) |
bb14c081cf1eda62733fa83241719c67d7631790 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/photobiologyFilters/vignettes/user-guide.R | 4b0b922d0577b132c116fd8296ebe475ba7390a0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,154 | r | user-guide.R | ## ---- echo=FALSE---------------------------------------------------------
knitr::opts_chunk$set(fig.width=8, fig.height=4)
## ------------------------------------------------------------------------
library(photobiology)
library(photobiologyWavebands)
library(photobiologyFilters)
library(ggplot2)
library(ggspectra)
## ------------------------------------------------------------------------
# band_pass
## ------------------------------------------------------------------------
schott
## ------------------------------------------------------------------------
names(filters.mspct)
## ------------------------------------------------------------------------
filters.mspct$UG11
## ------------------------------------------------------------------------
filters.mspct[["UG11"]]
## ------------------------------------------------------------------------
filters.mspct["UG11"]
## ------------------------------------------------------------------------
filters.mspct[petri_dishes]
## ------------------------------------------------------------------------
filters.mspct[grep("UG", names(filters.mspct))]
## ------------------------------------------------------------------------
filters.mspct$UG11
## ------------------------------------------------------------------------
getWhatMeasured(filters.mspct$UG11)
getWhenMeasured(filters.mspct$UG11)
## ------------------------------------------------------------------------
is_normalized(filters.mspct$UG11)
## ------------------------------------------------------------------------
comment(filters.mspct$UG11)
names(filters.mspct$UG11)
## ------------------------------------------------------------------------
getTfrType(filters.mspct$UG11)
## ------------------------------------------------------------------------
plot(filters.mspct$UG11)
## ------------------------------------------------------------------------
plot(filters.mspct$TB550_660_850,
annotations = c("+", "title:what"),
span = 11)
## ------------------------------------------------------------------------
ggplot(filters.mspct$UG11) +
geom_line()
## ------------------------------------------------------------------------
transmittance(filters.mspct$UG11, UVA())
## ------------------------------------------------------------------------
absorbance(filters.mspct$UG11, list(UVA(), Red()))
## ------------------------------------------------------------------------
transmittance(filters.mspct[grep("UG", names(filters.mspct))],
list(UVB(), UVA()))
## ------------------------------------------------------------------------
head(as.data.frame(filters.mspct$UG11))
## ------------------------------------------------------------------------
attach(filters.mspct)
transmittance(UG11, UVA())
detach(filters.mspct)
## ------------------------------------------------------------------------
attach(filters.mspct)
with(UG11, range(w.length))
detach(filters.mspct)
## ------------------------------------------------------------------------
with(filters.mspct, transmittance(UG11, UVA()))
|
3893615956d676475f68affaa0bec3a4730914c6 | 139c6d58e2ba47801b67727ed5c02d4ebaa5fea0 | /resolwe_bio/tools/transmart_fetch.R | e6c3b56d21e550941796fad4cb66ad064fa503a6 | [
"Apache-2.0"
] | permissive | JenkoB/resolwe-bio | 91957b5bb3f7de2cff9adc7c1974dc5a6cba4dbb | a958cf3fc82ebc37f527e1b156753f2324a33803 | refs/heads/master | 2020-12-29T18:47:35.230553 | 2016-12-16T08:48:40 | 2016-12-16T08:49:41 | 66,536,440 | 0 | 1 | null | 2016-08-25T07:41:48 | 2016-08-25T07:41:48 | null | UTF-8 | R | false | false | 3,185 | r | transmart_fetch.R | #!/usr/bin/Rscript
# Load the tranSMART API client and the command-line argument parser.
# NOTE(review): require() only warns (returns FALSE) when a package is missing,
# so a missing package surfaces later as a confusing error; library() would
# fail fast -- confirm the soft failure is intended.
require("transmartRClient")
require('argparse')
# Build the command-line interface: instance URL, the concept links to fetch
# (annotations and/or expressions), auth token, projection name, and the three
# output file paths (annotations, expressions, study tree).
parser = ArgumentParser(description='Fetch data from tranSMART instance.')
parser$add_argument('--URL', default = "http://137.117.169.225:8080/transmart", help='tranSMART instance URL')
parser$add_argument('--annConceptLinks', default = "", help='Annotations concept links')
parser$add_argument('--expsConceptLinks', default = "", help='Expressions concept links')
parser$add_argument('--token', default = "", help='Auth token')
parser$add_argument('--projection', default='log_intensity', help='Name of the data projection to fetch (log_intensity, all_data, default_real_projection, ...')
parser$add_argument('--outA', help='Output annotation file name')
parser$add_argument('--outE', help='Output expression file name')
parser$add_argument('--outT', help='Output tree file name')
# Parse the arguments actually passed on the command line.
args = parser$parse_args(commandArgs(trailingOnly=TRUE))
# TODO: Why is total NA and why do current and total have 2 values???
# Build a progress-callback object for getHighdimData(): defines start(),
# update(), and end() as local functions, then returns this function's
# environment so the caller can access them by name (environment-as-object).
.downloadcallback <- function() {
# Invoked once before the download begins.
start <- function(.total) cat("Retrieving data...\n")
# Invoked repeatedly during the download; prints machine-readable progress as
# a JSON fragment on stdout. NOTE(review): only element [2] of current/total
# is used; the meaning of each element is defined by transmartRClient --
# confirm against its progress.download contract.
update <- function(current, total) {
if (current[2] > 0 && !is.na(total[2]) && total[2] > 0) {
# limit the progress to 0.95 (5 % left for parsing)
cat('{"proc.progress":', current[2] / total[2] * 0.95, '}\n')
}
}
# Invoked once when the download completes.
end <- function() cat("Download complete.\n")
# Returning the environment exposes start/update/end to the caller.
environment()
}
# Open the session against the tranSMART instance using the supplied token.
connectToTransmart(args$URL, .access.token = args$token)
# get annotations
if (args$annConceptLinks != '') {
# Read the whole concept-links file as one string, strip line breaks, and
# split on ';' to obtain the individual concept links.
ann <- readChar(args$annConceptLinks, file.info(args$annConceptLinks)$size)
ann <- gsub("[\r\n]", "", ann)
links_new <- c(unlist(strsplit(ann, ';')))
observations <- getObservations(concept.links = links_new, as.data.frame = T)
# First column: subject trial ID; remaining columns: the observation values.
final <- data.frame(cbind(observations$subjectInfo$subject.inTrialId, observations$observations))
final <- final[, !names(final) == 'subject.id']
colnames(final)[1] <- 'ID'
# Coerce every column to character so the two empty padding rows below can
# be inserted without type coercion surprises.
final <- as.data.frame(lapply(final, as.character), stringsAsFactors = FALSE)
empty <- rep("", ncol(final))
final <- rbind(empty, final)
final <- rbind(empty, final)
write.table(final, file = args$outA, quote = FALSE, sep = "\t", row.names = F, na = "")
# get study tree
# Collect each study's concept full names, querying every study ID only once.
# `study[[1]][3]` takes the third '/'-separated path segment as the study ID
# -- presumably links look like "/studies/<id>/..."; confirm against the API.
con <- c()
stu <- list()
for (study in c(unlist(strsplit(ann, ';')))) {
study <- c(strsplit(as.character(study), '/'))
studyId <- study[[1]][3]
if (!(studyId %in% stu)) {
studyConcepts = getConcepts(studyId)
con <- c(con, studyConcepts$fullName)
stu <- c(stu, studyId)
}
}
write.table(con, file = args$outT, quote = FALSE, sep = "\t", row.names = F, col.names = F)
}
# get expressions
if (args$expsConceptLinks != '') {
# Fetch the high-dimensional data with the progress callback defined above.
dataDownloaded <- getHighdimData(concept.link = args$expsConceptLinks, projection=args$projection, progress.download = .downloadcallback())
data = dataDownloaded[[1]]
# Drop the first 7 metadata columns, keeping only expression values.
expression_data = data[,-c(1:7)]
# make.names(..., unique=TRUE) disambiguates repeated patient IDs so they can
# serve as row names.
rownames(expression_data) = make.names(data$patientId, unique=TRUE)
#rownames(expression_data) = data$patientId
# Write genes-by-patients (transposed) as a tab-separated table.
write.table(t(expression_data), file = args$outE, quote = FALSE, sep = "\t", col.names = NA)
}
|
2c0c1afece03f5186fcad1cb3eb1463ad57d01c3 | 6951cfcfbcad0034696c6abe9a4ecf51aa0f3a4b | /man/parsePackageCitation.Rd | 1bd931a17cb676ca2d023395cf4ff27c200d354b | [] | no_license | renozao/pkgmaker | df3d4acac47ffbd4798e1d97a31e311bf35693c8 | 2934a52d383adba1d1c00553b9319b865f49d15b | refs/heads/master | 2023-05-10T16:40:30.977394 | 2023-05-03T07:02:51 | 2023-05-03T07:17:17 | 12,726,403 | 8 | 3 | null | 2023-02-14T10:26:07 | 2013-09-10T10:07:35 | R | UTF-8 | R | false | true | 428 | rd | parsePackageCitation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vignette.R
\name{parsePackageCitation}
\alias{parsePackageCitation}
\title{Formatting Package Citations in Sweave/knitr Documents}
\usage{
parsePackageCitation(x)
}
\arguments{
\item{x}{output document, as a single string.}
}
\value{
A character vecotr of citation references.
}
\description{
Formatting Package Citations in Sweave/knitr Documents
}
|
62d79ba5fd146764639817b4d3844240f5b6f137 | fd686b2c6aea9623fc1fdfcfab7a26508a2dbe97 | /run_analysis.R | 9346c660bc1665022281819ac298df455c15c41c | [] | no_license | cara0/getting-cleaning-data-assignment-week4 | 215b7a423876dadaac13af1315f5c768f8bda2c5 | 0d84b57c70401eec25de981d57b618461b6aca29 | refs/heads/master | 2021-01-23T06:55:30.077508 | 2017-03-29T22:42:15 | 2017-03-29T22:42:15 | 86,412,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,007 | r | run_analysis.R | # This code reads the training and test feature values from the UCI HAR dataset;
# combines training and test sets into one data.frame;
# extracts only the features (columns) with means or standard deviations of measurements;
# merges in subject identifiers (integers from 1 to 30) and activity labels for each activity performed while the measurements were recorded;
# and produces 2 smaller datasets:
# tidy1 : all mean/standard deviation measurements for all subjects and activities
# (1 row per measurement time period for total of 10,299 observations of 79 features, plus subject id and activity label)
# tidy2 : the same columns as tidy1, but instead of reporting each observation point of the measurements, they are aggregated into means for each column, by subject and activity. So, each person has one row for each activity they performed, with the values representing that person's average measurement values during that activity.
# note: this code assumes that the 'UCI HAR Dataset' directory is set as current directory.
# read and merge feature values
# Read and stack training + test measurement matrices (rows = time windows).
dt.x <- rbind(read.table('train/X_train.txt'),read.table('test/X_test.txt'))
# check dimensions
dim(dt.x)
# look at first couple rows, first 10 cols only
head(dt.x[1:10,1:10])
# read and subset feature names
features <- read.table('features.txt')
head(features)
# Keep only features whose name mentions a mean or standard deviation.
features.ind <- grepl('mean|std',features$V2)
colNums <- features$V1[features.ind==TRUE]
# subset measurement values based on features vector & rename variables
dt.x <- dt.x[,colNums]
names(dt.x) <- features$V2[features.ind==TRUE]
# Strip '-', '(' and ')' from the feature names to get valid R column names.
names(dt.x) <- gsub('-|\\(|\\)','',names(dt.x))
# add in subject ids
subjects <- rbind(read.table('train/subject_train.txt'),read.table('test/subject_test.txt'))
dim(subjects)
# rename subject col before merging
names(subjects) <- 'subjectid'
# load activity labels and rename values with activities
dt.y <- rbind(read.table('train/y_train.txt'),read.table('test/y_test.txt'))
dim(dt.y)
head(dt.y)
names(dt.y) <- 'activity'
activity.id <- read.table('activity_labels.txt')
activity.id
# Normalize labels: lower-case, underscores removed (e.g. WALKING_UPSTAIRS).
str(dt.y)
str(activity.id)
activity.id$V2 <- gsub('_','',tolower(activity.id$V2))
activity.id
head(dt.y)
# Map each numeric activity code to its label. NOTE(review): this indexes
# activity.id by row position, so it assumes the codes are 1..n in file order
# -- confirm against activity_labels.txt.
dt.y$activity <- activity.id[dt.y$activity,2]
head(dt.y)
# merge it all together
tidy1 <- cbind(subjects,dt.x,dt.y)
head(tidy1)
# create 2nd dataset with one row per subject per activity
dim(tidy1)
# NOTE(review): help(tapply) opens the interactive help pager -- looks like a
# leftover exploratory call; confirm it can be removed.
help(tapply)
dim(tidy1)
dim(tidy1[,2:80])
# Average every measurement column within each subject x activity group.
tidy2 <- aggregate(tidy1[,2:80],by = list(subjectid = tidy1$subjectid,activity = tidy1$activity), mean)
head(tidy2)
str(tidy2)
# looks like what we want - one row per person per activity, mean vals by group - see below:
# > str(tidy2)
# 'data.frame': 180 obs. of 81 variables:
# $ subjectid : int 1 2 3 4 5 6 7 8 9 10 ...
# $ activity : chr "laying" "laying" "laying" "laying" ...
# $ tBodyAccmeanX : num 0.222 0.281 0.276 0.264 0.278 ...
# $ tBodyAccmeanY : num -0.0405 -0.0182 -0.019 -0.015 -0.0183 ...
# $ tBodyAccmeanZ : num -0.113 -0.107 -0.101 -0.111 -0.108 ...
# $ tBodyAccstdX : num -0.928 -0.974 -0.983 -0.954 -0.966 ...
# $ tBodyAccstdY : num -0.837 -0.98 -0.962 -0.942 -0.969 ...
# $ tBodyAccstdZ : num -0.826 -0.984 -0.964 -0.963 -0.969 ...
# $ tGravityAccmeanX : num -0.249 -0.51 -0.242 -0.421 -0.483 ...
# $ tGravityAccmeanY : num 0.706 0.753 0.837 0.915 0.955 ...
# $ tGravityAccmeanZ : num 0.446 0.647 0.489 0.342 0.264 ...
# $ tGravityAccstdX : num -0.897 -0.959 -0.983 -0.921 -0.946 ...
# $ tGravityAccstdY : num -0.908 -0.988 -0.981 -0.97 -0.986 ...
# $ tGravityAccstdZ : num -0.852 -0.984 -0.965 -0.976 -0.977 ...
# $ tBodyAccJerkmeanX : num 0.0811 0.0826 0.077 0.0934 0.0848 ...
# $ tBodyAccJerkmeanY : num 0.00384 0.01225 0.0138 0.00693 0.00747 ...
# $ tBodyAccJerkmeanZ : num 0.01083 -0.0018 -0.00436 -0.00641 -0.00304 ...
# $ tBodyAccJerkstdX : num -0.958 -0.986 -0.981 -0.978 -0.983 ...
# $ tBodyAccJerkstdY : num -0.924 -0.983 -0.969 -0.942 -0.965 ...
# $ tBodyAccJerkstdZ : num -0.955 -0.988 -0.982 -0.979 -0.985 ...
# $ tBodyGyromeanX : num -0.01655 -0.01848 -0.02082 -0.00923 -0.02189 ...
# $ tBodyGyromeanY : num -0.0645 -0.1118 -0.0719 -0.093 -0.0799 ...
# $ tBodyGyromeanZ : num 0.149 0.145 0.138 0.17 0.16 ...
# $ tBodyGyrostdX : num -0.874 -0.988 -0.975 -0.973 -0.979 ...
# $ tBodyGyrostdY : num -0.951 -0.982 -0.977 -0.961 -0.977 ...
# $ tBodyGyrostdZ : num -0.908 -0.96 -0.964 -0.962 -0.961 ...
# $ tBodyGyroJerkmeanX : num -0.107 -0.102 -0.1 -0.105 -0.102 ...
# $ tBodyGyroJerkmeanY : num -0.0415 -0.0359 -0.039 -0.0381 -0.0404 ...
# $ tBodyGyroJerkmeanZ : num -0.0741 -0.0702 -0.0687 -0.0712 -0.0708 ...
# $ tBodyGyroJerkstdX : num -0.919 -0.993 -0.98 -0.975 -0.983 ...
# $ tBodyGyroJerkstdY : num -0.968 -0.99 -0.987 -0.987 -0.984 ...
# $ tBodyGyroJerkstdZ : num -0.958 -0.988 -0.983 -0.984 -0.99 ...
# $ tBodyAccMagmean : num -0.842 -0.977 -0.973 -0.955 -0.967 ...
# $ tBodyAccMagstd : num -0.795 -0.973 -0.964 -0.931 -0.959 ...
# $ tGravityAccMagmean : num -0.842 -0.977 -0.973 -0.955 -0.967 ...
# $ tGravityAccMagstd : num -0.795 -0.973 -0.964 -0.931 -0.959 ...
# $ tBodyAccJerkMagmean : num -0.954 -0.988 -0.979 -0.97 -0.98 ...
# $ tBodyAccJerkMagstd : num -0.928 -0.986 -0.976 -0.961 -0.977 ...
# $ tBodyGyroMagmean : num -0.875 -0.95 -0.952 -0.93 -0.947 ...
# $ tBodyGyroMagstd : num -0.819 -0.961 -0.954 -0.947 -0.958 ...
# $ tBodyGyroJerkMagmean : num -0.963 -0.992 -0.987 -0.985 -0.986 ...
# $ tBodyGyroJerkMagstd : num -0.936 -0.99 -0.983 -0.983 -0.984 ...
# $ fBodyAccmeanX : num -0.939 -0.977 -0.981 -0.959 -0.969 ...
# $ fBodyAccmeanY : num -0.867 -0.98 -0.961 -0.939 -0.965 ...
# $ fBodyAccmeanZ : num -0.883 -0.984 -0.968 -0.968 -0.977 ...
# $ fBodyAccstdX : num -0.924 -0.973 -0.984 -0.952 -0.965 ...
# $ fBodyAccstdY : num -0.834 -0.981 -0.964 -0.946 -0.973 ...
# $ fBodyAccstdZ : num -0.813 -0.985 -0.963 -0.962 -0.966 ...
# $ fBodyAccmeanFreqX : num -0.159 -0.146 -0.074 -0.274 -0.136 ...
# $ fBodyAccmeanFreqY : num 0.0975 0.2573 0.2385 0.3662 0.4665 ...
# $ fBodyAccmeanFreqZ : num 0.0894 0.4025 0.217 0.2013 0.1323 ...
# $ fBodyAccJerkmeanX : num -0.957 -0.986 -0.981 -0.979 -0.983 ...
# $ fBodyAccJerkmeanY : num -0.922 -0.983 -0.969 -0.944 -0.965 ...
# $ fBodyAccJerkmeanZ : num -0.948 -0.986 -0.979 -0.975 -0.983 ...
# $ fBodyAccJerkstdX : num -0.964 -0.987 -0.983 -0.98 -0.986 ...
# $ fBodyAccJerkstdY : num -0.932 -0.985 -0.971 -0.944 -0.966 ...
# $ fBodyAccJerkstdZ : num -0.961 -0.989 -0.984 -0.98 -0.986 ...
# $ fBodyAccJerkmeanFreqX : num 0.132 0.16 0.176 0.182 0.24 ...
# $ fBodyAccJerkmeanFreqY : num 0.0245 0.1212 -0.0132 0.0987 0.1957 ...
# $ fBodyAccJerkmeanFreqZ : num 0.0244 0.1906 0.0448 0.077 0.0917 ...
# $ fBodyGyromeanX : num -0.85 -0.986 -0.97 -0.967 -0.976 ...
# $ fBodyGyromeanY : num -0.952 -0.983 -0.978 -0.972 -0.978 ...
# $ fBodyGyromeanZ : num -0.909 -0.963 -0.962 -0.961 -0.963 ...
# $ fBodyGyrostdX : num -0.882 -0.989 -0.976 -0.975 -0.981 ...
# $ fBodyGyrostdY : num -0.951 -0.982 -0.977 -0.956 -0.977 ...
# $ fBodyGyrostdZ : num -0.917 -0.963 -0.967 -0.966 -0.963 ...
# $ fBodyGyromeanFreqX : num -0.00355 0.10261 -0.08222 -0.06609 -0.02272 ...
# $ fBodyGyromeanFreqY : num -0.0915 0.0423 -0.0267 -0.5269 0.0681 ...
# $ fBodyGyromeanFreqZ : num 0.0105 0.0553 0.1477 0.1529 0.0414 ...
# $ fBodyAccMagmean : num -0.862 -0.975 -0.966 -0.939 -0.962 ...
# $ fBodyAccMagstd : num -0.798 -0.975 -0.968 -0.937 -0.963 ...
# $ fBodyAccMagmeanFreq : num 0.0864 0.2663 0.237 0.2417 0.292 ...
# $ fBodyBodyAccJerkMagmean : num -0.933 -0.985 -0.976 -0.962 -0.977 ...
# $ fBodyBodyAccJerkMagstd : num -0.922 -0.985 -0.975 -0.958 -0.976 ...
# $ fBodyBodyAccJerkMagmeanFreq : num 0.266 0.342 0.239 0.274 0.197 ...
# $ fBodyBodyGyroMagmean : num -0.862 -0.972 -0.965 -0.962 -0.968 ...
# $ fBodyBodyGyroMagstd : num -0.824 -0.961 -0.955 -0.947 -0.959 ...
# $ fBodyBodyGyroMagmeanFreq : num -0.1398 0.0186 -0.0229 -0.2599 0.1024 ...
# $ fBodyBodyGyroJerkMagmean : num -0.942 -0.99 -0.984 -0.984 -0.985 ...
# $ fBodyBodyGyroJerkMagstd : num -0.933 -0.989 -0.983 -0.983 -0.983 ...
# $ fBodyBodyGyroJerkMagmeanFreq: num 0.1765 0.2648 0.1107 0.2029 0.0247 ...
|
1dc286728f414c58ec5d0e5fc90a2e6648bac8fc | 6300606517c0dcaae4dce093a8366eea953deb37 | /2019/Day5.R | 0f6d15af5889125af6d3ad54edbfed2ce62d4f37 | [] | no_license | akulumbeg/adventofcode | e5b5f8e509d240279ce0b4daf7325a48a4cbf9fc | 71d2a329beb4dd42d7e9dd6f544aa0c8fbc343cd | refs/heads/master | 2022-07-28T16:59:30.147837 | 2022-07-11T14:46:15 | 2022-07-11T14:46:15 | 220,464,721 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 173 | r | Day5.R | # Part 1 ------------------------------------------------------------------
rm(list = ls())
# Part 2 ------------------------------------------------------------------
|
5410ad4faa44dd16bbdf18aeb098dd2091dbdf23 | 05678f03a83ce73472b1473f2d0743c9f015f2b8 | /tests/testthat/test_observations_api.R | 349e5cb9b95ae836da7f93a2d3388d20f2d46cb5 | [] | no_license | Breeding-Insight/brapi-r-v2 | 3a7b4168c6d8516eb1128445a2f281d1199668a3 | 5cfa7453947121496780b410661117639f09c7ff | refs/heads/main | 2023-03-14T22:20:29.331935 | 2021-03-17T01:31:11 | 2021-03-17T01:31:11 | 348,535,689 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,813 | r | test_observations_api.R | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test ObservationsApi")
api.instance <- ObservationsApi$new()
test_that("ObservationsGet", {
# tests for ObservationsGet
# base path: https://test-server.brapi.org/brapi/v2
# Get a filtered set of Observations
# Retrieve all observations where there are measurements for the given observation variables. observationTimestamp should be ISO8601 format with timezone -> YYYY-MM-DDThh:mm:ss+hhmm
# @param observation.db.id character The unique ID of an Observation (optional)
# @param observation.unit.db.id character The unique ID of an Observation Unit (optional)
# @param germplasm.db.id character The unique ID of a germplasm (accession) to filter on (optional)
# @param observation.variable.db.id character The unique ID of an observation variable (optional)
# @param study.db.id character The unique ID of a studies to filter on (optional)
# @param location.db.id character The unique ID of a location where these observations were collected (optional)
# @param trial.db.id character The unique ID of a trial to filter on (optional)
# @param program.db.id character The unique ID of a program to filter on (optional)
# @param season.db.id character The year or Phenotyping campaign of a multi-annual study (trees, grape, ...) (optional)
# @param observation.unit.level.name character The Observation Unit Level. Returns only the observation unit of the specified Level. References ObservationUnit->observationUnitPosition->observationLevel->levelName (optional)
# @param observation.unit.level.order character The Observation Unit Level Order Number. Returns only the observation unit of the specified Level. References ObservationUnit->observationUnitPosition->observationLevel->levelOrder (optional)
# @param observation.unit.level.code character The Observation Unit Level Code. This parameter should be used together with `observationUnitLevelName` or `observationUnitLevelOrder`. References ObservationUnit->observationUnitPosition->observationLevel->levelCode (optional)
# @param observation.time.stamp.range.start character Timestamp range start (optional)
# @param observation.time.stamp.range.end character Timestamp range end (optional)
# @param external.reference.id character An external reference ID. Could be a simple string or a URI. (use with `externalReferenceSource` parameter) (optional)
# @param external.reference.source character An identifier for the source system or database of an external reference (use with `externalReferenceID` parameter) (optional)
# @param page integer Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`. (optional)
# @param page.size integer The size of the pages to be returned. Default is `1000`. (optional)
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @return [ObservationListResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("ObservationsObservationDbIdGet", {
# tests for ObservationsObservationDbIdGet
# base path: https://test-server.brapi.org/brapi/v2
# Get the details of a specific Observations
# Get the details of a specific Observations observationTimestamp should be ISO8601 format with timezone -> YYYY-MM-DDThh:mm:ss+hhmm
# @param observation.db.id character The unique ID of an observation
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @return [ObservationSingleResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("ObservationsObservationDbIdPut", {
# tests for ObservationsObservationDbIdPut
# base path: https://test-server.brapi.org/brapi/v2
# Update an existing Observation
# Update an existing Observation
# @param observation.db.id character The unique ID of an observation
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @param observation.new.request ObservationNewRequest (optional)
# @return [ObservationSingleResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("ObservationsPost", {
# tests for ObservationsPost
# base path: https://test-server.brapi.org/brapi/v2
# Add new Observation entities
# Add new Observation entities
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @param observation.new.request array[ObservationNewRequest] (optional)
# @return [ObservationListResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("ObservationsPut", {
# tests for ObservationsPut
# base path: https://test-server.brapi.org/brapi/v2
# Update multiple Observation entities
# Update multiple Observation entities simultaneously with a single call Include as many `observationDbIds` in the request as needed. Note - In strictly typed languages, this structure can be represented as a Map or Dictionary of objects and parsed directly from JSON.
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @param request.body map(ObservationNewRequest) (optional)
# @return [ObservationListResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
test_that("ObservationsTableGet", {
# tests for ObservationsTableGet
# base path: https://test-server.brapi.org/brapi/v2
# Get a list of Observations in a table format
# <p>This service is designed to retrieve a table of time dependant observation values as a matrix of Observation Units and Observation Variables. This is also sometimes called a Time Series. This service takes the \"Sparse Table\" approach for representing this time dependant data.</p> <p>The table may be represented by JSON, CSV, or TSV. The \"Accept\" HTTP header is used for the client to request different return formats. By default, if the \"Accept\" header is not included in the request, the server should return JSON as described below.</p> <p>The table is REQUIRED to have the following columns</p> <ul> <li>observationUnitDbId - Each row is related to one Observation Unit</li> <li>observationTimeStamp - Each row is has a time stamp for when the observation was taken</li> <li>At least one column with an observationVariableDbId</li> </ul> <p>The table may have any or all of the following OPTIONAL columns. Included columns are decided by the server developer</p> <ul> <li>observationUnitName</li> <li>studyDbId</li> <li>studyName</li> <li>germplasmDbId</li> <li>germplasmName</li> <li>positionCoordinateX</li> <li>positionCoordinateY</li> <li>year</li> </ul> <p>The table also may have any number of Observation Unit Hierarchy Level columns. For example:</p> <ul> <li>field</li> <li>plot</li> <li>sub-plot</li> <li>plant</li> <li>pot</li> <li>block</li> <li>entry</li> <li>rep</li> </ul> <p>The JSON representation provides a pair of extra arrays for defining the headers of the table. The first array \"headerRow\" will always contain \"observationUnitDbId\" and any or all of the OPTIONAL column header names. The second array \"observationVariables\" contains the names and DbIds for the Observation Variables represented in the table. By appending the two arrays, you can construct the complete header row of the table. 
</p> <p>For CSV and TSV representations of the table, an extra header row is needed to describe both the Observation Variable DbId and the Observation Variable Name for each data column. See the example responses below</p>
# @param accept WSMIMEDataTypes The requested content type which should be returned by the server
# @param observation.unit.db.id character The unique ID of an Observation Unit (optional)
# @param germplasm.db.id character The unique ID of a germplasm (accession) to filter on (optional)
# @param observation.variable.db.id character The unique ID of an observation variable (optional)
# @param study.db.id character The unique ID of a studies to filter on (optional)
# @param location.db.id character The unique ID of a location where these observations were collected (optional)
# @param trial.db.id character The unique ID of a trial to filter on (optional)
# @param program.db.id character The unique ID of a program to filter on (optional)
# @param season.db.id character The year or Phenotyping campaign of a multi-annual study (trees, grape, ...) (optional)
# @param observation.level character The type of the observationUnit. Returns only the observation unit of the specified type; the parent levels ID can be accessed through observationUnitStructure. (optional)
# @param search.results.db.id character Permanent unique identifier which references the search results (optional)
# @param observation.time.stamp.range.start character Timestamp range start (optional)
# @param observation.time.stamp.range.end character Timestamp range end (optional)
# @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
# @return [ObservationTableResponse]
# uncomment below to test the operation
#expect_equal(result, "EXPECTED_RESULT")
})
# Auto-generated testthat stubs (openapi-generator style) for the BrAPI v2
# Observations search endpoints.  Each block documents one API operation's
# parameters and return type; the expectations are left commented out until
# they can be exercised against a live BrAPI test server.
test_that("SearchObservationsPost", {
  # tests for SearchObservationsPost
  # base path: https://test-server.brapi.org/brapi/v2
  # Submit a search request for a set of Observations
  # Submit a search request for a set of Observations. Returns an Id which reference the results of this search
  # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
  # @param observation.search.request ObservationSearchRequest (optional)
  # @return [ObservationListResponse]
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
# Stub for retrieving paged search results by the searchResultsDbId returned
# from the POST endpoint above.
test_that("SearchObservationsSearchResultsDbIdGet", {
  # tests for SearchObservationsSearchResultsDbIdGet
  # base path: https://test-server.brapi.org/brapi/v2
  # Returns a list of Observations based on search criteria.
  # Returns a list of Observations based on search criteria. observationTimeStamp - Iso Standard 8601. observationValue data type inferred from the ontology
  # @param accept WSMIMEDataTypes The requested content type which should be returned by the server
  # @param search.results.db.id character Unique identifier which references the search results
  # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional)
  # @param page integer Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`. (optional)
  # @param page.size integer The size of the pages to be returned. Default is `1000`. (optional)
  # @return [ObservationListResponse]
  # uncomment below to test the operation
  #expect_equal(result, "EXPECTED_RESULT")
})
|
c3e97854914e21e1ff02b8aca916f517b3cdbbec | 104dae3cfffbdf12165bc3a7f9fbf2215da9a767 | /R/exportWP.R | d69fea7a5d1514ad3692618ac19130b294d104dd | [] | no_license | nicolasfstgelais/waterQuality | b507373384333810ebf6c40345ecc29669d46336 | 5af4ad62f60d93a17d4d8c8799285552a951d487 | refs/heads/master | 2021-04-29T22:15:08.271708 | 2018-08-02T17:32:21 | 2018-08-02T17:32:21 | 121,635,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 903 | r | exportWP.R |
# Clean Water Quality Portal river results: substitute half the detection
# limit for censored (non-detect) measurements, drop rows without a usable
# value, build a combined analyte key, and write the cleaned file back out.
dbWP <- read.csv("C:/Users/nicol/Dropbox/ReseauLab/data/water quality/waterportal/rivers/result.csv",stringsAsFactors = F)
# Inspect which censoring codes are present in this export.
unique(dbWP$ResultDetectionConditionText)
# when under detection limit = detection limit/2
# %in% (unlike ==) never yields NA, so rows with a missing condition text
# cannot produce NA subscripts in the replacement assignments below.
# LtoN() is a project helper (presumably converts "<x"-style limit strings
# to numeric) -- TODO confirm against its definition.
nd <- dbWP$ResultDetectionConditionText %in% "Not Detected"
dbWP[nd, "ResultMeasureValue"] <- LtoN(dbWP[nd, "DetectionQuantitationLimitMeasure.MeasureValue"]) / 2
# BUG FIX: the original indexed the right-hand side with the "Not Detected"
# mask here, pairing "Below Reporting Limit" rows with the wrong (and wrongly
# sized) detection limits.  Both sides now use the same mask.
brl <- dbWP$ResultDetectionConditionText %in% "Below Reporting Limit"
dbWP[brl, "ResultMeasureValue"] <- LtoN(dbWP[brl, "DetectionQuantitationLimitMeasure.MeasureValue"]) / 2
# remove results with NA
dbWP <- dbWP[!is.na(dbWP$ResultMeasureValue), ]
# Combined key: analyte name plus sample fraction (e.g. "Phosphorus_Total").
dbWP$CharacteristicName_ResultSampleFractionText <- paste(dbWP$CharacteristicName, dbWP$ResultSampleFractionText, sep = "_")
write.csv(dbWP,"C:/Users/nicol/Dropbox/ReseauLab/data/water quality/waterportal/rivers/result_mod.csv")
) |
6706eb5aa2758a65f62b049b8149cc9dfde5f9d1 | 34de8cb0a2b51b092a010cb9bbd05af42d3f830a | /keio_scripts/run_DESeq2_on_bwa_count_matrix.R | 38cb6f4844d14d6c4d8324e583f80fde9d758b1f | [] | no_license | abs-yy/Hypsibius_dujardini_manuscript | 201967c016e250c507590800b0c8394f0225ccd5 | be613c72073590565c220d899d35bc30fb86d48e | refs/heads/master | 2021-01-20T10:47:32.238859 | 2017-06-12T02:01:00 | 2017-06-12T02:01:00 | 80,097,405 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,554 | r | run_DESeq2_on_bwa_count_matrix.R | args = commandArgs(trailingOnly=TRUE)
# Pairwise differential-expression analysis with DESeq2.
#
# Input (args[1], parsed on the preceding commandArgs line): a tab-delimited
# count matrix with gene IDs in the first column (row names) and 3 replicate
# columns per sample, named "<sample>.<replicate>".  For every pair of sample
# groups, runs a 3-vs-3 DESeq2 analysis and writes one tab-separated file
# "DESeq2-<s1>-<s2>.out" containing the counts, p-values, adjusted p-values
# and p-value ranks.
library(DESeq2)
data <- read.delim(args[1], header=T, row.names=1)
# Number of 3-replicate sample groups, minus one; the loops below pair group
# i with every later group up to samples + 1 == ncol(data) / 3.
samples = ncol(data)/3-1
# seq_len() handles the degenerate single-group case (samples == 0) by
# skipping the loop entirely, where 1:samples would iterate over c(1, 0).
for (i in seq_len(samples)) {
  # Explicit parentheses replace the original "i:samples+1", which relied on
  # ":" binding tighter than "+" to mean (i+1):(samples+1).  Behavior is
  # unchanged, and since j > i always holds, the old "if (i == j) next"
  # guard was dead code and has been removed.
  for (j in (i + 1):(samples + 1)) {
    # Column ranges for the two 3-replicate groups.
    s1 <- (i-1)*3+1
    e1 <- (i-1)*3+3
    # Sample name = column name up to the first "." (e.g. "WT.1" -> "WT").
    n1 <- strsplit(colnames(data)[s1], ".", fixed=T)[[1]][1]
    s2 <- (j-1)*3+1
    e2 <- (j-1)*3+3
    n2 <- strsplit(colnames(data)[s2], ".", fixed=T)[[1]][1]
    out_f1 <- paste(paste("DESeq2", n1, n2, sep="-"), "out", sep=".")
    y <- data[, c(s1:e1, s2:e2)]
    print(paste(s1, e1, s2, e2, sep="_"))
    print("Starting DESeq")
    # Preprocessing: build the DESeqDataSet object.
    data.cl <- c(rep(1, 3), rep(2, 3))  # group labels: 1 = first sample (G1), 2 = second sample (G2)
    colData <- data.frame(condition=as.factor(data.cl))  # colData carrying the class labels in the "condition" column
    d <- DESeqDataSetFromMatrix(countData=y, colData=colData, design=~condition)  # create the DESeqDataSet object d
    # Main run: differentially expressed gene detection.  The commented lines
    # below are the manual step-by-step / LRT alternative to DESeq().
    #d <- estimateSizeFactors(d)
    #d <- estimateDispersions(d)
    #d <- nbinomLRT(d, full= ~condition, reduced= ~1)
    d <- DESeq(d)
    tmp <- results(d)
    # Missing p-values (e.g. genes removed by independent filtering) are
    # treated as non-significant.
    p.value <- tmp$pvalue
    p.value[is.na(p.value)] <- 1
    q.value <- tmp$padj
    q.value[is.na(q.value)] <- 1
    ranking <- rank(p.value)
    tmp <- cbind(n1, n2, rownames(data), y, p.value, q.value, ranking)
    write.table(tmp, out_f1, sep="\t", append=F, quote=F, row.names=F)
  }
}
|
2b64e20e46f9cf3475d1ad37a8f4430a55e68cc0 | 74bc48ba64859a63855d204f1efd31eca47a223f | /Telstra/Telstra.XGBoost.v16.R | 48ce0b87da16e384f6fb14e4509487ca06988b95 | [] | no_license | PraveenAdepu/kaggle_competitions | 4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c | ed0111bcecbe5be4529a2a5be2ce4c6912729770 | refs/heads/master | 2020-09-02T15:29:51.885013 | 2020-04-09T01:50:55 | 2020-04-09T01:50:55 | 219,248,958 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,227 | r | Telstra.XGBoost.v16.R | # v2.0 In Progress
# 01. Libraries
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.  Left as-is.
require(caret)
require(corrplot)
#require(Rtsne)
require(xgboost)
require(stats)
require(knitr)
require(ggplot2)
knitr::opts_chunk$set(cache=TRUE)
require(DiagrammeR)
require(plyr)
require(dplyr)
require(sqldf)
require(reshape)
require(tidyr)
# rm(list=ls())
# NOTE(review): the two setwd() calls below point at the same directory
# (with/without a trailing slash); one of them is redundant.
setwd("C:/Users/SriPrav/Documents/R/01Telstra/")
setwd("C:/Users/SriPrav/Documents/R/01Telstra")
root_directory = "C:/Users/SriPrav/Documents/R/01Telstra"
# 02. Set Seed
# you must know why I am using set.seed()
set.seed(546)
# 03. Import source files data
# Importing data into R
train <- read.csv("./SourceFiles/train.csv" , h=TRUE, sep=",")
test <- read.csv("./SourceFiles/test.csv" , h=TRUE, sep=",")
event <- read.csv("./SourceFiles/event_type.csv" , h=TRUE, sep=",")
log <- read.csv("./SourceFiles/log_feature.csv" , h=TRUE, sep=",")
resource <- read.csv("./SourceFiles/resource_type.csv", h=TRUE, sep=",")
severity <- read.csv("./SourceFiles/severity_type.csv", h=TRUE, sep=",")
# 04. Set target variable to test data
# -1 marks test rows so train and test can be stacked and split apart later
# on the sign of fault_severity.
test$fault_severity <- -1
df_all <- rbind(train,test)
# merging data
# Many-to-many merges: one row per (id, event, log feature, resource) combo.
Moves <- merge(event,log,by="id" ,all = T)
Moves <- merge(Moves,resource,by="id" ,all = T)
Moves <- merge(Moves,severity,by="id" ,all = T)
df_all_combinedForMovings <- merge(df_all,Moves,by="id" ,all = T)
# filter(df_all_combinedForMovings, id == "7350" & resource_type =="resource_type 8" & event_type =="event_type 15")
# Strip the text prefixes so each categorical code becomes a plain number.
df_all_combinedForMovings$location <- as.numeric(gsub("location ","",df_all_combinedForMovings$location))
df_all_combinedForMovings$event_type <- as.numeric(gsub("event_type ","",df_all_combinedForMovings$event_type))
df_all_combinedForMovings$log_feature <- as.numeric(gsub("feature ","",df_all_combinedForMovings$log_feature))
df_all_combinedForMovings$resource_type <- as.numeric(gsub("resource_type ","",df_all_combinedForMovings$resource_type))
df_all_combinedForMovings$severity_type <- as.numeric(gsub("severity_type ","",df_all_combinedForMovings$severity_type))
# Per-id aggregate features (min/max codes, row counts, mixed feature).
Movings <- sqldf("SELECT id,
                 -- MAX(fault_severity) fault_severity,
                 --MAX(location) location,
                 --MAX(event_type) Maxevent_type,
                 MAX(log_feature) Maxlog_feature,
                 MAX(resource_type) Maxresource_type,
                 MAX(severity_type) Maxseverity_type,
                 MIN(event_type) Minevent_type,
                 MIN(log_feature) Minlog_feature,
                 MIN(resource_type) Minresource_type,
                 --MIN(severity_type) Minseverity_type,
                 --MAX(volume) Maxvolume,
                 --MIN(volume) Minvolume,
                 --AVG(volume) Avgvolume,
                 --AVG(log_feature) Sumlog_feature,
                 COUNT(*) + Max(resource_type) + MIN(resource_type) MixFeature,
                 COUNT(*) as Rows
                 FROM df_all_combinedForMovings group by id ")
require(GGally)
head(Movings)
# ggpairs(Movings[-c(1)], colour="fault_severity", alpha=0.4)
# Wide one-hot-style tables: spread() pivots each categorical table to one
# row per id with one column per category level.
event$eventtype <- as.integer(gsub("event_type ","",event$event_type))
# head(event)
events <- spread(event, event_type , eventtype )
# head(events)
# sqldf("select * from events where id = 10024")
events[is.na(events)] <- 0
resource$resourcetype <- as.integer(gsub("resource_type ","",resource$resource_type))
# head(resource)
resources <- spread(resource, resource_type , resourcetype )
# head(resources)
# sqldf("select * from resources where id = 10024")
resources[is.na(resources)] <- 0
severity$severitytype <- as.integer(gsub("severity_type ","",severity$severity_type))
# head(severity)
severities <- spread(severity, severity_type , severitytype )
# head(severities)
# sqldf("select * from severities where id = 10024")
severities[is.na(severities)] <- 0
# NOTE(review): the data frame named `log` shadows the name of base::log;
# function-call lookup still finds the base function, but the name is risky.
log$logfeature <- as.integer(gsub("feature ","",log$log_feature))
logDetails <- log
log$volume <- NULL
# head(log)
logs <- spread(log, log_feature , logfeature )
# names(events)
# sqldf("select * from logs where id = 10024")
logs[is.na(logs)] <- 0
logDetails <- sqldf("select id , sum(logfeature) Totallogfeatures , sum(volume) as Totalvolume from logDetails group by id")
# logs$logsVolume <- rowSums(logs[,2:387])
# resources$resourcesTotal <-rowSums(resources[,2:11])
# events$eventsTotal <- rowSums(events[,2:54])
# events$severitiesTotal <- rowSums(severities[,2:6])
# Assemble the modeling table: wide tables + aggregates joined on id.
sessionsdata <- merge(events,logs,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,resources,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,severities,by="id" ,all = T)
dim(sessionsdata)
dim(df_all)
df_all_combined <- merge(df_all,sessionsdata,by="id" ,all = T)
df_all_combined <- merge(df_all_combined, Movings, by="id" ,all = T)
df_all_combined <- merge(df_all_combined, logDetails, by="id" ,all = T)
# Locationfrequencies <- sqldf("select location , count(*) as LocationFreq from df_all_combined group by location ")
# df_all_combined <- merge(df_all_combined,Locationfrequencies,by="location" ,all.x = T)
dim(df_all_combined)
df_all_combined$location <- as.numeric(gsub("location",'',df_all_combined$location))
df_all_combined$Totallogfeatures <- log(df_all_combined$Totallogfeatures)
# NOTE(review): encoding looks inverted (0 for volume > 750, 1 otherwise) --
# presumably intentional as a "low volume" indicator; confirm.
df_all_combined$TotalvolumeBins <- ifelse(df_all_combined$Totalvolume > 750 , 0, 1)
#df_all_combined$FeatureTotal <- df_all_combined$logsVolume + df_all_combined$resourcesTotal + df_all_combined$severitiesTotal
#df_all_combined$SeverityVolume <- log(df_all_combined$logsVolume * df_all_combined$severitiesTotal)
names(df_all_combined)
# Cl <- kmeans(df_all_combined[-c(1,3)], 10, nstart=25)
#
# df_all_combined <- cbind(df_all_combined, Cluster = Cl$cluster)
# sqldf("select id, fault_severity from df_all_combined where id IN(10024,1,10059)")
# Split back into train/test using the -1 sentinel set earlier.
Fulltrain <- df_all_combined[which(df_all_combined$fault_severity > -1), ]
Fulltest <- df_all_combined[which(df_all_combined$fault_severity < 0), ]
Fulltest$fault_severity <- NULL
# #######################################################################################################
# # Data Visualisations
# Exploratory plotting scratchpad -- everything in this section is optional.
## 3D Scatterplot
# require(scatterplot3d)
# scatterplot3d(Fulltrain$location,Fulltrain$fault_severity,Fulltrain$Maxevent_type, main="3D Scatterplot")
#
# require(rgl)
# plot3d(Fulltrain$location,Fulltrain$fault_severity,Fulltrain$Maxevent_type, col="red", size=3)
#
# pairs(iris[1:4], main = "Anderson's Iris Data -- 3 species",
#      pch = 21, bg = c("red", "green3", "blue")[unclass(iris$Species)])
#
# # Using formula
#
# pairs(~Fulltrain$location + Fulltrain$fault_severity + Fulltrain$Maxevent_type, main = "Fault Severity",
#      pch = 21, bg = c("red", "green3", "blue")[unclass(Fulltrain$fault_severity)])
# M <- cor(Fulltrain[c(3,462:468,470:483)])
# corrplot.mixed(M)
# conputationally expensive
# require(GGally)
# ggpairs(Fulltrain[c(3,458:467)], colour="fault_severity", alpha=0.4)
summary(Fulltrain$'resource_type 1')
# head(Fulltrain[c(3,443:457)])
#
# filter(Fulltrain, id == "7350")
filter(log, id == "7350")
# ggpairs(Fulltrain[c(3,443:457)], colour="fault_severity", alpha=0.4)
########################################################################################################
# names(Fulltrain)
# Feature columns: everything except columns 1 and 3 (id and fault_severity).
featureNames <- names(Fulltrain [-c(1,3)]) # ,57,444
train.matrix <- as.matrix(Fulltrain[,featureNames])#, Fulltrain$severity_type == 3))
test.matrix <- as.matrix(Fulltest[,featureNames]) #, Fulltest$severity_type == 3))
# caret's classProbs=TRUE needs valid R names as class labels, hence the
# recoding of 0/1/2 to "Zero"/"One"/"Two".
target <- ifelse(Fulltrain$fault_severity==0,'Zero', ifelse(Fulltrain$fault_severity==1,'One', 'Two'))
#y <- recode(target,"'Zero'=0; 'One'=1; 'Two'=2")
classnames = unique(target)
#target = as.integer(colsplit(target,'_',names=c('x1','x2'))[,2])
target <- as.factor(target)
# outcome.org = Fulltrain$fault_severity
# outcome = outcome.org
# levels(outcome)
# xgboost wants a 0-based integer label; note the factor level order (not the
# original 0/1/2 order) determines the integer coding here.
y = target
y = as.matrix(as.integer(target)-1)
cvtarget = y
cvtarget <- as.factor(cvtarget)
num.class = length(levels(target))
unique(target)
# xgboost parameters
param <- list("objective" = "multi:softprob",    # multiclass classification
              "num_class" = num.class,    # number of classes
              "eval_metric" = "mlogloss",    # evaluation metric
              "nthread" = 24,   # number of threads to be used
              "max_depth" = 8,    # maximum depth of tree # 6
              "eta" = 0.05,    # step size shrinkage # 0.5
              "gamma" = 0,    # minimum loss reduction
              "subsample" = 0.9,    # part of data instances to grow tree # 0.5
              "colsample_bytree" = 0.7,  # subsample ratio of columns when constructing each tree
              "min_child_weight" = 3  # minimum sum of instance weight needed in a child
)
# set random seed, for reproducibility
set.seed(1231)
# k-fold cross validation, with timing
# NOTE(review): early.stop.round was renamed early_stopping_rounds in newer
# xgboost releases -- confirm against the installed version.
nround.cv = 20
system.time( bst.cv <- xgb.cv(param=param, data=train.matrix, label=y, early.stop.round=10,
                              nfold=10, nrounds=nround.cv, prediction=TRUE, verbose=TRUE) )
#tail(bst.cv$dt)
# index of minimum merror
min.merror.idx = which.min(bst.cv$dt[, test.mlogloss.mean+test.mlogloss.std])
min.merror.idx
bst.cv$dt[min.merror.idx ,]
# Caret tuning wrapper around xgbTree: repeated 5-fold CV with class probs.
xgb.ctrl <- trainControl(method = "repeatedcv", repeats = 2,number = 5,
                        summaryFunction = multiClassSummary,
                        classProbs = TRUE,
                        allowParallel=T,
                        verboseIter = TRUE)
min.merror.idx = 20
xgb.grid <- expand.grid(nrounds = c(20), # try with 195, get best nround from XGBoost model then apply here for Caret grid
                       eta = c(0.05),
                       max_depth = c(8),
                       colsample_bytree = c(0.7),
                       subsample = c(0.6),
                       min_child_weight = c(3),
                       gamma = 0
)
set.seed(45)
# set.seed(54)
# NOTE(review): tuneGrid is commented out, so caret falls back to its own
# default grid rather than the xgb.grid defined above.
xgb_tune <-train(x=train.matrix, y=target, # factor level string levels
                #data=train,
                method="xgbTree",
                objective = "multi:softprob",
                trControl=xgb.ctrl,
                #tuneGrid=xgb.grid,
                verbose=1,
                metric="logLoss",
                nthread =24,
                print.every.n=5
                )
xgb_tune
# #0.4943869, 0.01010081 , 0.7795671 , 0.01015636
# #0.4947401, 0.009533379, 0.7818702 , 0.01117952
# xgb_tune$best
# xgb_tune$best$nrounds
# xgb_tune$best$max_depth
# xgb_tune$best$eta
# xgb_tune$best$colsample_bytree
# xgb_tune$best$min_child_weight
# xgb_tune$best$gamma
# # # get the trained model
# model = xgb.dump(xgb_tune, with.stats=TRUE)
# # # get the feature real names
# names = dimnames(train.matrix)[[2]]
# # # compute feature importance matrix
# importance_matrix = xgb.importance(names, model=bst)
# print(importance_matrix)
# Training error rate
# confusionMatrix(predict(xgb_tune, Fulltrain[,featureNames]), target) ## Fact level target (string levels)
# Columns are reordered so predict_0/1/2 line up with the original 0/1/2
# classes (caret's prob columns come back in factor-level order).
predictedProbs <- predict(xgb_tune, test.matrix, type = "prob")
#head(predictedProbs)
#head(test.matrix)
# head(prediction)
# sqldf("select * from prediction where id = 11")
# names(prediction)
prediction <- cbind( id = Fulltest$id , predict_0 = predictedProbs[,3] , predict_1 = predictedProbs[,1], predict_2 = predictedProbs[,2] )
predict01 <- as.table(prediction)
head(predict01)
filter(predict01, id == "2")
# NOTE(review): this sqldf has no GROUP BY, so it collapses the whole table
# to a single row of averages; the result (`predictions`) is never used.
predictions <- sqldf("select id , avg(predict_0) as predict_0
                    , avg(predict_1) as predict_1
                    , avg(predict_2) as predict_2
                     from predict01")
# NOTE(review): the file is written as "submissionFullSet.csv" in the working
# directory but read back as "./Telstra/submissionFullset.csv" (different path
# and case) -- this likely only works because of an older copy on disk.
write.csv(prediction, "submissionFullSet.csv", quote=FALSE, row.names = FALSE)
FullSet <- read.csv("./Telstra/submissionFullset.csv", h=TRUE, sep=",")
submissionTest <- sqldf("select id , avg(predict_0) as predict_0
                    , avg(predict_1) as predict_1
                    , avg(predict_2) as predict_2
                     from FullSet group by id")
write.csv(submissionTest, "submission48.csv", quote=FALSE, row.names = FALSE)
|
59664453aca21309bfabb01911fe4c7098baa42d | 43e59e7a343960576f5718f6b137f6f06145d768 | /make_jurisdiction_report.R | 2ddd6752381ac60c1891d1becb39e82d4fcead58 | [
"CC0-1.0"
] | permissive | wvp3/jurisdiction_report | 4c8632a4f0e49f52e27f2d7262bc0cf2524ed42a | c75f831db01f3976d5936395c373c58619768181 | refs/heads/main | 2023-04-02T14:42:48.143113 | 2021-04-15T15:42:16 | 2021-04-15T15:42:16 | 358,302,271 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,956 | r | make_jurisdiction_report.R | source("make_quadrants.R")
# Helper scripts defining make_svimap() and make_sparkline() used below
# (make_quadrants.R is sourced on the preceding line).
source("make_svimap.R")
source("make_sparkline.R")
# you need to first prepare the most recent data
# see prep_county_throughput.R, prep_state_throughput.R and prep_svi_data.R
# Build and render the per-jurisdiction county-throughput report.
#
# st:     state/territory full name or two-letter abbreviation (matched
#         against both columns of the state lookup).
# format: output format, "html" or "pdf"; any other value renders nothing.
# qc:     if TRUE, print a quick sum of the dose columns as a sanity check.
#
# Side effects: writes state_sparkline.rds and state_mergedtable.rds, calls
# the make_* helpers (sourced above), and renders the RMarkdown report to a
# file named "<State Name>_<today>".  Relies on `territories` and
# `svimapdata` being in scope (created by the prep scripts).
make_jurisdiction_report <- function(st, format = "html", qc = FALSE) {
  # Lookup row for this jurisdiction; matches full name or abbreviation.
  statetitle <- as.data.frame(cbind(state.name, state.abb)) %>%
    bind_rows(territories) %>% filter(state.abb == st | state.name == st)
  filenamest <- statetitle %>% select(state.name) %>% as.character()
  print(filenamest)
  make_quadrants(st)
  make_sparkline(st) %>% saveRDS("state_sparkline.rds")
  make_svimap(st)
  statemergedtable <- left_join(readRDS("state_datatable.rds"),
                    select(svimapdata, county_fips, svi, tert_svi_state, firstdose_coverage = coverage, tert_coverage_state),
                    by = "county_fips") %>%
    left_join(statetitle, by = c("state" = "state.abb")) %>%
    # this next select command will vary; need to customize
    select(state, state.name = state.name.x, county, county_fips, contains("dose"), contains("percent"),
           throughputcat, contains("svi"), contains("coverage")) %>%
    # Normalize missing/unknown county FIPS codes to a single sentinel.
    mutate(county_fips = ifelse(is.na(county_fips) |
                                  county_fips %in% c("UNK", "Unknown", "unknown", ""),
                                "<missing>", county_fips)) %>%
    mutate(state.name = ifelse(is.na(state.name), filenamest, state.name))
  saveRDS(statemergedtable, file = "state_mergedtable.rds")
  if (format == "html") {
    rmarkdown::render("all_county_throughput.rmd",
                      output_file = paste0(filenamest, "_", as.character(lubridate::today())))
  }
  if (format == "pdf") {
    rmarkdown::render("all_county_throughput_pdf.rmd",
                      output_file = paste0(filenamest, "_", as.character(lubridate::today())))
  }
  # isTRUE() guards against NA/non-logical qc values; the deprecated funs()
  # helper is replaced by the equivalent lambda notation.
  if (isTRUE(qc)) {
    statemergedtable %>%
      summarise_at(vars(contains("dose")), ~ sum(., na.rm = TRUE)) %>%
      print()
  }
}
# Render one report per state abbreviation; possibly() keeps a single failing
# jurisdiction from aborting the whole batch (failures yield NA).
purrr::map(state.abb,possibly(make_jurisdiction_report,otherwise=NA))
#purrr::map(state.name,possibly(make_jurisdiction_report,otherwise=NA))
|
af4c73492f1a629af1c65e9c2d3cc5e627d65320 | b4eb64787b3cdf1e8622bf968b1c07b13489275b | /man/fix_name.Rd | 0952623955bc32868b483e5aca385440d6ba51b5 | [
"MIT",
"CC-BY-4.0"
] | permissive | bodkan/archaic-ychr | c904289171fe4c752e08aab9f93c094ad5c5fc9c | 13759d2d97ce1434b12418ad2395ca98fc170016 | refs/heads/master | 2022-11-16T22:30:27.487685 | 2020-07-13T10:30:56 | 2020-07-13T10:30:56 | 52,959,577 | 8 | 0 | null | 2020-07-13T10:30:57 | 2016-03-02T12:02:34 | Jupyter Notebook | UTF-8 | R | false | true | 260 | rd | fix_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{fix_name}
\alias{fix_name}
\title{Change sample name to its full form.}
\usage{
fix_name(name, coverage = FALSE)
}
\description{
Change sample name to its full form.
}
|
22e0d1d62f058150167a22b20710475362711717 | 468075902da967e77578f8445a542faf2ee51227 | /man/LogisticRMoE.Rd | 8a621b3411c1fe0a82a812ebf07539364f58826f | [] | no_license | fchamroukhi/HDME | 202dd27585ff2a50fe0c59b62bb8e5836cf74d3f | 09d44e933cc4cd60e85cf920621708da44d12016 | refs/heads/master | 2020-06-19T18:22:12.092085 | 2019-10-23T13:24:54 | 2019-10-23T13:24:54 | 196,819,715 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,344 | rd | LogisticRMoE.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LogisticRMoE.R
\name{LogisticRMoE}
\alias{LogisticRMoE}
\title{Penalized MLE for the logistic regularized Mixture of Experts.}
\usage{
LogisticRMoE(Xmat, Ymat, K, Lambda, Gamma, option = FALSE,
verbose = FALSE)
}
\arguments{
\item{Xmat}{Matrix of explanatory variables. Each feature should be
standardized to have mean 0 and variance 1. One must add the column vector
(1,1,...,1) for the intercept variable.}
\item{Ymat}{Vector of the response variable. For the Gaussian case Y should
be standardized. For multi-logistic model Y is numbered from 1 to R (R is
the number of labels of Y).}
\item{K}{Number of experts (K > 1).}
\item{Lambda}{Penalty value for the experts.}
\item{Gamma}{Penalty value for the gating network.}
\item{option}{Optional. \code{option = TRUE}: using proximal Newton-type method;
\code{option = FALSE}: using proximal Newton method.}
\item{verbose}{Optional. A logical value indicating whether or not values of
the log-likelihood should be printed during EM iterations.}
}
\value{
LogisticRMoE returns an object of class \link{LRMoE}.
}
\description{
This function provides a penalized MLE for the logistic regularized Mixture
of Experts (MoE) model corresponding to the penalty parameters Lambda and
Gamma.
}
\seealso{
\link{LRMoE}
}
|
f1a74d34fea5b867ab165f1fc93eb9af345be833 | 3805a17d29ec60517c95ea63cd4a393d530d5b15 | /man/pagination_helper.Rd | 8fa99b49d989c5d4690971cb632ae2ca1925f3dd | [] | no_license | bkkkk/lastfmr | da73596e853989cfe116f52244c617b3f6ae78da | 8617f03a7485c4bc2c1b4baa3c46595e6c959a3c | refs/heads/master | 2022-09-05T11:35:18.732279 | 2022-08-31T06:54:54 | 2022-08-31T06:54:54 | 125,021,275 | 0 | 0 | null | 2021-11-20T07:19:08 | 2018-03-13T08:57:09 | R | UTF-8 | R | false | true | 1,432 | rd | pagination_helper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pagination.R
\name{pagination_helper}
\alias{pagination_helper}
\alias{get_current_page}
\alias{get_total_pages}
\alias{has_next_page}
\alias{get_response_attr}
\title{Pagination Management}
\usage{
get_current_page(resp)
get_total_pages(resp)
has_next_page(resp)
get_response_attr(resp)
}
\arguments{
\item{resp}{lastfm object}
}
\value{
\itemize{
\item \code{\link[=get_current_page]{get_current_page()}} returns an integer
\item \code{\link[=has_next_page]{has_next_page()}} returns a boolean
}
}
\description{
Many of last.fm API responses are paginated with a page size maximum of 50.
lastfmr provides a series of functions that help to deal with these pages.
End-users are not expected to interact with these functions directly in most
cases since pagination is already taken into account when invoking the raw
or tidy data request functions.
Note that some endpoints return pagination based on OpenSearch query results
and information about next pages, etc. are stored in slightly different ways.
\itemize{
\item \code{\link[=get_current_page]{get_current_page()}} and \code{\link[=get_total_pages]{get_total_pages()}} return the current page of a
response and the total number of pages available, respectively.
\item \code{\link[=has_next_page]{has_next_page()}} checks if there are more pages after the current page.
}
}
\keyword{internal}
|
380e0281b67386debcf7b613e474013e7add2c44 | 91ae059b3f640dd6cdbd49992acec90339fc54ac | /workers_model.R | 3826cc0f9ac28d30a3b4cb321bc0feb89d3167ad | [] | no_license | mgbostwick/galapagos | 58b291a6243a07f94a8ea84cbe3817a99a8ebdb6 | d02f4324ac016af928fc1bc9c73244318c101a08 | refs/heads/master | 2021-04-27T00:31:36.544802 | 2018-04-29T20:22:39 | 2018-04-29T20:22:39 | 122,654,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,388 | r | workers_model.R | setwd("/Users/michaelbostwick/Documents/Galapagos")
library(tidyverse)
library(ggplot2)
library(readxl)
library(glmnet)
library(mpath)
library(reshape2)
library(gridExtra)
library(leaps)
library(bestsubset)
library(bestglm)
# Load modeling function
source("modeling.function.R")
# Load clean data and subset to appropriate variables for this analysis
# (loads `reduced_data` -- presumably; confirm against clean_data.RData)
load("BASES_CENSO_UPA_spss/clean_data.RData")
vars <- read_excel("Variables.xlsx", sheet = "vars", range = "C2:I241")
# Keep only predictors not flagged in the "Number workers supported" column.
workers_include <- vars[which(is.na(vars$`Number workers supported`)),1]$`Variable Name`
workers_x.df <- subset(reduced_data, select = workers_include)
workers_nonzero <- reduced_data[reduced_data$fulltimework > 0, 'fulltimework']
# factor(FALSE/TRUE) levels are ordered FALSE < TRUE, hence "Zero","Positive".
reduced_data$workers_binary <- factor(reduced_data$fulltimework > 0)
levels(reduced_data$workers_binary) = c("Zero", "Positive")
# Plot histograms and output to PDF
all_workers_hist <- ggplot(data = reduced_data) + geom_bar(mapping = aes(x = workers_binary)) +
  xlab("fulltimework") + theme(text = element_text(size=14))
nonzero_hist <- ggplot(data = reduced_data[reduced_data$fulltimework > 0,]) +
  geom_histogram(mapping = aes(x = fulltimework)) + theme(text = element_text(size=14))
log_nonzero_hist <- ggplot(data = reduced_data[reduced_data$fulltimework > 0,]) +
  geom_histogram(mapping = aes(x = log10(fulltimework))) + theme(text = element_text(size=14))
pdf("Paper/images/worker_histograms.pdf",width=12,height=8)
grid.arrange(all_workers_hist, nonzero_hist, log_nonzero_hist, nrow = 1)
dev.off()
# Call modeling function on binary data
worker_binary.models <- fit.models(model.name = "workers_binary", x.data = workers_x.df, 
                                   y.response = reduced_data$workers_binary, response.family = "binomial")
# To find top 5 variable order
betas <- worker_binary.models$betas
betas[abs(betas$s0)>0,'variable']
worker_binary.models$fwdorder
# Prepare nonzero data and remove zero variance/linear dependent columns
workers_x.nonzero.pre <- subset(reduced_data[reduced_data$fulltimework > 0,], select = workers_include)
num_workers_log <- log10(reduced_data[reduced_data$fulltimework > 0, 'fulltimework'])
# model.matrix expands factors to dummies; [,-1] drops the intercept column.
workers.matrix <- model.matrix(~., workers_x.nonzero.pre)[,-1]
col_vars <- apply(workers.matrix, 2, var)
zero_variance <- names(col_vars[col_vars == 0])
workers_x.nonzero <- workers.matrix[,!colnames(workers.matrix) %in% zero_variance]
# Hand-picked linearly dependent columns identified by the diagnostics below.
remove_vars <- c("ga15_cualLLANTAS", "`ENERGIA_ELENERGIA SOLAR PUBLICA`", "`ENERGIA_ELGENERADOR PRIVADO`")
workers_x.nonzero <- as.data.frame(workers_x.nonzero[,!colnames(workers_x.nonzero) %in% remove_vars])
# Call modeling function for nonzero data
worker_nonzero.models <- fit.models(model.name = "workers_nonzero", x.data = workers_x.nonzero, 
                                    y.response = num_workers_log, response.family = "gaussian")
# To find top 5 variable order
betas <- worker_nonzero.models$betas
betas[abs(betas$s7)>0,'variable']
worker_nonzero.models$fwdorder
### Diagnostics to find linear dependencies and correlations between variables
# Columns whose removal does NOT drop the matrix rank are linearly dependent.
your.matrix <- workers_x.nonzero
rankifremoved <- sapply(1:ncol(your.matrix), function (x) qr(your.matrix[,-x])$rank)
which(rankifremoved == max(rankifremoved))
# Lower-triangle correlations, melted long, then filtered for |r| > 0.9.
corrs <-cor(workers_x.nonzero)
corrs[upper.tri(corrs)] <- NA
diag(corrs) <- NA
corrs_reshape <- melt(corrs, na.rm = TRUE)
(high_corrs <- corrs_reshape[abs(corrs_reshape$value) > 0.9,])
|
a4074c14a1cf2ec5aa664ece89a26bab300f535b | 14c2f47364f72cec737aed9a6294d2e6954ecb3e | /man/BCV.Rd | f51ff53446badff09239e02c418cd0d23b231f0a | [] | no_license | bedapub/ribiosNGS | ae7bac0e30eb0662c511cfe791e6d10b167969b0 | a6e1b12a91068f4774a125c539ea2d5ae04b6d7d | refs/heads/master | 2023-08-31T08:22:17.503110 | 2023-08-29T15:26:02 | 2023-08-29T15:26:02 | 253,536,346 | 2 | 3 | null | 2022-04-11T09:36:23 | 2020-04-06T15:18:41 | R | UTF-8 | R | false | true | 533 | rd | BCV.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\name{BCV}
\alias{BCV}
\alias{BCV,DGEList-method}
\alias{BCV,EdgeResult-method}
\title{Return a data.frame of BCV values}
\usage{
BCV(x)
\S4method{BCV}{DGEList}(x)
\S4method{BCV}{EdgeResult}(x)
}
\arguments{
\item{x}{An object}
}
\description{
Return a data.frame of BCV values
}
\section{Methods (by class)}{
\itemize{
\item \code{BCV(DGEList)}: Method for DGEList
\item \code{BCV(EdgeResult)}: Method for EdgeResult
}}
|
8396906e9e03b6fe30ed961aaf29183e34ed0d5f | dafa2cdb6498055e534b127d26fbea2f7ceccf32 | /workshops/introR/introR01.R | a71d380ae897f5579bb757a456c87b72a3be0b09 | [] | no_license | kho777/data-methods | c44cd4173e9fb1341e724692861d043b2a5afb1e | 6bac8bc85a5b3c87d2d5a9d6e486e3bc4b19888c | refs/heads/master | 2021-08-19T18:21:48.941139 | 2018-11-01T20:24:06 | 2018-11-01T20:24:06 | 145,901,870 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 944 | r | introR01.R | ## Introduction to R sample program 1
## file: introR01.R
## Adapted from Venables, W.N., Smith, D.M. and Team, R.C., 2018. An Introduction to R, Version 3.5.1 (2018-07-02)
# Generate two pseudo-random normal vectors of x- and y-coordinates.
# Note the two assignment operators, "<-" and "=".  The former is the
# traditional operator, works in every context, and is the style-guide
# recommendation; the latter is more familiar to new users coming from
# other statistical programs (e.g. SPSS, Stata).
#
# rnorm() is a function to generate normally distributed random numbers. The argument
# within the () indicates number of observations.
x <-rnorm(50)
y = rnorm(x)  # a vector first argument is taken as its length, so y also gets 50 values
# Plot the points in the plane
plot(x, y)
# Plot better, using the ggplot2 package
# (install.packages() only needs to be run once per machine)
install.packages("ggplot2")
library(ggplot2)
qplot(x,y)
# An even nicer plot using ggplot2's full layered interface
ggplot(,aes(x,y)) +theme_bw()+geom_point()
# Check on R objects in the R workspace
ls()
# Remove objects
rm(x, y)
|
29f869940cff0ddfa2f47b5c9d945a093f46ae26 | d8bc7bba5ee687368e23a1d61453bf90ff9fc516 | /man/query_master.Rd | 3957f8b75f9781b2261366c12546f2b888288fba | [] | no_license | NREL/rplexos | 45dcf4dd919f9fab6333aa45fb4ef29203039829 | f56f88fdfb050e682e3f9137620697cf4cb6e0dd | refs/heads/master | 2020-04-12T09:39:21.133868 | 2017-11-23T16:00:49 | 2017-11-23T16:00:49 | 24,687,713 | 16 | 14 | null | 2017-01-10T17:36:25 | 2014-10-01T17:22:03 | C++ | UTF-8 | R | false | true | 4,740 | rd | query_master.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{query_master}
\alias{query_master}
\alias{query_interval}
\alias{query_day}
\alias{query_week}
\alias{query_month}
\alias{query_year}
\alias{sum_master}
\alias{sum_interval}
\alias{sum_day}
\alias{sum_week}
\alias{sum_month}
\alias{sum_year}
\title{Query data and aggregate data}
\usage{
query_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4)
query_interval(db, ...)
query_day(db, ...)
query_week(db, ...)
query_month(db, ...)
query_year(db, ...)
sum_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4, multiply.time = FALSE)
sum_interval(db, ...)
sum_day(db, ...)
sum_week(db, ...)
sum_month(db, ...)
sum_year(db, ...)
}
\arguments{
\item{db}{PLEXOS database object}
\item{time}{character. Table to query from (interval, day, week, month, year)}
\item{col}{character. Collection to query}
\item{prop}{character vector. Property or properties to query}
\item{columns}{character. Data columns to query or aggregate by (defaults to \code{name})}
\item{time.range}{POSIXt or character. Range of dates of length 2 (given as date, datetime or character in 'ymdhms'
or 'ymd' format). The Plexos data is assumed to be in UTC so providing a POSIXt vector in a different
timezone might cause conflicts. Character vectors are also converted to the UTC format, so there
is no issue in that case.}
\item{filter}{list. Used to filter by data columns (see details)}
\item{phase}{integer. PLEXOS optimization phase (1-LT, 2-PASA, 3-MT, 4-ST)}
\item{...}{parameters passed from shortcut functions to master (all except \code{time})}
\item{multiply.time}{boolean. When summing interval data, provide the value multiplied by interval duration (See details).}
}
\value{
A data frame that contains data summarized/aggregated by scenario.
}
\description{
This collection of functions retrieves data from the processed PLEXOS solutions and
returns it in a convenient format.
}
\details{
The family \code{query_*} returns the raw data in the databases, while \code{sum_*}
aggregates the data according to \code{columns}.
The functions \code{*_day}, \code{*_week}, \code{*_month} and \code{*_year} are
shortcuts for the corresponding, \code{*_master} function.
The following is a list of valid items for \code{columns} and filtering. Additionally,
\code{time} can be specified for summary data (interval data always includes \code{time}).
\itemize{
\item{\code{category}}
\item{\code{property}}
\item{\code{name} (default for columns)}
\item{\code{parent} (automatically selected when \code{name} is selected)}
\item{\code{category}}
\item{\code{region} (only meaningful for generators)}
\item{\code{zone} (only meaningful for generators)}
\item{\code{period_type}}
\item{\code{band}}
\item{\code{sample}}
\item{\code{timeslice}}
}
If defined, the \code{filter} parameter must be a \code{list}. The elements must be character
vectors and need to have a valid column name (see previous bullet points). For example, one
could define it as follows:
\code{filter = list(name = c("Generator1", "Generator2"), region = "Region1")}
To filter by time use the \code{time.range} parameter, instead of adding it as an entry in the
\code{filter} parameter. For example use \code{c("2015-03-14", "2015-03-15")} in your query.
Please note that the year/month/date starts at midnight (00:00:00).
If a scenario has multiple databases, the data will be aggregated automatically. If two or more
databases within the same scenario have overlapping time periods, the default is to select the
data from the last database (execute \code{summary(db)} so see the order). To change this behavior
set the global option \code{rplexos.tiebreak} to \code{first}, \code{last}, or \code{all} to
select data from the first database, the last one or keep all of them.
Multiple properties can be queried within a collection. If \code{prop} equals the wildcard
\code{"*"}, all the properties within a collection are returned.
The parameter \code{multiply.time} allows to multiply values by interval duration (in hours) when
doing the sum of interval data. This can be used, for example, to obtain total energy (in MWh)
from power time series (in MW).
}
\examples{
# Process the folder with the solution file provided by rplexos
location <- location_solution_rplexos()
process_folder(location)
# Query data
db <- plexos_open(location)
query_day(db, "Generator", "Generation")
query_day(db, "Region", "*")
query_interval(db, "Generator", "Generation")
}
\seealso{
\code{\link{plexos_open}} to create the PLEXOS database object
\code{\link{query_sql}} to perform custom queries
}
|
a85435ebdba2b593bc1f9767de4f12042e79566c | 122659e9874d73b463cbb714db795f195f154a9b | /forest.r | a0b5189867e1be6ebffa202ce2af08ad5d8b02dd | [] | no_license | bertcarnell/Rsandbox | ac7e808b9446422cbc40e231f3b17b79ad87a35a | 11ef0962b9504a811bf5aa8aa064ddee0d059d75 | refs/heads/master | 2021-06-05T15:11:06.610766 | 2018-11-24T23:48:37 | 2018-11-24T23:48:37 | 5,023,424 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,807 | r | forest.r | require(randomForest)
# Exploratory script: random-forest analysis of the iris data
# (variable importance, proximity-based MDS, outlier detection).
require(RColorBrewer)
require(MASS)
data(iris)
# Fixed seed so the fitted forest (and everything derived from it) is reproducible.
set.seed(71)
# Keep variable importance and the sample proximity matrix; both are used below.
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE)
print(iris.rf)
## dirichlet based on confusion matrix
# Draw N samples from a Dirichlet(alpha) distribution.
# Each column i holds N independent Gamma(alpha_i, 1) draws; dividing by the
# per-row totals normalizes every row of the N x length(alpha) result to sum to 1.
rdirichlet <- function(alpha, N)
{
  gamma_draws <- vapply(alpha,
                        function(shape) rgamma(N, shape, 1),
                        numeric(N))
  gamma_draws / rowSums(gamma_draws)
}
# want a dirichlet with means equal to the predicted probabilities for an observation
# and a k value that matches the error in the confusion matrix
p <- predict(iris.rf, iris[150,], type="prob")
N <- 10000
# Objective for choosing the Dirichlet concentration k: simulate N draws from
# Dirichlet(p*k), compute how often each class "wins" (has the maximum rank),
# and return the squared distance between those simulated win rates and
# `desiredError` (the target error profile from the confusion matrix).
f <- function(k, N, p, desiredError)
{
  stopifnot(length(p) == length(desiredError))
  X <- rdirichlet(p*k, N)
  r <- t(apply(X, 1, rank))
  # given that we have predicted a max(p), what is error rate for the others
  error <- apply(r, 2, function(y) length(which(y == length(p)))/N)
  return(sum((desiredError-error)^2))
}
set.seed(2001)
# Minimize f over k in [0, 10]; the target is the confusion-matrix column of
# the predicted class, normalized to proportions.
optimize(f, lower=0, upper=10, N=N, p=p,
         desiredError=iris.rf$confusion[,which.max(p)]/sum(iris.rf$confusion[,which.max(p)]), tol=1E-6)
# Spot-check the objective at a few fixed k values.
f(1, N=N, p=p, desiredError=iris.rf$confusion[,which.max(p)]/sum(iris.rf$confusion[,which.max(p)]))
f(10, N=N, p=p, desiredError=iris.rf$confusion[,which.max(p)]/sum(iris.rf$confusion[,which.max(p)]))
f(100, N=N, p=p, desiredError=iris.rf$confusion[,which.max(p)]/sum(iris.rf$confusion[,which.max(p)]))
f(1000, N=N, p=p, desiredError=iris.rf$confusion[,which.max(p)]/sum(iris.rf$confusion[,which.max(p)]))
# NOTE(review): this second definition of f() overwrites the objective above
# and is incomplete/broken exploratory code, left byte-identical on purpose:
#   * "errorrate <- confusionrow/" continues into the next statement,
#   * X and confu are not defined in this scope,
#   * `errors` is allocated but never filled or returned.
f <- function(x, confusionrow)
{
  # x is a dirichlet sample
  # confu is a row of the confusion matrix
  k <- ncol(x)
  n <- nrow(x)
  stopifnot(length(confusionrow) == k+1)
  errors <- numeric(k)
  errorrate <- confusionrow/
  r <- apply(X, 1, rank)
  for (i in 1:k)
  {
    # for the first row of the confusion matrix, make a dirichlet with the right error rate
    errorrate <- confu[i,1:k]
  }
}
# NOTE(review): empty if/else skeleton that was never filled in.
if (all(iris.rf[,4] == 0))
{
} else
{
}
## Look at variable importance:
round(importance(iris.rf), 2)
windows()
varImpPlot(iris.rf)
## Do MDS on 1 - proximity:
# Classical MDS on the proximity-based distance, shown alongside the raw predictors.
windows()
iris.mds <- cmdscale(1 - iris.rf$proximity, eig=TRUE)
op <- par(pty="s")
pairs(cbind(iris[,1:4], iris.mds$points), cex=0.6, gap=0,
      col=c("red", "green", "blue")[as.numeric(iris$Species)],
      main="Iris Data: Predictors and MDS of Proximity Based on RandomForest")
par(op)
print(iris.mds$GOF)
# MDSplot() performs the same scaling directly from the fitted forest (2D, then 3D).
windows()
output <- MDSplot(iris.rf, iris$Species, k=2, pch=as.numeric(iris$Species))
legend("topleft", legend=as.character(unique(iris$Species)),
       pch=as.numeric(unique(iris$Species)),
       col=brewer.pal(3, "Set1"))
windows()
output <- MDSplot(iris.rf, iris$Species, k=3, pch=as.numeric(iris$Species))
legend("topleft", legend=as.character(unique(iris$Species)),
       pch=as.numeric(unique(iris$Species)),
       col=brewer.pal(3, "Set1"))
# add a new point not in the dataset, try to print it on the plot
windows()
boxplot(iris)
# can't because MDS plot needs the proximity matrix which is in the Random forest function
# try to fit it as part of the Random Forest
# Append an artificial observation (9,9,9,9) with its own class label "new".
iris.outlier <- cbind(rbind(iris[,1:4], c(9,9,9,9)), c(as.character(iris$Species), "new"))
names(iris.outlier)[5] <- "Species"
iris.rf.outlier <- randomForest(Species ~ ., data=iris.outlier, importance=TRUE,
                                proximity=TRUE)
output <- MDSplot(iris.rf.outlier, iris.outlier$Species, k=3,
                  pch=as.numeric(iris.outlier$Species))
legend("topleft", legend=as.character(unique(iris.outlier$Species)),
       pch=as.numeric(unique(iris.outlier$Species)),
       col=brewer.pal(3, "Set1"))
o <- outlier(iris.rf.outlier$proximity)
summary(o)
windows()
boxplot(o)
# won't work since there is only one observation in the outlier category
#o <- outlier(iris.rf.outlier)
#o <- outlier(iris.rf.outlier, cls=iris.outlier$Species)
#o <- outlier(iris.rf.outlier$proximity, cls=iris.outlier$Species)
# Same artificial point, but labeled with an existing class ("setosa") so
# the class-wise outlier measure can be computed.
iris.outlier <- cbind(rbind(iris[,1:4], c(9,9,9,9)),
                      c(as.character(iris$Species), "setosa"))
names(iris.outlier)[5] <- "Species"
iris.rf.outlier <- randomForest(Species ~ ., data=iris.outlier, importance=TRUE,
                                proximity=TRUE)
output <- MDSplot(iris.rf.outlier, iris.outlier$Species, k=3,
                  pch=as.numeric(iris.outlier$Species))
legend("topleft", legend=as.character(unique(iris.outlier$Species)),
       pch=as.numeric(unique(iris.outlier$Species)),
       col=brewer.pal(3, "Set1"))
plot(outlier(iris.rf.outlier),
     col=brewer.pal(3, "Set1")[as.numeric(iris$Species)])
set.seed(1976)
iris.outlier <- cbind(rbind(iris[,1:4], c(9,9,9,9)),
                      c(as.character(iris$Species), "setosa"))
names(iris.outlier)[5] <- "Species"
# Assign the artificial point (row 151) to each class in turn and compare
# randomForest::outlier() against several hand-rolled variants of the same
# "MAD of inverse summed squared proximity" statistic.
for (i in 1:3)
{
  print(i)
  iris.outlier$Species[151] <- levels(iris.outlier$Species)[i]
  iris.rf.outlier <- randomForest(Species ~ ., data=iris.outlier,
                                  importance=TRUE, proximity=TRUE)
  o <- outlier(iris.rf.outlier)
  print(o[151])
  ind <- which(as.numeric(iris.outlier$Species) == i)
  # Variant 1: zero diagonal, n / rowSums(prox^2), MAD within the class.
  d <- (iris.rf.outlier$proximity)^2
  diag(d) <- 0
  m <- length(diag(d)) / apply(d, 1, sum)
  print((0.6745*(m[ind]-median(m[ind]))/median(abs(m[ind]-median(m[ind]))))[length(ind)])
  # Variant 2: same but keeping the diagonal.
  d <- (iris.rf.outlier$proximity)^2
  m <- length(diag(d)) / apply(d, 1, sum)
  print((0.6745*(m[ind]-median(m[ind]))/median(abs(m[ind]-median(m[ind]))))[length(ind)])
  # Variant 3: numerator is the class size instead of n.
  m <- length(ind) / apply(d, 1, sum)
  print((0.6745*(m[ind]-median(m[ind]))/median(abs(m[ind]-median(m[ind]))))[length(ind)])
  # Variant 4: no numerator scaling.
  m <- 1 / apply(d, 1, sum)
  print((0.6745*(m[ind]-median(m[ind]))/median(abs(m[ind]-median(m[ind]))))[length(ind)])
  # Variant 5: MAD taken over all samples, not just the class.
  m <- length(diag(d)) / apply(d, 1, sum)
  print((0.6745*(m[ind]-median(m[ind]))/median(abs(m-median(m))))[length(ind)])
  # Variant 6: zero diagonal again, without the 0.6745 MAD constant.
  d <- (iris.rf.outlier$proximity)^2
  diag(d) <- 0
  m <- length(diag(d)) / apply(d, 1, sum)
  print(((m[ind]-median(m[ind]))/median(abs(m[ind]-median(m[ind]))))[length(ind)])
}
# MAD per class
d <- (iris.rf.outlier$proximity)^2
diag(d) <- 0
m <- length(diag(d)) / apply(d, 1, sum)
by(m, iris.outlier$Species, function(m) 0.6745*(m-median(m))/median(abs(m-median(m))))
# try to fit it into each existing group and look for outlier indicator in all groups...
# Non-metric MDS (isoMDS) on Euclidean distances of the raw predictors,
# with observation 143 dropped (dist() requires distinct points).
X <- as.matrix(iris[, -5])
X <- X[-143,]
dim(X)
X.dist <- dist(X)
str(X.dist)
X.mds <- isoMDS(X.dist)
plot(X.mds$points, type = "n")
points(X.mds$points, pch = as.numeric(unique(iris$Species)),
       col=as.numeric(unique(iris$Species)))
# Shepard diagram: fitted vs observed dissimilarities for the isoMDS solution.
X.sh <- Shepard(X.dist, X.mds$points)
plot(X.sh, pch = ".")
lines(X.sh$x, X.sh$yf, type = "S")
######
# Effect of class priors (classwt) on predicted probabilities.
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE, ntree=3000, mtry=3)
iris.rf
noprior <- predict(iris.rf, type="prob")
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE, ntree=3000, mtry=3,
                        classwt=c(0.1,0.8,0.1))
iris.rf
withprior <- predict(iris.rf, type="prob")
all(noprior[,2] <= withprior[,2]) # FALSE
cbind(noprior[,2], withprior[,2], iris$Species)
####
# Hand adjustment of a single prediction using confusion-matrix error rates,
# redistributing probability mass between classes.
iris.rf <- randomForest(Species ~ ., data=iris, importance=TRUE,
                        proximity=TRUE, ntree=3000, mtry=3)
p <- predict(iris.rf, newdata=iris[134,-5], type="prob")
p[1] - iris.rf$confusion[2,1]*p[1] - iris.rf$confusion[3,1]*p[1]
p[2] + iris.rf$confusion[2,1]*p[1]
p[3] + iris.rf$confusion[3,1]*p[1]
p
p[2] - iris.rf$confusion[1,2]/sum(iris.rf$confusion[,2])*p[2] - iris.rf$confusion[3,2]/sum(iris.rf$confusion[,2])*p[2]
p[1] + iris.rf$confusion[1,2]/sum(iris.rf$confusion[,2])*p[2]
p[3] + iris.rf$confusion[3,2]/sum(iris.rf$confusion[,2])*p[2]
p
p[2] - iris.rf$confusion[1,2]/sum(iris.rf$confusion[,2])*p[2] - iris.rf$confusion[3,2]/sum(iris.rf$confusion[,2])*p[2]
p[1] + iris.rf$confusion[1,2]/sum(iris.rf$confusion[,2])*p[2]
p[3] + iris.rf$confusion[3,2]/sum(iris.rf$confusion[,2])*p[2]
p[3] - iris.rf$confusion[1,3]*p[3] - iris.rf$confusion[2,3]*p[3] |
99b31e6ce1710ecad67778e317eceff3d0165a73 | ba5a345909141b03e245fb0b082f500914e69267 | /scripts/Urs scripts/Individuals.R | 6586df3378effb0b3dee5554427d79f5a5060a90 | [] | no_license | urskalbitzer/paceR | d633974d8ca20ad51f2ee5a325356d057aac8ce4 | c9135cb0d816a364e02f8f1abc88bcc03a93f067 | refs/heads/master | 2020-12-30T19:58:42.306277 | 2015-07-09T17:47:57 | 2015-07-09T17:47:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 759 | r | Individuals.R | IDs.1 <- pace_db %>%
tbl ("tblIndividual") %>%
select (IndividualID = ID, Name = NameOf, ProjectID, DOB = DateOfBirth, SexID) %>%
# PrimateSpeciesID, CodeName, BirthdateSource, MotherID, MatrilineID, GroupAtBirthID,
# DateOfFirstSighting, DayDifference, AgeClassAtFirstSightingID,
# GroupAtFirstSightingID, VisionPhenotypeID, Comments, Comments_2,
# CommentsJFA, CommentsGenetics, TimeStampAdd, UserAddEdit, TimeStampEdit
collect ()
# Translate SexID into a readable label via the codeSex lookup table, then
# join onto IDs.1 and keep only the Description column (renamed IDSex).
IDs.2 <- pace_db %>%
  tbl ("codeSex") %>%
  select (SexID = ID, Sex, Description) %>%
  # no column excluded
  collect () %>%
  left_join (IDs.1, ., by = "SexID") %>%
  select (-SexID, -Sex) %>%
  rename (IDSex = Description)
# Expose the final individuals table as `IDs` and drop the intermediates
# explicitly. The previous ls(pattern = "IDs.") call treated "." as a regex
# wildcard and could have matched (and removed) unrelated objects.
IDs <- IDs.2
rm (IDs.1, IDs.2)
# View (IDs)
|
5ef390deba2890aaeba1497d5d3372556bcf1159 | 7fdb62d438fe71cd478acdc7dc6a5a9bb817d7fb | /WordCloud/wordcloud.R | 8234cf0ee17c5716f2a044ac430d2bb6b50ccf94 | [] | no_license | burakaydin/materyaller | 8f5ffa9f3a06ce2945e6839f075c649fd45c04d3 | 1eb331e38868f0b8307ea8bae104c9da7624151b | refs/heads/master | 2021-06-22T02:19:43.454340 | 2020-12-20T19:03:37 | 2020-12-20T19:03:37 | 60,400,643 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 825 | r | wordcloud.R | load("TextMineMAT.Rdata")
# Word-cloud visualizations of the precomputed term-document matrix tdm2.
head(tdm2)
str(tdm2)
# see http://btovar.com/2015/04/text-mining-in-r/
library(wordcloud)
# Raw counts: comparison cloud contrasts groups, commonality cloud shows shared terms.
comparison.cloud(tdm2, random.order=FALSE,
                 colors = c("deeppink", "yellowgreen", "red", "slateblue1"),
                 title.size=1.4, max.words=80,scale = c(5, 0.2))
commonality.cloud(tdm2, random.order=F,
                  colors = brewer.pal(8, "Paired"),
                  scale = c(5, 0.3))
## percentages ("yuzde"): rescale each column of rows 1:105 to proportions first
head(tdm2)
tdm3=apply(tdm2[1:105,], 2, function(x){x/sum(x)})
comparison.cloud(tdm3, random.order=FALSE,
                 colors = c("deeppink", "yellowgreen", "red", "slateblue1"),
                 title.size=1.4, max.words=80,scale = c(5, 0.2))
commonality.cloud(tdm3, random.order=F,
                  colors = brewer.pal(8, "Paired"),
                  scale = c(5, 0.3))
|
8aa3c506d93c9cc7ec5c0d57c9fe732e4cb25694 | 989814aeb27f2f15409022289c23d729354960cd | /structural-variation/scripts-projection/distribution_snps-LD-svs_all-chr.R | d069329d7a537dfab8eb6015e42c4cc60d5f34ab | [
"MIT"
] | permissive | HuffordLab/NAM-genomes | fc32949b689f546ec6e0a2663995de77e1e70fe6 | b510f8ec0514211f2205fd31ea4a8bbdfe071ba3 | refs/heads/master | 2023-04-18T21:26:46.147713 | 2022-03-02T20:17:55 | 2022-03-02T20:17:55 | 225,476,030 | 56 | 27 | MIT | 2021-11-05T21:40:45 | 2019-12-02T21:53:43 | R | UTF-8 | R | false | false | 2,290 | r | distribution_snps-LD-svs_all-chr.R | #### arguments for command line ----
args <- commandArgs(trailingOnly = TRUE)
# # help
# if (all(length(args) == 1 & args == "-h" | args == "--help")) {
#   cat("
# Description: this script merges SNPs and SVs hapmap files from usda parents to be used in
# Tassel 5 when projecting SVs into RILs
#
# Usage: ")
#   quit()
# }
# make sure the correct number of arguments are used
# you should provide 2 arguments
if (length(args) != 2) {
  stop("incorrect number of arguments provided.
Usage:
")
}
# assign arguments to variables
# arg 1: folder containing per-chromosome *.subset.ld files
# arg 2: output path for the histogram PNG
subset.folder <- args[1]
plot_name <- args[2]
# setwd("~/projects/sv_nams/analysis/reseq_snps_projection2")
# subset.folder = "ld/subset_high-ld-snps"
# subset.folder = "ld/subset_low-ld-snps"
# subset.folder = "ld/subset_random-snps"
# plot_name <- "ld/subset_high-ld-snps/dist-LD_SNPs-SVs_high.png"
# plot_name <- "ld/subset_low-ld-snps/dist-LD_SNPs-SVs_low.png"
# plot_name <- "ld/subset_random-snps/dist-LD_SNPs-SVs_random.png"
#### libraries ----
if(!require("data.table")) install.packages("data.table")
if(!require("ggplot2")) install.packages("ggplot2")
#### plot ----
# Read and stack the LD tables for chromosomes 1-10; file names may use either
# "chrN." or "chr-N." (fixed = TRUE means the "." is matched literally).
ld.files <- list.files(path = subset.folder, pattern = ".subset.ld", full.names = TRUE)
ld.df <- data.frame(stringsAsFactors = FALSE)
for (chr in 1:10) {
  ld.chr <- ld.files[grep(paste0("chr", chr, "."), ld.files, fixed = TRUE)]
  if (length(ld.chr) == 0) ld.chr <- ld.files[grep(paste0("chr-", chr, "."), ld.files, fixed = TRUE)]
  ld.chr <- fread(ld.chr, header = TRUE, data.table = FALSE)
  ld.df <- rbind(ld.df, ld.chr)
}
# Subtitle is inferred from the folder name (high/low LD or random subset).
if (grepl("high", subset.folder)) plot_subtitle <- "Subset of SNPs in high LD to SVs"
if (grepl("low", subset.folder)) plot_subtitle <- "Subset of SNPs in low LD to SVs"
if (grepl("random", subset.folder)) plot_subtitle <- "Subset of random SNPs"
# distribution of r2 of SNPs in LD with SVs
dist.ld <- ggplot(ld.df, aes(x = R2)) +
  geom_histogram(fill = "#900721", binwidth = 0.005) +
  labs(x = bquote("LD"~(r^2)),
       y = "Count",
       subtitle = plot_subtitle) +
  coord_cartesian(xlim = c(0, 1)) +
  theme(title = element_text(size = 15),
        axis.title = element_text(size = 20),
        axis.text = element_text(size = 15))
ggsave(plot = dist.ld, filename = plot_name, device = "png")
|
10a0aacc360d8fb273855e0c301a18829a9a6752 | ca320667401d6a391e393d8d456661a5a1878a58 | /R/suggested_speedups.R | a8422b1cae9137d134992b6f265b904b42c61082 | [] | no_license | HughParsonage/grattanReporter | 631672f8bd1b317cddc8817dc0c612e18b1c9b6f | fb474cf45e8257f4d5c3718b49e51229f5d2369e | refs/heads/master | 2022-05-01T14:52:15.929972 | 2019-03-10T01:30:09 | 2019-03-10T01:30:09 | 71,602,203 | 4 | 2 | null | 2020-04-16T13:07:22 | 2016-10-21T22:38:27 | TeX | UTF-8 | R | false | false | 294 | r | suggested_speedups.R |
# Read lines from a file, preferring the faster readr implementation when the
# readr package is installed and falling back to base readLines() otherwise.
# All arguments are forwarded unchanged to whichever backend is chosen.
read_lines <- function(...) {
  backend <- if (requireNamespace("readr", quietly = TRUE)) {
    readr::read_lines
  } else {
    readLines
  }
  backend(...)
}
# Write lines to a file, using readr::write_lines() when the readr package is
# available and base writeLines() otherwise. Arguments are forwarded unchanged.
write_lines <- function(...) {
  backend <- if (requireNamespace("readr", quietly = TRUE)) {
    readr::write_lines
  } else {
    writeLines
  }
  backend(...)
}
|
768af68dc4c20a443930daedbf37c2ec5ee7bb21 | 9374c7379cf458139eaae4bcebb9a1ac15ec8b11 | /tests/testthat/test-geom_junction_label_repel.R | 5cf7d41f59b99d2f45f1d7621ea2961e80501453 | [
"MIT"
] | permissive | dzhang32/ggtranscript | 2f7760b66e260f768636b29395d0f5b0f28e1c1f | b99da0d455f2e900e06d25530f23aa26d9436db9 | refs/heads/master | 2023-04-18T21:36:20.497042 | 2022-08-10T20:49:18 | 2022-08-10T20:49:18 | 441,221,677 | 94 | 6 | null | null | null | null | UTF-8 | R | false | false | 2,256 | r | test-geom_junction_label_repel.R | # manually create the expected introns
# Expected introns: exons of the two SOD1 transcripts converted via to_intron();
# `count` gives each intron a unique label used by the repel geom below.
test_introns <-
    sod1_annotation %>%
    dplyr::filter(
        type == "exon",
        transcript_name %in% c("SOD1-201", "SOD1-202")
    ) %>%
    to_intron(group_var = "transcript_name") %>%
    dplyr::mutate(
        count = dplyr::row_number()
    )
# create base plot to be used in downstream tests
test_introns_plot <- test_introns %>%
    ggplot2::ggplot(aes(
        xstart = start,
        xend = end,
        y = transcript_name
    ))
##### geom_junction_label_repel #####
testthat::test_that(
    # Fixed copy-pasted description: this test exercises
    # geom_junction_label_repel(), not geom_junction().
    "geom_junction_label_repel() works correctly",
    {
        # Default junctions with repelled labels (seed fixed for reproducibility).
        base_geom_junction_labels <- test_introns_plot +
            geom_junction() +
            geom_junction_label_repel(
                aes(label = count),
                seed = 32
            )
        # junction.y.max must be passed to both geoms so labels track the curves.
        w_param_geom_junction_labels <- test_introns_plot +
            geom_junction(
                junction.y.max = 0.5
            ) +
            geom_junction_label_repel(
                aes(label = count),
                junction.y.max = 0.5,
                seed = 32
            )
        # Aesthetic mappings (colour) shared between junctions and labels.
        w_aes_geom_junction_labels <- test_introns_plot +
            geom_junction(aes(colour = transcript_name)) +
            geom_junction_label_repel(
                aes(
                    label = count,
                    colour = transcript_name
                ),
                seed = 32
            )
        # Behavior under faceting by transcript.
        w_facet_geom_junction_labels <- test_introns_plot +
            geom_junction() +
            geom_junction_label_repel(
                aes(label = count),
                seed = 32
            ) +
            ggplot2::facet_wrap(transcript_name ~ ., drop = TRUE)
        # Snapshot comparisons against the stored reference images.
        vdiffr::expect_doppelganger(
            "Base geom_junction_label_repel plot",
            base_geom_junction_labels
        )
        vdiffr::expect_doppelganger(
            "With param geom_junction_label_repel plot",
            w_param_geom_junction_labels
        )
        vdiffr::expect_doppelganger(
            "With aes geom_junction_label_repel plot",
            w_aes_geom_junction_labels
        )
        vdiffr::expect_doppelganger(
            "With facet geom_junction_label_repel plot",
            w_facet_geom_junction_labels
        )
    }
)
|
0f509065f5eb1f2936f5f1c1a6d9c843dbf2bfa8 | 240e74793936b0f397d1ce7f00a0a3d859bcdc99 | /scripts/figure3a.R | a06abfda1014a01b5a8550d2a741419e5adb7528 | [] | no_license | shwetaramdas/maskfiles | c8cb95e9c85d79a9b1a21e25249591c3c74e6546 | 344b9d50c7d5112d4c73bc38733c19c2b1ab9e7c | refs/heads/master | 2020-03-22T06:28:08.096578 | 2018-12-23T21:56:48 | 2018-12-23T21:56:48 | 139,636,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,302 | r | figure3a.R | library(data.table)
library(ggplot2)
library(gridExtra)
library(data.table)
library(reshape2)
library(stringr)
# Figure 3a: per-1000-SNP windows, relate the number of founders with many
# heterozygous calls to the mean read depth of the window, across chromosomes.
names = c("AC","BN","BU","F3","M5","MR","WK","WN")
#geneannotations = read.table("geneannotation_rn6.txt",skip=1,stringsAsFactors=F,sep=" ")
num_windows = matrix(0, 8, 21)
#the read depth file is available from: https://www.dropbox.com/s/lqnsyjfaoqny84e/readdepths.txt.gz?dl=0
depths = fread("readdepths.txt",stringsAsFactors=FALSE,sep="\t",data.table=FALSE)
#depths[,1] = as.numeric(gsub("chr","",depths[,1]))
HETs = list()
N1S = list()
DEP = list()
for(chr in 20:1){
  CHR = chr
  print(chr)
  # Genotype matrix (chr1 was pre-split into 'temp'/'tempheader').
  if(CHR > 1){
    raw = fread(paste("chr",chr,"raw.raw",sep=""),stringsAsFactors=FALSE,skip=1)
  }else{
    raw = fread('temp',stringsAsFactors=FALSE,data.table=FALSE,header=FALSE)
  }
  raw = as.data.frame(raw[,-c(1:6)])
  raw = as.matrix(raw)
  map = fread(paste("chr",chr,".map",sep=""),stringsAsFactors=FALSE, data.table=FALSE)
  # SNP IDs come from the .raw header; strip the "_allele" suffix.
  if(CHR > 1){
    positions_in_raw = fread(paste("chr",CHR,"raw.raw",sep=""),nrow=1,stringsAsFactors=FALSE,data.table=FALSE,header=FALSE)
  }else{
    positions_in_raw = fread("tempheader",nrow=1,stringsAsFactors=FALSE,data.table=FALSE,header=FALSE)
  }
  positions_in_raw = positions_in_raw[,-c(1:6)]
  positions_in_raw = positions_in_raw[1,]
  positions_in_raw = gsub("_.*","",positions_in_raw)
  map = merge(positions_in_raw, map, by.x=1,by.y=2,sort=FALSE)
  chrdepth =depths[which(depths[,1] == paste0("chr",chr)),]
  depthschr = merge(map, chrdepth, by.x=4,by.y=2,sort=FALSE)
  # par(mfrow=c(2,1))
  # Per-window counts of heterozygous (coded 1) calls for the 8 founders,
  # plus the mean read depth of each window.
  N1s = matrix(0, nrow=8, ncol=round(ncol(raw)/1000))
  dep = c()
  GCs = c()
  positions = c()
  for(i in 0:(ncol(N1s)-2)){
    s = (i*1000)+1
    e = (i*1000)+1000
    ns = apply(raw[,s:e], 1, function(x){length(which(x==1))})
    N1s[,i+1] = ns
    positions = c(positions, map[s,4])
    # Fixed: removed a trailing empty argument in mean(), which errors at runtime.
    dep = c(dep,mean(as.numeric(depthschr[s:e,11])))
  }
  DEP[[CHR]] = dep
  N1S[[CHR]] = N1s
}
# Collapse all windows: num = founders with >250 het calls, d = mean depth.
num = c()
d = c()
for(chr in 1:20){
  for(i in 1:ncol(N1S[[chr]])){
    num = c(num, length(which(N1S[[chr]][,i] > 250)))
    d = c(d, DEP[[chr]][i])
  }
}
# Fixed: toplot was never populated before plotting; build it from num/d.
# Also set point size as a constant (outside aes) instead of mapping it.
toplot = data.frame(num = num, d = d)
ggplot(toplot, aes(x=as.factor(num),y=d)) + geom_boxplot() + geom_point(size=0.75) + labs(x="Num. founders with heterozygous calls", y="Read Depth")
ggsave("figure3a.pdf",width=3.5,height=3.5,device="pdf")
|
33bf314f9643a45ee88983a2671c4b4e128a8891 | 1f5294c69bb921d4e1214ee507cea86f22e202aa | /R/group_bymzisotopes3.R | 2547f3eeea9f66d4f664d1f305f7485d4774ff0d | [] | no_license | stolltho/xMSannotator | 5f01adac2b8d6bac909f5b06fb27123c8e025910 | 2b5e9a0af8ee9af2946e9ee17a26c6b5efcb04e4 | refs/heads/master | 2022-11-21T14:00:44.551583 | 2022-10-30T16:06:12 | 2022-10-30T16:06:12 | 253,958,191 | 0 | 0 | null | 2020-04-08T01:50:55 | 2020-04-08T01:50:55 | null | UTF-8 | R | false | false | 1,996 | r | group_bymzisotopes3.R | group_bymzisotopes3 <- function(dataA, max.mz.diff = 30,
numisotopes = 4) {
    # Group features (rows of dataA) by isotope pattern: feature j is linked to
    # every feature whose m/z falls within a max.mz.diff ppm window around
    # dataA$mz[j] + 1 .. + numisotopes. Overlapping groups are then merged and
    # the resulting list of row-index vectors is returned.
    # Step 1: candidate isotope groups, one per feature.
    mz_groups <- lapply(1:dim(dataA)[1], function(j) {
        commat = {
        }
        commzA = new("list")
        commzB = new("list")
        getbind_same <- c(j)
        for (isotopepattern in c(1:numisotopes)) {
            isotopemass = dataA$mz[j] + isotopepattern
            ppmb = (max.mz.diff) * (isotopemass/1e+06)
            getbind_same <- c(getbind_same, which(abs(dataA$mz - 
                isotopemass) <= ppmb))
        }
        # gid<-paste('parent',getbind_same,sep='')
        return(getbind_same)
    })
    del_list <- {
    }
    gid <- {
    }
    # length(mz_groups)
    # Step 2: merge groups sharing any member. Absorbed groups become c(0) and
    # are removed via del_list at the end of each outer iteration.
    # NOTE(review): mz_groups shrinks inside this loop while k iterates over the
    # original 1:length(mz_groups) range; the inner "n > length" break guard only
    # partly compensates. Confirm intended behavior before any refactor.
    for (k in 1:length(mz_groups)) {
        if ((k %in% del_list) == FALSE) {
            for (n in (k + 1):length(mz_groups)) {
                if (n > length(mz_groups)) {
                  break
                }
                com1 <- intersect(mz_groups[[k]], mz_groups[[n]])
                if (length(com1) > 0) {
                  mz_groups[[k]] <- c(mz_groups[[k]], mz_groups[[n]])
                  del_list <- c(del_list, n)
                  mz_groups[[n]] <- c(0)
                }
            }
            # mz_groups[[m]][[1]]<-unique(mz_groups[[m]][[1]])
        }
        if (length(del_list) > 0) {
            mz_groups <- mz_groups[-del_list]
        }
    }
    # Step 3: build "M<first index>" labels per member (gid is currently unused;
    # the commented return below would have exposed it).
    for (k in 1:length(mz_groups)) {
        for (m in 1:length(mz_groups[[k]])) {
            gid[mz_groups[[k]][m]] <- paste("M", mz_groups[[k]][1], 
                sep = "")
        }
    }
    # return(gid)
    return(mz_groups)
}
|
f446eb10b14ab8a1eae080ad4769342b2f149324 | 2c381c17bf826631df214c4ee9de13094e5efb5b | /tests/testthat/test_BiclusterExperiment.R | a2d5beda83a6ebbd829491bbee40cc59464593cf | [] | no_license | jonalim/mfBiclust | 572452e3fd3392e1cf8c3ccd36012297abb41104 | 23225f6ace79a6aa2088926e7cb05e80578fe410 | refs/heads/master | 2020-03-19T08:02:15.453038 | 2019-02-12T17:26:54 | 2019-02-12T17:26:54 | 136,170,076 | 1 | 0 | null | 2019-01-16T17:59:05 | 2018-06-05T11:54:09 | R | UTF-8 | R | false | false | 3,029 | r | test_BiclusterExperiment.R | context("BiclusterExperiment")
# RNG bookkeeping: ensure .Random.seed exists, remember it, and restore it at
# the bottom of the file so these tests do not disturb the global RNG state.
if (!exists(".Random.seed", mode="numeric")) sample(NA)
oldSeed <- .Random.seed
set.seed(1261534)
# 6x6 random matrix with an implanted 2x2 block of large values (a bicluster).
input <- matrix(runif(n = 36), nrow = 6)
input[1:2, 1:2] <- 20
bce <- BiclusterExperiment(m = input)
# Rank-deficient input used to exercise degenerate cases.
singular <- matrix(c(1, 2, 3, 4), nrow = 4, ncol = 4)
bce.singular <- BiclusterExperiment(singular)
# How I generated the reference instances of BiclusterExperiment
# ref_als_nmf <- addStrat(bce, k = 1, method = "als-nmf")
# ref_svd_pca <- addStrat(bce, k = 1, method = "svd-pca")
# ref_snmf <- addStrat(bce, k = 1, method = "snmf")
# addStrat(bce.singular, k = 2, method = "snmf", silent = TRUE)
# ref_nipals_pca <- addStrat(bce, k = 1, method = "nipals-pca")
# ref_plaid <- addStrat(bce, k = 1, method = "plaid")
# ref_spectral <- addStrat(bce, k = 1, method = "spectral")
# ref_bces <- list(bce.als_nmf = ref_als_nmf,
#                  bce.svd_pca = ref_svd_pca,
#                  bce.snmf = ref_snmf,
#                  bce.nipals_pca = ref_nipals_pca,
#                  bce.plaid = ref_plaid, bce.spectral = ref_spectral)
# save(ref_bces, file = "../testdata/ref_bces.rda")
# Load the saved reference objects (bce.als_nmf, bce.svd_pca, ...) into scope.
load(file = "../testdata/ref_bces.rda")
list2env(x = ref_bces, envir = environment())
test_that("BiclusterExperiment constructor works", {
  expect_true(validObject(bce))
})
# don't compare BiclusterExperiment@.__classVersion__
test_that("ALF-NMF works", {
  expect_equivalent(addStrat(bce, k = 1, method = "als-nmf"), bce.als_nmf)
})
test_that("SVD-PCA works", {
  expect_equivalent(addStrat(bce, k = 1, method = "svd-pca"), bce.svd_pca)
})
test.snmf <- addStrat(bce, k = 1, method = "snmf")
test_that("SNMF is accurate (sample biclustering)", {
  # snmf records its runtime in the NMFfit object in BiclusterStrategy@fit
  # we only care about the hard-bicluster matrices; all other portions
  # of the pipeline are tested in other tests
  expect_equivalent(clusteredSamples(getStrat(test.snmf, 1)),
                    clusteredSamples(getStrat(bce.snmf, 1)))
})
test_that("SNMF is accurate (feature biclustering)", {
  expect_equivalent(clusteredFeatures(getStrat(test.snmf, 1)),
                    clusteredFeatures(getStrat(bce.snmf, 1)))
})
# test_that("SNMF handles singular matrices correctly", {
#   expect_warning(addStrat(bce.singular, k = 2, method = "snmf",
#                           silent = TRUE),
#                  regexp = "snmf failed, switching to PCA")
# })
test_that("NIPALS-PCA works", {
  expect_equivalent(addStrat(bce, k = 1, method = "nipals-pca",
                             silent = TRUE),
                    bce.nipals_pca)
})
test_that("Plaid works", {
  expect_equivalent(addStrat(bce, k = 1, method = "plaid", silent = TRUE),
                    bce.plaid)
})
test_that("Spectral works", {
  expect_warning(test.spectral <- addStrat(bce, k = 1, method = "spectral",
                                           silent = TRUE))
  expect_equivalent(test.spectral, bce.spectral)
})
# Restore the RNG state captured at the top of the file.
assign(".Random.seed", oldSeed, envir=globalenv())
|
5b6dcd08c54ad81347999f8d653ec4ba585c26bd | 673e6e9de275bbbdc47320d5f7e3f4eb65017d79 | /man/corplot_2_var.Rd | ee61380a7777859ab5f45ac61409fdef1fdaa188 | [] | no_license | BaderLab/SummExpDR | a27ba1cb59881bc45a23b79349c713efff7d735e | 6b889f62213217deba7feb60085b0a4309ffe6d0 | refs/heads/master | 2023-06-05T16:00:31.485296 | 2020-10-13T20:15:13 | 2020-10-13T20:15:13 | 289,382,728 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 750 | rd | corplot_2_var.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotFunctions.R
\name{corplot_2_var}
\alias{corplot_2_var}
\title{2 variable correlation plot}
\usage{
corplot_2_var(
df,
var1,
var2,
color_by,
method = "pearson",
filter_by = NULL,
filter_class = NULL,
pt.size = 1,
alpha = 0.4,
legend_pt_size = 20,
legend_pt_shape = 15,
xlim = NULL,
ylim = NULL,
legend_title = TRUE
)
}
\arguments{
\item{df}{= data.frame}
\item{var1}{= x axis variable}
\item{var2}{= y axis variable}
\item{color_by}{= variable to color points by}
\item{filter_by}{= subset data by a particular categorical variable}
\item{filter_class}{= set of allowed classes for subset}
}
\description{
2 variable correlation plot
}
|
367e3e7e9bed29be8fafd1d5ea47acccc6ac8bd2 | 005756c92713239c4d82b72e723f6a1934318e45 | /app.R | 93311798317e1f30c8b89ce3beae5ec9cb336ed6 | [] | no_license | hytsang/hkex | e9f8956a341e0c8ad4964bf2c8279a1877ed892a | 8ba59b451c85dbc073f71a22f1674011d3374bc4 | refs/heads/master | 2020-12-30T11:27:40.247185 | 2017-04-10T11:32:11 | 2017-04-10T11:32:11 | 91,555,026 | 2 | 0 | null | 2017-05-17T08:54:48 | 2017-05-17T08:54:48 | null | UTF-8 | R | false | false | 2,702 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(dplyr)
library(lubridate)
library(jsonlite)
library(tidyr)
library(shiny)
library(DT)
# Avoid scientific notation in rendered tables; show 3 significant digits.
options("scipen"=100, "digits"=3)
# Lazy dplyr reference to the "hkex" table in the "hkdata" Postgres database;
# queries below are executed remotely until collect() is called.
hkexsmall <- tbl(src_postgres("hkdata"), "hkex")
# Define UI: sidebar with filters (position type, discloser type, minimum
# change, date range) and a main panel showing the resulting table.
ui <- fluidPage(
   titlePanel("HKEX Insider Trading"),
   sidebarLayout(
      sidebarPanel(
        radioButtons("position",
                     "Position",
                     choices = c("Long" = "Long Position",
                                 "Short" = "Short Position",
                                 "Lending pool" = "Lending Pool")
                     ),
        # Values mirror the HKEX disclosure form types (1, 2, 3A).
        radioButtons("formtype",
                     "Shareholder or director?",
                     choices = c("Individual" = "1",
                                 "Corporate shareholder" = "2",
                                 "Director" = "3A")
        ),
        sliderInput("changethreshold",
                    "Minimum change in position (%)",
                    min = 0,
                    max = 100,
                    step = 1,
                    value = 0
                    ),
        dateRangeInput("daterange",
                       label = 'Date range:',
                       start = Sys.Date() - 365,
                       end = Sys.Date(),
                       min = "2003-04-01",
                       max = Sys.Date()
        )
      ),
      mainPanel(
         dataTableOutput("tablenet")
      )
   )
)
# Define server logic
server <- function(input, output) {
   # Reactive table: pull notices matching the selected position, form type and
   # date range; within each (corporation, stock, person) group keep the first
   # and last notice and compute the net change in position between them.
   output$tablenet <- renderDataTable({
     noticestablenet <- hkexsmall %>% filter(position == input$position) %>% filter(formtype == input$formtype) %>% filter(date >= input$daterange[1] & date <= input$daterange[2]) %>% arrange(corporation, stock_code, canonicalname, desc(beforeafter), date) %>% group_by(corporation, stock_code, canonicalname) %>% filter(row_number() == 1 | row_number() == n()) %>% mutate(sharesdiff = value - first(value)) %>% filter(beforeafter == "sharesafter") %>% filter(abs(sharesdiff) >= input$changethreshold) %>% ungroup %>% select(corporation, stock_code, canonicalname, sharesdiff) %>% collect
     # Only render when something matched; stock codes become AASTOCKS links.
     if (nrow(noticestablenet) > 0) {
       noticestablenet %>% mutate(stock_code = paste0('<a href="http://www.aastocks.com/en/ltp/rtquote.aspx?symbol=', stock_code, '" target="_blank">', stock_code, '</a>')) %>% datatable(escape = FALSE, rownames = FALSE, colnames = c("Listed company", "Stock code", "Shareholder/director name", "Change in position (%)")) %>% formatRound(columns = "sharesdiff", digits = 2)
     }
   })
}
# Run the application
shinyApp(ui = ui, server = server)
|
1625c4157f144776fdfe5e3f7ad1d498d94f4dbc | 75bf5258a5b439f25741a7cecbd14c17c6a94f62 | /R/checkHeight_script.R | a06b0cfb7d953f95e736dd5398ad076fcb827b32 | [] | no_license | SebGGruber/sebExercise | 88070daec1b1540bce1ee45a9fe8c6eb6451d683 | c5349a5e4dbefbd79b73913ae39a4847f47b58e1 | refs/heads/master | 2022-11-18T11:46:43.247615 | 2018-11-14T23:18:26 | 2018-11-14T23:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,592 | r | checkHeight_script.R | library(dplyr)
# Toy student data set used by checkHeight().
# Build the data frame directly from typed vectors. The previous
# data.frame(cbind(...)) route coerced every column to character via the
# intermediate matrix and then needed transform()/as.numeric() round-trips
# to recover the numeric columns.
age = c(19, 22, 21, 23, 22, 20, 28, 25)
weight = c(50, 75, 80, 56, 75, 58, 65, 82)
height = c(1.66, 1.78, 1.90, 1.72, 1.83, 1.68, 1.70, 1.85)
sex = c("F", "M", "M", "F", "M", "F", "F", "M")
students = data.frame(age = age, weight = weight, height = height, sex = sex,
                      stringsAsFactors = FALSE)
students$name = c("Maria", "Franz", "Peter", "Lisa", "Hans", "Eva", "Mia", "Karl")
# Compute, for each student, the difference (in cm) between their height and
# the mean height of their sex group.
#
# @param students.input data frame with columns `sex` ("F"/"M"), `height`
#   (meters) and `name`; defaults to the `students` object defined above.
# @return data frame with columns `name` and `difference` (cm above/below the
#   sex-specific mean; negative means shorter than average).
checkHeight = function(students.input = students){
  # (An unused preallocated result.frame was removed here -- dead code.)
  # Per-sex mean heights; each is a 1-row data frame with a `mean` column.
  male.mean = students.input %>%
    filter(sex == "M") %>%
    summarise(mean = mean(height))
  female.mean = students.input %>%
    filter(sex == "F") %>%
    summarise(mean = mean(height))
  # apply() coerces each row to a character vector, hence the as.numeric()
  # below; multiplying by 100 converts meters to centimeters.
  height.diff = apply(
    students.input,
    MARGIN = 1,
    FUN = function(x) {
      if (x[["sex"]] == "F") {
        100*(as.numeric(x[["height"]]) - female.mean$mean)
      } else {
        100*(as.numeric(x[["height"]]) - male.mean$mean)
      }
    }
  )
  print("Yippie, I calculated the mean differences!")
  # Pair each student's name with the computed difference.
  data.frame(
    name = students.input$name,
    difference = height.diff
  )
}
checkHeight(students.input = students)
|
75737f3f4286eaeae726978513fea0b4aac2a27f | 8da83ca3904a0dafdfac5bad581669922224605d | /metaviswiz.R | 190e1ecb0c8c1080f5343ddaed5ff20d8ddc0b3e | [] | no_license | csapou/metaviswiz | 835de093762a6d7c8daafa5bd70d1482e528db47 | 800d91c4537e63ae340959f93d7f4269b8cd8c3a | refs/heads/master | 2021-08-28T01:28:31.540969 | 2021-08-13T10:08:15 | 2021-08-13T10:08:15 | 201,441,426 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,397 | r | metaviswiz.R | #Testing two factor aut
#############################################################################################
#Create testdata Start#
#############################################################################################
library(microbiome)
library(dplyr)
data(dietswap)
#Create Metadata only include samples from HE study and at the first time point
Metadata <- data.frame(dietswap@sam_data@.Data)
colnames(Metadata) <- dietswap@sam_data@names
Metadata<-filter(Metadata, group == "HE" & timepoint.within.group == 1)
#table(Metadata$nationality)
# Normalize sample names so they match the syntactic column names of the OTU table.
Metadata$sample <- gsub("-", ".", Metadata$sample)
#Create testdata (features as rows, samples as columns)
Testdata <- data.frame(dietswap@otu_table@.Data) #dim(Testdata) #222 samples and 130 features
#Subset according to selected samples
Testdata<-select(Testdata, one_of(Metadata$sample))
#Remove features if they are not present in the subset of samples
Testdata <- Testdata[rowSums(Testdata)>0,] #37 samples and 115 features
rm(dietswap)
#############################################################################################
#Create testdata End#
#############################################################################################
# CreateDistList --------------------------------------------------------------
# Build a named list of distance/dissimilarity objects from a count table,
# one entry per requested preprocessing + distance combination.
#
# Args:
#   x        count table with features as rows and samples as columns.
#   methods  character vector of method names (see `possiblemethods` below).
#            Naming scheme is <preprocessing><distance>, e.g.
#            "totalhellingerbray" = total-sum scaling, then Hellinger,
#            then Bray-Curtis.
# Returns: named list of stats::dist objects, always in the fixed order of
#          `possiblemethods` (same order as the original if-chain).
#
# Notes:
# - decostand() transformations used: "total", "hellinger", "log" (log leaves
#   zeroes as zeroes; equal sample sums are lost after hellinger/log).
# - Rarefaction (rrarefy) is stochastic and no seed is set here, so "rar*"
#   results differ between runs; each rar* method draws its own rarefied
#   table, matching the original implementation.
# - BUG FIX: "rarbray" previously computed a Euclidean distance on the
#   rarefied counts (copy-paste error); it now uses Bray-Curtis.
# - ILR is intentionally not implemented: the Aitchison distance (Euclidean
#   on clr) gives the same inter-sample distances (Gloor et al. 2017).
CreateDistList <- function(x, methods=c("totalbray", "totaleuclidean", "totalmanhattan", "totalhellingerbray", "totalhellingereuclidean", "totalhellingermanhattan", "totallogbray", "totallogeuclidean", "totallogmanhattan", "rarbray", "rareuclidean", "rarmanhattan", "offaitchison", "estaitchison")) {
  library(vegan)         # decostand(), vegdist(), rrarefy(); TODO: namespaced calls in a package
  library(compositions)  # clr()
  library(zCompositions) # cmultRepl() for zero imputation
  # Collapse duplicated method requests (duplicates would silently be built once anyway).
  if (sum(duplicated(methods)) > 0) {
    methods <- unique(methods)
    warning("Duplicated entries in method removed")
  }
  # Canonical list of implemented methods; also defines the output order.
  # Remember to update when adding more methods.
  possiblemethods <- c("totalbray", "totaleuclidean", "totalmanhattan",
                       "totalhellingerbray", "totalhellingereuclidean", "totalhellingermanhattan",
                       "totallogbray", "totallogeuclidean", "totallogmanhattan",
                       "rarbray", "rareuclidean", "rarmanhattan",
                       "rarhellingerbray", "rarhellingereuclidean", "rarhellingermanhattan",
                       "rarlogbray", "rarlogeuclidean", "rarlogmanhattan",
                       "offaitchison", "offlowaitchison", "estaitchison", "remaitchison",
                       "jaccard")
  for (unknown in setdiff(methods, possiblemethods)) {
    warning(paste("Nonexistant methods specified", unknown))
  }
  # Shared building blocks -----------------------------------------------------
  tx <- t(x)  # vegan expects samples as rows
  tss       <- function(m) decostand(m, method = "total")       # relative abundance
  hellinger <- function(m) decostand(m, method = "hellinger")
  logtrans  <- function(m) decostand(m, method = "log")         # zeroes stay zero
  rarefied  <- function() rrarefy(tx, min(colSums(x)))          # down-sample to smallest library
  aitchison <- function(m) vegdist(clr(m), method = "euclidean")  # Euclidean on clr = Aitchison
  # One zero-argument recipe per method; evaluated lazily below so only the
  # requested methods do any work.
  recipes <- list(
    totalbray               = function() vegdist(tss(tx), method = "bray"),
    totaleuclidean          = function() vegdist(tss(tx), method = "euclidean"),
    totalmanhattan          = function() vegdist(tss(tx), method = "manhattan"),
    totalhellingerbray      = function() vegdist(hellinger(tss(tx)), method = "bray"),
    totalhellingereuclidean = function() vegdist(hellinger(tss(tx)), method = "euclidean"),
    totalhellingermanhattan = function() vegdist(hellinger(tss(tx)), method = "manhattan"),
    totallogbray            = function() vegdist(logtrans(tss(tx)), method = "bray"),
    totallogeuclidean       = function() vegdist(logtrans(tss(tx)), method = "euclidean"),
    totallogmanhattan       = function() vegdist(logtrans(tss(tx)), method = "manhattan"),
    rarbray                 = function() vegdist(rarefied(), method = "bray"),  # FIX: was "euclidean"
    rareuclidean            = function() vegdist(rarefied(), method = "euclidean"),
    rarmanhattan            = function() vegdist(rarefied(), method = "manhattan"),
    rarhellingerbray        = function() vegdist(hellinger(rarefied()), method = "bray"),
    rarhellingereuclidean   = function() vegdist(hellinger(rarefied()), method = "euclidean"),
    rarhellingermanhattan   = function() vegdist(hellinger(rarefied()), method = "manhattan"),
    rarlogbray              = function() vegdist(logtrans(rarefied()), method = "bray"),
    rarlogeuclidean         = function() vegdist(logtrans(rarefied()), method = "euclidean"),
    rarlogmanhattan         = function() vegdist(logtrans(rarefied()), method = "manhattan"),
    # Pseudocount of 1: log/ln of 1 is 0, so former zeroes map back to zero.
    offaitchison            = function() aitchison(tx + 1),
    # Small pseudocount variant; TODO: expose the offset as an argument.
    offlowaitchison         = function() aitchison(tx + 0.0001),
    # Multiplicative simple replacement ("CZM") of zeroes before clr.
    estaitchison            = function() aitchison(cmultRepl(tx, method = "CZM", label = 0)),
    # Drop every feature (row) containing a zero before clr.
    remaitchison            = function() {
      nonzero_rows <- apply(x, 1, function(row) all(row != 0))
      aitchison(t(x[nonzero_rows, ]))
    },
    # Presence/absence Jaccard; identical to decostand(..., "pa") + jaccard.
    jaccard                 = function() vegdist(tx, method = "jaccard", binary = TRUE)
  )
  DistList <- list()
  for (m in possiblemethods) {
    if (m %in% methods) {
      DistList[[m]] <- recipes[[m]]()
    }
  }
  return(DistList)
}
#############################################################################################
#Testing CreateDistList Start#
#############################################################################################
# The first call deliberately passes unknown names ("dssd", "sdadg") and a
# duplicated "totalbray" to exercise the warning paths. The argument is now
# spelled out as `methods` (the original relied on partial matching of `method`).
DistList <- CreateDistList(x = Testdata,
                           methods = c("dssd", "sdadg", "totalbray", "totalmanhattan",
                                       "totalbray", "offaitchison", "remaitchison",
                                       "rarhellingerbray", "rarhellingereuclidean",
                                       "rarhellingermanhattan", "rarlogbray",
                                       "rarlogeuclidean", "rarlogmanhattan"))
# Default method set.
DistList2 <- CreateDistList(x = Testdata)
#############################################################################################
#Testing CreateDistList End#
#############################################################################################
# AppendTwoLists ---------------------------------------------------------------
# Append two lists of dist objects, e.g. a user-made list onto a DistList
# produced by CreateDistList().
#
# Args:
#   x, y  lists whose elements are all stats::dist objects.
# Returns: the concatenated list c(x, y).
# Stops on non-list input, non-dist elements, or duplicated element names;
# warns when the sample labels of the dist objects disagree.
# TODO: generalise to an arbitrary number of lists.
AppendTwoLists <- function(x, y) {
  # inherits() instead of class(x) != "list": class() can return a vector
  # (e.g. c("matrix", "array") in R >= 4.0), which breaks a scalar if ().
  if (!inherits(x, "list") || !inherits(y, "list")) {
    stop("Provide as lists, only two lists can be provided at a time")
  }
  # seq_along() instead of 1:length(): safe for empty lists.
  for (i in seq_along(x)) {
    if (!inherits(x[[i]], "dist")) {
      stop("Provide the first list containing dist objects. If distance or dissimilarity object
           is provided as a matrix or dataframe convert using eg.
           as.dist(yourdistmatrix, diag = FALSE, upper = FALSE)")
    }
  }
  for (i in seq_along(y)) {
    if (!inherits(y[[i]], "dist")) {
      stop("Provide the second list containing dist objects. If distance or dissimilarity object
           is provided as a matrix or dataframe convert using eg.
           as.dist(yourdistmatrix, diag = FALSE, upper = FALSE)")
    }
  }
  AppendedLists <- c(x, y)
  # Refuse silently ambiguous results: every entry must have a unique name.
  if (anyDuplicated(names(AppendedLists)) > 0) {
    stop("Duplicated names in appended List")
  }
  # Warn (but do not stop) when sample labels differ between dist objects;
  # identical() also handles label vectors of different length safely.
  for (i in seq_along(AppendedLists)) {
    if (!identical(labels(AppendedLists[[1]]), labels(AppendedLists[[i]]))) {
      print(i)
      warning("Labels in dist objects are not the same.
              Incongruence in comparison of labels in the first dist object
              with the dist object corresponding to the number printed above")
    }
  }
  return(AppendedLists)
}
#############################################################################################
#Testing AppendTwoLists Start#
#############################################################################################
#Not updated accordingly
#Realised that if c() ... is provided it already concatenates before creates problems when providing eg. distmatrix
#Have problems with assess elements in the dist objects. can use labels()
#User have to use the dist format otherwise modify the provided matrix
# NOTE: several calls below are *expected* to stop() with an error; run this
# section interactively, not via source() (sourcing would halt at the first stop).
# The argument is spelled out as `methods` (the original relied on partial
# matching of `method`).
List1 <- CreateDistList(Testdata, methods = c("totalbray"))
List2 <- CreateDistList(Testdata, methods = c("totaleuclidean", "totalmanhattan"))
distmatrix <- as.matrix(vegdist(decostand(t(Testdata), method = "total"),
                                method = "manhattan"))
distmatrixList <- list(distmatrix)
class(distmatrixList[[1]])
#Does it work
List3 <- AppendTwoLists(List1, List2)
#Provided one list as a matrix: expected error
List4 <- AppendTwoLists(List2, distmatrix)
#Provided second list that contained something that was not a dist object: expected error
List4 <- AppendTwoLists(List2, distmatrixList)
#Provided first list that contained something that was not a dist object: expected error
List4 <- AppendTwoLists(distmatrixList, List2)
distmatrixdims <- as.dist(distmatrix, diag = FALSE, upper = TRUE)
#distmatrixdims
distmatrixdims2 <- vegdist(decostand(t(Testdata), method = "total"),
                           method = "manhattan")
#distmatrixdims2
#distmatrixdims==distmatrixdims2 #It is the same don't think it matters then on the following analysis
#Changing labels to provoke the label-mismatch warning
vectorlabels <- labels(List3[[1]])
vectorlabels2 <- c(vectorlabels[2:length(vectorlabels)], vectorlabels[1])
vectorlabels3 <- c(vectorlabels[2:length(vectorlabels)], "Sample.1231322123")
#Basic one sample is missing
Testdata2 <- Testdata[2:length(Testdata), 2:length(Testdata)]
newlabels <- list(vegdist(decostand(t(Testdata2), method = "total"),
                          method = "manhattan"))
List3 <- AppendTwoLists(List2, newlabels)
#Renaming Testdata columns
Testdata2 <- Testdata
colnames(Testdata2) <- vectorlabels2
newlabels <- list(vegdist(decostand(t(Testdata2), method = "total"),
                          method = "manhattan"))
List3 <- AppendTwoLists(List2, newlabels)
##At some point I would like to make a function that can do it for more than one list
#How do I specify in a function that there is a variable number of entries (x, y, z...)
# (Hint: use `...` and Reduce(AppendTwoLists, list(...)) to support any number of lists.)
#AppendMultipleLists <- function(x) {
#  len<-length(x)
#  print(len)
#  for (i in 1:len) {
#    if (class(x[i])!="list") {
#      stop("Provide vector of list names c(List1, List2...")
#      print(i)
#    }
#  }
#}
#List3<-AppendMultipleLists(c(List1, List2))
#############################################################################################
#Testing AppendTwoLists End#
#############################################################################################
# PairProtest ------------------------------------------------------------------
# Pairwise Procrustes comparison (vegan::protest) of a list of dist objects.
# Each distance matrix is ordinated with capscale(); every pair of ordinations
# is compared with protest(), and three summaries are collected:
#   Cordist = 1 - Procrustes correlation (usable as a distance),
#   Cor     = raw Procrustes correlation,
#   SS      = Procrustes sum of squares.
#
# Args:
#   x  named list of stats::dist objects on the same samples
#      (e.g. from CreateDistList() / AppendTwoLists()).
# Returns: list of three dist objects ("Cordist", "Cor", "SS") over the
#          methods; suitable as input to MetaVisWiz().
PairProtest <- function(x) {
  library(vegan)  # capscale(), protest(); TODO: namespaced calls in a package
  # Input validation (kept consistent with AppendTwoLists) ---------------------
  if (!inherits(x, "list")) {
    stop("Provide a list of dist objects")
  }
  for (i in seq_along(x)) {
    if (!inherits(x[[i]], "dist")) {
      stop("Provide list containing dist objects. If distance or dissimilarity object
           is provided as a matrix or dataframe convert using eg.
           as.dist(yourdistmatrix, diag = FALSE, upper = FALSE)")
    }
  }
  if (anyDuplicated(names(x)) > 0) {
    stop("Duplicated names in appended List")
  }
  for (i in seq_along(x)) {
    if (!identical(labels(x[[1]]), labels(x[[i]]))) {
      print(i)
      warning("Labels in dist objects are not the same.
              Incongruence in comparison of labels in the first dist object
              with the dist object corresponding to the number printed above")
    }
  }
  # Pairwise Procrustes analysis -----------------------------------------------
  n <- length(x)
  # Empty n x n frames with method names on both dimensions.
  template <- setNames(data.frame(matrix(NA_real_, nrow = n, ncol = n)), names(x))
  row.names(template) <- names(template)
  ProCruPairCordist <- template
  ProCruPairCor     <- template
  ProCruPairSS      <- template
  # protest() uses the symmetric Procrustes solution, so t0 and ss do not
  # depend on argument order; each unordered pair is therefore computed only
  # once and written to the lower triangle (halving the work of the original
  # full i x j loop). as.dist() below reads only the lower triangle, so the
  # returned objects are unchanged.
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      prot <- protest(capscale(x[[i]] ~ 1), capscale(x[[j]] ~ 1))
      ProCruPairCordist[j, i] <- 1 - prot$t0
      ProCruPairCor[j, i]     <- prot$t0
      ProCruPairSS[j, i]      <- prot$ss
    }
    # Progress indicator (protest permutation tests can be slow).
    print(paste("Percentage run", (i / (n - 1)) * 100))
  }
  ListProCru <- list(
    Cordist = as.dist(ProCruPairCordist),
    Cor     = as.dist(ProCruPairCor),
    SS      = as.dist(ProCruPairSS)
  )
  # as.dist() defaults: diag = FALSE, upper = FALSE.
  return(ListProCru)
}
#############################################################################################
#Testing PairProtest Start#
#############################################################################################
# Argument spelled out as `methods` (the original relied on partial matching of `method`).
DistList <- CreateDistList(x = Testdata,
                           methods = c("totaleuclidean", "totalmanhattan",
                                       "totalbray", "totalhellingermanhattan"))
##Does the function method work
#testing1<-PairProtest(DistList, method="cordist")
#testing2<-PairProtest(DistList, method="cor")
#testing3<-PairProtest(DistList, method="ss")
testing <- PairProtest(DistList)
# Spot-check: values in the returned dist objects should match direct protest() runs.
prot <- protest(capscale(DistList$totalbray ~ 1), capscale(DistList$totalhellingermanhattan ~ 1))
prot$t0
prot2 <- protest(capscale(DistList$totaleuclidean ~ 1), capscale(DistList$totalmanhattan ~ 1))
prot2$t0
#############################################################################################
#Testing PairProtest End#
#############################################################################################
# MetaVisWiz -------------------------------------------------------------------
# Create the meta-PCoA overview figure from the output of PairProtest().
#
# Args:
#   x       list with dist objects "Cordist" (1 - Procrustes correlation),
#           "Cor" (raw correlation) and "SS" (Procrustes sum of squares),
#           as returned by PairProtest().
#   method  which dissimilarity to ordinate: "Cordist" (default) or "SS".
#
# Side effects: writes "Metaviswiz.pdf" (PCoA + density + stress + scree) and
# "MetaviswizCorgram.pdf" (correlogram of x$Cor) to the working directory.
# Returns: list with the ggplot objects (Stress, PCoA, Scree, DensityCor) and
# the per-method Metadata data frame underlying the PCoA.
MetaVisWiz <- function(x, method="Cordist") {
  library(vegan)     # capscale(), stressplot()
  library(reshape2)  # melt()
  library(tidyr)     # unite()
  library(gridExtra) # grid.arrange()
  library(corrplot)
  library(ggplot2)   # FIX: used throughout but was never attached here
  # Validate once; the same slot feeds the ordination and the stress plot
  # (replaces three repeated if/else ladders in the original).
  if (!method %in% c("Cordist", "SS")) {
    stop("Specify valid method")
  }
  # Collects all plots plus the PCoA metadata for the caller.
  FigureList <- list()
  # PCoA (unconstrained capscale) of the between-method dissimilarities.
  PCoAObject <- capscale(x[[method]] ~ 1)
  ###############################################
  # Stress plot: ordination distance vs observed dissimilarity
  stress <- stressplot(PCoAObject)
  df <- melt(as.matrix(stress))   # long-format ordination distances
  names(df) <- c("rowOrd", "colOrd", "OrdDist")
  df2 <- melt(as.matrix(x[[method]]))  # long-format observed dissimilarities
  names(df2) <- c("rowObs", "colObs", "ObsDism")
  # Shared "row_col" key so the two long tables can be merged.
  df <- unite(df, mergecol, c(rowOrd, colOrd), remove = FALSE)
  df2 <- unite(df2, mergecol, c(rowObs, colObs), remove = FALSE)
  ggstress <- merge(df, df2, by = "mergecol")
  FigureList$Stress <- ggplot(ggstress) +
    geom_point(aes(ObsDism, OrdDist), size = 1) +
    ggtitle("Stress plot") +
    labs(x = "Observed dissimilarity", y = "Ordination distance") +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          axis.title = element_text(size = 12))
  ###############################################
  # PCoA plot
  eig <- PCoAObject$CA$eig
  # Percent variance explained by the first axes, used for axis labels
  # (extend to eig_1_2[3:4] if MDS3/MDS4 should also be plotted).
  eig_1_2 <- eig[1:4] / sum(eig) * 100
  eig_1 <- paste("PCoA1", round(eig_1_2[1], digits = 2), "% variance")
  eig_2 <- paste("PCoA2", round(eig_1_2[2], digits = 2), "% variance")
  # Site (= method) scores live in the ca slot of the capscale object.
  PCoACA <- PCoAObject$CA
  PCoA <- as.data.frame(PCoACA$u)
  # Keep three axes (assumes >= 3 positive eigenvalues, i.e. at least a
  # handful of methods are compared); adjust the names here for more axes.
  colnames(PCoA) <- c("MDS1", "MDS2", "MDS3")
  PCoA$Sample <- row.names(PCoA)
  # Derive the preprocessing ("Trans") and distance ("Dist") labels from the
  # method names by first-match-wins substring search (same precedence as the
  # original nested ifelse() ladders). The glob-style "*" in the original
  # patterns was dropped: grepl() takes regular expressions, not globs.
  match_first <- function(nms, patterns, labels) {
    out <- rep("Other", length(nms))
    for (p in seq_along(patterns)) {
      hit <- out == "Other" & grepl(patterns[p], nms)
      out[hit] <- labels[p]
    }
    out
  }
  MetadataProC <- data.frame(Sample = rownames(PCoA))
  MetadataProC$Trans <- match_first(
    MetadataProC$Sample,
    # FIX: "[Rr]ar" also matches the lower-case "rar*" names produced by
    # CreateDistList (the original "Rar" pattern never matched them, so
    # rarefied methods fell through to "Other").
    patterns = c("totallog", "totalhellinger", "clr", "ilr", "total", "max",
                 "freq", "normalize", "pa", "hellinger", "log", "chi.square",
                 "CSS", "TMM", "DESeq", "[Rr]ar"),
    labels = c("TSS_log", "TSS_hellinger", "clr", "ilr", "TSS", "max",
               "freq", "norm", "pa", "hellinger", "log", "chisq",
               "CSS", "TMM", "DESeq", "Rarefy"))
  MetadataProC$Dist <- match_first(
    MetadataProC$Sample,
    patterns = c("manhattan", "euclidean", "canberra", "bray", "kulczynski",
                 "jaccard", "gower", "altGower", "horn", "binomial", "clr"),
    labels = c("manhattan", "euclidean", "canberra", "bray", "kulczynski",
               "jaccard", "gower", "altGower", "horn", "binomial", "euclidean"))
  # Merge ordination coordinates with the derived labels.
  MetadataProC2 <- merge(MetadataProC, PCoA, by = "Sample")
  # Colour scheme for the preprocessing / normalisation step.
  ProCCol <- c("chisq" = "#E41A1C", #Red
               "freq" = "#4DAF4A", #Green
               "max" = "#FFFF33", #Yellow
               "norm" = "#A65628", #Brown
               "clr" = "#E41A1C", #Red
               "ilr" = "#9f1214", #Red
               "CSS" = "#ed5e5f", #Light red
               "DESeq" = "#377EB8", #Light blue
               "TMM" = "#265880", #Dark blue
               "hellinger" = "#4DAF4A", #Light green
               "TSS_hellinger" = "#357933", #Dark green
               "log" = "#984EA3", #Purple
               "TSS_log" = "#5b2e61", #Dark purple
               "pa" = "#FF7F00", #Orange
               "Rarefy" = "#FFFF33", #Ligt yellow
               "TSS" = "#A65628", #Orange brown
               "Other" = "#FF007F")
  # Shape scheme for the beta-diversity measure.
  ProCShape <- c("altGower" = 9,
                 "binomial" = 5,
                 "bray" = 3,
                 "canberra" = 6,
                 "euclidean" = 0,
                 "gower" = 8,
                 "horn" = 2,
                 "jaccard" = 1,
                 "kulczynski" = 7,
                 "manhattan" = 4,
                 "Other" = 11)
  FigureList$Metadata <- MetadataProC2
  # PCoA scatter; zero-width jitter keeps points at their true coordinates
  # while leaving geom_jitter() available if overlap must be resolved.
  FigureList$PCoA <- ggplot(MetadataProC2) +
    geom_jitter(aes(MDS1, MDS2, col = Trans, shape = Dist),
                width = 0.00, height = 0.00, alpha = 0.8, size = 3, stroke = 1.5) +
    scale_color_manual(values = ProCCol) +
    scale_shape_manual(values = ProCShape) +
    ggtitle("PCoA") +
    labs(colour = "Preprocessing", shape = "beta-diversity", x = eig_1, y = eig_2) +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          axis.title = element_text(size = 12), legend.position = "bottom")
  #scale_y_reverse() / scale_x_reverse() can be added to ease visual comparison.
  ###############################################
  # Scree plot of all eigenvalues (as % of total inertia)
  screeplot <- data.frame(PCoAObject$CA$eig)
  colnames(screeplot) <- c("eig")
  screeplot$eig <- screeplot$eig / sum(screeplot$eig) * 100
  screeplot <- tibble::rownames_to_column(screeplot, "MDS")
  # Order the axes numerically (MDS1, MDS2, ...) instead of alphabetically.
  screeplot$MDS <- factor(screeplot$MDS, levels = sprintf("MDS%d", seq_along(screeplot$eig)))
  FigureList$Scree <- ggplot(screeplot, aes(x = MDS, y = eig)) +
    geom_bar(stat = "identity") +
    labs(x = "MDS", y = "eig (%)") +
    ggtitle(paste("Scree plot ")) +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          axis.title = element_text(size = 12), axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
  ###############################################
  # Histogram / density of the pairwise Procrustes correlations: a sensitivity
  # check -- low or widely spread correlations indicate the preprocessing /
  # distance choice matters and deserves further investigation.
  Correls <- data.frame(dist = as.vector(x$Cor))
  FigureList$DensityCor <- ggplot(Correls, aes(x = dist)) +
    # ..density.. scales the histogram to match geom_density()
    geom_histogram(aes(y = ..density..), binwidth = 0.01, colour = "black", fill = "white") +
    geom_density(alpha = .25, fill = "#FF6666") +
    labs(x = "Correlation", y = "Density") +
    ggtitle(paste("Density plot ")) +
    xlim(0, 1) +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          axis.title = element_text(size = 12))
  # (FIX: removed a stray `FigureList$DensityCo` line -- a typo that only
  #  evaluated the DensityCor plot via partial matching and discarded it.)
  ###############################################
  # Combined overview figure: large PCoA on top, density/stress/scree below.
  lay <- rbind(c(1, 1, 1, 1),
               c(1, 1, 1, 1),
               c(2, 2, 3, 4))
  pdf(paste0("Metaviswiz", ".pdf"), width = 12.5, height = 10)
  grid.arrange(FigureList$PCoA, FigureList$DensityCor, FigureList$Stress,
               FigureList$Scree, layout_matrix = lay)
  dev.off()
  ###############################################
  # Correlogram of the raw correlations; users can tweak corrplot() directly.
  pdf(paste0("MetaviswizCorgram", ".pdf"), width = 12.5, height = 10)
  corrplot(as.matrix(x$Cor),
           type = "upper",
           order = "hclust",
           tl.col = "black",
           tl.srt = 25,
           addCoef.col = "white",
           diag = FALSE,
           number.digits = 3,
           number.cex = 0.5,
           cl.lim = c(0, 1))
  dev.off()
  return(FigureList)
}
#############################################################################################
#Testing MetaVisWiz Start#
#############################################################################################
# Build every implemented method. Argument spelled out as `methods` (the
# original relied on partial matching of `method`).
DistList <- CreateDistList(x = Testdata,
                           methods = c("totalbray", "totaleuclidean", "totalmanhattan",
                                       "totalhellingerbray", "totalhellingereuclidean",
                                       "totalhellingermanhattan", "totallogbray",
                                       "totallogeuclidean", "totallogmanhattan",
                                       "rarbray", "rareuclidean", "rarmanhattan",
                                       "rarhellingerbray", "rarhellingereuclidean",
                                       "rarhellingermanhattan", "rarlogbray",
                                       "rarlogeuclidean", "rarlogmanhattan",
                                       "offaitchison", "offlowaitchison", "estaitchison",
                                       "remaitchison", "jaccard")) #Getting warning from "totallogbray", "totallogeuclidean", "totallogmanhattan"
# DistList <- CreateDistList(x=Testdata)
Pairpro <- PairProtest(DistList)
VisWiz <- MetaVisWiz(Pairpro)
Meta <- VisWiz$Metadata
# Interactive inspection of the PCoA.
library(plotly)
ggplotly(VisWiz$PCoA)
#############################################################################################
#Testing MetaVisWiz End#
#############################################################################################
#AllOrdi
#Function to create individual PCA and PCoA |
e856d9485e93ac9339898b69fe93a442a1fa0a8d | a7432a15fbaef2273640f0a6e043c2345838b74b | /03_Matrix_Factorization_CF.R | 334131956a797b0f11cd10a1a0ebd631ec5c8ec9 | [] | no_license | RooneySong/Collaborative_Filtering | 82b1aa51b1ec30743fa9d2ae08e496ec12287c1c | 5548448bba56cc08a96d1affeb3ed3ea43882718 | refs/heads/master | 2023-01-24T11:48:34.962006 | 2020-12-07T20:57:29 | 2020-12-07T20:57:29 | 319,366,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,592 | r | 03_Matrix_Factorization_CF.R | ###########################
# Input Dataframe Example #
###########################
# User ID Item 1 Item 2 ... Item 4 Item 5
# 1 7345 6 10 7 9
# 2 12431 NA 6 NA 7
# 3 22434 8 10 10 10
# 4 42635 7 9 7 7
# 5 45659 8 8 9 10
###########
# Library #
###########
library(dplyr)

###################################
# Loading Dataset & Preprocessing #
###################################
# First column of the CSV is the user ID; remaining columns are per-item
# ratings with NA marking "not rated".
data_rating <- read.csv('C:/Users/user/Desktop/Recommender_System/Data/df_rating.csv')
mtx_rating <- as.matrix(data_rating[, -1])

###########################
# Matrix Factorization CF #
###########################
# Factorize R (users x items) into latent-factor matrices P (users x k) and
# Q (items x k) so that R ~= P %*% t(Q), fitting only the observed (non-NA)
# entries by stochastic gradient descent with L2 regularization.
num_user <- nrow(mtx_rating)
num_item <- ncol(mtx_rating)
k <- 5                # number of latent factors
lambda <- 0.1         # weight of the L2 penalty
learning_rate <- 0.01
epoch <- 1e3

R <- as.matrix(mtx_rating)
P <- matrix(runif(num_user * k), nrow = num_user)
Q <- matrix(runif(num_item * k), nrow = num_item)

for (e in seq_len(epoch)) {
  for (u in seq_len(num_user)) {
    for (i in seq_len(num_item)) {
      if (!is.na(R[u, i])) {
        # Prediction error for entry (u, i): r_ui - <q_i, p_u>.
        # (The original relied on %>% binding tighter than binary `-` to
        # scalarize the 1x1 matrix product; crossprod() is explicit.)
        e_ui <- as.numeric(R[u, i] - crossprod(Q[i, ], P[u, ]))
        # Regularized gradient steps; the Q update deliberately uses the
        # freshly updated P[u, ], matching the original update order.
        P[u, ] <- P[u, ] + learning_rate * (e_ui * Q[i, ] - lambda * P[u, ])
        Q[i, ] <- Q[i, ] + learning_rate * (e_ui * P[u, ] - lambda * Q[i, ])
      }
    }
  }
  # Report training MAE over observed entries every 100 epochs.
  if (e %% 1e2 == 0) {
    cat('Epoch:', e, 'MAE:', mean(abs(R - P %*% t(Q)), na.rm = TRUE), '\n')
  }
}

# predicting ratings for every user/item pair
mtx_rating_pred <- P %*% t(Q)

# evaluation: MAE over the observed (non-NA) ratings
mean(abs(mtx_rating_pred - mtx_rating), na.rm = TRUE)
|
6f4aafa9e25a6cfe36debfd395fb1d02e565678b | ead4cb575788865d09de14bda315b278324a42e4 | /R/update_library_path.R | 5b3eeee3283ba2aa3bbee0e4119643bc00310aa6 | [] | no_license | tmsinclair/RICT | 123a7349e608867577ab82566077eacf2c7819d1 | 46befb52f3b37ffb276a1331ce4ee61d0119592a | refs/heads/master | 2022-04-08T14:00:40.465414 | 2020-03-19T12:34:07 | 2020-03-19T12:34:07 | 248,495,325 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 127 | r | update_library_path.R | .libPaths(c("U:/R/library", .libPaths()))
# Prefer pre-built binary packages over compiling from source.
options(pkgType = "binary")
# NOTE(review): the package name is an empty placeholder -- fill in the
# package(s) to install into the U: drive library before running.
install.packages("", lib = "U:/R/library")
# library() with no arguments lists the packages available in .libPaths().
library()
|
dca6fb106c05bdbc5e2ca88f58ea75533de3d5e4 | 47bddfe0c98960a69a335e7721fa1df227549648 | /writeup/cogsci2018/analysis/study2a/IAT_utils.R | 7dfad9ed19dadefb0509ed47f9d70bd557279b0f | [] | no_license | mllewis/IATLANG | d604e8e376bf211357c6dac6d46d34702b4691c2 | 451e273ec9adaf3ca6755108793f358958559c6c | refs/heads/master | 2023-02-09T16:47:26.413201 | 2023-02-06T17:16:47 | 2023-02-06T17:16:47 | 109,060,091 | 16 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,985 | r | IAT_utils.R | ## IAT effect size utility functions for embedding models ##
# Wrapper: compute the IAT effect size for one test specification.
# `df` carries a test_name, a bias_type, and the category/attribute word
# lists; `model` is the word-embedding lookup table. Returns a one-row
# data.frame with the test name, bias type, and effect size.
get_ES <- function(df, model) {
  test_name <- pluck(df, "test_name")
  print(test_name)
  word_pairs <- prep_word_list(df[-1:-2])
  swab_scores <- get_swabs(word_pairs, model)
  es <- get_sYXab(swab_scores)
  data.frame(
    test = test_name,
    bias_type = pluck(df, "bias_type"),
    effect_size = es
  )
}
# Build every unique (category word, attribute word) pairing from the raw
# word list, in long format with the originating column names retained.
prep_word_list <- function(word_list) {
  # Strip leading metadata entries if they are still attached.
  if (length(word_list) > 4) {
    word_list <- word_list[-1:-2]
  }
  combos <- cross_df(word_list)
  categories_long <- gather(combos, key = "category_type",
                            value = "category_value", category_1:category_2)
  pairs_long <- gather(categories_long, key = "attribute_type",
                       value = "attribute_value", attribute_1:attribute_2)
  distinct(pairs_long, category_value, attribute_value, .keep_all = TRUE)
}
# function on the top right of Caliskan pg 2
# For every (category word, attribute word) pair in `df`, computes the
# cosine similarity under `model`, averages per category word within each
# attribute set, and takes the difference of the two attribute means.
# Assumes `df` has columns category_type, category_value, attribute_type,
# attribute_value (as produced by prep_word_list()). Output: one row per
# category word with columns attribute_1, attribute_2 and
# swab = attribute_1 - attribute_2.
get_swabs <- function(df, model){
  df %>%
    # rowwise so each word pair gets its own cosine similarity
    rowwise() %>%
    mutate(cosine_sim = get_word_distance_cos(model, category_value, attribute_value)) %>%
    ungroup() %>% # gets rid of rowwise error message
    group_by(category_type, category_value, attribute_type) %>%
    # na.rm guards against missing similarity values
    summarize(word_attribute_mean = mean(cosine_sim, na.rm = TRUE)) %>%
    spread(attribute_type, word_attribute_mean) %>%
    mutate(swab = attribute_1 - attribute_2)
}
# Cosine similarity between the embedding vectors of two words.
#
# `model` is a data frame whose `target_word` column holds the (lower-case)
# vocabulary and whose remaining columns hold the embedding dimensions.
# Returns NA_real_ when either word is absent from the model, so callers
# can aggregate with na.rm = TRUE (see get_swabs()); the original crashed
# on out-of-vocabulary words when indexing row 1 of an empty matrix.
get_word_distance_cos <- function(model, w1, w2) {
  w1_vec <- filter(model, target_word == tolower(w1)) %>% select(-1) %>% as.matrix()
  w2_vec <- filter(model, target_word == tolower(w2)) %>% select(-1) %>% as.matrix()
  if (nrow(w1_vec) == 0 || nrow(w2_vec) == 0) {
    return(NA_real_)
  }
  lsa::cosine(w1_vec[1, ], w2_vec[1, ])
}
# Effect size function from Caliskan et al. (pg. 2, top right): the
# difference between the two categories' mean association scores,
# standardized by the SD of all per-word association scores.
#
# `df` must contain columns `category_type` and `swab` (from get_swabs()).
# Returns a single numeric value. (The original assigned the pipeline to a
# dead variable `k` and used `T` for TRUE; the result is now returned
# directly and visibly.)
get_sYXab <- function(df) {
  sYXab_denom <- sd(df$swab, na.rm = TRUE)
  df %>%
    group_by(category_type) %>%
    summarize(mean_swab = mean(swab, na.rm = TRUE)) %>%
    spread(category_type, mean_swab) %>%
    summarize(sYXab_num = category_1 - category_2) %>%
    transmute(sYXab = sYXab_num / sYXab_denom) %>%
    unlist(use.names = FALSE)
}
|
accbdd6497e5455bdcfee1dcb37d5351bf3cfae4 | bf8556f971d62c20c8a3942636c3edb531c9ea6e | /figure/plot3.R | 84a7959b675d502bb607f3364d57ce22ad1a7ca5 | [] | no_license | jaguayor/ExData_Plotting1 | f594639e40cb49c1906a27b0ac91fff4decf99ab | 13146e70714269b4f76e3124b1e5ae63c15bb4fb | refs/heads/master | 2020-04-24T17:45:42.921983 | 2019-02-24T21:03:22 | 2019-02-24T21:03:22 | 172,158,023 | 0 | 0 | null | 2019-02-23T01:39:41 | 2019-02-23T01:39:40 | null | UTF-8 | R | false | false | 1,661 | r | plot3.R | #Reading, naming and subsetting power consumption data
#reading the data
# NOTE(review): skip=1 discards the header row, so columns arrive as V1..V9
# and are renamed manually below; header=TRUE would be tidier.
energy <- read.table("household_power_consumption.txt",skip=1,sep=";")
# name the columns: instead of V1, V2, ... use the dataset's variable names
names(energy) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# subset to keep only the two required dates (1-2 Feb 2007)
subenergy <- subset(energy,energy$Date=="1/2/2007" | energy$Date =="2/2/2007")
# Transform Date and Time from character into Date and POSIXlt objects
subenergy$Date <- as.Date(subenergy$Date, format="%d/%m/%Y")
subenergy$Time <- strptime(subenergy$Time, format="%H:%M:%S")
# strptime() attaches today's date to every time-of-day, so rewrite the Time
# column with the actual dates 2007-02-01 and 2007-02-02.
# NOTE(review): this assumes exactly 1440 one-minute rows per day and that
# the character output of format() round-trips through the column -- verify.
subenergy[1:1440,"Time"] <- format(subenergy[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subenergy[1441:2880,"Time"] <- format(subenergy[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# open an empty plot frame (type="n") scaled to the data
plot(subenergy$Time,subenergy$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
# draw the three sub-metering series on the same graph
with(subenergy,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subenergy,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subenergy,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
# add the legend
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# annotate the graph
title(main="Energy sub-metering")
# copy the on-screen device to plot3.png and close the PNG device
dev.copy(png,file="plot3.png")
dev.off()
|
5f0ae0adceb09a282db85b875abff180e3ae36f8 | b3be9c0fe8ab960adcfe6eac692591d03abfdf19 | /man/hist3d.io.Rd | 6c7fbe1645db45c7f26240252c4cec55e1add7e4 | [] | no_license | MarceloRTonon/ioanalysis | aac83125c72967fe0de061a7f10ea6f4e024e90a | b19288cc9f2362943192a3b7d0139488020183b5 | refs/heads/master | 2022-12-13T22:57:09.366586 | 2020-09-18T16:40:16 | 2020-09-18T16:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 879 | rd | hist3d.io.Rd | \name{hist3d.io}
\alias{hist3d.io}
\title{3D Histogram of Input-Output Object}
\description{Produces a three-dimensional histogram using \code{hist3D} from the \pkg{plot3D} package.}
\usage{
hist3d.io(obj, alpha = 1, phi = 65, theta = 45, limits,
colors = ramp.col(c('yellow', 'violet', 'blue')))
}
\arguments{
\item{obj}{The nxm matrix to be plotted}
\item{alpha}{The transparency of bars where 1 is opaque and 0 is complete transparency. Default is 1}
\item{phi}{Colatitude rotation (shaking head left and right)}
\item{theta}{Colatitude rotation (nodding up and down)}
\item{limits}{The lower and upper bound for color limits}
\item{colors}{A \code{ramp.col()} for the 3D histogram}
}
\details{Uses \code{hist3D} from the package \pkg{plot3D} to generate the 3D histogram.
}
\examples{
data(toy.IO)
obj = toy.IO$Z[1:5, 1:5]
hist3d.io(obj, alpha = 0.7)
}
|
07a8f54ee1a7a95666a54f91a1a128a804a47ecb | 05c9b338ceb1e637ff56ed4076c294033c6a8ddf | /data/test_set_data_prep.R | 5ba4523157b216044bfee0aa4f46d0d0d70231e2 | [] | no_license | Preetis17/home_prices | 1984c7a761f3bdcc8e5b51a0074b7d8448476fa5 | 14349b393a233f57b0b53a665044712d54ea8b89 | refs/heads/master | 2021-01-01T18:51:17.013754 | 2017-07-24T03:11:58 | 2017-07-24T03:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,574 | r | test_set_data_prep.R |
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... directories
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
home_dir <- "~/_smu/_src/home_prices/"
setwd(home_dir)
data_dir <- "./data"
sas_dir <- "./sas_analysis"
setwd(home_dir)
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ... read in test data set
# ... -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
setwd(data_dir)
test_homes <- read.csv("test.csv", stringsAsFactors = FALSE)
setwd(home_dir)
names(test_homes) <- tolower(names(test_homes))
for (i in 2:(length(test_homes)))
{
if (class(test_homes[,i]) == "character")
{
test_homes[,i] <- factor (test_homes[,i])
}
}
for (i in 1 : (length(test_homes)))
{
if(class(test_homes[,i]) == "integer"
|| class(test_homes[,i]) == "numeric"
|| class(test_homes[,i]) == "matrix")
{
test_homes[,i][is.na (test_homes[,i])] <- mean(test_homes[,i], na.rm = TRUE)
}
}
for (i in 1 : (length(test_homes)))
{
if(class(test_homes[,i]) == "factor")
{
levels <- levels(test_homes[,i])
levels[length(levels) + 1] <- "None"
test_homes[,i] <- factor(test_homes[,i], levels = levels)
test_homes[,i][is.na (test_homes[,i])] <- "None"
}
}
test_homes$log_lotfrontage <- log(test_homes$lotfrontage)
test_homes$log_lotarea <- log(test_homes$lotarea)
test_homes$log_grlivarea <- log(test_homes$grlivarea)
write.csv(test_homes, file = "test_set_cleaned.csv", row.names = FALSE)
|
1affa2f4fd162ad3f40edd0113999c88c2902516 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/modelfree/examples/locglmfit_private.Rd.R | 9009a4e1f197c2309ecf522cfe00b3cda5d3693c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 428 | r | locglmfit_private.Rd.R | library(modelfree)
### Name: locglmfit_private
### Title: Local generalized linear fitting with usual (non-sparse)
###   matrices
### Aliases: locglmfit_private
### Keywords: nonparametric models regression nonlinear
### ** Examples
# Example data set shipped with the modelfree package.
data( "01_Miranda" )
# 100 evenly spaced evaluation points spanning [0.1, 1.3].
xnew = 1.2 * (0:99)/99+0.1
# Bandwidth for the local fit.
h <- 0.2959
# Local GLM fit with a logit link; example01$r/$m/$x are presumably the
# response counts, trial counts and stimulus levels -- see ?locglmfit.
fit <- locglmfit_private( xnew, example01$r, example01$m, example01$x, h, FALSE, "logit_link", 0, 0, 2, 1, "dnorm", 50, 1e-6)
|
8b343e4a56aaf1e72cf9984a58679d49ea5e8ba4 | 199abcb64f2e2600be40063a79e98f8ff4a09181 | /R/btools.r | 3e4291822fa6fc21b2b22e1ec97919509a963538 | [] | no_license | ykim0902/btools | e318aedc2bec402837b5029a09340f162fb9ca6c | 6380a231e70ed78878530b3058c1d75cd04acc9f | refs/heads/master | 2021-01-22T14:25:32.556699 | 2015-04-29T13:48:28 | 2015-04-29T13:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 181 | r | btools.r | #' btools.
#'
#' @description
#' Tools that I use regularly.
#'
#' @section Overview:
#' A small grab bag of personal helper functions (used alongside dplyr,
#' which is imported below).
#'
#' @section Examples:
#' \code{ht(popst)} -- see \code{ht()} for details.
#'
#' @name btools
#' @docType package
#' @import dplyr
NULL
|
b9b49229b0ec36a2151a4a208384431e13f5e1e7 | 6a12c6f132a636c04c80bd5305cfec3d10bd8f19 | /tidyr_practice.R | 4b79c1572cc507989ae5f5a1a9275ab44c4b0147 | [] | no_license | IrinaMax/R_descr_stat | 5fce9379e9aa6cb4647e146655a7b7d80508b5ae | 07b650b8b609de483d0f0dee1c79cea1d8f476c0 | refs/heads/master | 2020-04-02T05:14:51.715494 | 2018-12-14T05:55:18 | 2018-12-14T05:55:18 | 154,061,259 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,103 | r | tidyr_practice.R | # Let's make the data tidy. #https://campus.datacamp.com/courses/education-data-analysis-primer-r-dplyr-and-plotly/getting-fancy-with-plotly?ex=2
#Load the tidyr library
library(tidyr)
# Gather the data: one row per (student, indicator) pair, keeping the
# identifier columns SID/First/Last.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
GatheredStudentData <-StudentData %>% gather(Indicator,Score, -SID,-First,-Last)
GatheredStudentData%>%summary
# Remove rows with missing scores
GatheredStudentData <- GatheredStudentData %>% na.omit()
# Dump the tidied student data
glimpse(GatheredStudentData)
#------------------------------------------------------------------------
# Plotly provides online graphing, analytics, and statistics tools. Using their technology anyone, including yourself, can make beautiful, interactive web-based graphs.
# load the `plotly` package
library(plotly)
# This will create your very first plotly visualization (rendering the
# built-in volcano elevation matrix)
plot_ly(z = ~volcano)
#------------------------------------------------------------------------
# The diamonds dataset: Plotly diamonds are forever
# You'll use several datasets throughout the tutorial to showcase the power of plotly. In the next exercises you will make use of the diamond dataset. A dataset containing the prices and other attributes of 1000 diamonds.
str(diamonds)
# A first scatterplot has been made for you
plot_ly(diamonds, x = ~carat, y = ~price)
# Same scatterplot, coloring points by carat
plot_ly(diamonds, x = ~carat, y = ~price, color = ~carat)
# ...and additionally scaling point size by carat
plot_ly(diamonds, x = ~carat, y = ~price, color = ~carat, size = ~carat)
#------------------------------------------------------------------------
# The interactive bar chart
str(diamonds)
# Calculate the numbers of diamonds for each cut<->clarity combination
diamonds_bucket <- diamonds %>% count(clarity, cut)
diamonds_bucket
# One bar per cut, colored (stacked) by clarity counts
plot_ly(diamonds_bucket, x = ~cut, y = ~n, type = "bar", color = ~clarity)
# Box plots
# The Non Fancy Box Plot: one box of 50 standard-normal draws
plot_ly(y = ~rnorm(50), type = "box")
# The Fancy Box Plot: one box per cut level.
# Fix: the column mapping needs the formula prefix `~` -- a bare `cut`
# would pass base R's cut() function object instead of the diamonds column.
plot_ly(diamonds, y = ~price, color = ~cut, type = "box")
# The Super Fancy Box Plots: grouped boxes (same `~cut` fix applied)
plot_ly(diamonds, x = ~price, y = ~clarity, color = ~cut, type = "box") %>% layout(boxmode = "group")
plot_ly(diamonds, x = ~price, y = ~clarity, color = ~clarity, type = "box") %>% layout(boxmode = "group")
plot_ly(diamonds, x = ~cut, y = ~price, color = ~clarity, type = "box") %>% layout(boxmode = "group")
plot_ly(diamonds, x = ~cut, y = ~price, color = ~clarity, type = "box") %>% layout(boxmode = "group")
#----------------------------------------------------------------------------
# Load the `plotly` library
library(plotly)
# Your volcano data: built-in elevation matrix
str(volcano)
# The heatmap
plot_ly(z = ~volcano, type = "heatmap")
# The 3d surface map
plot_ly(z = ~volcano, type = "surface")
#---------------------------------------------------------------------------
# ggplot2, the interactive dimension
# Create the ggplot2 graph and keep a handle to it
p <- ggplot(mtcars, aes(x = wt, y = mpg, col = cyl)) + geom_point()
p
# Make your plot interactive.
# Fix: ggplotly() wraps an existing ggplot object; the original
# `qplot(..., color = ~cyl) + ggplotly()` is not valid ggplot2/plotly syntax.
ggplotly(p)
#---------------------------------------------------------------------------
# Most Trafficked US Airports
# Geo layout options shared with the plot: restrict to the USA, draw land.
g <- list(
  scope = 'usa',
  showland = TRUE,
  landcolor = toRGB("gray95")
)
g
plot_geo(airport_traffic, lat = ~lat, lon = ~long) %>%
  add_markers(
    text = ~paste(airport, city, state, paste("Arrivals:", cnt), sep = "<br />"),
    color = ~cnt, symbol = I("square"), size = I(8), hoverinfo = "text"
  ) %>%
  colorbar(title = "Incoming flights<br />February 2011") %>%
  layout(
    title = 'Most trafficked US airports<br />(Hover for airport)', geo = g
  )
# Commercial Airports WorldWide
# Fix: load the dataset before inspecting it -- str(airports) was
# previously called before data(airports).
data(airports)
str(airports)
# Mapping all commercial airports in the world
g <- list(
  scope = 'world',
  showland = TRUE,
  landcolor = toRGB("gray95")
)
plot_geo(airports, lat = ~Latitude, lon = ~Longitude) %>%
  add_markers(
    text = ~paste(AirportID, City, Country, sep = "<br />"),
    color = ~Country, symbol = I("circle"), size = I(3), hoverinfo = "text", colors = "Set1"
  ) %>%
  layout(
    title = 'Commercial Airports Worldwide', geo = g
  )
|
15faeede0bc4a83884a514902ab269d951c9c32b | a6f40d2dcecf67016d8791dd8f1cad3d3f7bd7a4 | /man/rmsd_calc.Rd | f2e005c40004877ec17f035f96bb6069be16abcf | [] | no_license | wongswk/compass | 304d357928424330010d9a82292a137fd7ccce9e | 0589697d56d76a1cc18e74082689f4d678ca0c56 | refs/heads/master | 2022-01-26T10:58:03.218499 | 2022-01-08T21:06:12 | 2022-01-08T21:06:12 | 155,491,031 | 2 | 1 | null | 2022-01-08T21:06:13 | 2018-10-31T03:10:09 | R | UTF-8 | R | false | true | 1,519 | rd | rmsd_calc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pdb_rmsd_final.R
\name{rmsd_calc}
\alias{rmsd_calc}
\title{Least Root Mean Squared Deviation for Molecule Conformations}
\usage{
rmsd_calc(pred, truth, n=NULL, m=NULL, atype='all', optimal=FALSE)
}
\arguments{
\item{pred}{matrix containing predicted coordinates of atoms in protein conformation.}
\item{truth}{matrix containing true coordinates of atoms in protein conformation.}
\item{n}{The Resno to start at for the rmsd calculation.}
\item{m}{The Resno to end at for the rmsd calculation.}
\item{atype}{Can be Null, CA, CaCNO, specifies the types of items to consider.
Null means consider all atoms.}
\item{optimal}{TRUE to apply optimal rotations as described in
\url{https://cnx.org/contents/HV-RsdwL@23/Molecular-Distance-Measures}
Otherwise calculates RMSD without rotations}
}
\value{
Returns the calculated LRMSD value and the rotation matrix used to achieve optimal rotation.
}
\description{
LRMSD or RMSD calculation between two coordinate sets (3 by n matrices).
}
\examples{
predicted <- bio3d::read.pdb("TR928prediction.pdb")
truthful <- bio3d::read.pdb("TR928truth.pdb")
rmsd_calc(predicted, truthful,n=6, m=8, 'all', T)
rmsd_calc(predicted, truthful,n=6, m=8, 'CA', T)
rmsd_calc(predicted, truthful,n=6, m=8, "CaCNO", T)
rmsd_calc(predicted, truthful,n=6, m=8, "CaCNO", F)
rmsd_calc(predicted, truthful, n=8, m=NULL, "all", T)
rmsd_calc(predicted, truthful, n=NULL, m=NULL, 'all', F)
rmsd_calc(predicted, truthful)
}
|
104e712e3c346476dab6331ad87b1c9f238ad646 | f256fd8f31ea589cae179c155f44b7e4f8de744a | /tests/testthat/test-roc.R | a021ac8ca11fd77159c7eba0c8889b19172dffdd | [] | no_license | cran/performance | 9578b76fd8981e5896d25d036cb8439e8b01c24a | c44285dff9936445c56ec8b83feb7ff9cae3fa81 | refs/heads/master | 2023-06-08T08:42:02.230184 | 2023-06-02T10:30:02 | 2023-06-02T10:30:02 | 183,289,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 758 | r | test-roc.R | skip_if_not_installed("ISLR")
# Smarket comes from ISLR (the file is skipped when ISLR is absent).
data(Smarket, package = "ISLR")
# Two logistic regressions: m1 on mtcars, m2 on daily stock-market direction.
m1 <- glm(am ~ vs + wt, family = binomial(), data = mtcars)
m2 <- glm(Direction ~ Lag1 + Volume, family = binomial(), data = Smarket)
# ROC curves produced by performance_roc() (the function under test).
roc1 <- performance_roc(m1)
roc2 <- performance_roc(m2)
# Reference AUCs obtained by integrating the ROC curves.
auc1 <- bayestestR::area_under_curve(roc1$Specificity, roc1$Sensitivity)
auc2 <- bayestestR::area_under_curve(roc2$Specificity, roc2$Sensitivity)
test_that("roc", {
  # First six sensitivity values of each curve against frozen references.
  expect_equal(head(roc1$Sensitivity), c(0, 0.07692, 0.15385, 0.23077, 0.30769, 0.38462), tolerance = 1e-2)
  expect_equal(head(roc2$Sensitivity), c(0, 0, 0, 0, 0.00154, 0.00154), tolerance = 1e-2)
})
test_that("auc", {
  # The mtcars model discriminates almost perfectly; Smarket barely beats chance.
  expect_equal(auc1, 0.964, tolerance = 1e-2)
  expect_equal(auc2, 0.535, tolerance = 1e-2)
})
|
8d3f41b5d45413cfa3d7c97a1a9344edce92fa81 | fececa38a6bc74110469588ad0a4885eb578c5b6 | /inst/staticexports/cat.R | 8f78c165f401646aaab6acc78a711c14ea3f7cad | [
"MIT"
] | permissive | cpsievert/staticimports | cfdd11749ee2086e267e722469bddb7578fdd0fb | 7c26b9e022d822a01ca8ff321ebaae258e50e277 | refs/heads/main | 2023-08-27T02:38:45.995659 | 2021-04-15T04:41:52 | 2021-04-15T04:44:45 | 424,739,205 | 0 | 0 | NOASSERTION | 2021-11-04T21:02:24 | 2021-11-04T21:02:22 | null | UTF-8 | R | false | false | 123 | r | cat.R | cat0 <- function(..., sep = "") {
cat(..., sep = sep)
}
# Print the arguments joined by `sep`, terminated with a newline.
# Fix: the declared `sep` parameter was accepted but silently ignored
# (cat() was hard-coded to sep = ""); it is now honored. The default
# behavior (sep = "") is unchanged.
catln <- function(..., sep = "") {
  cat(..., "\n", sep = sep)
}
|
8c19e5282ecbe14d9e5bc3129e7b0f51ae609241 | 08a6e8e2b86a015fe6f847102bf244fc1ce18d6a | /4-Demography/PCA/plot.PCA.R | e490d319f75ff089eb31b516489c0b4d39bc3402 | [] | no_license | dechavezv/2nd.paper | 40d5578aef8dfdaa07a5d9eb27c9f632f0750cd3 | ffa6506ec062bc1442e3d0ee7325f60087ac53e1 | refs/heads/master | 2020-12-19T21:31:43.317719 | 2020-08-07T05:46:59 | 2020-08-07T05:46:59 | 235,857,029 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,680 | r | plot.PCA.R | ##### Load libraries
library(gdsfmt)
library(SNPRelate)
library(ggplot2)
library(RColorBrewer)

##### Set working directory and output folder
todaysdate <- format(Sys.Date(), format = "%Y%m%d")
calldate <- 20200504
setwd("/u/scratch/d/dechavez/rails.project/SNPRelate")
plotoutdir <- paste("/u/scratch/d/dechavez/rails.project/SNPRelate", calldate, "/PCA/", sep = "")
dir.create(plotoutdir, recursive = TRUE)

# Assemble the plotting data frame from the PCA result.
# NOTE(review): `pca`, `pop1_code`, `sample.id` and `pc.percent` are not
# defined in this script -- they are assumed to exist in the session
# (e.g. from a prior snpgdsPCA() run); confirm before sourcing standalone.
tab1a <- data.frame(sample.id = pca$sample.id, pop1 = factor(pop1_code)[match(pca$sample.id, sample.id)],
                    EV1 = pca$eigenvect[,1],
                    EV2 = pca$eigenvect[,2],
                    EV3 = pca$eigenvect[,3],
                    EV4 = pca$eigenvect[,4],
                    stringsAsFactors = FALSE)

############### set up your colors -- keep this consistent across all plots ######
colorPal <- RColorBrewer::brewer.pal(n = 8, name = "Dark2")
colors <- list(St.Cruz = colorPal[1], Isabela = colorPal[3], Santiago = colorPal[5],
               Pinta = colorPal[8])  # your population colors

# Plot the first two PCs, coloring by primary population.
# Fix: x maps EV1 and y maps EV2, so the x label must be PC1 (with its
# variance percentage) and the y label PC2 -- the original had the two
# axis labels swapped.
p1a <- ggplot(tab1a, aes(x = EV1, y = EV2, color = pop1)) +
  geom_point(size = 3) +
  theme_bw() +
  xlab(paste("PC1", format(pc.percent[1], digits = 2), "%", sep = "")) +
  ylab(paste("PC2", format(pc.percent[2], digits = 2), "%", sep = "")) +
  ggtitle(paste("PCA based on ", as.character(length(pca$snp.id)), " LD Pruned SNPs", sep = "")) +
  theme(legend.title = element_blank(), axis.text = element_text(size = 14),
        axis.title = element_text(size = 14), legend.text = element_text(size = 14)) +
  scale_shape_manual(values = c(1, 16)) +
  scale_color_manual(values = unlist(colors))
# paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
#p1a
ggsave(paste(plotoutdir, "/PCA.rails.", todaysdate, ".pdf", sep = ""), p1a, device = "pdf", width = 8, height = 5)
|
af609a3140a943bc967ef45ef7166e1f8ec0eac4 | bfb0dd605dbe9a50903dc13ea49b04edb027a868 | /man/EWHP.Rd | 09fd2363fa83c1df33befb04e3a45ef255628dc5 | [] | no_license | GWmodel-Lab/GWmodel3 | 6492b18e70d7ab58076acd9ad7bdb58a7e40c2bf | 42e756431500fb84fc777594c80bca78a2de0f02 | refs/heads/master | 2023-08-09T15:33:12.321389 | 2023-07-25T11:18:30 | 2023-07-25T11:18:30 | 567,957,637 | 0 | 1 | null | 2023-09-14T15:35:53 | 2022-11-19T02:05:58 | R | UTF-8 | R | false | false | 1,838 | rd | EWHP.Rd | \name{EWHP}
\alias{EWHP}
\docType{data}
\title{House price data set (DataFrame) in England and Wales}
\description{
A house price data set for England and Wales from 2001 with 9 hedonic (explanatory) variables.
}
\usage{data(EWHP)}
\format{
A data frame with 519 observations on the following 12 variables.
\describe{
\item{Easting}{a numeric vector, X coordinate}
\item{Northing}{a numeric vector, Y coordinate}
\item{PurPrice}{a numeric vector, the purchase price of the property}
\item{BldIntWr}{a numeric vector, 1 if the property was built during the world war, 0 otherwise}
\item{BldPostW}{a numeric vector, 1 if the property was built after the world war, 0 otherwise}
\item{Bld60s}{a numeric vector, 1 if the property was built between 1960 and 1969, 0 otherwise}
\item{Bld70s}{a numeric vector, 1 if the property was built between 1970 and 1979, 0 otherwise}
\item{Bld80s}{a numeric vector, 1 if the property was built between 1980 and 1989, 0 otherwise}
\item{TypDetch}{a numeric vector, 1 if the property is detached (i.e. it is a stand-alone house), 0 otherwise}
\item{TypSemiD}{a numeric vector, 1 if the property is semi detached, 0 otherwise}
  \item{TypFlat}{a numeric vector, 1 if the property is a flat (or 'apartment' in the USA), 0 otherwise}
\item{FlrArea}{a numeric vector, floor area of the property in square metres}
}
}
\references{
Fotheringham, A.S., Brunsdon, C., and Charlton, M.E. (2002), Geographically Weighted Regression:
The Analysis of Spatially Varying Relationships, Chichester: Wiley.
}
\author{Binbin Lu \email{binbinlu@whu.edu.cn}}
\examples{
library(sf)
data(EWHP)
data(EWOutline)
ewhp_sf <- st_as_sf(EWHP, coords = c("Easting", "Northing"))
plot(EWOutline$geometry)
plot(ewhp_sf["PurPrice"], add = TRUE)
}
\keyword{data}
\concept{house price}
|
a0ec7bf5e69d4f07f19f053f6f02bbcc0afc0a5e | ec266aaef98cd9f59ccc84852a53dac3609a15d0 | /man/shp_list_files.Rd | 1dc17d77fe8b9698a8b189b9b2aaf42f5d1bc3a2 | [
"MIT"
] | permissive | dleutnant/shp | bf40c2dc83aca2a57fbe7e33057402a94c2fcabc | 76cc909bba82d0390f259f21fa779a0fae3d0a23 | refs/heads/master | 2023-03-16T03:46:26.686210 | 2021-03-06T02:33:33 | 2021-03-06T02:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,323 | rd | shp_list_files.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shp-aux.R
\name{shp_list_files}
\alias{shp_list_files}
\alias{shp_delete}
\alias{shp_copy}
\alias{shp_move}
\alias{shp_assert}
\alias{shp_extensions}
\title{List files associated with a shapefile}
\usage{
shp_list_files(file, ext = shp_extensions(), exists = TRUE)
shp_delete(file, ext = shp_extensions())
shp_copy(file, to, ext = shp_extensions(), overwrite = FALSE)
shp_move(file, to, ext = shp_extensions(), overwrite = FALSE)
shp_assert(file)
shp_extensions()
}
\arguments{
\item{file}{A .shp file}
\item{ext}{One or more extensions to include}
\item{exists}{Use \code{FALSE} to generate hypothetical shapefile
names.}
\item{to}{Destination: a .shp filename vector the same length as \code{file}, or
a single directory.}
\item{overwrite}{Use \code{TRUE} to overwrite existing files at the destination.}
}
\value{
A list of files of length \code{length(file) * length(ext)}.
}
\description{
List files associated with a shapefile
}
\examples{
shp_assert(shp_example_all())
shp_list_files(shp_example("anno.shp"))
dest <- tempfile()
dir.create(dest)
shp_copy(shp_example("anno.shp"), dest)
list.files(dest)
shp_move(file.path(dest, "anno.shp"), file.path(dest, "anno1.shp"))
list.files(dest)
shp_delete(file.path(dest, "anno1.shp"))
list.files(dest)
unlink(dest, recursive = TRUE)
}
|
2f22581b3752aa8906fa48fafe01856cc82642f9 | 59269fb30524f0abe2142df42ebf156bd5d7831d | /explanatory_note.R | 0decfe2fc87391e7cf9025337b84685b1ef39405 | [
"MIT"
] | permissive | philippasigl/FinancialFlowsEuroArea | 4d89001f0bf82ad5838a9777852bd1e72371d63d | adfb442d6463f7aca81c67eaa1cd85fc277cd13b | refs/heads/master | 2021-05-05T09:06:17.568933 | 2018-02-10T15:07:56 | 2018-02-10T15:07:56 | 119,179,864 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,013 | r | explanatory_note.R | #explanatory text accompanying the financial flows graph
# `expl` holds the methodology notes displayed next to the financial flows
# graph; individual sentences are joined with newlines for display.
expl<-paste("The data shown reflects quarterly net transactions. Valuation effects are not captured. Data is not seasonally adjusted.",
            "Blue lines go from financier to asset, green lines from asset to recipient of the funding. Arrows point in the direction of net flows for the given quarter. Line width is scaled to the size of the respective net flow.",
            "Sizes of the circles are not scaled to balance sheet size.",
            "Individual nodes can be highlighted by clicking on them.",
            "All data in billions of Euros.",
            "It is important to note that this network is not a closed system as inflows from non Euro Area counterparties and outflows to the former are not captured.",sep="\n")
# `defs` holds the glossary of node and flow definitions shown to the user.
defs<-paste("Banks: Monetary financial institutions",
            "Government: Central and subnational government bodies",
            "Non-banks: Non monetary financial institutions, including investment funds, financial vehicle corporations, insurance corporations and pension funds",
            "Households: Households and non profit institutions serving households",
            "Bonds: Debt securities of all maturities",
            "Equities: Listed and unlisted shares as well as other equity; excludes investment fund shares",
            "Loans: Fixed debt contracts between borrowers and lenders, including mortgages and loans for consumption",
            "IF Shares: Investment fund shares",
            "Flows from the Central Bank are defined as net purchases under the ECB's Asset Purchase Programmes, including the Corporate Sector Purchase Programme, the Public Sector Purchase Programme, the Asset Backed Securities Purchase Programme and the Third Covered Bond Purchase Programme", sep="\n")
#hyperlinks
# NOTE(review): a() is the hyperlink tag builder (htmltools/shiny), assumed
# to be attached by the app that sources this file -- confirm.
urlECBstats<-a("ECB Data Warehouse", href="http://sdw.ecb.europa.eu/")
urlESA2010<-a("ESA 2010 Accounts", href="http://ec.europa.eu/eurostat/cache/metadata/Annexes/nasa_10_f_esms_an1.pdf")
|
e1351169dae67bb55b301f955044265b28fd50df | 3aca2ac08437fc0652cd2b6cdd95020ab8236ecd | /R/calculate_bld.R | 2a1de2785d92dcd32f46e14c09297e82b7200124 | [] | no_license | sean-rohan-NOAA/TLUtilities | 064f555531e82d4828ec9df3e5d2597a9dd3eae5 | 3d3302f973ec9bd1571aba915ed93d9e9f8dbf46 | refs/heads/main | 2023-08-11T02:07:15.227477 | 2021-09-28T01:05:24 | 2021-09-28T01:05:24 | 149,823,824 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 779 | r | calculate_bld.R | #' Calculate bottom layer depth using a threshold method
#' Calculate bottom layer depth using a threshold method
#'
#' The reference density is the mean density over the bottom
#' \code{ref.depth} of the cast; the bottom layer depth is the deepest bin
#' whose density is more than \code{threshold} below that reference.
#'
#' @param rho Numeric vector of densities
#' @param z Numeric vector of depths. Depths are positive.
#' @param totdepth Maximum depth sampled by the cast
#' @param threshold Density threshold
#' @param ref.depth Thickness of bottom layer to use for calculating bottom layer density
#' @return Depth of the bottom layer boundary; 0 when no bin clears the
#'   threshold and \code{totdepth} is non-empty, \code{NA} when it is empty.
calculate_bld <- function(rho, z, totdepth, threshold = 0.1, ref.depth = 5) {
  # Sort the profile by depth so indexing runs surface -> bottom.
  depth.order <- order(z)
  z.sorted <- z[depth.order]
  rho.sorted <- rho[depth.order]
  deepest <- max(z.sorted)
  # Reference density: mean over the bottom `ref.depth` of the cast.
  in.ref.layer <- z.sorted >= (deepest - ref.depth)
  ref.rho <- mean(rho.sorted[in.ref.layer])
  # Candidate bins: lighter than the reference by more than `threshold`,
  # and lying above the reference layer itself.
  candidates <- which(rho.sorted < (ref.rho - threshold) & !in.ref.layer)
  if (length(candidates) > 0) {
    return(z.sorted[max(candidates)])
  }
  # No bin cleared the threshold: report 0 when a total depth was supplied,
  # NA otherwise (matches the original fall-through behaviour).
  if (length(totdepth) > 0) {
    return(0)
  }
  NA
}
d134eddd6b79334b9ccf1e37eff32307abd758ae | 3bdf47d64b71f5629c0eee89fb1f5fb1c78a1542 | /Celgene/curation/archive/2017-04-05_table_flow.R | a8fe0c2559c8e4f9c8a7580728e4fc94408b27d7 | [] | no_license | celgene-research/mgp_ngs | c40b97d95565e24a035920070956c10074fadb21 | 7e664256f611a380fd82ab17d8f078f7f0d80b32 | refs/heads/master | 2021-03-16T10:06:01.344782 | 2019-04-04T19:42:13 | 2019-04-04T19:42:13 | 63,191,892 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,691 | r | 2017-04-05_table_flow.R |
source("curation_scripts.R")
# NOTE(review): `s3`, `local`, and helpers such as local_collapse_dt() and
# PutS3Table() are expected to be defined by curation_scripts.R — confirm.
#
# read in unfiltered per.file tables (clinical, translocation, snv, cnv, rna, bi)
# filter to include only nd.tumor files
# cbind to get per.file.all.nd.tumor
#
# collapse each individual table to patient
# export each molecular table as per.patient.snv.nd.tumor ...
# cbind along with per.patient.clinical to get per.patient.unified.all.nd.tumor
#
# read in unfiltered per.file tables
###########################################
per.file.clinical <- toolboxR::GetS3Table(file.path(s3, "ClinicalData/ProcessedData/Integrated/per.file.clinical.txt")) %>%
  select(-starts_with("CYTO")) %>%
  as.data.table() %>%
  setkey("File_Name")
# fetch the individual molecular tables and import as a list of DT so we can parallelize processing
s3joint <- "s3://celgene.rnd.combio.mmgp.external/ClinicalData/ProcessedData/JointData"
# pull every "curated*" file from S3 into the local staging dir (shells out
# to the AWS CLI; "archive*" is explicitly excluded)
system(paste('aws s3 cp', s3joint, local, '--recursive --exclude "*" --include "curated*" --exclude "archive*"', sep = " "))
files <- list.files(local, pattern = "^curated", full.names = T)
dts <- lapply(files, fread)
# list names: data type extracted from the file name, e.g. "curated_SNV_..." -> "snv"
names(dts) <- gsub("curated_(.*?)_.*", "\\1", tolower(basename(files)))
# # Check that all tables have a File_Name column
# if( !all(sapply(dts, function(x){"File_Name" %in% names(x)})) ){
#   stop("At least one curated table is missing the File_Name column")}
lapply(dts, setkey, File_Name)
# add the clinical table to the list
dts <- c(list(clinical = per.file.clinical), dts)
sapply(dts, dim)
# filter to include only nd.tumor files
###########################################
# lookup of Patient <-> File_Name for newly-diagnosed (ND) tumor samples only
nd.tumor.lookup <- per.file.clinical[Disease_Status == "ND" &
                                       Sample_Type_Flag == 1,
                                     .(Patient, File_Name)] %>%
  setkey(File_Name)
nd.tumor.dts <- lapply(dts, function(dt){
  # remove Patient column already present on clinical table
  if( "Patient" %in% names(dt) ){dt <- dt[,!"Patient", with = F]}
  # add patient identifiers for nd.tumor samples and filter those without patient
  merge(nd.tumor.lookup, dt, all.y = T)[!is.na(Patient)]
})
lapply(nd.tumor.dts, dim)
# now that they all have PAtient column we can index by both
lapply(nd.tumor.dts, function(dt){
  setkeyv(dt, c("File_Name", "Patient")) })
# cbind individual filtered tables to get per.file.all.nd.tumor
###########################################
per.file.all.nd.tumor <- nd.tumor.lookup %>% setkeyv(c("File_Name", "Patient"))
for( i in nd.tumor.dts ){
  per.file.all.nd.tumor <- merge(per.file.all.nd.tumor, i, all.x=TRUE, fill = T)
}
dim(per.file.all.nd.tumor)
# collapse each individual table to per.patient
###########################################
collapsed.dts <- lapply(nd.tumor.dts, function(dt){
  local_collapse_dt(dt, column.names = "Patient") })
sapply(collapsed.dts, dim)
# export each molecular table as per.patient.snv.nd.tumor ...
###########################################
lapply(names(collapsed.dts), function(type){
  n <- paste("per.patient", type, "nd.tumor.txt", sep = ".")
  PutS3Table(collapsed.dts[[type]],
             file.path(s3, "ClinicalData/ProcessedData/Integrated", n))
})
# cbind along with per.patient.clinical to get per.patient.unified.all.nd.tumor
###########################################
per.patient.unified.all.nd.tumor <- toolboxR::GetS3Table(file.path(s3,
  "ClinicalData/ProcessedData/Integrated/per.patient.clinical.nd.tumor.txt")) %>%
  select(-starts_with("INV")) %>%
  as.data.table() %>%
  setkey("Patient")
# NOTE: setkey()/setnames() operate by reference (data.table), so this loop
# mutates collapsed.dts as a side effect — re-running it without rebuilding
# collapsed.dts would suffix File_Name columns twice.
for( i in 1:length(collapsed.dts) ){
  setkey(collapsed.dts[[i]], "Patient")
  # rename File_Name in each subtable to retain source info
  # e.g. File_Name --> File_Name_translocations
  setnames(collapsed.dts[[i]], "File_Name", paste("File_Name", names(collapsed.dts)[i], sep = "_"))
  per.patient.unified.all.nd.tumor <- merge(per.patient.unified.all.nd.tumor, collapsed.dts[[i]], all.x=TRUE, fill = T)
}
dim(per.patient.unified.all.nd.tumor)
PutS3Table(per.patient.unified.all.nd.tumor,
           file.path(s3, "ClinicalData/ProcessedData/Integrated/per.patient.unified.all.nd.tumor.txt"))
# and also put a version with just the clinical data (derived from per.patient + collapsed per.file)
x <- per.patient.unified.all.nd.tumor
per.patient.unified.clinical.nd.tumor <- x[,!grep("^SNV|^CNV|^BI|^RNA|^CYTO|^File_Name_", names(x), value = T), with=F]
# NOTE(review): per.patient.unified.clinical.nd.tumor is built above but its
# export below is commented out — it is never written anywhere. Intentional?
PutS3Table(per.file.all.nd.tumor,
           file.path(s3, "ClinicalData/ProcessedData/Integrated/per.file.all.nd.tumor.txt"))
# PutS3Table(per.patient.unified.clinical.nd.tumor,
#            file.path(s3, "ClinicalData/ProcessedData/Integrated/per.patient.unified.clinical.nd.tumor.txt"))
RPushbullet::pbPost("note", "done")
|
c9687884bc87d20c250d7d1a58837dc64379197a | fd570307c637f9101ab25a223356ec32dacbff0a | /src-local/specpr/src.specpr/fcn48-51/gcrd.r | c5b4beae61a4d4029631634a8810a4db39d1d7e7 | [] | no_license | ns-bak/tetracorder-tutorial | 3ab4dd14950eff0d63429291c648820fb14bb4cb | fd07c008100f6021c293ce3c1f69584cc35de98a | refs/heads/master | 2022-07-30T06:04:07.138507 | 2021-01-03T22:19:09 | 2021-01-03T22:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,690 | r | gcrd.r | subroutine gcrd (nchan,ydata,xdata,ixx,xpos,imatch,iopcon,irtn)
	implicit integer*4(i-n)
#ccc  version date: 16-Dec-88
#ccc  author(s): roger clark; modified by wendy calvin for function 50
#ccc  language: ratfor
#ccc
#ccc  short description: 
#ccc  	this subroutine reads the graphics cursor position
#ccc  	and prints the x, y, error data values
#ccc  algorithm description: none
#ccc  system requirements: none
#ccc  subroutines called: 
#ccc  argument list description:
#ccc  argumrnts: none
#ccc  parameter description:
#ccc  common description:
#ccc  message files referenced:
#ccc  internal variables:
#ccc  file description:
#ccc  user command lines:
#ccc  update information:
#ccc  notes:
#ccc
	include "../common/lbl3"
	include "../common/hptrm"
	include "../common/lundefs"
	include "../common/alphabet"
	include "../common/dscrch"
	common /plot1/xmax, xmin, lbnd, diff
	real*4 xdata(4864), ydata(4864), lbnd
	equivalence (xdata(1),datsc5(1))
	equivalence (ydata(1),datsc6(1))
	character*80 atemp, iopcon
	character*1 cntrlq
	character*1 escape
#	graphics viewport limits in screen pixels (x: 56..500, y: 46..276);
#	cursor positions outside this box are rejected below
	axl = 56.
	axh = 500.
	ayl = 46.
	ayh = 276.
#
#	determine constants to scale data
#
#	guard against zero ranges before dividing (tiny epsilon substitutes)
	if (diff == 0.) diff = 0.1e-36
	dy = (ayh-ayl)/diff
	an = xmax - xmin
	if (an <= 0) an = 0.1e-36
	dx = (axh-axl)/an
#
#	control characters used for terminal I/O (ctrl-Q, ESC)
	cntrlq = char(17)
	escape = char(27)
#	save the caller's option string; it is restored at label 2000
	atemp = iopcon
#	label 1: read the next command line; EOF or read error exits via 2000
1	read (ttyin,2,end=2000,err=2000) iopcon
2	format (a)
	i = 1
	call wjfren (i,x,il)
#	'e' or 'x' commands: hand the code back to the caller and return
	if (il == ihe || il == ihx) {
		irtn = il
		return
	}
#	send escape sequence to get cursor position
	call cursrd(ixx,iyy)
	x = float (ixx)
	y = float(iyy)
#	reject cursor positions outside the plot viewport and prompt again
	if (y > ayh || y < ayl || x > axh || x < axl) {
		call serase(0,310,511,348)
		call movabs (0,338)
		call sb(0)
		write (ttyout, 30) ixx,iyy
30		format (20x,'OUT OF BOUNDS ('i4,','i4')')
		call movabs (ixx, iyy)
		go to 1
	}
#
#	calculate x and y postion in data space
#
	xpos = (x-axl)/dx + xmin
	ypos = (y-ayl)/dy + lbnd
#	linear scan for the channel whose x value is nearest the cursor
	imatch =0
	xmatch = 0.9e37
	do jj = 1, nchan {	# find closest channel
		tstmch = abs(xpos - xdata(jj))
		if (tstmch < xmatch) {
			imatch = jj
			xmatch = tstmch
		}
	}
	if (imatch == 0) {
		call serase(0,310,511,348)
		call movabs (0,338)
		call sb(0)
		write (ttyout, 40) ixx,iyy
40		format (12x,'CANNOT FIND A CLOSE CHANNEL ('i4,','i4')')
		call movabs (ixx, iyy)
		go to 1
	}
#	report pixel coords, data coords, and the matched channel's values
	call serase(0,310,511,348)
	call movabs (0,338)
	call sb(0)
	write (ttyout, 50) ixx,iyy,xpos,ypos,imatch,
		xdata(imatch),ydata(imatch)
50	format ('pixel coordinates: x=',i4,' y=',i4,/,
	'data coordinates: x=',1pe13.6,' y=',1pe13.6,/,
	'closest channel=',i6,' x=',1pe13.6,' y=',1pe13.6)
	call movabs (ixx,iyy)
	call sb(0)
#	label 2000: restore the caller's option string before returning
2000	iopcon = atemp
	return
	end
|
a2e6ce165aa69d5bcf3322f695c61f712fbc51c1 | 3e6363c14b1fe300f616b7941262f05a5fb28b57 | /1.4.MBH_design_BRUVs_not-clustered.NPZ6.R | bce4e1d291b197ba2e79e7db2ff53089bacb107c | [] | no_license | anitas-giraldo/MBH_AbroNPZs | f5f2e71182a95dfa75129e53c2f0f0be7a2784e3 | 4a5f31bfcd6bf19962aed57cc2146847b1f1cfb0 | refs/heads/master | 2023-06-08T08:38:47.104088 | 2021-06-10T00:20:37 | 2021-06-10T00:20:37 | 349,294,095 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,232 | r | 1.4.MBH_design_BRUVs_not-clustered.NPZ6.R | ### ### ### MBH design clustered BRUVs ### ### ###
# libraries ----
#install.packages("MBHdesign")
library( MBHdesign)
library( parallel)
library( class)
library( fields)
#install.packages("pdist")
library( pdist)
library( raster)
library( rgdal)
library( sp)
library( rgeos)
# clear environment ----
# NOTE(review): rm(list = ls()) wipes the whole workspace — fine when run
# interactively as a standalone script, destructive if sourced elsewhere.
rm(list = ls())
# Directories ----
w.dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
#w.dir <- "~/MBH_AbroNPZs"
p.dir <- paste(w.dir, "plots", sep = '/')
d.dir <- paste(w.dir, "data", sep='/')
s.dir <- paste(w.dir, "shapefiles", sep='/')
r.dir <- paste(w.dir, "rasters", sep='/')
o.dir <- paste(w.dir, "outputs", sep='/')
#### NPZ 6 ####
# Read in the inclusion probs ----
inclProbs <- raster(paste(r.dir, "inclProbs_zone6.32_deployments.v2.tif", sep='/'))
plot(inclProbs)
# normalise so the inclusion probabilities sum to 1 over all cells
inclProbs <- setValues( inclProbs, values( inclProbs) / sum( values( inclProbs), na.rm=TRUE))
plot(inclProbs)
# check sun of incl probs --
cellStats(inclProbs, 'sum')
# square-root version (kept for the clustered variant; unused below)
rootInclProbs <- inclProbs
rootInclProbs <- setValues( rootInclProbs, sqrt( values( rootInclProbs)))
cellStats(rootInclProbs, 'sum')
plot(rootInclProbs)
# Read data ----
zones <- readRDS(paste(d.dir, "Zones_Abro_NPZs.RDS", sep='/')) # this one in different folder
#Deans <- readRDS( "DeansPoints_forinNOutMP-d3.RDS")
rast <- readRDS(paste(d.dir, "abro_rasters_forInNOutNPZ.RDS", sep='/'))
#if( class( BRUVS) != "SpatialPointsDataFrame")
#Deans <- SpatialPointsDataFrame( coords=Deans[,c("Longitude","Latitude")], data=Deans, proj4string = CRS( proj4string( zones[[1]])))
#proj4string(Deans) <- proj4string(swrast$bathy)
straw.nums <- readRDS(paste(d.dir, "StrawmanNumbers_zones06.32_deployments.RDS", sep ='/'))
############################
#### Spatial sample of new sites ----
#### from altered incl. probs.
############################
### Here use quasiSamp to get random points ####
## these points will be the center of buffer for transects ###
#### Set the seed for reproducability
#set.seed( 777)
#### HAVE NOT BEEN ABLE TO MAKE THIS FUNCTION WORK ----
# one quasi-random spatial sample per zone: inside the NPZ ("npz6") and
# the comparison area outside it ("out6")
newSites <- list(npz6 = NULL, out6 = NULL)
for( zz in c("npz6", "out6")){
  print( zz)
  #the number of samples to take (specified minus the legacy number)
  #numby <- floor( (straw.nums[zz])/4) # for clustered cluster - without legacy sites
  numby <- floor( (straw.nums[zz])) # for not clustered sites
  #numby <- floor( (straw.nums[zz] - numRef[zz])/2)
  #numby <- floor( (straw.nums[zz] - numRef[zz])) # with legacy sites
  #set up spatial domain
  myZone <- zones[[zz]]
  #if( zz == "AMP"){
  #  myZone = zones$AMP - zones$IUCN2
  #set.seed( 747)
  #}
  #tmpIP <- mask( rootInclProbs, myZone)
  tmpIP <- mask( inclProbs, myZone)
  tmpIP <- crop( tmpIP, myZone)
  #take the sample of clusters based on root incl probs
  newSites[[zz]] <- quasiSamp( n=numby, potential.sites=coordinates( tmpIP), inclusion.probs=values(tmpIP), nSampsToConsider=5000)
  #plotting (maybe remove at a later date?)
  tmpIPFull <- mask( inclProbs, myZone)
  tmpIPFull <- crop( tmpIPFull, myZone)
  plot( tmpIPFull)
  #plot( legacySites, add=TRUE, pch=1, col='red')
  points( newSites[[zz]][,c("x","y")], pch=20, col='black')
}
# stack the per-zone samples; row names carry the zone prefix used below
newSites <- do.call( "rbind", newSites)
head(newSites)
# Give id to sites and zones --
site.names <-row.names(newSites)
newSites$site <- as.factor(site.names)
#zone.names <- gsub('.{3}$', '', site.names) # remove last 3 characters
zone.names <- substr(site.names, 1, 3) # extract first three characters
newSites$zone <- as.factor(zone.names)
newSites$zone
newSites <- SpatialPointsDataFrame( coords=newSites[,c("x","y")], data=newSites, proj4string=CRS(proj4string(inclProbs)))
#some of the spatial balance is not great... Presumably because the balance of the reference sites is also not great...
# Plot --
plot(inclProbs)
plot(rast$slope6, main = "Slope")
plot(zones$Both6, add=T)
plot(newSites, col=newSites$zone, pch = 20, add=T) # 41
newSites$zone
### Make sure the clusters centres are ~ 1 km apart ----
## Get CRS in utm ----
crs1 <- CRS("+init=epsg:32750") # WGS 84 / UTM zone 50S
## transform the points into UTM --
p1u <- spTransform(newSites, crs1)
## calculate if 2 points fall within 1500 m of eachother ----
# https://gis.stackexchange.com/questions/102796/remove-points-within-x-distance
dist1 <- gDistance(p1u, byid =T)
dist1
max(dist1)
min(dist1[dist1 > 0]) # minimum distance other than 0
## p1 ----
# NOTE(review): the code flags pairs closer than 400 m, although the
# comments above mention ~1 km / 1500 m — confirm which spacing is intended.
p1_matrix <- gWithinDistance(p1u, dist = 400, byid = TRUE)
diag(p1_matrix) <- NA
p1_matrix
# extract the upper triangular part of matrix and use the column sums as a criterion to remove the points:
p1_matrix[lower.tri(p1_matrix, diag=TRUE)] <- NA
p1_matrix
colSums(p1_matrix, na.rm=TRUE) == 0
# keep only sites not within 400 m of an earlier (lower-index) site
v1 <- colSums(p1_matrix, na.rm=TRUE) == 0
p1u[v1, ] # 98 features left
remaining.sites <- p1u[v1, ]
remaining.sites <- spTransform(remaining.sites, proj4string(inclProbs))
# plot --
plot(inclProbs)
plot(rast$slope6, main = "Slope")
plot(zones$Both6, add=T)
plot(remaining.sites, col=remaining.sites$zone, pch = 20, add=T) # 41
remaining.sites$zone
## Save --
site <- "Abrolhos"
NPZ <- "npz6"
design <- "32Bruvs"
version <- "v2"
writeOGR(remaining.sites, o.dir, paste(site, NPZ, design, version, sep='-'), driver = "ESRI Shapefile")
|
b7f1384f22bb14f84962bfab3467c849491ea35a | b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e | /build/shogun_lib/examples/documented/r_static/kernel_linearstring.R | 55763ee39f22a5e4133e7903df2522ec8cfd8ed3 | [] | no_license | behollis/muViewBranch | 384f8f97f67723b2a4019294854969d6fc1f53e8 | 1d80914f57e47b3ad565c4696861f7b3213675e0 | refs/heads/master | 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 732 | r | kernel_linearstring.R | # This is an example for the initialization of a linear kernel on string data. The
# strings are all of the same length and consist of the characters 'ACGT' corresponding
# to the DNA-alphabet. Each column of the matrices of type char corresponds to
# one training/test example.
# sg() is presumably the shogun toolbox's static command interface — each
# call issues one command string to the toolbox; confirm against the sg docs.
library("sg")
size_cache <- 10
fm_train_dna <- as.matrix(read.table('../data/fm_train_dna.dat'))
fm_test_dna <- as.matrix(read.table('../data/fm_test_dna.dat'))
# Linear String
print('LinearString')
# configure a linear kernel over char/DNA string features, then compute the
# kernel matrix on the training data and on the test data
dump <- sg('set_kernel', 'LINEAR', 'CHAR', size_cache)
dump <- sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
km <- sg('get_kernel_matrix', 'TRAIN')
dump <- sg('set_features', 'TEST', fm_test_dna, 'DNA')
km <- sg('get_kernel_matrix', 'TEST')
|
659d95f38fb3504e85edbcf6920e44676f6e5c06 | b48d77a827e6afa5f1f09e7fbf83a851ee11103c | /code/fix_theta_residuals.R | bbe95cefba2231126781d8bfc7ddabdce6833222 | [] | no_license | blakeharlow/ground_truthing | 6c4334eab60d41f29118f86fdc9dd08d5393b229 | d5770c377e2df03d3df3d03b4b931039ad57a9df | refs/heads/main | 2023-07-19T08:25:25.148950 | 2021-08-02T15:06:05 | 2021-08-02T15:06:05 | 382,028,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,389 | r | fix_theta_residuals.R | # This function takes a theta_residual (in degrees) and returns
# Wrap an angle (in degrees) to the equivalent angle closest to zero,
# i.e. into the interval (-180, 180]. Case analysis:
#   *   0 <= theta <= 180  -> unchanged
#   * 180 <  theta <  360  -> theta - 360
#   * theta < 0 or >= 360  -> reduce modulo 360 first, then apply the above
# R's %% already yields a non-negative remainder for negative operands, so
# one modulo plus a single comparison covers every case without recursion.
fix_theta_value <- function(theta_value){
  wrapped <- theta_value %% 360
  if (wrapped > 180) {
    wrapped - 360
  } else {
    wrapped
  }
}
# Vectorized companion to fix_theta_value(): takes a residuals vector for
# v, h, and theta, organized as
#   residuals = <v1, h1, theta1, ..., v_m, h_m, theta_m>
# and applies fix_theta_value() to the theta entries only (elements
# 3, 6, 9, ... of the vector), leaving the v and h entries untouched.
# Returns the edited residuals vector.
fix_theta_residuals <- function(residuals){
  # c(FALSE, FALSE, TRUE) recycles along the vector, selecting every
  # third element (the theta_i entries).
  theta_idx <- c(FALSE, FALSE, TRUE)
  # vapply() instead of `%>% sapply()`: guarantees a numeric result even
  # for zero-length input (sapply would return list()), and removes the
  # implicit magrittr dependency.
  residuals[theta_idx] <- vapply(residuals[theta_idx], fix_theta_value, numeric(1))
  return(residuals)
}
|
8b7dbdad2d8b0554a0853fef50c9fbbfe7663a8a | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Linear_Algebra_And_Its_Applications_by_David_C._Lay/CH1/EX1.32/Ex1.32.R | 625da44830fc61fa1d5139cfa5ab0b6ade947f59 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 607 | r | Ex1.32.R | #Chapter 1 - Linear Equations In Linear Algebra
# Supplementary Exercises
# Page No. 58 / 1-46
# Prob 7a
# Row-reduce [v1 v2 v3] to decide whether v3 lies in Span{v1, v2}.
# clear console
cat("\014")
# clear variables
rm(list = ls(all = TRUE))
# the three vectors, then shaped into 3x1 column matrices
v1v <- c(2, -5, 7)
v2v <- c(-4, 1, -5)
v3v <- c(-2, 1, -3)
v1 <- matrix(v1v, nrow = 3, ncol = 1, byrow = TRUE)
v2 <- matrix(v2v, nrow = 3, ncol = 1, byrow = TRUE)
v3 <- matrix(v3v, nrow = 3, ncol = 1, byrow = TRUE)
print('v1')
print(v1)
print('v2')
print(v2)
print('v3')
print(v3)
# augmented matrix [v1 v2 v3]
R <- cbind(v1, v2, v3)
print(R)
# row operation: scale row 1 by 1/2
R[1, ] <- R[1, ] / 2
print('~')
print(R)
# next row-reduction step, entered by hand (row-major order)
Rnv <- c(1, -2, -1, 0, -9, -4, 0, 9, 4)
Rn <- matrix(Rnv, nrow = 3, ncol = 3, byrow = TRUE)
print('~')
print(Rn)
# final step: row 3 eliminated, leaving the echelon form
Rnv <- c(1, -2, -1, 0, -9, -4, 0, 0, 0)
Rn <- matrix(Rnv, nrow = 3, ncol = 3, byrow = TRUE)
print('~')
print(Rn)
2e45777be62855f1e8bb08ddd596c09ce894510f | 40458078168c2577b22dee7b1d04abd5c5d4d87c | /man-roxygen/roxlate-object-estimator.R | 8f0af275285010e4958e59b89a472615d4a4e0ef | [] | no_license | muratmaga/tfestimators | edfd89dd942dd3290d07ff0950aa762177397dca | d035c96576c69eb907006afbb37466d5547e6cac | refs/heads/master | 2022-12-24T07:40:00.947716 | 2020-09-29T18:56:32 | 2020-09-29T18:56:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41 | r | roxlate-object-estimator.R | #' @param object A TensorFlow estimator.
|
3cce656fa84a4b8a9682a9866bc3c3b51e8a9ee8 | 32ed69ea8721f9913b704a1223e13a6fe398374b | /man/format_data.Rd | d2fc8559afc3904d3586cf3c207a37d69a393d5c | [] | no_license | mlysy/losmix | 1957a388845bafba673c41c609ef1aee32aaefc9 | 16de5dbeb18d89a45f658e93e3c6b276e9ac1f13 | refs/heads/master | 2021-06-22T23:05:57.787587 | 2021-01-13T14:38:26 | 2021-01-13T17:35:15 | 180,701,572 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,249 | rd | format_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{format_data}
\alias{format_data}
\title{Format data for \pkg{TMB} calculations.}
\usage{
format_data(y, X, id)
}
\arguments{
\item{y}{Response vector for all subjects. A vector of length \code{n}.}
\item{X}{Covariate matrix from all subjects. A matrix of size \code{n x p}.}
\item{id}{Subject identifiers. A vector of length \code{n} consisting of \code{nsub <= n} distinct elements. If missing default to \code{rep(1, n)}, i.e., only one subject.}
}
\value{
A list with the following elements:
\describe{
\item{\code{y}}{The response vector \code{y}, reordered such that all observations for a given subject are consecutive, and converted to an \code{n x 1} column matrix.}
\item{\code{Xtr}}{The transpose of the covariate matrix, reordered the same way as \code{y}, having size \code{p x n}.}
\item{\code{iStart}}{An integer vector of length \code{nsub}, specifying the starting index of the observations for each subject, using C++ indexing (i.e., starting at zero).}
\item{\code{nObs}}{An integer vector of length \code{nsub}, specifying the number of observations per subject.}
}
}
\description{
Format data for \pkg{TMB} calculations.
}
|
b80a783b1432159478566809ef320dedf0d6a236 | 88ddabe0777c9e789c54a2ff3ebeb9446692805e | /nm.R | 88bde0ee2694850f1cb5039be29213c6bf23106f | [] | no_license | gcgibson/navajo_nation_analysis | b9f9ea7f330f3c409a3ecd65ad6fe4fcf8d92b55 | 9d68c82a15178495e33f2354d80c8113b54b43c5 | refs/heads/master | 2022-11-05T13:33:38.715619 | 2020-06-17T23:15:23 | 2020-06-17T23:15:23 | 273,043,089 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,686 | r | nm.R |
# Extract the bare county name from a census-style label, e.g.
# "Bernalillo County, New Mexico" -> "Bernalillo".
# Rewritten with base R (strsplit/sub) so the helper does not depend on
# stringr being attached (library(stringr) only runs later in this file);
# behaviour matches str_split()/str_remove(): keep the text before the
# first comma, then drop the first " County" occurrence.
#
# @param x A single county string of the form "<name> County, <state>".
# @return The bare county name.
format_county_str <- function(x){
  county_part <- strsplit(x, ",", fixed = TRUE)[[1]][1]
  return(sub(" County", "", county_part, fixed = TRUE))
}
library(stringr)
library(dplyr)
##
confirmed <- read.csv("/Users/gcgibson/Downloads/covid_us_county.csv")
confirmed_nm <- confirmed[confirmed$state == "New Mexico",]
num_counties <- length(unique(confirmed_nm$county))
num_times <- nrow(confirmed_nm)/num_counties
# per-county growth rate: ratio of consecutive cumulative case counts,
# padded with 0 for the first time point of each county
confirmed_nm <- confirmed_nm %>% group_by(county) %>% mutate(gr=c(0,cases[2:length(cases)]/cases[1:(length(cases)-1)]))
# 0/0 gives NaN and x/0 gives Inf — zero both out
confirmed_nm[is.nan(confirmed_nm$gr) ,]$gr <- 0
confirmed_nm[is.infinite(confirmed_nm$gr) ,]$gr <- 0
##manually add in population density
## source: https://www.nmhealth.org/publication/view/report/4442/
# NOTE(review): these hand-entered per-county vectors are assumed to follow
# the county ordering produced by the CSV (incl. the trailing 0/NA padding
# entries) — verify before reuse.
pop_density <- c(575.3,.5,10.8,
                 6.0,3.5,35.6,.8,55.8,13.1,7.4,
                 1.5,.3,1.4,15.2,4.2,164.5,
                 8.4,13.4,2.4,9.9,3.1,6.8,
                 8.2,36.4,23,6.1,76.6,2.8,
                 2.6,14.9,4.8,1.2,71.6,0,0)
# one value per county, repeated across that county's time series
confirmed_nm$pop_density <- rep(pop_density,each=num_times)
## food access
## source: https://www.nmhealth.org/publication/view/report/4442/
food_access <- c(14.9,18.7,14.8,19.8,16.3,18.0,16.0,15.5,
                 13.1,15.3,9.4,16.7,14.9,13.3,15.4,12.7,
                 20.7,26.9,11.4,17.9,16.8,13.2,18.8,
                 14.2,19.6,14.6,12.4,19.0,16.4,15.0,
                 18.5,14.1,13.4,0,0)
confirmed_nm$food_access <- rep(food_access,each=num_times)
## access to healthcare
## source : https://www.nmhealth.org/publication/view/report/4442/
access_p <- c(70,63,70,70,74,63,78,70,70,74,NA,80,63,57,74,70,57,57,70,74,
              70,74,63,70,63,74,70,63,70,70,70,66,70,NA,NA)
confirmed_nm$access_p <- rep(access_p,each=num_times)
# time index within each county (assumes rows are grouped county by county
# in time order — TODO confirm against the CSV layout)
confirmed_nm$t <- rep(1:num_times,num_counties)
##### general covariate data
# NOTE(review): duplicate redefinition of format_county_str (also defined at
# the top of this file) — harmless because identical, but consider removing.
# Extract the bare county name from a census-style label, e.g.
# "Bernalillo County, New Mexico" -> "Bernalillo". Base-R version
# (strsplit/sub) with the same behaviour as str_split()/str_remove().
format_county_str <- function(x){
  county_part <- strsplit(x, ",", fixed = TRUE)[[1]][1]
  return(sub(" County", "", county_part, fixed = TRUE))
}
# NOTE(review): absolute local paths (/Users/gcgibson/Downloads/...) make
# this script non-portable.
covariate_data <- readxl::read_xls("/Users/gcgibson/Downloads/ACS_17_5YR_DP02_Selected_Social_Characteristics.xls",col_names = F)
# transpose so counties become rows; X3/X20 are positional column picks
# from the ACS sheet — presumably county name and "living with senior"
# counts; TODO confirm against the spreadsheet layout.
covariate_data_t <- data.frame(t(covariate_data))
covariate_data_t$county <- covariate_data_t$X3
covariate_data_t$living_with_senior <- covariate_data_t$X20
covariate_data_t_subset <- covariate_data_t[colnames(covariate_data_t) %in% c("county","living_with_senior")]
covariate_data_t_subset <- covariate_data_t_subset[complete.cases(covariate_data_t_subset),]
#covariate_data_t_subset <- covariate_data_t_subset[3:nrow(covariate_data_t_subset),]
covariate_data_t_subset$county <- unlist(lapply(covariate_data_t_subset$county,format_county_str))
confirmed_nm <- confirmed_nm %>% left_join(covariate_data_t_subset,by="county")
# strip thousands separators before converting counts to numeric
confirmed_nm$living_with_senior <- as.numeric(gsub(",", "", confirmed_nm$living_with_senior))
### County population size
populations <- rep(c(677692,3539,65459,26978,12353,50199,2060,215338,
                     57437,28061,NA,459,4371,70126,19482,18356,24264,
                     72849,4563,65745,8373,39307,19117,140769,
                     127455,28034,148917,11135,17000,32888,15595,
                     4175,75956,NA,NA),each=num_times)
confirmed_nm$pop <- populations
confirmed_nm <- confirmed_nm %>% group_by(county) %>% mutate(living_with_senior_normalized = living_with_senior/pop)
#### AGE DISTRIBUTION
age_dist_csv<- read.csv("/Users/gcgibson/Downloads/cc-est2018-alldata-35.csv")
# same " County" stripping as format_county_str, inline (no comma split
# needed for CTYNAME values)
age_dist_csv$county <- unlist(lapply(age_dist_csv$CTYNAME ,function(x){
  return (str_remove(x," County"))
}))
age_dist_csv <- age_dist_csv[age_dist_csv$YEAR == 1,]
age_dist_sum <- age_dist_csv %>% group_by(county) %>% summarize(mean_age_dist = mean(TOT_POP*AGEGRP/sum(TOT_POP)))
confirmed_nm <- confirmed_nm %>% left_join(age_dist_sum,by="county")
###### EDA PLOTS
library(ggplot2)
confirmed_nm <- confirmed_nm %>% group_by(county) %>% mutate(normalized_gr=log(cases)/pop)
log_case_plot <- ggplot(confirmed_nm,aes(x=t,y=normalized_gr,col=county)) + geom_line() + facet_wrap(~county) + theme_bw() + ylab("Population Normalized Growth Rate")
ggsave("log_case_plot.png",log_case_plot,device = "png",height = 4,width = 6)
living_with_senior_plot <- ggplot(confirmed_nm[confirmed_nm$county %in% c("Taos","Santa Fe","Bernalillo","McKinley"),],aes(x=county,y=living_with_senior_normalized,size=4)) + geom_point() + theme_bw() + ylab("Living With Senior") + xlab("County") + theme(legend.position = "none")
ggsave("living_with_senior_plot.png",living_with_senior_plot,device = "png",height = 4,width = 6)
pop_density_plot <- ggplot(confirmed_nm[confirmed_nm$county %in% c("Taos","Santa Fe","Bernalillo","McKinley"),],aes(x=county,y=pop_density,size=4)) + geom_point() + theme_bw() + ylab("Population Density") + xlab("County") + theme(legend.position = "none")
ggsave("pop_density_plot.png",pop_density_plot,device = "png",height = 4,width = 6)
age_dist_plot <- ggplot(confirmed_nm[confirmed_nm$county %in% c("Taos","Santa Fe","Bernalillo","McKinley"),],aes(x=county,y=mean_age_dist,size=4)) + geom_point() + theme_bw() + ylab("Age Dist") + xlab("County") + theme(legend.position = "none")
ggsave("age_dist_plot.png",age_dist_plot,device = "png",height = 4,width = 6)
food_access_plot <- ggplot(confirmed_nm[confirmed_nm$county %in% c("Taos","Santa Fe","Bernalillo","McKinley"),],aes(x=county,y=food_access,size=4)) + geom_point() + theme_bw() + ylab("Food Insecurity") + xlab("County") + theme(legend.position = "none")
ggsave("food_access_plot.png",food_access_plot,device = "png",height = 4,width = 6)
healthcare_access_plot <- ggplot(confirmed_nm[confirmed_nm$county %in% c("Taos","Santa Fe","Bernalillo","McKinley"),],aes(x=county,y=access_p,size=4)) + geom_point() + theme_bw() + ylab("Healthcare access") + xlab("County") + theme(legend.position = "none")
ggsave("healthcare_access_plot.png",healthcare_access_plot,device = "png",height = 4,width = 6)
# clean up artifacts of log(cases)/pop: log(0) is -Inf, and NA pops give NA
confirmed_nm[is.infinite(confirmed_nm$normalized_gr),]$normalized_gr <- 0
confirmed_nm[is.na(confirmed_nm$normalized_gr),]$normalized_gr <- 0
######
confirmed_nm_complete <- confirmed_nm[colnames(confirmed_nm) %in% c("t","pop_density","food_access","living_with_senior_normalized","access_p","county", "normalized_gr","mean_age_dist")]
confirmed_nm_complete[is.infinite(confirmed_nm_complete$normalized_gr), ]$normalized_gr <- 0
confirmed_nm_complete <- confirmed_nm_complete[complete.cases(confirmed_nm_complete),]
library(forecast)
# covariate matrix for the ARIMA regression, scaled column-wise
xreg_cols <- c("pop_density","food_access","living_with_senior_normalized","access_p","mean_age_dist")
xreg_mat <- scale(as.matrix(sapply(confirmed_nm_complete[,colnames(confirmed_nm_complete)%in%xreg_cols], as.numeric)))
# response rescaled by 1e6 — presumably for numerical stability; confirm
ar_fit <-auto.arima(confirmed_nm_complete$normalized_gr*1e6,xreg=xreg_mat)
summary(ar_fit)
|
455f52f5da667d74aafda20654bc56ccf599c690 | c5707b03ca4c5015fdd48aa98a87c1d038b0ed7f | /mc/man/library.mc.Rd | b3e7f1a5de47feecaeaa813fe61cbf427e470773 | [] | no_license | nhtuong/mc-r | e145ab2a130c90ab57ea0ce7fc10e2f740f39e9a | 50e523c1ce2d8ec323f7cb3a58e2d8edd8dc211d | refs/heads/master | 2020-05-17T16:05:53.766057 | 2015-01-07T11:44:01 | 2015-01-07T11:44:01 | 33,657,174 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 375 | rd | library.mc.Rd | \docType{methods}
\name{library.mc}
\alias{library.mc}
\title{Loading and Listing of Packages}
\usage{
library.mc(pkg, repos = "cran")
}
\arguments{
  \item{pkg}{name of the package to load, installing it first if necessary}

  \item{repos}{repository to install the package from when it is missing; defaults to \code{"cran"}}
}
\value{
A list of attached packages
}
\description{
On-the-fly load or install a package
}
\author{
Hoai Tuong Nguyen
}
\seealso{
\code{\link[utils]{install.packages}}
}
|
641aca96378118cad5af0f995ea8a71bc7f51563 | d41d3ec09a06a39c0e711aa7eed14bb2a7aa7052 | /hw2/StatsII_Week_2_16Nov2015_BB.R | 09130928fb7f2afbe5b2aa67d5828b6fe0960b8e | [] | no_license | andregalvez79/Statistics_Course_Master | 7618817bf6bb4f6368b54fd3f852bd69ec4b5dc6 | 1a508aee5adc87b3951ab971f1dedcc7747f280d | refs/heads/master | 2021-05-04T13:02:51.378568 | 2018-02-05T13:13:15 | 2018-02-05T13:13:15 | 120,306,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,827 | r | StatsII_Week_2_16Nov2015_BB.R | # Example R Script for the course "Analyzing in R" (aka Stats II), version 2015
# Week 2
# Bernd Figner
# the FMF book (only in chapter 5, but it's already handy for us now) introduces the package pastecs, in particular the handy command stat.desc() that gives descriptive statistics; so let's try this out.
# perhaps you first need to install some of the packages that I'm loading below (if you don't have them already)
install.packages("pastecs")
install.packages("reshape")
install.packages("ltm")
# load that library
library(pastecs) # for stat.desc()
library(psych) # for describe() and describeBy()
library(lattice) # for densityplot()
library(ltm) # for rcor.test()
library(reshape) # for melt/cast
# BTW, the developer of the package reshape created a newer package reshape2, with very similar functionality:
# As the developer himself writes, "This version [reshape2] improves speed at the cost of functionality, so I have renamed it to reshape2 to avoid causing problems for existing users."
# see also here: http://stackoverflow.com/questions/12377334/reshape-vs-reshape2-in-r
# Since we are not concerned about speed when reshaping data, we are going to focus on reshape (not reshape2), as it seems a bit easier and user-friendly; and it's also the package that is used in the book.
# set working directory
setwd('~/GoogleDrive/Radboud/Teaching/Stats_II_IntroR/2015_2016/Week02/RScripts')
######################################################################################
# reshaping data frames: wide and long format data
# for the example here, we need some data
# such as: 20 participants, each rates their moood 3 times: baseline measure, then after watching a funny movie clip, and then again after watching a sad movie clip. the mood ratings are done on a continuous visual analogue scale ranging from 0 to 100
# we could either load a file I made or generate some data ourselves, either is fine
# option 1: load a data file that I have created previously
r_0 <- read.csv("Rating_ExampleData.csv")
head(r_0)
tail(r_0)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# option 2: generate our own example data using a bunch of R commands: this is only for the interested, you can skip this part if you want
# The following lines of code create some 'fake' data; you can ignore that bit, if you prefer
r_1 <- as.data.frame(matrix(data = NA, nrow = 20, ncol = 5)) # you already know that from week 1: we create a matrix filled with NA values and turn that matrix into a data frame
names(r_1) <- c('pp_code', 'f_gender', 'rating_1', 'rating_2', 'rating_3') # now we give the variables some meaningful names
r_1$pp_code <- paste('pp', c(1:20), sep = '_') # I like to use participant codes that are NOT numbers (to make sure I never accidentally treat it as continuous variable)
# for gender, I use the sample() command; it randomly samples elements from a vector that I define
r_1$f_gender <- as.factor(sample(c('male', 'female'), 20, replace = TRUE)) # what this does is that it randomly assigns the gender male to half the participants and the gender female to the other half of the participants
# if you're curious, have a look at:
?sample
# creating some fake data; I assume the rating scale goes from 0 to 100 and I generate some normally distributed data with a mean of 50 and a SD on 20 and round it to integers (i.e., no decimals)
r_1$rating_1 <- round(rnorm(20, mean = 50, sd = 20), digits = 0)
densityplot(r_1$rating_1) # just to check the distribution (since I said the ratings go from 0 to 100, I should make sure that there are no values smaller than 0 or larger than 100)
# I next create a second rating that should be on average higher than the first one (that's what the +20 does in the command below) and is correlated (but not perfectly correlated) with the first measure: that's what the + round(rnorm(20, mean = 0, sd = 3), digits = 0) does; it adds a random number to the previous rating (the random number is drawn from a normal distribution with a mean of 0 and an SD of 3; thus, sometimes the random number will be smaller, sometimes larger than 0)
r_1$rating_2 <- r_1$rating_1 + 20 + round(rnorm(20, mean = 0, sd = 3), digits = 0)
densityplot(r_1$rating_2) # let's check again
# ok, there are some that are above 100, so let's fix that
r_1$rating_2[which(r_1$rating_2 > 100)] <- 100
# and for the third rating, we create a average lower rating (thus, I subtract 30 from the first rating and again do the adding of a random number)
r_1$rating_3 <- r_1$rating_1 - 30 + round(rnorm(20, mean = 0, sd = 3), digits = 0)
densityplot(r_1$rating_3) # let's check
# that generated a few values below 0, so I change these to 0: I identify the entries below 0 using which() and assign the value 0 to these entries
r_1$rating_3[which(r_1$rating_3 < 0)] <- 0
densityplot(r_1$rating_3) # ok, no more values below 0
# save this fake data file
write.csv(r_1, file = 'Rating_ExampleData.csv', row.names = FALSE)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# OK, we have created the fake data, the rest below you are not allowed to ignore anymore
###############################################################################################################################################
# OK, so let's do some things with this data frame (we'll do the change from wide to long format afterwards)
r_1 <- r_0
r_1
# check the correlations among the 3 ratings
# there are many different functions computing correlations
#for example:
?cor()
cor(r_1$rating_1, r_1$rating_2) # that gives us just the correlation coefficient (which is very high), but for example no p values; and it's also not the most convenient command for other reasons. But luckily, there are other commands
# I like the function rcor.test() from the package ltm (but there are other options)
# rcor.test wants as input several columns of data that it will then correlate with each other (e.g., columns with numerical variables such as rating_1, rating_2 etc). What is nice that it accepts more than 2 columns at a time; it will then compute all pairwise correlations and show a correlation matrix as output
# thus, if we want to check the correlation between the columns rating_1 and rating_2, we can do this like this
rcor.test(cbind(r_1$rating_1, r_1$rating_2)) # cbind stands for column-bind. It takes two columns of data (here rating_1 and rating_2) and binds them together
# however, there are more elegant ways to do the same
# like this:
rcor.test(r_1[,c('rating_1', 'rating_2')])
# or like this (this is my preferred way in this case, but to each their own...)
rcor.test(r_1[,3:4])
# if we want to have all pairwise correlations among the 3 rating variables, we again have these different ways how to do it; but I'm using this one here now (least typing...)
rcor.test(r_1[,2:5])
# as you can see, it also computes correlations for the variable gender (which is a factor). As gender has only 2 different values (males/females), this actually makes sense
# there are also ways how to plot scatterplots of pairwise correlations (some simpler, some more sophisticated); a very simple one is pairs():
pairs(~rating_1 + rating_2 + rating_3, data = r_1) # the ~ often means "as a function of." here, it means make all pairwise scatterplots for rating_1, rating_2, and rating_3
?pairs
# if you are curious, here is an overview stating this plus some fancier ways how to do these kinds of plots:
# http://www.statmethods.net/graphs/scatterplot.html
# some more simple stats: t tests
# we could also do pairwise t tests to check whether the mood manipulations via the video clips were significant; in the case of our example, these would be paired t tests
# testing rating_1 versus rating_2
my1sttest <- t.test(r_1$rating_1, r_1$rating_2, paired = TRUE) # yep, it's significant (not that surprising, given the way I generated these data...)
my1sttest
# the same for rating_1 versus rating_3; this time using the with() command
with(r_1, t.test(rating_1, rating_3, paired = TRUE)) # also significant
# ok, you can compute the t test comparing rating 2 vs. 3 yourself!
# ......................................................................
# OK, that was fun, now back to the serious things: Let's do the change from wide to long format (and afterwards back again)
# RESHAPING THE DATA FRAME: wide versus long format
# our data frame r_1 is in so-called wide format; repeated measures (rating_1, rating_2, rating_3) are separate columns. This is the format you are familiar with also from SPSS
# For many types of analyses in R (and for some analyses even in SPSS, actually), we need the data to be in LONG format; this is also sometimes called "stacked" format, because the repeated measures are 'stacked' on top of each other
# The FMF book covers this in Chapter 3.9.4
# there are different ways how to get from wide to long format and back, ranging from simple (but not very flexible) to complex (but very flexible):
# stack() and unstack() [these commands are in the base package, i.e., available without loading any additional packages, if I am not mistaken]
# cast() and melt() [from the package reshape]
# reshape() [also from the package reshape]
# I typically use reshape(), but the other commands are a bit easier (but not always flexible enough)
# so, let's try the most simple, i.e., stack/unstack first
?stack
r_1_stack <- stack(r_1, select = c('rating_1', 'rating_2', 'rating_3'))
#let's check what we created:
r_1_stack
# the problem here is that we lose the information about which data are from which participant... so that's not handy for our case of repeated measures. It is a fine and simple command, if all one wants to do is indeed to put the variables on top of each other; and vice versa:
r_1_unstack <- unstack(r_1_stack)
r_1_unstack # yep, it's back in wide format, but as one could expect, the information about participants (and gender) is gone forever...
# on to cast/melt then: these commands are in the package reshape, so we need to install and load it first (I do this at the very top of this script!)
?melt # this is an extremely short and not very helpful help file... But, book chapter 3 has good information about this command.
?melt.data.frame # this is a bit longer, but the book does a better job at explaining
r_1_melt <- melt(r_1, id = c('pp_code', 'f_gender'), measured = c('rating_1', 'rating_2', 'rating_3'))
r_1_melt
# that worked very nicely!
# if we wanted, we could change the names of the columns "variable" and "value" but I'm not going to do this here
# so here's the basic syntax for melt:
# under measured =, we tell the function melt which variables form the repeated measures and need to be stacked on top of each other
# under id =, we specify the variables that are not the repeated measures and thus need to be copied several times: i.e., pp_1 and their gender need to be put into the long-format data frame 3 times, once for each of the 3 rating variables
# now let's try to undo that again:
r_1_cast <- cast(r_1_melt)
r_1_cast
# that worked fine as well!
# well, it doesn't always work that easily. for more complicated cases, the syntax can require some more details and specifications of variables
?cast
r_1_cast2 <- cast(r_1_melt, pp_code ~ variable) # left of the ~ is the variable that identified in the molten (=stacked) data frame which observations belong to the same participant; right of the ~ is the variable that identifies the new columns that will be created
r_1_cast2 # yep, looks good, but now we lost the gender information!
# so, if we want to keep the gender information, we do this:
r_1_cast3 <- cast(r_1_melt, pp_code + f_gender ~ variable)
r_1_cast3
# Looks good. So we tell cast() that the variables pp_code and gender are id variables (not repeated measurements) and, after the tilde (~), we specify which variable is the repeated-measures column
r_1_cast
r_1
# just to compare: the only difference between r_1_cast and r_1 seems to be how the rows are ordered:
# in r_1 it's pp_1, pp_2 etc
# in r_1_cast, the data have been sorted alphabetically, i.e., pp_1, pp_10, pp_11 etc
# but the sorting doesn't matter here (BTW: if you need to sort a data frame, you must NOT use the sort() command, as this can have the effect that only one column is sorted in your data frame and the others not, turning your data frame into a mess...)
# see for example here how to order the rows in a data frame: http://stackoverflow.com/questions/1296646/how-to-sort-a-dataframe-by-columns-in-r
# here I sort the rows according to the values in rating_1
r_1_ordered <- r_1[order(r_1[,3]),]
# this command looks more complicated than it is, as it combines several things
# in the middle is this: order(r_1[,3]) --> this says order r_1 according to the 3rd column in r_1
# this is then put into r_1[,] where the row index goes, resulting in the full command:
# r_1[order(r_1[,3]),]
# how should the command look like if you wanted to order the data frame according to the variable rating_3, but in descending order (i.e., highest values first)? HINT: have a look at ?order
# I paste the correct command at the very end of this script
# Ok, but back to reshaping
# now let's use the most complicated (and most flexible) command, reshape()
?reshape # ok, this help file is a bit longer...
r_1_long <- reshape(r_1, idvar = 'pp_code', varying = c('rating_1', 'rating_2', 'rating_3'), timevar = 'rating_1or2or3', v.names = 'rating', direction = 'long')
r_1_long # check: looks good!
# so, how does this function work?
# similar to before, with idvar = , we specify the ID variable. Note that we did not have to specify f_gender here, but it was still included in the new long format data frame
# with varying =, we specify the repeated-measures variables
# with timevar =, we give the name to a new variable that will be created, which tells us from which variable this row of data comes (rating 1, 2, or 3 in our case)
# with v.names =, we can specify the name that our new stacked rating variable should have
# with direction =, we tell the function whether we want to go from wide to LONG format (as we did here); or the other way around from long to WIDE
# If we want to go back from the long to the wide format:
r_1_wide <- reshape(r_1_long, direction = 'wide')
r_1_wide # looks good
# Can we also do a t test in that format?
# Well, first, a t test compares only 2 groups, so we could first get rid of rating_3 entries
head(r_1_long)
# here we create a new data frame that contains only the data from rating 1 and rating 3
r_1_r1r2 <- r_1_long[which(r_1_long$rating_1or2or3 < 3),] # the command which() can be used to select entries that fit a specific criterion. here we use it to select the entries in the variable rating_1or2or3 that are smaller than 3
# let's take one step back:
a <- c(2, 5, 6, 1)
which(a < 3) # gives as answer 1 and 4, i.e., the first and fourth element in a
which(a == 6) # gives as answer 3, i.e., the 3rd element in a
which(a != 6) # gives as answer 1, 2, 4, i.e., everything except the 3rd element
which(a >= 2) # >= means 'larger than or equal to'; gives as answer 1, 2, 3, i.e., the first, second and third element in a
which(a <= 5) # <= means 'smaller than or equal to'; gives as answer 1, 2, 4, i.e., the first, second and foruth element in a
# In the command above: r_1_long[which(r_1_long$rating_1or2or3 < 3),]
# we use which() to pick out only those rows in the data frame r_1_long, for which the value in rating_1or2or3 is 1 or 2
# let's check whether this did what we wanted:
r_1_r1r2 # looks good
# some more ways to check: the number of columns should be the same as before, but the number of rows should be now 40 instead of 60
ncol(r_1_long) #4
ncol(r_1_r1r2) #4 --> good!
nrow(r_1_long) #60
nrow(r_1_r1r2) #40 --> good!
# ok, so let's do a t test
# if you want to have some short and nice explanations, have a look here:
# http://www.statmethods.net/stats/ttest.html
t.test(r_1_r1r2$rating ~ r_1_r1r2$rating_1or2or3, paired = TRUE)
# so, besides learning more about t tests, we also learned how to select only parts of a data frame. there are many ways how to do this, for example which(), which we already learned about
?which()
# we could also use the original data frame, but use which() to pick out only the data points we want: this is going to look a bit complicated, but I simply use which to specify that only ratings 1 and 2 should be used: which(r_1_long$rating_1or2or3 <= 2)
t.test(r_1_long$rating[which(r_1_long$rating_1or2or3 <= 2)] ~ r_1_long$rating_1or2or3[which(r_1_long$rating_1or2or3 <= 2)], paired = TRUE)
# another command that is very handy to select parts of a data frame is subset()
# so let's try to do the same as we did, but using subset()
?subset
OnlyRatings1_2 <- subset(r_1_long, rating_1or2or3 < 3)
# or we could only select rows of data in which the actual rating was exactly or above 80 (i.e., only very hapy-mood data)
Happy <- subset(r_1_long, rating >= 80) # note: >= means "greater or equal"
Unhappy <- subset(r_1_long, rating <= 30) # note: <= means "smaller or equal"
# we can use subset also to select only specific *columns*, we do that with the argument select =
NoRatings <- subset(r_1_long, select = 1:2) # the last bit means select only columns 1 and 2
# we can do the same by using the - before the column number (the -3 means "all except column 3")
NoRatings <- subset(r_1_long, select = -3) # the last bit means select only columns 1 and 2
# and here's yet another way to do the same thing, by explictly naming the variable names
NoRatings <- subset(r_1_long, select = c('pp_code', 'rating_1or2or3'))
# and we can combine both the row-subsetting and the column-subsetting
OnlyRatings1_2_NoPPCode <- subset(r_1_long, rating_1or2or3 < 3, select = -1) # removing the variable with the participant code is hardly ever a good idea, though...
OnlyRatings1_2_NoPPCode <- subset(r_1_long, select = -1, rating_1or2or3 < 3) # removing the variable with the participant code is hardly ever a good idea, though...
#................................................
# solution sorting of data frame according to values in rating_3 in descending order:
r_1[order(r_1[,5], decreasing = TRUE),]
|
2a39c14cc7f8d65536b5113de56bd22da5eb7cbe | a785e6399877781758e769334e50538d108d320d | /man/infuse.Rd | e861c943ffbed7cd6bdc6af2dd2eb2413c3ada38 | [] | no_license | fxcebx/infuser | 63be56e95f7f7ed650e796a4f1d0f4c60e4de0e9 | 630f39212a2f6f3e5e05c6ebc43b21d4f7449638 | refs/heads/master | 2020-12-11T03:32:25.922616 | 2015-05-30T06:09:58 | 2015-05-30T06:12:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,683 | rd | infuse.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/core.R
\name{infuse}
\alias{infuse}
\title{Infuse a template with values.}
\usage{
infuse(file_or_string, key_value_list, ..., variable_identifier = c("{{",
"}}"), default_char = "|", collapse_char = ",",
transform_function = function(value) return(value), verbose = FALSE)
}
\arguments{
\item{file_or_string}{the template file or a string containing the template}
\item{key_value_list}{a named list with keys corresponding to the parameters requested by the template, if specified, will be used instead of ...}
\item{...}{different keys with related values, used to fill in the template}
\item{variable_identifier}{the opening and closing character that denounce a variable in the template}
\item{default_char}{the character use to specify a default after}
\item{collapse_char}{the character used to collapse a supplied vector}
\item{transform_function}{a function through which all specified values are passed, can be used to make inputs safe(r). dplyr::build_sql is a good default for SQL templating.}
\item{verbose}{verbosity level}
}
\description{
For more info and usage examples see the README on the \href{https://github.com/Bart6114/infuser}{\code{infuser} github page}.
To help prevent \href{https://xkcd.com/327/}{SQL injection attacks} (or other injection attacks), use a transformation function to escape special characters and provide it through the \code{transform_function} argument. \code{\link[dplyr]{build_sql}} is a great default escaping function for SQL templating. For templating in other languages you will need to build/specify your own escaping function.
}
|
0aedac0f84469b158787a832e39faf858efe6b9c | d353c4f3157e925a465c6b1422ab2421a3afb89e | /bin/tsvToBigWig.R | 7f82d594e9eea5e4c79fc96b216db96a284eea89 | [
"MIT"
] | permissive | NCBI-Hackathons/Epigenomics_CWL | a26a38f10ba568dc10cc0177ef502e2074e5442c | 934baaadf133eda785426079d98489307d02f3d7 | refs/heads/master | 2020-05-25T21:51:05.200521 | 2017-03-25T19:20:28 | 2017-03-25T19:20:28 | 84,972,914 | 8 | 9 | null | 2017-07-22T06:09:01 | 2017-03-14T16:37:07 | R | UTF-8 | R | false | false | 1,567 | r | tsvToBigWig.R | #!/usr/bin/Rscript
# Outstanding issues:
# Test whether guessing seqlengths creates erroneous BigWIGs (do they load in IGV/GViz/whatever)

# Command-line interface:  -i/--infile <tsv file>   -d/--outdir <output directory>
library(getopt)

# getopt spec columns: long name, short flag, arg style (1 = required), type.
spec <- matrix(c(
  'infile', 'i', 1, "character",
  'outdir', 'd', 1, "character"
), byrow = TRUE, ncol = 4)
opt <- getopt(spec)

in.file <- opt$infile
out.dir <- opt$outdir

# Both arguments are mandatory: print usage and exit non-zero otherwise.
# (Fixed: the usage text previously advertised -o, but the spec above binds -d.)
if (is.null(in.file) || is.null(out.dir)) {
  message('Usage: tsvToBigWig.R -i input_file -d out_dir')
  q(status = 1)
}

library(data.table)
library(rtracklayer)

message(paste('Reading in', in.file, 'and writing BigWIGs to', out.dir))
# Guess the seqlengths of the genome; needed for BigWig indexing.
# For each seqlevel, returns max(start) + 1 as a lower bound on the chromosome
# length (sites in this script are made 1 bp wide, so max(start) + 1 covers the
# furthest end coordinate present in the data).
# NOTE(review): this is only an estimate from observed positions, not the true
# chromosome length -- see the "Outstanding issues" note at the top of the file.
guessSeqLengths <- function(in.grange)
{
  guessOne <- function(seq.lev)
  {
    this.starts <- start(in.grange)[which(seqnames(in.grange) == seq.lev)]
    max(this.starts) + 1
  }
  # vapply instead of sapply: guarantees a named numeric vector regardless of
  # input shape (sapply can silently change its return type on edge cases).
  vapply(seqlevels(in.grange), guessOne, numeric(1))
}
# Read in the file, noting that the format is as output by interconverter:
cpg.bed <- fread(in.file)
colnames(cpg.bed) <- c('chr', 'start', 'strand', 'type', 'meth_prop', 'cov')
# BigWig intervals need an explicit end coordinate; treat each site as 1 bp wide.
cpg.bed$end <- cpg.bed$start+1
# NOTE(review): this keeps ONLY chr19_gl000208_random and discards everything
# else -- it looks like leftover debugging/test code. Confirm whether the full
# file should be converted instead.
cpg.bed <- cpg.bed[which(cpg.bed$chr=="chr19_gl000208_random" )]
base.gr <- makeGRangesFromDataFrame(cpg.bed)
# BigWig export requires seqlengths; estimate them from the observed positions.
seqlengths(base.gr) <- guessSeqLengths(base.gr)
# Track 1: methylation proportion, written as the BigWig 'score' column.
meth.gr <- base.gr
elementMetadata(meth.gr) <- data.frame(score=cpg.bed$meth_prop)
export.bw(meth.gr, file.path(out.dir, paste0(basename(in.file), '.prop_meth.bw')))
# Track 2: per-site read coverage, written as the BigWig 'score' column.
cov.gr <- base.gr
elementMetadata(cov.gr) <- data.frame(score=cpg.bed$cov)
export.bw(cov.gr, file.path(out.dir, paste0(basename(in.file), '.cov.bw')))
8680ae79ff92d3d0280dbe8fe56b19660a189c89 | 2bd826cc936ce2d53997ee8b72459248a905a4cb | /cachematrix.R | e13c7538fd89adb789c65afa945fa81ab8a3b90b | [] | no_license | forumvisitor/ProgrammingAssignment2 | 1ee6deb1ed23051610e58f4216df0b00acca01f5 | 64898f57be83d64561f0ef8481fa13c5471d1876 | refs/heads/master | 2021-01-22T12:48:56.298486 | 2015-11-22T10:57:29 | 2015-11-22T10:57:29 | 46,654,876 | 0 | 0 | null | 2015-11-22T09:59:28 | 2015-11-22T09:59:28 | null | UTF-8 | R | false | false | 1,463 | r | cachematrix.R | ## Description:
## Cache the inverse of a matrix rather than compute it repeatedly .
##
## Usage:
## m <- matrix(c(1:9), nrow=3, ncol=3)
## cachedMatrix <- makeCacheMatrix(m)
## cacheSolve(cachedMatrix)
##
## cachedMatrix$set(m) # Set the matrix to be cached.
## m <- cachedMatrix$get() # Returns the matrix cached.
##
## cachedMatrix$setInverse(solvedMatrix) # Set the inverse matrix to a cache.
## cachedMatrix$getInverse() # Get the cached inverse of the cached matrix.
## Build a special "matrix" wrapper that can cache its inverse.
## Returns a list of four accessor closures sharing one enclosed environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  list(
    set = function(y) {
      # A new matrix invalidates whatever inverse was cached before.
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverseinput) cached_inv <<- inverseinput,
    getinverse = function() cached_inv
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve retrieves the inverse from the cache instead of recomputing it.
## Compute (or fetch from cache) the inverse of a cached-matrix object 'x'
## as produced by makeCacheMatrix(). Extra arguments in '...' are forwarded
## unchanged to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and memoise the result for next time.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
9dc3dab4f128350772ed94c01069e60d259a0a38 | f684ffa27e0e63ab6395378ea5919a3ba9b9802a | /code/models_glm.R | 4c0a94a8780a5fe4b9299279cde3dc8a7d9e9dae | [] | no_license | Dean-Sun/Baseline | 79ce0d13a4c0e0446b52ba46ea824754559defce | 43723334918287ae4f5f35e85f0ca9ea326cf00e | refs/heads/master | 2020-06-20T07:55:20.346537 | 2019-08-23T20:34:33 | 2019-08-23T20:34:33 | 197,050,395 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,354 | r | models_glm.R | library(tidyverse)
# Modeling dependencies: data wrangling, the h2o ML backend, metrics, dates.
library(data.table)
library(h2o)
library(MLmetrics)
library(lubridate)
# Project helpers (my_scale/unscale/metrics and plotPred) are sourced from here.
source('code/tools.R')
source('code/plott.R')
# start h2o session
h2o.init(nthreads=-1, max_mem_size="16G")
# Load the pre-split train/validation sets into the h2o cluster.
train = h2o.importFile(path = 'data/csv_cut/data_train.csv')
valid = h2o.importFile(path = 'data/csv_cut/data_val.csv')
# Quick interactive sanity checks on the training frame.
dim(train)
names(train)
h2o.describe(train)
#train['TrueAnswer_log'] = log(train['TrueAnswer'])
#train['TrueAnswer_inv'] = (train['TrueAnswer'])^-1
#valid['TrueAnswer_log'] = log(valid['TrueAnswer'])
#valid['TrueAnswer_inv'] = (valid['TrueAnswer'])^-1
train['TrueAnswer_norm'] = my_scale(train['TrueAnswer'])
valid['TrueAnswer_norm'] = my_scale(valid['TrueAnswer'],
mean = mean(train['TrueAnswer']),
sd = sd(train['TrueAnswer']))
# set X and y
y <- "TrueAnswer_norm"
y_log = "TrueAnswer_log"
y_inv = 'TrueAnswer_inv'
X = names(train)[c(3, 10:59, 63)]
##################################################################
################### Baseline Linear Regression ###################
##################################################################
model_baseline <- h2o.glm(
model_id="model_baseline",
training_frame=train,
validation_frame=valid,
y=y,
x=X,
family = 'gaussian'
)
# performance check
summary(model_baseline)
h2o.varimp(model_baseline)
h2o.performance(model_baseline, newdata=train) ## full training data
h2o.performance(model_baseline, newdata=valid) ## full validation data
# normal check
metrics(h2o.predict(model_baseline, train), train[y])
# scale back
metrics(unscale(h2o.predict(model_baseline, train),
mean = mean(train['TrueAnswer']),
sd = sd(train['TrueAnswer'])), train['TrueAnswer'])
valid['y_pred_baseline'] = h2o.predict(model_baseline, valid)
metrics(valid['y_pred_baseline'], valid[y])
# scale back
valid['y_pred_baseline'] = unscale(h2o.predict(model_baseline, valid),
mean = mean(train['TrueAnswer']),
sd = sd(train['TrueAnswer']))
metrics(valid['y_pred_baseline'], valid['TrueAnswer'])
valid_dt = as.data.table(valid)
# id start from 167
plotPred(valid_dt, group = 'GroupF-193', model = 'baseline', activity = FALSE)
##################################################################
################### Poisson Linear Regression ###################
##################################################################
model_poisson <- h2o.glm(
model_id="model_poisson",
training_frame=train,
validation_frame=valid,
x=X,
y=y,
family = 'poisson'
)
# performance check
summary(model_poisson)
metrics(h2o.predict(model_poisson, train), train[y])
valid['y_pred_poisson'] = h2o.predict(model_poisson, valid)
metrics(valid['y_pred_poisson'], valid[y])
valid_dt$y_pred_poisson = as.data.table(valid$y_pred_poisson)
plotPred(valid_dt, group = 'GroupF-193', model = 'poisson', activity = TRUE)
######## distribution check ################
data %>% ggplot(aes(x= TrueAnswer_inv))+geom_histogram(bins= 2000)
data %>% ggplot(aes(x= TrueAnswer_log))+geom_histogram(bins= 2000)
data %>% ggplot(aes(x= TrueAnswer))+geom_histogram(bins= 2000)
###############################################
##################################################################
################### Log Linear Regression ########################
##################################################################
model_gaussian_log <- h2o.glm(
model_id="model_gaussian_log",
training_frame=train,
validation_frame=valid,
x=X,
y=y_log,
family = 'gaussian'
)
metrics(exp(h2o.predict(model_gaussian_log, train)), train[y])
valid['y_pred_gaussian_log'] = exp(h2o.predict(model_gaussian_log, valid))
metrics(valid['y_pred_gaussian_log'], valid[y])
valid_dt$y_pred_gaussian_log = as.data.table(valid$y_pred_gaussian_log)
plotPred(valid_dt, group = 'GroupF-193', model = 'gaussian_log', activity = FALSE)
##################################################################
################### Inv Linear Regression ########################
##################################################################
model_gaussian_Inv <- h2o.glm(
model_id="model_gaussian_Inv",
training_frame=train,
validation_frame=valid,
x=X,
y=y_inv,
family = 'gaussian'
)
metrics((h2o.predict(model_gaussian_Inv, train))^-1, train[y])
valid['y_pred_gaussian_Inv'] = (h2o.predict(model_gaussian_Inv, valid))^-1
metrics(valid['y_pred_gaussian_Inv'], valid[y])
valid_dt$y_pred_gaussian_Inv = as.data.table(valid$y_pred_gaussian_Inv)
plotPred(valid_dt, group = 'GroupA', model = 'gaussian_Inv')
##################################################################
################### Inv Poisson Regression ######################
##################################################################
model_poisson_inv <- h2o.glm(
model_id="model_poisson_inv",
training_frame=train,
validation_frame=valid,
x=X,
y=y_inv,
family = 'poisson'
)
metrics((h2o.predict(model_poisson_inv, train))^-1, train[y])
valid['y_pred_poisson_Inv'] = (h2o.predict(model_poisson_inv, valid))^-1
metrics(valid['y_pred_poisson_Inv'], valid[y])
valid_dt$y_pred_poisson_Inv = as.data.table(valid$y_pred_poisson_Inv)
plotPred(valid_dt, group = 'GroupA', model = 'poisson_Inv')
##################################################################
################ Basline Fixed for Negitive ####################
##################################################################
valid['y_pred_baseline_adjust'] = ifelse(valid['y_pred_baseline']>0,
valid['y_pred_baseline'], 0)
valid_dt$y_pred_baseline_adjust = as.data.table(valid$y_pred_baseline_adjust)
plotPred(valid_dt, group = 'GroupA-170', model = 'baseline_adjust', activity = FALSE)
plotPred(valid_dt, group = 'GroupA-170', model = 'baseline', activity = FALSE)
metrics(valid['y_pred_baseline_adjust'], valid[y])
plotPred(valid_dt, group = 'GroupG', model = 'baseline_adjust')
plotPred(valid_dt, group = 'GroupG', model = 'gaussian_log')
plotPred(valid_dt, group = 'GroupG', model = 'baseline')
##dygraph(mike_check[FileGrp==100, .(TIMESTAMP, Activity, TrueAnswer)]) %>% dyOptions(useDataTimezone = TRUE)
h2o.shutdown(prompt = TRUE)
|
6f45a813159e14775942668f20d47079a8e93bfa | 6a21ff9cd2b4438a7b6cbbf3e020523f042866c6 | /tests/testthat/test_Consistency.R | 5d7b54ed358eea8ba090520da81a671ac3bcde7c | [
"Artistic-2.0",
"MIT"
] | permissive | rikenbit/gcTensor | 6545ccd97dbc83a1b2e472bdc6853ca8a2865b74 | 8a5c05b74e73493fe6a81443b2811da3f41f2368 | refs/heads/master | 2023-07-26T10:48:04.383065 | 2023-07-06T12:46:06 | 2023-07-06T12:46:06 | 168,929,267 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,124 | r | test_Consistency.R | #
# Test Consistency of Objects
#
# Test C-1: X and Z
# CP
## I
expect_all_identical(
dim(X$X1)[1],
dim(X$X2)[1],
dim(out.CP_EUC$Z$A)[1],
dim(out.CP_KL$Z$A)[1],
dim(out.CP_IS$Z$A)[1])
## J
expect_all_identical(
dim(X$X1)[2],
dim(X$X3)[1],
dim(out.CP_EUC$Z$B)[1],
dim(out.CP_KL$Z$B)[1],
dim(out.CP_IS$Z$B)[1])
## K
expect_all_identical(
dim(X$X1)[3],
dim(out.CP_EUC$Z$C)[1],
dim(out.CP_KL$Z$C)[1],
dim(out.CP_IS$Z$C)[1])
## M
expect_all_identical(
dim(X$X2)[2],
dim(out.CP_EUC$Z$D)[1],
dim(out.CP_KL$Z$D)[1],
dim(out.CP_IS$Z$D)[1])
## N
expect_all_identical(
dim(X$X3)[2],
dim(out.CP_EUC$Z$E)[1],
dim(out.CP_KL$Z$E)[1],
dim(out.CP_IS$Z$E)[1])
# Tucker
## I
expect_all_identical(
dim(X$X1)[1],
dim(X$X2)[1],
dim(out.Tucker_EUC$Z$A)[1],
dim(out.Tucker_KL$Z$A)[1],
dim(out.Tucker_IS$Z$A)[1])
## J
expect_all_identical(
dim(X$X1)[2],
dim(X$X3)[1],
dim(out.Tucker_EUC$Z$B)[1],
dim(out.Tucker_KL$Z$B)[1],
dim(out.Tucker_IS$Z$B)[1])
## K
expect_all_identical(
dim(X$X1)[3],
dim(out.Tucker_EUC$Z$C)[1],
dim(out.Tucker_KL$Z$C)[1],
dim(out.Tucker_IS$Z$C)[1])
## M
expect_all_identical(
dim(X$X2)[2],
dim(out.Tucker_EUC$Z$E)[1],
dim(out.Tucker_KL$Z$E)[1],
dim(out.Tucker_IS$Z$E)[1])
## N
expect_all_identical(
dim(X$X3)[2],
dim(out.Tucker_EUC$Z$F)[1],
dim(out.Tucker_KL$Z$F)[1],
dim(out.Tucker_IS$Z$F)[1])
# Test C-2: X and M
# Expect no error when partially masked some rows
M <- X
M$X1 <- M$X1 * 0 + 1
# Mask only rows 1:3
M$X2 <- M$X2 * 1
M$X2[1:3, 1:7] <- 0
M$X3 <- M$X3 * 0 + 1
expect_error(
GCTF(X=X, R=R_CP, M=M, Ranks=Ranks_CP, Beta=0),
regexp=NA) # regexp=NA means "there should be no errors"
# Expect error when completely masked some columns
M <- X
M$X1 <- M$X1 * 0 + 1
# Mask some column
M$X2 <- M$X2 * 1
M$X2[1:4, 7] <- 0
M$X3 <- M$X3 * 0 + 1
expect_error(GCTF(X=X, R=R_CP, M=M, Ranks=Ranks_CP, Beta=0))
# Expect error when completely masked
M <- X
M$X1 <- M$X1 * 0 + 1
# Mask all rows (1:4)
M$X2 <- M$X2 * 1
M$X2[1:4, 1:7] <- 0
M$X3 <- M$X3 * 0 + 1
expect_error(GCTF(X=X, R=R_CP, M=M, Ranks=Ranks_CP, Beta=0))
# Test C-3: Rank and Z
# CP
## A
expect_all_equal(
unlist(Ranks_CP$A),
dim(out.CP_EUC$Z$A),
dim(out.CP_KL$Z$A),
dim(out.CP_IS$Z$A))
## B
expect_all_equal(
unlist(Ranks_CP$B),
dim(out.CP_EUC$Z$B),
dim(out.CP_KL$Z$B),
dim(out.CP_IS$Z$B))
## C
expect_all_equal(
unlist(Ranks_CP$C),
dim(out.CP_EUC$Z$C),
dim(out.CP_KL$Z$C),
dim(out.CP_IS$Z$C))
## D
expect_all_equal(
unlist(Ranks_CP$D),
dim(out.CP_EUC$Z$D),
dim(out.CP_KL$Z$D),
dim(out.CP_IS$Z$D))
## E
expect_all_equal(
unlist(Ranks_CP$E),
dim(out.CP_EUC$Z$E),
dim(out.CP_KL$Z$E),
dim(out.CP_IS$Z$E))
# Tucker
## A
expect_all_equal(
unlist(Ranks_Tucker$A),
dim(out.Tucker_EUC$Z$A),
dim(out.Tucker_KL$Z$A),
dim(out.Tucker_IS$Z$A))
## B
expect_all_equal(
unlist(Ranks_Tucker$B),
dim(out.Tucker_EUC$Z$B),
dim(out.Tucker_KL$Z$B),
dim(out.Tucker_IS$Z$B))
## C
expect_all_equal(
unlist(Ranks_Tucker$C),
dim(out.Tucker_EUC$Z$C),
dim(out.Tucker_KL$Z$C),
dim(out.Tucker_IS$Z$C))
## E
expect_all_equal(
unlist(Ranks_Tucker$E),
dim(out.Tucker_EUC$Z$E),
dim(out.Tucker_KL$Z$E),
dim(out.Tucker_IS$Z$E))
## F
expect_all_equal(
unlist(Ranks_Tucker$F),
dim(out.Tucker_EUC$Z$F),
dim(out.Tucker_KL$Z$F),
dim(out.Tucker_IS$Z$F))
# Test C-4: num.iter, RecError, and RelChange
# CP
expect_all_equal(
formals(GCTF)$num.iter,
length(out.CP_EUC$RecError)-1,
length(out.CP_KL$RecError)-1,
# length(out.CP_IS$RecError)-1,
length(out.CP_EUC$RelChange)-1,
length(out.CP_KL$RelChange)-1)
# length(out.CP_IS$RelChange)-1)
# Tucker
expect_all_equal(
formals(GCTF)$num.iter,
length(out.Tucker_EUC$RecError)-1,
length(out.Tucker_KL$RecError)-1,
# length(out.Tucker_IS$RecError)-1,
length(out.Tucker_EUC$RelChange)-1,
length(out.Tucker_KL$RelChange)-1)
# length(out.Tucker_IS$RelChange)-1) |
4ef09a7e483ac4bda14fe66dcbab86744dff774f | 5cc5926c9d1bb30674876dd6d1c609f4d5f8af25 | /src/FNS_ParseURLs.R | 84d8a7a931fc949b4ab6050f941e42f735ca4480 | [
"MIT"
] | permissive | mhsmario/PrevineBrasil_NewFinancingModel_PHC | b7e1b3bc45483c8c9fd634f539004e429ac782ac | ae43ee0818b6c8d166aac8bc2410a2c13eb19bb0 | refs/heads/main | 2023-01-24T21:18:36.981333 | 2020-11-28T17:08:43 | 2020-11-28T17:08:43 | 316,774,325 | 1 | 0 | MIT | 2020-11-28T16:38:46 | 2020-11-28T16:24:09 | null | UTF-8 | R | false | false | 3,772 | r | FNS_ParseURLs.R | ##FNS 2019
library(data.table)
library(stringr)
library(dplyr)
mytxtfiles <- list.files(path = "~/Documents/WB/Health/data",
pattern = ".txt$",
full.names = T)
txt_results <- data.frame()
for (i in 1:length(mytxtfiles)) {
txt <- read.csv(paste0(mytxtfiles[i]), sep = ";", comment.char = "#",
stringsAsFactors = FALSE, header = FALSE)
colnames(txt)[1] <- "content"
txt_results <- rbind(txt_results, txt)
}
missing_jul_2020 <- "https://consultafns.saude.gov.br/recursos/repasse-dia/detalhar-pagamento?codigo=65578&count=10&dsCompetencia=MAR%2520de%25202020&nuAno=2020&nuMes=7&page=1&tipo=PROGRAMA"
txt_results <- rbind(txt_results,missing_jul_2020)
txt_results$content <- gsub("url: ", "", txt_results$content)
txt_results$content <- gsub(",", "", txt_results$content)
txt_results <- as.data.frame(unique(txt_results$content))
colnames(txt_results)[1] <- "content"
txt_results$txt_results <- as.character(txt_results$content)
txt_results2 <- dplyr::filter(txt_results, !grepl("google", content))
colnames(txt_results2)[1] <- "url"
txt_results2 <- dplyr::filter(txt_results2, !grepl("app", url))
txt_results2 <- dplyr::filter(txt_results2, !grepl("ufs", url))
txt_results2 <- dplyr::filter(txt_results2, !grepl("anos", url))
txt_results2$url <- trimws(txt_results2$url)
txt_results2 <- as.data.frame(unique(txt_results2$url))
colnames(txt_results2)[1] <- "url"
##Prepare table
txt_results2$codigo <- sub(".*codigo=", "", txt_results2$url)
txt_results2$codigo <- sub("&.*", "", txt_results2$codigo)
txt_results2$Competencia_Mes <- sub(".*dsCompetencia=", "", txt_results2$url)
txt_results2$Competencia_Mes <- sub("%.*", "", txt_results2$Competencia_Mes)
txt_results2$Competencia_Ano <- sub("&nuAno.*", "", txt_results2$url)
txt_results2$Competencia_Ano <- str_sub(txt_results2$Competencia_Ano, start = -4)
txt_results2$Ano_posted <- sub("&nuMes.*", "", txt_results2$url)
txt_results2$Ano_posted <- str_sub(txt_results2$Ano_posted, start = -4)
txt_results2$Mes_posted <- sub("&page.*", "", txt_results2$url)
txt_results2$Mes_posted <- str_sub(txt_results2$Mes_posted, start = -2)
txt_results2$Mes_posted <- gsub("=", "", txt_results2$Mes_posted)
txt_results2$Competencia_Mes <- ifelse(is.na(txt_results2$Competencia_Mes), paste0(txt_results2$Mes_posted), txt_results2$Competencia_Mes)
txt_results2$Programa_ou_Emenda <- sub(".*tipo=", "", txt_results2$url)
txt_results2$Programa_ou_Emenda <- gsub("%2520", " ",txt_results2$Programa_ou_Emenda)
# Translate Portuguese month abbreviations in Competencia_Mes into two-digit
# month numbers. Rows that were filled from the numeric Mes_posted fallback
# contain no letters and pass through unchanged.
# BUG FIX: "ABR" (abril / April) was previously mapped to "01"; it is month 04.
month_map <- c(
  JAN = "01", FEV = "02", MAR = "03", ABR = "04",
  MAI = "05", JUN = "06", JUL = "07", AGO = "08",
  SET = "09", OUT = "10", NOV = "11", DEZ = "12"
)
for (abbr in names(month_map)) {
  txt_results2$Competencia_Mes <- gsub(abbr, month_map[[abbr]],
                                       txt_results2$Competencia_Mes)
}
saveRDS(txt_results2, "~/Documents/WB/Health/data/FNS_URL_20_19_18.RDS")
write.csv(txt_results2, "~/Documents/WB/Health/data/FNS_URL_20_19_18.csv")
check <- txt_results2 %>%
group_by(Ano_posted, Mes_posted) %>%
filter(Ano_posted == 2018) %>%
summarise(n = n())
check$Mes_posted <- as.numeric(check$Mes_posted)
|
088a91b0292ff765152bd261a8dfad3918e96bca | 77dca6a841e1fb02fa27a55a0d1bf6bbc2eb8047 | /BLM Grid improved (R:C++)/BMLGrid.Rcheck/tests/testthat.R | 1262d25a57f093c7b0504d4be273d728e4c59649 | [] | no_license | vladimirsemenp/Projects-in-statistics | 8d61f47ecb56919bcaabe59133bfa35d7db3b5f0 | 26ac7696c78c26a30d245a03451a60cc2a310f32 | refs/heads/master | 2021-01-10T01:28:26.147453 | 2015-10-10T23:25:52 | 2015-10-10T23:25:52 | 44,030,337 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
library(BMLGrid)
test_check("BMLGrid")
|
f68abfe84dadd84c83199a5125ea65a783ec4c81 | 7f7f3498b162234f986502dba4b4f6bb50e5c7fd | /run_analysis.R | c0bc16bd844ed046d4045f5d792a9c59b7b737a7 | [] | no_license | stephaniebeard/Tidy | 09107e4082b0fb236f2992973bf599a7f8ffb837 | 3a4bf0e17dfab6eb24e65b9516f318cc028c762b | refs/heads/master | 2020-06-20T21:00:15.629511 | 2019-07-17T05:09:13 | 2019-07-17T05:09:13 | 197,247,022 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,804 | r | run_analysis.R |
# run_analysis.R
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.

# import libraries
library(dplyr)

# Download and extract the data into the working directory (no-op when the
# extracted folder -- or at least the zip archive -- is already present).
if (!dir.exists("UCI HAR Dataset")) {
  if (!file.exists("UCI HAR Dataset.zip")) {
    download.file("https://www.dropbox.com/s/lfsbhwy8elgjo15/UCI_HAR_Dataset.zip?dl=1", destfile = "UCI HAR Dataset.zip")
  }
  unzip("UCI HAR Dataset.zip")
}

# Read raw measurement (X), activity code (y), and subject tables plus the
# lookup tables for activity labels and feature names.
X_train <- read.table(file = "./UCI HAR Dataset/train/X_train.txt")
X_test <- read.table(file = "./UCI HAR Dataset/test/X_test.txt")
y_train <- read.table(file = "./UCI HAR Dataset/train/y_train.txt")
y_test <- read.table(file = "./UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table(file = "./UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table(file = "./UCI HAR Dataset/test/subject_test.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
features <- read.table("./UCI HAR Dataset/features.txt")

# 1. Merge the training and the test sets to create one data set:
#    stack train/test row-wise, then bind subject, activity, and measurements.
x <- rbind(X_train, X_test)
y <- rbind(y_train, y_test)
subject <- rbind(subject_train, subject_test)
merged <- cbind(subject, y, x)
# Measurement column names come from features.txt; the first two columns are
# the identifiers added above.
names(merged) <- c("SubjectID", "ActivityName", as.character(features$V2))

# 2. Extract only the mean() and std() measurements (plus identifiers).
sd_mean_cols <- features$V2[grep("-mean\\(\\)|-std\\(\\)", features$V2)]
subsetColumns <- c("SubjectID", "ActivityName", as.character(sd_mean_cols))
subsetted <- subset(merged, select = subsetColumns)

# 3. Replace numeric activity codes with their descriptive labels.
activity_labels$V2 <- as.character(activity_labels$V2) # converts to string
subsetted$ActivityName <- activity_labels[subsetted$ActivityName, 2] # replaces labels

# 4. Expand the terse feature names into descriptive variable names.
# NOTE: the original script applied the "Body" substitution twice, which
# produced names containing "Body  " (double space); the duplicate is removed.
names(subsetted) <- gsub("^t", "Time ", names(subsetted))
names(subsetted) <- gsub("^f", "Frequency ", names(subsetted))
names(subsetted) <- gsub("Body", "Body ", names(subsetted))
names(subsetted) <- gsub("Acc", "Accelerometer ", names(subsetted))
names(subsetted) <- gsub("Gravity", "Gravity ", names(subsetted))
names(subsetted) <- gsub("Gyro", "Gyroscope ", names(subsetted))
names(subsetted) <- gsub("Mag", "Magnitude ", names(subsetted))
names(subsetted) <- gsub("Jerk", "Jerk ", names(subsetted))
names(subsetted) <- gsub("mean\\(\\)", " mean value ", names(subsetted))
names(subsetted) <- gsub("std\\(\\)", " standard deviation ", names(subsetted))
names(subsetted) <- gsub("-X", "in X direction", names(subsetted))
names(subsetted) <- gsub("-Y", "in Y direction", names(subsetted))
names(subsetted) <- gsub("-Z", "in Z direction", names(subsetted))

# 5. Average every measurement per subject and activity.
data_group <- group_by(subsetted, SubjectID, ActivityName)
# summarise_all() replaces the deprecated summarise_each()/funs() pair.
tidy <- summarise_all(data_group, mean)

# Output tidy data set
write.table(tidy, "tidydata.txt", row.names = FALSE)
|
31990d905a440f157abd9d8e7b051f573dd0da99 | a9ceb2c8a4f33e6f9eb31650bc0dba68fb090468 | /models/gamye_season_blank.R | 2fb127af4980faffb10f5b6402169232cc6d0232 | [] | no_license | AdamCSmithCWS/BBS_seasonal_shift_ALHU | 348f638535ae7963f327b5cf3845ca01a1ad6e95 | d42c0575b86c584fc9c6a62ad7340a9e87eab831 | refs/heads/main | 2023-04-14T17:57:54.902976 | 2021-08-06T15:06:30 | 2021-08-06T15:06:30 | 393,386,547 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,743 | r | gamye_season_blank.R | model
{
# JAGS/BUGS model: hierarchical GAM ("gamye") for count data with stratum,
# observer, first-year, smooth-year (GAM), and random year effects, plus
# heavy-tailed (t-distributed) count-level overdispersion.
#### counts and overdispersion effects ###### Hierarchical GAM model with additional random year-effects
#### builds on the GAM model used in 2016 work
### not yet applied or tested
# Likelihood: Poisson counts with a log-link linear predictor combining all
# effect components for count k's stratum/observer/year.
for( k in 1 : ncounts )
{
log(lambda[k]) <- obs[strat[k],obser[k]] + eta*firstyr[k] + strata[strat[k]] + yeareffect[strat[k],year[k]] + yy[strat[k],year[k]] + noise[k]
#noise[k] ~ dnorm(0, taunoise)
noise[k] ~ dt(0, taunoise, nu) #alternative t-distributed noise = heavy-tailed overdispersion
count[k] ~ dpois(lambda[k])
#----------------------------------#
#fcount[k] ~ dpois(lambda[k])
#err[k] <- pow(count[k]-lambda[k],2)/lambda[k]
#ferr[k] <- pow(fcount[k]-lambda[k],2)/lambda[k]
#fzero[k] <- equals(fcount[k],0)
#loglik[k] <- logdensity.pois(count[k], lambda[k])
#----------------------------------#
}
### goodness of fit statistics
#maxf <- max(fcount[1:ncounts])
#meanf <- mean(fcount[1:ncounts])
#nfzero <- sum(fzero[1:ncounts])
#gof <- sum(err[1:ncounts])
#fgof <- sum(ferr[1:ncounts])
#diffgof <- gof-fgof
#posdiff <- step(diffgof)
### fixed effect priors
nu ~ dgamma(2, 0.1) #degrees of freedom (i.e., the heavy-tail component of the t-distribution), if nu is large (infinite) this is equivalent to the normal distribution noise
taunoise ~ dgamma(0.001,0.001)
sdnoise <- 1 / pow(taunoise, 0.5)
#taunoise <- 1/pow(sdnoise,2)#~ dgamma(0.001,0.001) # alternative priors
#sdnoise ~ dunif(0.00001,5)#<- 1 / pow(taunoise, 0.5)
#mulogtauobs ~ dnorm(0,2)#3.33) #informative prior that reduces the chance of very large values of sdobs
#mulogtauobs ~ dnorm(0.0,1.0E-6) #alternative less informative prior
#taulogtauobs ~ dgamma(2,0.2) #informative prior that reduces the chance of very large values of sdobs
# Hyperpriors for the stratum-specific random-year-effect precisions (used
# at the bottom of the model via logtauyy[i]).
mulogtauyy ~ dnorm(0.0,2)
taulogtauyy ~ dgamma(2,0.2)
#Tauy ~ dgamma(2,0.2)
# eta multiplies the firstyr indicator -- presumably a first-year observer
# (start-up) effect; confirm against the data-prep code.
eta ~ dnorm( 0.0,0.01)
#STRATA ~ dnorm( 0.0,0.01)
#taustrata ~ dgamma(0.001,0.0001) #<- 1/pow(sdbeta,2)#
#sdstrata <- 1/pow(taustrata,0.5)#~ dunif(0.001,10)
# Single shared observer-effect precision (declared before its prior; order
# of statements is irrelevant in declarative BUGS/JAGS syntax).
sdobs <- 1/pow(tauobs, 0.5)
tauobs ~ dgamma(0.001,0.0001)
#### stratum-level effects ######
for( i in 1 : nstrata )
{
#### observer effects ######
for( o in 1 : nobservers[i] )
{
#obs[i,o] ~ dnorm( 0.0,tauobs[i])
obs[i,o] ~ dnorm(0.0, tauobs)
}
#log(tauobs[i]) <- logtauobs[i]
#logtauobs[i] ~ dnorm(mulogtauobs,taulogtauobs)
#sdobs[i] <- 1 / pow(tauobs, 0.5)
#### end observer effects ######
### stratum-level priors
#strata.p[i] ~ dnorm(0,taustrata)
strata[i] ~ dnorm(0,0.1) #<- STRATA + strata.p[i]
expstrata[i] <- exp(strata[i])
overdisp[i] <- 1 + 1/(expstrata[i]*taunoise)
}# end s strata loop and stratum-level effects
###########COMPUTING GAMs for yeareffects##############
# Following Crainiceanu, C. M., Ruppert, D. & Wand, M. P. (2005). Bayesian Analysis for Penalized Spline Regression Using WinBUGS. Journal of Statistical Softare, 14 (14), 1-24.
# X.basis is data computed in R
tauX~dgamma(1.0E-2,1.0E-4) #alternate prior, original from Cainiceanu et al. second gamma parameter == 0.0001 << (abs(mean(B.X[]))^2)/2, mean(B.X[]) ~ 0.2
#tauX <- 1/pow(sdX,2) # prior on precision of gam hyperparameters
sdX <- 1/(pow(tauX,0.5)) # ~ dunif(0,5)
taubeta ~ dgamma(2,0.2) # prior on precision of gam coefficients(
sdbeta <- 1/(pow(taubeta,0.5))
# Spline coefficients: global mean B.X[k] per knot, plus centered stratum
# deviations beta.X.p[i,k]; X.part holds each knot's contribution per year.
for(k in 1:nknots)
{
# Computation of GAM components
B.X[k] ~ dnorm(0,tauX)
for(i in 1:nstrata)
{
beta.X.p[i,k] ~ dnorm(0,taubeta) #alternative non-centered parameterization
beta.X[i,k] <- B.X[k]+beta.X.p[i,k]
#beta.X[i,k] ~ dnorm(B.X[k],taubeta) T(-10,10) #avoiding log density calculation errors
for ( t in ymin : ymax )
{
X.part[i,k,t] <- beta.X[i,k]*(X.basis[t,k])
}#t
}#i
}#k
# Smooth year effect for stratum i in year t = sum of all knot contributions.
for(i in 1:nstrata)
{
for (t in ymin : ymax )
{
yeareffect[i,t] <- sum(X.part[i,1:nknots,t])
}#t
}#i
#-------------------------------------------------#
#### additional random year effects ######
# for( t in ymin : ymax )
# {
# Muyy[t] ~ dnorm(0,Tauy) #range wide mean year-effect alternative parameterization
# }
# Random year effects yy[i,t]: stratum-specific variances drawn from the
# mulogtauyy/taulogtauyy hyperpriors defined above.
for( i in 1 : nstrata )
{
for( t in ymin : ymax )
{
# yy.p[i,t] ~ dnorm(0,tauyy[i])
# yy[i,t] <- Muyy[t] + yy.p[i,t]
yy[i,t] ~ dnorm(0,tauyy[i])
}
log(tauyy[i]) <- logtauyy[i]
logtauyy[i] ~ dnorm(mulogtauyy,taulogtauyy)
sdyy[i] <- 1/pow(tauyy[i],0.5)
}
#### summary statistics ######
## rescaling factor for t-distribution noise, from Link and Sauer, unpublished
adj <- (1.422*pow(nu,0.906))/(1+(1.422*pow(nu,0.906)))
sdnoise.adj <- sdnoise/adj
sdn_ret <- 0.5*pow(sdnoise.adj,2)
sdobs_ret <- 0.5*pow(sdobs,2)
# Derived annual indices: n includes the random year effects yy, n3 omits
# them (smooth-only trajectory). n2/n4 are the stated alternatives that use
# the sdobs_ret term instead of averaging over observed observer effects --
# presumably a lognormal retransformation; confirm before reporting.
for( i in 1 : nstrata )
{
for( t in ymin : ymax )
{
for(o in 1 : nobservers[i])
{
no[i,t,o] <- exp(strata[i]+yeareffect[i,t] + yy[i,t] + obs[i,o] + sdn_ret)
no3[i,t,o] <- exp(strata[i]+yeareffect[i,t] + obs[i,o] + sdn_ret)
}
mn[i,t] <- mean(no[i,t,(1 : nobservers[i])])
mn3[i,t] <- mean(no3[i,t,(1 : nobservers[i])])
n[i,t] <- nonzeroweight[i]*(mn[i,t])
n3[i,t] <- nonzeroweight[i]*(mn3[i,t])
n2[i,t] <- nonzeroweight[i]*exp(strata[i]+yeareffect[i,t] + yy[i,t] + sdn_ret + sdobs_ret) #n2 is an alternative way of calculating n
n4[i,t] <- nonzeroweight[i]*exp(strata[i]+yeareffect[i,t] + sdn_ret + sdobs_ret) #n4 is an alternative way of calculating n3
}
}
#-------------------------------------------------#
}
|
692403fec12a5ec4cbde80cacc8eabc93c585f78 | 379f840dea3ed2dd096beef70c98eca12de1ecfa | /derived/d_combine_matrices/4_combine_all.R | b583af7eb3c0a5a45eb1a96430ee8da2a645823a | [] | no_license | junukitashepard/homies | bb7efd0a9055f8d1471e3a8c00bc63b8fbe8c6e1 | 9693b456f4b2be229b26fb53afbc7a674ae4287d | refs/heads/master | 2023-01-02T13:01:13.985401 | 2020-10-31T00:21:08 | 2020-10-31T00:21:08 | 285,435,869 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,459 | r | 4_combine_all.R | ######################
# Dissect Eora table #
######################
# NOTE(review): rm(list = ls()) clears the whole session -- acceptable for a
# batch script, destructive if this file is sourced interactively.
rm(list = ls())
# Project root and this pipeline stage's directory.
wd <- "~/global_energy/"
cwd <- "~/global_energy/derived/d_combine_matrices/"
setwd(wd)
# Shared globals and the helper stages -- presumably these define
# year.min/year.max, country.list, eora.version, import_eora, setup.emptymat,
# setup.domestic and setup.trade used below; confirm in those files.
source(paste0(wd, "/derived/0_globals.R"))
source(paste0(cwd, '1_setup_emptymat.R'))
source(paste0(cwd, '2_insert_domestic_submat.R'))
source(paste0(cwd, '3_insert_trade_submat.R'))
# I/O locations on the shared data volume.
input <- paste0("/data/jus3/GlobalIO/raw/EORA/", eora.version)
output <- "/data/jus3/GlobalIO/output/derived"
temp <- "/data/jus3/GlobalIO/temp"
figures <- "/data/jus3/GlobalIO/output/figures"
library('magrittr')
library('dplyr')
library('hiofunctions')
# All console output from here on is redirected to the stage's log file
# (closed by the sink() call at the end of the script).
sink(paste0(cwd, 'out/out.txt'))
######################################################
# Separate and recombine Eora and energy submatrices #
######################################################
# For each year: import Eora, create an empty combined matrix named
# mat.<year>, insert each country's domestic and trade submatrices
# (presumably the setup.* helpers mutate mat.<year> as a side effect --
# confirm in their definitions), then save the result as RDS.
for (year in year.min:year.max) {
print('###############################')
print(paste0('YEAR = ', year))
print('###############################')
import_eora(year = year) # Import Eora matrices
assign(paste0('mat.', year), setup.emptymat()) # Set up empty matrix for given year
for (c in country.list) {
print(paste0('Inserting ', c))
quiet(setup.domestic(country = c, year = year)) # Insert domestic submatrix
quiet(setup.trade(country = c, year = year)) # Insert trade submatrix
}
assign('outmat', get(paste0('mat.', year)))
saveRDS(outmat, file.path(output, paste0('IO_MAT/io_mat_', year, '.rds')))
}
sink() |
f21117c89fdbfd5a04d68e1c985ec8070837f47f | 63a6c9882091d70a53728b0db4228ccb498534ad | /man/summary.gears.Rd | 37906d2b6b23aa58115888a4440d5cac67fb6de2 | [] | no_license | gustavo-argus/gears | 61970d6a6a22b260ea62e3122bb25578b8b847d5 | 4650311862f526102f78fd4d4a2d072110654ad9 | refs/heads/master | 2023-01-07T19:02:32.162729 | 2020-11-03T16:03:02 | 2020-11-03T16:03:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 391 | rd | summary.gears.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_gears.R
\name{summary.gears}
\alias{summary.gears}
\title{Summary method for "gears" class}
\usage{
\method{summary}{gears}(object, ...)
}
\arguments{
\item{object}{An object of class "gears"}
\item{...}{Other arguments passed to or from other methods}
}
\description{
Summary method for "gears" class
}
|
30629f4b1448d7943c95470681cf1c925a68b627 | e3ec859e7d220cfbd53efede83a1e6a34d2e806c | /R/GetListOfThresholds.R | f5850e955fecd87ff6395d44b43426e55e852c4d | [
"MIT"
] | permissive | Rassigny/Rassigny | 639c78e54a6cd0fb178a4b4eb831083f759e5d86 | 03bf79c4adf21d1017bc28a8b490707f743555a5 | refs/heads/master | 2020-07-20T12:29:53.229976 | 2019-10-23T02:00:38 | 2019-10-23T02:00:38 | 206,640,353 | 0 | 1 | MIT | 2019-09-20T22:20:11 | 2019-09-05T19:21:02 | null | UTF-8 | R | false | false | 1,382 | r | GetListOfThresholds.R | # Function that returns a list of thresholds for corresponding assignment.
# Returns the list of matching thresholds for the given assignment.
# If taskID is specified, returns only the threshold for that single task;
# an out-of-range taskID raises an error (logged and re-thrown).
GetListOfThresholds <- function(assignmentID, taskID=NULL) {
  # creating a logger object
  GetListOfThresholdsLogger <- logging::getLogger('GetListOfThresholdsLogger')
  # adding a handler that would log everything to log file
  # the default level for this handler is "INFO (20)"
  logging::addHandler(logging::writeToFile, file=log.config[["logfile_path"]], logger = 'GetListOfThresholdsLogger')
  tryCatch({
    thresholds <- GetInfos(assignmentID, "matchingThreshold")
    if (is.null(taskID)) {
      logging::loginfo("Returned matchingThreshold for assignmentID: %d", assignmentID, logger = 'GetListOfThresholdsLogger')
      return(thresholds)
    } else {
      num.thresholds <- length(thresholds)
      # Give the caller a useful message instead of the original bare stop().
      if (taskID > num.thresholds || taskID <= 0) {
        stop(sprintf("`taskID` must be between 1 and %d but got %s", num.thresholds, taskID))
      }
      logging::loginfo("Returned matchingThreshold for assignmentID: %d and taskID: %d", assignmentID, taskID, logger = 'GetListOfThresholdsLogger')
      return(thresholds[[taskID]])
    }
  }, warning = function(w){
    # BUG FIX: the handler argument was named `W` while the body used `w`,
    # so any warning reaching this handler errored with "object 'w' not found".
    logging::logwarn(' %s ', w, logger = 'GetListOfThresholdsLogger')
    warning(w)
  }, error = function(e){
    logging::logerror(' %s ', e, logger = 'GetListOfThresholdsLogger')
    stop(e)
  })
}
|
25e19f83a5f5fc1985617a658153b043bafcc23f | fb036b826e32f4700c44a912079fa8ba3ba19f60 | /GraphicsPaperCompetition/code/BayesAnalysisW.R | 2eac93b740a111bd3035dc78dd99285c994b2fdc | [] | no_license | srvanderplas/LieFactorSine | d10ad465ff5706fdd7f9b8bc9017cb6464a75e8e | 7934646013864b0283f5259a36b7ffce0c2cc84a | refs/heads/master | 2021-01-19T00:16:29.391362 | 2015-05-21T13:53:50 | 2015-05-21T13:53:50 | 8,753,157 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,640 | r | BayesAnalysisW.R | setwd("/home/susan/Documents/R Projects/LieFactorSine/")
# Restore previously computed analysis objects (the workspace is re-saved by
# save.image() at the bottom of this script), then attach dependencies.
load("./code/BayesAnalysisW.Rdata")
# library() (rather than require()) errors immediately if a package is
# missing instead of silently returning FALSE and failing later.
library(ggplot2)
library(reshape2)
library(plyr)
library(msm)
# Per-trial responses (presumably Mechanical Turk data -- confirm);
# the answer.w column is read by logpost() below.
turkdata <- read.csv("./data/turkdataclean.csv", stringsAsFactors=FALSE)
setwd("./code")
#-----------------------Distribution of W----------------------------------------------------------
# Unnormalized log-posterior (flat-prior log-likelihood) for one (mean, sd)
# grid point `par`: sum of truncated-normal log densities (support [0, 1.4])
# of the observed weightings. dtnorm() comes from the msm package.
# Returns the value of the final assignment (invisibly).
logpost <- function(data, par){
temp <- sum(dtnorm(data$answer.w, mean=par[1], sd=par[2], lower=0, upper=1.4, log=TRUE))
# NOTE(review): `temp` is a scalar here, so max(temp) == temp and this line
# reduces to temp * 0.1 -- it looks written for a vector of log-likelihoods;
# confirm the intended rescaling against the original analysis.
temp <- temp-max(temp)*.9
}
# Normalized posterior over the (mean, sd) grid `pars` for one data subset.
# Returns a data frame with one row per grid point; column `f` holds the
# posterior probability mass (sums to 1 across the grid).
get_posterior_density <- function(data, pars){
# vapply is safer than sapply (guaranteed numeric(1) per element), and
# seq_len() is robust when pars has zero rows (1:nrow(pars) would be c(1, 0)).
temp <- vapply(seq_len(nrow(pars)), function(i) logpost(data, pars[i,]), numeric(1))
temp <- exp(temp)/sum(exp(temp))
data.frame(mean=pars[,1], sd=pars[,2], f=temp)
}
#--------------------Overall Marginals-------------------------------------------------------------------------
# Grid of candidate (mean, sd) values for the truncated-normal model.
pars <- as.matrix(expand.grid(seq(0, 2, .01), seq(.15, .6, .01)))
# Posterior over the grid, computed separately per function type.
overall <- ddply(turkdata, .(test_param), get_posterior_density, pars=pars)
# Marginal over `mean`: drop the sd column, sum f within (test_param, mean),
# then renormalize within each function type.
overall.mean <- ddply(overall[,-3], .(test_param, mean), summarise, f=sum(f))
overall.mean <- ddply(overall.mean, .(test_param), transform, f=f/sum(f))
#' Posterior marginal distribution over individual and std. deviation
qplot(data=overall.mean, x=mean, y=f, geom="line", colour=test_param, group=test_param) +
scale_colour_discrete("Function Type") + xlim(c(0, 1.4)) +
xlab("Mean Preferred Weighting") + ylab("Density") + theme_bw() + theme(legend.position="bottom")
# ggsave("figure/fig-OverallMeansW.pdf", width=4, height=4, units="in")
# Marginal over `sd`: same construction, dropping the mean column instead.
overall.sd <- ddply(overall[,-2], .(test_param, sd), summarise, f=sum(f))
overall.sd <- ddply(overall.sd, .(test_param), transform, f=f/sum(f))
#' Posterior marginal distribution over individual and mean
qplot(data=overall.sd, x=sd, y=f, geom="line", colour=factor(test_param), group=test_param) + theme_bw() + xlab("Posterior SD") + ylab("Density")
#' Posterior joint dist of mean, sd over individuals
#' since stat_density2d won't use weights, ... improvise!
# Weighted resample of grid rows so the unweighted 2d density estimate below
# reflects the posterior mass f.
overall.joint.sample <- sample(1:nrow(overall), size=50000, replace=TRUE, prob=overall$f)
ggplot(data=overall[overall.joint.sample,], aes(x=mean, y=sd)) +
stat_density2d(n=c(75, 40), geom="density2d", aes(colour=test_param)) +
facet_wrap(~test_param) + scale_colour_discrete(guide="none") + theme_bw() +
xlab("Mean Preferred Weighting") + ylab("Std. Deviation")
# ggsave("figure/fig-Joint2dDensityW.pdf", width=4, height=4, units="in")
#--------------------Individual Distribution of Theta ------------------------------------------------------------------
# test <- ddply(turkdata, .(ip.id, test_param), get_posterior_density, pars=pars)
#
# test.mean <- ddply(test, .(ip.id, test_param, mean), summarise, f=sum(f))
# test.mean <- ddply(test.mean, .(ip.id, test_param), transform, f=f/sum(f))
#
# participants <- dcast(ddply(turkdata, .(ip.id, test_param), summarise, n=length(test_param)), ip.id~test_param, value.var="n")
# ipsubset <- subset(participants, rowSums(is.na(participants))==0 & rowSums(participants[,2:4]>6, na.rm=TRUE)==3)$ip.id
#
# par_labeller <- function(var, value){
# n <- sapply(value, function(i) sum(subset(participants, ip.id%in%i)[,2:4]))
# value <- paste("Participant ", as.character(value), "\n(n = ", n, ")", sep="")
# return(value)
# }
#' Plot 4 individuals who did at least 6 figures of each trial
# Small multiples of per-participant posterior mean curves for the subset of
# participants in `ipsubset`. test.mean / ipsubset / par_labeller are defined
# in the commented-out block above and restored from the saved workspace by
# load() at the top of the script -- confirm they exist before plotting.
qplot(data=subset(test.mean, ip.id%in%ipsubset), x=mean, y=f, group=test_param, colour=test_param, geom="line") +
facet_grid(.~ip.id, labeller=par_labeller) + scale_colour_discrete("Function Type") + theme_bw() +
theme(legend.position="bottom") + xlab("Mean Preferred Weighting") + ylab("Density")
# ggsave("figure/fig-IndivMeanAllFcnsW.pdf", width=7, height=3.5)
#' Posterior mean estimates, including CI information for the individual MEAN
#' (i.e. not for any individual observation)
# test.post.indiv<- ddply(test.mean, .(ip.id, test_param),
# function(x){
# ex=sum(x$mean*x$f)
# n=nrow(subset(turkdata, turkdata$ip.id==ip.id[1] & turkdata$test_param==test_param[1]))
# samp <- matrix(sample(x$mean, n*11, prob=x$f, replace=TRUE), ncol=11)
# z <- as.numeric(quantile(rowMeans(samp), c(.025, .5, .975)))
# data.frame(ip.id=unique(x$ip.id), test_param=unique(x$test_param), lb=z[1], mean = ex, median=z[2], ub=z[3], n=n)
# })
#
# overall.mean.f <- ddply(test.mean, .(test_param, mean), summarise, f=sum(f))
# overall.mean.f <- ddply(overall.mean.f, .(test_param), transform, f=f/sum(f))
#
# overall.mean.bounds <- ddply(overall.mean.f, .(test_param), function(x){
# ex=sum(x$mean*x$f)
# n=length(unique(subset(turkdata, turkdata$test_param==test_param)$ip.id))
# samp <- matrix(sample(x$mean, n*11, prob=x$f, replace=TRUE), ncol=11)
# sample.mean = mean(samp)
# sdev = sd(rowMeans(samp))
# lb = as.numeric(quantile(rowMeans(samp), .025))
# med = as.numeric(quantile(rowMeans(samp), .5))
# ub = as.numeric(quantile(rowMeans(samp), .975))
# data.frame(lb=lb, mean=sample.mean, median=med, ub=ub)
# })
#
# test.post.indiv$functions <- c("Exponential", "Inverse", "Sine")[as.numeric(test.post.indiv$test_param)]
# test.post.indiv$functions <- factor(test.post.indiv$functions, levels=c("Sine", "Exponential", "Inverse"))
# overall.mean.bounds$functions <- c("Exponential", "Inverse", "Sine")[as.numeric(factor(overall.mean.bounds$test_param))]
# Interval plot: per-participant credible intervals (lb/ub; presumably 95%,
# i.e. the 0.025/0.975 quantiles per the commented-out computation above)
# with posterior medians as points, and the overall bounds drawn as vertical
# reference lines. Relies on test.post.indiv / overall.mean.bounds restored
# from the saved workspace.
qplot(data=test.post.indiv, x=lb, xend=ub, y=ip.id, yend=ip.id, geom="segment", colour=test_param) +
facet_wrap(~functions) + geom_point(aes(x=median), colour="black") +
geom_vline(data=overall.mean.bounds, aes(xintercept=lb), linetype=3) +
geom_vline(data=overall.mean.bounds, aes(xintercept=median)) +
geom_vline(data=overall.mean.bounds, aes(xintercept=ub), linetype=3) +
ylab("Participant ID") + xlab("Mean Preferred Weighting") + theme_bw() + theme(legend.position="none") +
scale_colour_discrete("Function Type")
# ggsave("figure/fig-CIindivMeanW.pdf", width=6, height=6, units="in")
#' Posterior estimates, including CI information for the individual observations
#' (i.e. not for any individual observation)
# indiv.value.bounds <- ddply(test.mean, .(ip.id, test_param), function(x){
# lb=x$mean[which.min(abs(cumsum(x$f)-.025))]
# med=x$mean[which.min(abs(cumsum(x$f)-.5))]
# ub=x$mean[which.min(abs(cumsum(x$f)-.975))]
# data.frame(lb=lb, median=med, ub=ub)
# })
#
# overall.value.bounds <- ddply(overall.mean.f, .(test_param), function(x){
# xnew <- sample(x$mean, length(x$mean), prob=x$f, replace=TRUE)
# z <- as.numeric(quantile(xnew, c(.025, .5, .975)))
# data.frame(lb=z[1], median=z[2], ub=z[3])
# })
# Posterior Distribution for theta without averaging over individuals
# qplot(data=overall.mean.f, x=mean, y=f, geom="line", colour=test_param) +
# xlab("Psychological Lie Factor\nEstimated Distribution for All Individuals") +
# theme_bw() + theme(legend.position="bottom") + scale_color_discrete("Function Type") +
# ylab("Density")
#
# qplot(data=indiv.value.bounds, x=lb, xend=ub, y=ip.id, yend=ip.id, geom="segment", colour=test_param) +
# facet_wrap(~test_param) + geom_point(aes(x=median), colour="black") +
# geom_vline(data=overall.value.bounds, aes(xintercept=lb), linetype=3) +
# geom_vline(data=overall.value.bounds, aes(xintercept=median)) +
# geom_vline(data=overall.value.bounds, aes(xintercept=ub), linetype=3) +
# ylab("Participant ID") + xlab("Lie Factor") + theme_bw() + theme(legend.position="bottom") +
# scale_colour_discrete("Function Type")
#
#' Plot both individual user and group posterior estimates of preferred weightings.
#' We may need more power/trials for each user in phase 2 if we want to do inference on w directly.
# test.mean.marginal <- ddply(test, .(ip.id, test_param, mean), summarise, f=sum(f))
# test.mean.marginal$f <- unlist(dlply(test.mean.marginal, .(ip.id, test_param), summarise, f=f/sum(f)))
# test.mean.marginal$functions <- c("Exponential", "Inverse", "Sine")[as.numeric(as.factor(test.mean.marginal$test_param))]
# test.mean.marginal$functions <- factor(test.mean.marginal$functions, levels=c("Sine", "Exponential", "Inverse"))
# overall.mean$functions <- c("Exponential", "Inverse", "Sine")[as.numeric(as.factor(overall.mean$test_param))]
# Spaghetti plot: each participant's posterior density as a faint line
# (alpha = .175), with the pooled posterior (overall.mean) overlaid in black.
# test.mean.marginal is defined in the commented-out block above / restored
# from the saved workspace.
ggplot(data=test.mean.marginal, aes(x=mean, y=f, group=ip.id, colour=test_param)) + geom_line(alpha=I(.175)) +
facet_wrap(~functions) + ylab("Density") + xlab("Lie Factor") + theme_bw() + scale_colour_discrete("Function Type") +
theme(legend.position="none") + geom_line(data=overall.mean, aes(x=mean, y=f, group=functions), colour="black") +
guides(colour = guide_legend(override.aes = list(alpha = 1)))
# ggsave("figure/fig-spaghettiIndivDistsW.pdf", width=6, height=6, units="in")
#
# posterior.modes <- ddply(test, .(ip.id, test_param), summarise, theta=mean[which.max(f)])
# qplot(data=posterior.modes, x=theta, geom="density",colour=test_param, fill=test_param, alpha=I(.25)) + ylab("Density") + xlab("Individual Posterior Lie Factor Mode")
#' User's estimates for Sine vs other experiment.
# Per-user comparison of the Sine-experiment estimate against the other two
# experiments: cast wide per function type, then melt the non-Sine columns.
# Columns c(1, 2, 4) are presumably ip.id, test_param, mean per the
# commented-out definition of test.post.indiv above -- confirm.
byuser <- dcast(test.post.indiv[, c(1, 2, 4)], ip.id~test_param)
byuser <- melt(byuser, id.vars=c("ip.id", "Sin"), variable.name="Experiment2", value.name="w")
qplot(data=byuser, x=Sin, y=w, colour=Experiment2, geom="point") +
scale_colour_manual(values=c("red", "blue")) +
geom_smooth(method="lm") + theme_bw() +
ylab("Weight in Exp 2") + xlab("Weight for Sine")
# Re-save the workspace so the plots can be rebuilt without recomputing.
setwd("../")
save.image("./code/BayesAnalysisW.Rdata")
setwd("./code/")
10c5c76b809da6f07b917cbd723a4599881da082 | e09cdbc1259ec0efae301e120dd18744c979b77c | /functions/LoadersFunctions.R | 34bdc552d134ea540a26f9fb08a1ae63dcb23b6d | [] | no_license | Zireae1/16s_study | a06f18d980baf685d43ef14dfb2b8c2d7d7cc25b | a896199e568f880a6a6952d468190b18e3937898 | refs/heads/master | 2021-01-18T01:50:59.487926 | 2016-03-11T09:40:58 | 2016-03-11T09:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,705 | r | LoadersFunctions.R | ########################################################
## Loading functions ##
########################################################
library(futile.logger)
# Load: reads the case and control tables at every taxonomic rank, merges
# each case/control pair into a single matrix, and bundles them with the
# OTU percentages, per-sample metadata, and alpha diversity.
Load <- function(pathway) {
  flog.info("start load table")

  case <- pathway$Case
  ctrl <- pathway$Ctrl

  # Merge one pair of summarized-taxonomy tables (case + control).
  merge_sum_feats <- function(case_tbl, ctrl_tbl) {
    UniteMatrices(read_qiime_sum_feats(case_tbl), read_qiime_sum_feats(ctrl_tbl))
  }

  family  <- merge_sum_feats(case$FamCaseOtuTbl, ctrl$FamCtrlOtuTbl)
  genus   <- merge_sum_feats(case$GenCaseOtuTbl, ctrl$GenCtrlOtuTbl)
  species <- merge_sum_feats(case$SpeCaseOtuTbl, ctrl$SpeCtrlOtuTbl)
  otu <- UniteMatrices(
    read_qiime_otu_table_no_tax(case$OtuCaseTbl),
    read_qiime_otu_table_no_tax(ctrl$OtuCtrlTbl)
  )
  alphaDiv <- UniteMatrices(LoadAlphaDiv(case$AlphaDivCase), LoadAlphaDiv(ctrl$AlphaDivCtrl))

  # OTU counts as per-sample percentages (each row sums to 100).
  otup <- 100 * otu / rowSums(otu)

  # Stack case and control sample metadata.
  metaTable <- rbind(
    read.csv(case$MetaCaseCsv, stringsAsFactors = FALSE),
    read.csv(ctrl$MetaCtrlCsv, stringsAsFactors = FALSE)
  )

  flog.info("finished load table")
  # TODO(Anna): add validation of the loaded tables.
  list(family = family, genus = genus, species = species,
       otu = otu, otup = otup, meta = metaTable, alphaDiv = alphaDiv)
}
# --- end loading case and control ---
##### check rowSums=100%
#rowSums(TotalTable$family)
##### check rownames matching for all tables and metadata
#setdiff(rownames(FamilyCtrl), MetaCtrl$samples_name)
#setdiff(rownames(GenusCtrl), MetaCtrl$samples_name)
#setdiff(rownames(SpeciesCtrl), MetaCtrl$samples_name)
#rownames(TotalTable$family)
|
5ff43f531ddf3e565a6f0688d0507602549649e8 | cf6d6b48a353b9d4807176064c1ba3db0bab3ee0 | /R/install_h2o.R | c4bf896677ee47635e21a2c2f6dd5d495dc963dc | [
"Apache-2.0"
] | permissive | javierluraschi/rsparkling | 05673ee602032dbab77713bd9bb74fc2a13fa660 | 4acb98ce989978718a8d93ddf75d6f9cb065d6bc | refs/heads/master | 2021-01-25T07:07:10.139239 | 2017-01-31T22:39:52 | 2017-01-31T22:39:52 | 80,698,186 | 0 | 0 | null | 2017-02-02T06:02:45 | 2017-02-02T06:02:44 | null | UTF-8 | R | false | false | 3,234 | r | install_h2o.R | #' An easy installation of the H2O R pacakage
#'
#' Removes any previously installed/attached \code{h2o} package, installs any
#' missing packages that H2O depends on, and then installs the requested H2O
#' release from the H2O release repository.
#'
#' @param release_name Object of type character that specifies the release name of the H2O package
#' @param release_number Object of type character that specifies the release number of the H2O package
#' @examples
#' \donttest{
#' #Install the latest release of H2O on 1/30/16 (rel-tverberg-1)
#' install_h2o(release_name = "rel-tverberg", release_number = "1")
#' }
#' @export
install_h2o <- function(release_name = "rel-tverberg", release_number = "1"){
  if (!is.character(release_name)) {
    stop(paste0("`release_name` should be of type character but got ", class(release_name)))
  }
  if (!is.character(release_number)) {
    stop(paste0("`release_number` should be of type character but got ", class(release_number)))
  }

  # Remove any previously installed H2O packages for R.
  if ("package:h2o" %in% search()) { detach("package:h2o", unload = TRUE) }
  if ("h2o" %in% rownames(installed.packages())) { remove.packages("h2o") }

  # Install any missing dependencies of H2O. The original repeated an
  # identical check/install block once per package; a loop over the same
  # package set is equivalent and scans the library only once.
  deps <- c("methods", "statmod", "stats", "graphics", "RCurl", "jsonlite", "tools", "utils")
  missing_deps <- setdiff(deps, rownames(installed.packages()))
  for (pkg in missing_deps) {
    install.packages(pkg)
  }

  # Download and install the requested H2O release for R.
  install.packages("h2o", type = "source",
                   repos = sprintf("http://h2o-release.s3.amazonaws.com/h2o/%s/%s/R",
                                   release_name, release_number))
}
#' Provide integration information for rsparkling
#'
#' @return Returns a data frame containing rsparkling integration information:
#'   one row per supported Sparkling Water release with its matching Spark
#'   line and H2O release details.
#' @export
h2o_release_table <- function() {
  # Releases for the Spark 2.0 line.
  spark_2_0 <- data.frame(
    Spark_Version           = "2.0",
    Sparkling_Water_Version = c("2.0.0", "2.0.1", "2.0.2", "2.0.3"),
    H2O_Version             = c("3.10.0.7", "3.10.0.10", "3.10.0.10", "3.10.1.2"),
    H2O_Version_Name        = c("rel-turing", "rel-turing", "rel-turing", "rel-turnbull"),
    H2O_Version_Number      = c("7", "10", "10", "2")
  )
  # Releases for the Spark 1.6 line.
  spark_1_6 <- data.frame(
    Spark_Version           = "1.6",
    Sparkling_Water_Version = c("1.6.1", "1.6.2", "1.6.3", "1.6.4",
                                "1.6.5", "1.6.6", "1.6.7", "1.6.8"),
    H2O_Version             = c("3.8.1.3", "3.8.1.3", "3.8.2.3", "3.8.2.4",
                                "3.8.2.6", "3.10.0.4", "3.10.0.6", "3.10.0.7"),
    H2O_Version_Name        = c("rel-turan", "rel-turan", "rel-turchin", "rel-turchin",
                                "rel-turchin", "rel-turing", "rel-turing", "rel-turing"),
    H2O_Version_Number      = c("3", "3", "3", "4", "6", "4", "6", "7")
  )
  rbind(spark_2_0, spark_1_6)
}
579a13cc75cacf3cc7edd1d391c4f3b23f550d49 | d8ad56cf1976c5ee086f441bcc1ab82d999f3291 | /R/get.R | a213e2c1e678d5016d656ecb484bd0dc8047863d | [
"MIT"
] | permissive | ataustin/covid19us | 4430efb1bd39a9e07beb51f06bbb6d49d00ed7b6 | c300a30acb64333360d2c85d8cce507396e52084 | refs/heads/master | 2022-04-13T08:58:58.914526 | 2020-04-11T15:25:58 | 2020-04-11T15:25:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,700 | r | get.R | #' Get current counts for every state
#'
#' @return A tibble with one row per state and columns for individuals' COVID statuses (positive, negative, pending, death) and their total.
#' @export
#'
#' @examples
#' \donttest{
#' get_states_current()
#' }
get_states_current <- function() {
  # Fetch the /states endpoint with no query string.
  # NOTE: `get` here is the package's internal request helper defined
  # elsewhere in the package, not base::get.
  get("states")
}
#' Get daily counts for every state
#'
#' Daily counts are updated every day at 4pm EST. This is the only function that takes arguments.
#'
#' @param state State abbreviation for a specific state or all states with \code{"all"}.
#' @param date For a specific date, a character or date vector of length 1 coercible to a date with \code{lubridate::as_date()}.
#'
#' @return A tibble with one row per state for all dates available with columns for individuals' COVID statuses (positive, negative, pending, death) and their total.
#' @export
#'
#' @examples
#' \donttest{
#' get_states_daily()
#'
#' get_states_daily("NY", "2020-03-17")
#' get_states_daily(state = "WA")
#' get_states_daily(date = "2020-03-11")
#' }
get_states_daily <- function(state = "all", date = "all") {
  # Build the query string from whichever filters were supplied; both
  # arguments default to "all", which means no filtering at all.
  if (date != "all") {
    # The API expects dates as integers, so coerce first.
    date %<>%
      date_to_int()
    if (state == "all") {
      # All states, specific date
      q <- glue::glue("?date={date}")
    } else {
      # Specific state and specific date
      q <- glue::glue("?state={state}&date={date}")
    }
  } else if (state != "all") {
    # Specific state, all dates
    q <- glue::glue("?state={state}")
  } else {
    # No filters at all: hit the bare endpoint.
    q <- ""
  }
  get("states/daily", query = q)
}
#' Get COVID-related information for each state
#'
#' @return A tibble with one row per state incluing information on the state's \code{data_site} where the data was pulled from and the \code{covid_19_site} where data is published.
#' @export
#'
#' @examples
#' \donttest{
#' get_states_info()
#' }
get_states_info <- function() {
  # Pull per-state metadata; reorder columns only when rows came back.
  info <- get("states/info")
  if (nrow(info) > 0) {
    # Put the state identifiers first and keep every other column after them.
    info <- info %>%
      select(state, name, everything())
  }
  info
}
#' Get current US counts
#'
#' @return A tibble with one row for the current count of the country's COVID statuses.
#' @export
#'
#' @examples
#' \donttest{
#' get_us_current()
#' }
get_us_current <- function() {
  # Fetch the current country-level counts from the /us endpoint.
  # NOTE: `get` is the package's internal request helper, not base::get.
  get("us")
}
#' Get daily US counts
#'
#' Updated every day at 4pm.
#'
#' @return A tibble with one row per date in which data is available and counts for each of those states.
#' @export
#'
#' @examples
#' \donttest{
#' get_us_daily()
#' }
get_us_daily <- function() {
  # Daily country-level counts; bail out early on an empty response.
  daily <- get("us/daily")
  if (nrow(daily) == 0) {
    return(daily)
  }
  # `states` is the number of states that reported that day, so give it a
  # clearer name; newest dates come first.
  daily %>%
    rename(n_states = states) %>%
    arrange(desc(date))
}
#' Get COVID-related information for certain counties
#'
#' Currently limited to the worst-affected counties in mostly Washington state, California, and New York.
#'
#' @return A tibble with one row per county and their COVID website information.
#' @export
#'
#' @examples
#' \donttest{
#' get_counties_info()
#' }
get_counties_info <- function() {
  # Fetch county-level COVID website information from the /counties endpoint.
  # NOTE: `get` is the package's internal request helper, not base::get.
  get("counties")
}
#' Get URLs and their details for each state
#'
#' @return A tibble with one row for every state, the URL used by scrapers to get data, and a \code{filter} column that provices the xpath or CSS selector used by the \href{https://github.com/COVID19Tracking/covid-tracking}{COVID-19 Tracking Project's scraper} to get this data.
#' @export
#'
#' @examples
#' \donttest{
#' get_tracker_urls()
#' }
get_tracker_urls <- function() {
  # Scraper source URLs per state; return the empty result untouched.
  urls <- get("urls")
  if (nrow(urls) == 0) {
    return(urls)
  }
  # Rename the ambiguous `name` column and keep only the scraper-relevant
  # columns, in a fixed order.
  urls %>%
    rename(state_name = name) %>%
    select(state_name, url, filter, ssl_no_verify, kind, request_datetime)
}
|
f8b00da82e15414cea87cbfdd09ba6bae324b040 | fb916588f8fdcbe08b9e9d057f9657912d55b60d | /bootSSR/R/ssr_methods.R | b3d04dd5fb6275065307162af0bdf3a48d41d49d | [] | no_license | VictorAether/SSR-with-resampling | a559c1ad365c481765563f00b341af4ea073665c | 18f9fd89f808252a9f6748be28463ac23eec1498 | refs/heads/main | 2023-03-18T00:42:58.352516 | 2021-03-18T10:03:16 | 2021-03-18T10:03:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,850 | r | ssr_methods.R | #-------------------------------------
# Observed conditional power approach
#-------------------------------------
recalculateSampleSize_ocp <- function(design, t1, w1, w2, nmax, t1_boot) {
  # Observed conditional power sample-size recalculation, driven by the
  # (possibly bootstrapped, vectorised) interim statistic t1_boot.
  # `design` carries n1 (stage-one size), beta, alpha_0 and alpha_1.
  cp_term <- (qnorm(design$beta) - qnorm(1 - design$alpha_1) *
                sqrt(w1^2 + w2^2) / w2) / t1_boot + w1 / w2
  n <- design$n1 * (1 + cp_term^2)
  # Outside the recalculation area (futility stop below qnorm(1 - alpha_0),
  # efficacy stop at or above qnorm(1 - alpha_1)) the stage-one size is kept.
  outside_zone <- t1_boot < qnorm(1 - design$alpha_0) |
    t1_boot >= qnorm(1 - design$alpha_1)
  n[outside_zone] <- design$n1
  # Cap the recalculated size at the maximal feasible sample size.
  pmin(n, nmax)
}
#------------------------------------------------
# Restricted observed conditional power approach
#------------------------------------------------
recalculateSampleSize_rocp <- function(design, t1, w1, w2, nmax, beta_0, t1_boot) {
  # Restricted OCP: same recalculated size as the OCP approach, but reset to
  # the first-stage size n1 whenever the achievable conditional power falls
  # below 1 - beta_0. `conditionalPower` is defined elsewhere in the package.
  n <- design$n1 * (1 + ((qnorm(design$beta) - qnorm(1 - design$alpha_1) *
                            sqrt(w1^2 + w2^2)/w2)/t1_boot + w1/w2)^2)
  n[t1_boot < qnorm(1 - design$alpha_0) | t1_boot >= qnorm(1 - design$alpha_1)] <- design$n1 # not in recalculation area
  # Cap first, then apply the restriction (the restriction is evaluated on the
  # already-capped n — the order of these two lines matters).
  n[n > nmax] <- nmax
  n[conditionalPower(t1_boot, n, design$n1, design$alpha_1, w1, w2) < 1 - beta_0] <- design$n1 # restriction
  n
}
#-------------------------------
# Promising Zone approach
#-------------------------------
recalculateSampleSize_pz <- function(design, t1, w1, w2, nmax, n_ini, beta_tilde, t1_boot) {
  # Promising-zone approach: recalculate only when the interim statistic falls
  # into the "promising" band; otherwise keep the initially planned n_ini
  # (inside the recalculation area) or the stage-one n1 (outside it).
  n <- design$n1 * (1 + ((qnorm(design$beta) - qnorm(1 - design$alpha_1) *
                            sqrt(w1^2 + w2^2)/w2)/t1_boot + w1/w2)^2)
  # NOTE(review): the zone checks below mix t1 (observed) and t1_boot
  # (bootstrap) — confirm this asymmetry is intentional.
  n[t1 < qnorm(1 - design$alpha_0) | t1 >= qnorm(1 - design$alpha_1)] <- design$n1 # not in recalculation area
  n[(conditionalPower(t1_boot, n_ini, design$n1, design$alpha_1, w1, w2) < 1 - beta_tilde |
       conditionalPower(t1_boot, n_ini, design$n1, design$alpha_1, w1, w2) >= 1 - design$beta) &
      qnorm(1 - design$alpha_0) <= t1 &
      t1_boot < qnorm(1 - design$alpha_1)] <- n_ini # not in promising zone
  # Cap at the maximal feasible sample size.
  n[n > nmax] <- nmax
  n
}
#--------------------------------
# Optimization function approach
#--------------------------------
recalculateSampleSize_of <- function(design, t1, w1, w2, nmax, n_ini, beta_tilde, gamma) {
  # Optimisation-function approach: choose n in [n1, nmax] by maximising the
  # objective `f` (defined elsewhere; balances conditional power against
  # sample-size cost via gamma). No bootstrap argument — t1 is used directly.
  n <- optimize(f, interval = c(design$n1, nmax), t1 = t1, n1 = design$n1,
                alpha_1 = design$alpha_1, w1 = w1, w2 = w2,
                gamma = gamma, n_ini = n_ini,
                maximum = TRUE, tol = 1)$maximum
  # NOTE(review): optimize() returns a scalar, so the vector-style
  # replacements below act on a length-1 value — this only behaves as
  # intended for scalar t1; confirm callers never pass a vector here.
  n[t1 < qnorm(1 - design$alpha_0) | t1 >= qnorm(1 - design$alpha_1)] <- design$n1 # not in recalculation area
  n[(conditionalPower(t1, n_ini, design$n1, design$alpha_1, w1, w2) < 1 - beta_tilde |
       conditionalPower(t1, n_ini, design$n1, design$alpha_1, w1, w2) >= 1 - design$beta) &
      qnorm(1 - design$alpha_0) <= t1 &
      t1 < qnorm(1 - design$alpha_1)] <- n_ini # not in promising zone
  # Cap at the maximal feasible sample size.
  n[n > nmax] <- nmax
  n
}
|
61cc035be18dbfea985544ef56c1a9203523ed8c | 6d8814c8d9b0aed27d626327583c47e434c39fe1 | /task3/ui.R | 6b5b98c1725bc04668a0222668b40756085d8b51 | [] | no_license | mbilychenko/MMP19 | 70d3527cd19f506f25c7b275183a39cedebdf30f | c0a53bbbc6bc8bd5a865875ad693b16a59342658 | refs/heads/master | 2020-04-22T12:32:57.821580 | 2019-07-31T18:50:25 | 2019-07-31T18:50:25 | 170,375,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,151 | r | ui.R |
library(shiny)
shinyUI(navbarPage("Задача №3",
tabPanel("Результат",
# Sidebar
sidebarLayout(
sidebarPanel(
sliderInput("n",
"Кількість фірм:",
min = 10,
max = 200,
value = 100),
sliderInput("alpha0",
"Початкові межі коефіцієнта а0:",
min = 0,
max = 5,
value = c(0.5, 3)),
sliderInput("alpha1",
"Початкові межі коефіцієнта а1:",
min = 0,
max = 2,
value = c(0, 1)),
sliderInput("alpha2",
"Початкові межі коефіцієнта а2:",
min = 0,
max = 2,
value = c(0, 1)),
selectInput("error_function",
"Оберіть метод для обрахунку похибки",
choices = list("Абсолютна/Absolute" = 1, "Квадратична/Quadratic" = 2),
selected = 1),
submitButton("Submit")
),
# Show a plot of the generated distribution
mainPanel(
tags$style(type="text/css",
".shiny-output-error { visibility: hidden;}",
".shiny-output-error:before { visibility: hidden;}"),
h3("Result"),
textOutput("text1"),
h3("Download data"),
downloadButton(outputId = "download_data",
label = "Завантажити дані")
)
)
),
tabPanel("Про модель/задачу",
mainPanel(
withMathJax(includeMarkdown("about.Rmd"))
)
)
)
)
|
71b85feb863334ed93f2160a6e9d9e21402bb499 | aaf2ec8b4e7fbd7bbc8165498e564332fa4b9494 | /full data setup.R | 8d5eda8ec1364e8dd34e8ba8d9a9598a9c878f38 | [] | no_license | tobiasdahlnielsen/P8 | 336f517321de00b32567c7e3b143cedacebea892 | 49ce49d453789dc4c4c9d81451fdb596ab690c4f | refs/heads/master | 2022-10-05T04:10:15.547561 | 2020-06-11T06:08:25 | 2020-06-11T06:08:25 | 260,437,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,593 | r | full data setup.R | library(ghyp);library(tictoc);library(tidyverse);library(magrittr);library(lubridate);library(beepr)
# --- Session setup -----------------------------------------------------------
# NOTE(review): absolute setwd()/load() paths make this script machine-specific.
setwd("~/P8")
load("~/P8/altdata.RData");remove(ACAS,HBAN)
# Ticker symbols are derived from the .db file names in "al data".
# NOTE(review): `list` shadows base::list as a variable for the whole script.
list <- list.files(paste0("al data"))
list <- str_remove(list,c(".db"))
# MTB and ICE are excluded from the analysis (reason not stated here).
list <- list[-which(list=="MTB")];list <- list[-which(list=="ICE")]
# Read the full contents of an SQLite table for one ticker.
#
# Opens the database file "<path><series>.db", pulls the table named after the
# series into memory, and returns the de-duplicated rows.
#
# Args:
#   series: ticker symbol; used both as the file stem and the table name.
#   path:   optional directory prefix for the .db file (default "").
# Returns: the collected table with duplicate rows removed.
getdbdata <- function(series, path = "") {
  con <- dbConnect(RSQLite::SQLite(), dbname = paste0(path, series, ".db"))
  # Fix: close the connection even if tbl()/collect() errors — the previous
  # version only disconnected on the success path and leaked the handle.
  on.exit(dbDisconnect(con), add = TRUE)
  tbl(con, series) %>% collect() %>% unique()
}
# Append a log-return column to a price series:
# logr[t] = log(Price[t]) - log(Price[t-1]), with the first entry set to 0.
logreturns <- function(df) {
  library(magrittr)
  mutate(df, logr = c(0, diff(log(Price))))
}
# Append a simple-return column derived from the log returns:
# simr = exp(logr) - 1.
simreturns <- function(df) {
  library(magrittr)
  mutate(df, simr = exp(logr) - 1)
}
# --- Load every ticker, window it to 2005-2009, build 5-minute panels --------
setwd("~/P8/al data")
stocks <- list()
stocks_5minute <- list()
alllogreturns_5min <- c()
allsimreturns_5min <- c()
# Sample window: first/last timestamps kept for every series.
periode <- c("2005-01-03 09:30:00","2009-12-31 16:00:00")
for (i in 1:length(list)) {
  if (list[[i]]=="SPY") {
    # SPY comes from the already-loaded RData object, not an SQLite file.
    setwd("~/P8")
    stocks[[i]] <- SPY[,1:2];names(stocks[[i]]) <- c("Time","price")
    stocks[[i]] <- stocks[[i]][which(stocks[[i]]$Time == ymd_hms(periode[1])):which(stocks[[i]]$Time == ymd_hms(periode[2])),]
    stocks_5minute[[i]] = stocks[[i]] %>% filter(minute(Time) %% 5 == 0)
    setwd("~/P8/al data")
  }
  else{
    # All other tickers are read from their per-ticker SQLite databases.
    stocks[[i]] <- select(getdbdata(list[[i]]),"utcsec","price")
    stocks[[i]] %<>% mutate(Time = ymd_hms(utcsec), Price = price) %>% select(Time,price)
    stocks[[i]] <- stocks[[i]][which(stocks[[i]]$Time == ymd_hms(periode[1])):which(stocks[[i]]$Time == ymd_hms(periode[2])),]
  }
  # Keep only bars on 5-minute boundaries, then attach log/simple returns.
  # NOTE(review): logreturns() reads a `Price` column while the columns kept
  # here appear to be lower-case `price` — verify the capitalisation.
  stocks_5minute[[i]] = stocks[[i]] %>% filter(minute(Time) %% 5 == 0)
  stocks[[i]] %<>% logreturns %<>% simreturns
  stocks_5minute[[i]] %<>% logreturns %<>% simreturns
  alllogreturns_5min <- cbind(alllogreturns_5min,stocks_5minute[[i]]$logr)
  allsimreturns_5min <- cbind(allsimreturns_5min,stocks_5minute[[i]]$simr)
  #assign(list[[i]],stocks[[i]])
  #assign(paste0(list[[i]],"_5minute"),stocks_5minute[[i]])
}
# Quick visual check of every return series (x-axis taken from one series).
# NOTE(review): stocks_5minute$ACAS relies on names being set further below —
# at this point the list is unnamed, so the x argument is NULL; confirm.
for (j in 1:length(stocks_5minute)) {
  plot(stocks_5minute$ACAS$Time,stocks_5minute[[j]]$simr,type="l",main=list[j])
}
####################################################weights################################################################################
# Rolling-window minimum-risk portfolio backtest.
#
# For each risk measure in `risk`, repeatedly fits a multivariate NIG
# distribution (ghyp::fit.NIGmv) on a `window`-day history of log returns
# (O = observations per day), optimises minimum-risk weights, and applies
# them to the following day's simple returns.
#
# Returns list(portfolio returns, weights, risk values), each a list keyed
# by risk measure.
weightreturns <- function(allsimreturns,alllogreturns,window,O,risk=c("expected.shortfall","value.at.risk","sd")){
  portfolioreturns <- list()
  portweights <- list()
  portrisk <- list()
  for (j in 1:length(risk)) {
    for (i in 0:(dim(allsimreturns)[1]/O-(window+1))) {
      # Progress indicator: risk-measure index and window index.
      print(paste0(j," ",i))
      if (i==0) {
        # First window: fit/optimise once and initialise the running objects.
        # NOTE(review): `1:O*window` parses as (1:O)*window, i.e. every
        # window-th of the first O*window rows — 1:(O*window) was probably
        # intended; confirm before reuse.
        dist <- fit.NIGmv(alllogreturns[1:O*window,],silent=TRUE)
        optvalues <- portfolio.optimize(dist,risk.measure = risk[j],type = "minimum.risk",distr = "return",silent = TRUE)
        portreturns <- allsimreturns[(O*window+1):(O*(window+1)),] %*% optvalues$opt.weights
        allweights <- optvalues$opt.weights
        old.weights <- optvalues$opt.weights
        allrisks <- optvalues$risk
        old.risk <- optvalues$risk
      }
      else {
        # Later windows: refit; if the fit did not converge or the optimiser
        # fails, carry the previous weights (and risk) forward.
        dist <- fit.NIGmv(alllogreturns[(O*i+1):(O*(window+i)),],silent=TRUE)
        if (dist@converged==FALSE) {
          new.weights <- old.weights
        }
        else {
          optvalues <-try(portfolio.optimize(dist,risk.measure = risk[j],type = "minimum.risk",distr = "return",silent = TRUE),silent=TRUE)
          if (class(optvalues)!="try-error") {
            old.weights <- optvalues$opt.weights
            new.weights <- optvalues$opt.weights
            old.risk <- optvalues$risk
            new.risk <- optvalues$risk
          }
          if (class(optvalues)=="try-error") {
            new.weights <- old.weights
            new.risk <- old.risk
          }
        }
        # NOTE(review): if the very first refit hits the converged==FALSE
        # branch, new.risk is used here before ever being assigned — verify.
        portreturns <- c(portreturns, allsimreturns[(O*(window+i)+1):(O*(window+i +1)),] %*% new.weights)
        allweights <- rbind(allweights,new.weights)
        allrisks <- c(allrisks,new.risk)
      }
    }
    portfolioreturns[[risk[j]]] <- portreturns
    portweights[[risk[j]]] <- allweights
    portrisk [[risk[j]]] <- allrisks
  }
  output <- list(portfolioreturns,portweights,portrisk)
  # NOTE(review): this print sits outside the j-loop, so it only ever
  # reports the last risk measure.
  print(paste0("finished ",risk[j]))
  return(output)
}
# --- Run the backtests -------------------------------------------------------
# O = number of intraday observations per trading day (5-minute bars);
# window = estimation window length in trading days.
O = which(stocks_5minute[[1]][["Time"]] == stocks_5minute[[1]][["Time"]][1] + days(1)) - 1
window <- 21
tic()
# Three runs on different data sets; the load() calls pull previously saved
# return matrices into the workspace before each run.
load("~/P8/3stockreturns.RData")
a <- weightreturns(allsimreturns_5min,alllogreturns_5min,window,O,risk=c("expected.shortfall","sd"))
load("~/P8/alldataportreturns.RData")
b <- weightreturns(allsimreturns_5min,alllogreturns_5min,window,O,risk=c("expected.shortfall","sd"))
load("~/P8/adjportreturns.RData")
# NOTE(review): assigning to `c` is legal (function lookup still finds
# base::c) but easy to misread — consider renaming.
c <- weightreturns(adjsimreturns_5min,adjlogreturns_5min,window,O,risk=c("expected.shortfall","sd"))
toc()
beep()
# Label the weight matrices and stock lists with the ticker symbols.
colnames(b[[2]]$expected.shortfall) <- unlist(list)
colnames(b[[2]]$sd) <- unlist(list)
names(stocks) <- unlist(list)
names(stocks_5minute) <- unlist(list)
# Per-asset weight paths; the horizontal line marks the equal weight 1/34.
for (i in 1:length(list)) {
  plot(b[[2]]$expected.shortfall[,i],type="l",ylim=c(-0.1,0.1),main=list[i])
  #lines(b[[2]]$value.at.risk[,i],col="blue")
  lines(b[[2]]$sd[,i],col="red")
  abline(1/34,0)
}
# Cumulative portfolio return: ES-optimal (black) vs minimum-variance (red).
plot(exp(cumsum(log(b[[1]]$expected.shortfall+1)))-1,type="l")
lines(exp(cumsum(log(b[[1]]$sd+1)))-1,col="red")
############################################split adjusted#########################################
# --- Detect and correct un-adjusted stock splits -----------------------------
splitstocks <- c()
# Flag tickers with implausible single-bar returns (>+200% or <-40%),
# symptomatic of price series that were not split-adjusted.
for (j in 1:length(stocks_5minute)) {
  if (max(stocks_5minute[[j]]$simr)>2) {
    splitstocks[j] <- list[j]
  }
  if (min(stocks_5minute[[j]]$simr)<(-0.4)) {
    splitstocks[j] <- list[j]
  }
}
# Manual corrections to the automatically detected set.
splitstocks[3] <- "AET";splitstocks <- splitstocks[-which(splitstocks=="STT")]
splitstocks <- splitstocks[-which(is.na(splitstocks))]
# Split timestamps and factors, aligned positionally with `splitstocks`.
splits <- c("2005-03-14 09:30:00","2006-02-21 09:30:00",
            "2009-07-01 09:30:00",
            "2006-04-19 09:30:00",
            "2005-01-18 09:30:00",
            "2006-10-25 09:30:00",
            "2007-06-22 09:30:00",
            "2008-01-02 09:30:00",
            "2006-06-26 09:30:00",
            "2005-05-31 09:30:00",
            "2006-08-14 09:30:00")
splitsf <- c(1/2,1/2,
             20,
             1/2,
             1/2,
             100/207,
             1/2,
             44/100,
             1/2,
             1/2,
             1/2
)
adjstocks <- stocks
# Rescale every price before each split date by the split factor (column 2
# is the price column).
for (i in 1:length(splitstocks)) {
  adjstocks[[splitstocks[[i]]]][1:(which(adjstocks[[splitstocks[[i]]]]$Time == ymd_hms(splits[i]))-1),2] <-
    adjstocks[[splitstocks[[i]]]][1:(which(adjstocks[[splitstocks[[i]]]]$Time == ymd_hms(splits[i]))-1),2]*splitsf[i]
}
# NOTE(review): hard-coded positional drop — confirm element 35 is still the
# intended series if the ticker list ever changes.
adjstocks[[35]] <- NULL
adjlogreturns_5min <- c()
adjsimreturns_5min <- c()
# Rebuild the 5-minute return matrices from the split-adjusted prices.
for (i in 1:length(adjstocks)) {
  adjstocks[[i]] = adjstocks[[i]] %>% filter(minute(Time) %% 5 == 0)
  adjstocks[[i]] %<>% logreturns %<>% simreturns
  adjlogreturns_5min <- cbind(adjlogreturns_5min,adjstocks[[i]]$logr)
  adjsimreturns_5min <- cbind(adjsimreturns_5min,adjstocks[[i]]$simr)
}
tic()
d <- weightreturns(adjsimreturns_5min,adjlogreturns_5min,window,O,risk = "expected.shortfall")
toc()
beep()
# Side-by-side weight paths: adjusted run (`c`) vs unadjusted run (`b`).
for (i in 1:length(list)) {
  plot(c[[2]]$expected.shortfall[,i],type="l",ylim=c(-0.1,0.1),main=list[i])
  #lines(b[[2]]$value.at.risk[,i],col="blue")
  #lines(c[[2]]$sd[,i],col="red")
  abline(1/34,0)
  plot(b[[2]]$expected.shortfall[,i],type="l",ylim=c(-0.1,0.1),main=list[i])
  #lines(b[[2]]$value.at.risk[,i],col="blue")
  #lines(b[[2]]$sd[,i],col="red")
  abline(1/34,0)
}
plot(exp(cumsum(log(c[[1]]$expected.shortfall+1)))-1,type="l")
lines(exp(cumsum(log(c[[1]]$sd+1)))-1,col="red")
# --- Yearly performance table: single stocks vs optimised portfolios ---------
# Rows: ACAS, HBAN, SPY, minimum-variance, CVaR; columns: 2005..2009 + total.
# NOTE(review): ACAS_5minute / HBAN_5minute / SPY_5minute depend on the
# commented-out assign() calls in the loading loop above — confirm they exist.
yearlyreturns <- matrix(0,ncol=6,nrow=5)
for (i in 2005:2009) {
  Index <- which(year(ACAS_5minute$Time[(O*window+1):length(SPY_5minute$logr)])==i)
  yearlyreturns[1,i-2004] <- exp(sum(ACAS_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[2,i-2004] <- exp(sum(HBAN_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[3,i-2004] <- exp(sum(SPY_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[4,i-2004] <- exp(sum(log(c[[1]][["sd"]][Index]+1),na.rm=TRUE))-1
  yearlyreturns[5,i-2004] <- exp(sum(log(c[[1]][["expected.shortfall"]][Index]+1),na.rm=TRUE))-1
}
# Column 6: cumulative return over the full sample.
yearlyreturns[1,6] <- exp(sum(ACAS_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[2,6] <- exp(sum(HBAN_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[3,6] <- exp(sum(SPY_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[4,6] <- exp(sum(log(c[[1]][["sd"]]+1),na.rm=TRUE))-1
yearlyreturns[5,6] <- exp(sum(log(c[[1]][["expected.shortfall"]]+1),na.rm=TRUE))-1
# NOTE(review): the matrix has 6 columns but 5 names are assigned, and the
# names describe the ROWS — rownames() with matching length was probably
# intended; as written this line should error.
colnames(yearlyreturns) <- c("ACAS","HBAN","SPY","MVP","CVAR")
library(xtable)
# Repeat the exercise on a random 10-asset subset of the adjusted data.
colnames(adjsimreturns_5min) <- colnames(adjlogreturns_5min) <- list
# NOTE(review): sample() without set.seed() makes this selection
# irreproducible between runs.
selected <- sort(sample(1:34,10))
tic()
d <- weightreturns(adjsimreturns_5min[,selected],adjlogreturns_5min[,selected],window,O,risk = c("expected.shortfall","sd"))
toc()
beep()
yearlyreturns <- matrix(0,ncol=6,nrow=5)
for (i in 2005:2009) {
  Index <- which(year(ACAS_5minute$Time[(O*window+1):length(SPY_5minute$logr)])==i)
  yearlyreturns[1,i-2004] <- exp(sum(ACAS_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[2,i-2004] <- exp(sum(HBAN_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[3,i-2004] <- exp(sum(SPY_5minute$logr[O*window+1+Index],na.rm=TRUE))-1
  yearlyreturns[4,i-2004] <- exp(sum(log(c[[1]][["sd"]][Index]+1),na.rm=TRUE))-1
  yearlyreturns[5,i-2004] <- exp(sum(log(c[[1]][["expected.shortfall"]][Index]+1),na.rm=TRUE))-1
}
yearlyreturns[1,6] <- exp(sum(ACAS_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[2,6] <- exp(sum(HBAN_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[3,6] <- exp(sum(SPY_5minute$logr[(O*window+1):length(SPY_5minute$logr)],na.rm=TRUE))-1
yearlyreturns[4,6] <- exp(sum(log(d[[1]][["sd"]]+1),na.rm=TRUE))-1
yearlyreturns[5,6] <- exp(sum(log(d[[1]][["expected.shortfall"]]+1),na.rm=TRUE))-1
# Price paths of the selected tickers, then cumulative subset-portfolio returns.
for (i in 1:length(selected)) {
  #plot(d[[2]]$expected.shortfall[,i],type="l",main=list[selected[i]])
  #lines(b[[2]]$value.at.risk[,i],col="blue")
  #lines(c[[2]]$sd[,i],col="red")
  plot(stocks_5minute[[list[selected[i]]]]$price,type="l",main=list[selected[i]])
}
plot(exp(cumsum(log(d[[1]]$expected.shortfall+1)))-1,type="l")
lines(exp(cumsum(log(d[[1]]$sd+1)))-1,col="red")
|
8fa77729efd0a0b3f31203690c721f5465f2cee9 | c4545e79e1d43501204b60fd92555dc7428d6b79 | /ui.R | 89565946cfe32caef6270f9148b09d38cf2c1c53 | [] | no_license | hemanshu9s/capston_project | fd6abc1abd0020f1244ad79d5fc456e71f28b0db | 0f1826dbcc9be523c1da996cfc128f0647d7fbc0 | refs/heads/master | 2021-05-01T15:45:29.060554 | 2018-02-07T20:42:57 | 2018-02-07T20:42:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 948 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
  # Application title
  titlePanel(" Nature Language Processing"),
  # Sidebar with a slider input for number of bins
  # (template comment — the panel actually takes a free-text phrase and a
  # submit button; the server predicts the next word from three samples)
  sidebarLayout(
    sidebarPanel(
      textInput("txt", label = h3("Input"), value = ""),
      h6(em("dont enter stopwords, Punctuation, number")),
      submitButton("OK")
    ),
    # One predicted next word per trained sample, rendered by server.R.
    mainPanel(
      h3("using first sample:"),
      verbatimTextOutput("sample1_next"),
      h3("using second sample:"),
      verbatimTextOutput("sample2_next"),
      h3("using third sample:"),
      verbatimTextOutput("sample3_next")
    )
  )
))
|
312a37337576e7bc639089439271795d4a6c3d83 | 3044a8d0f80b0cc8d94815de99b716c570addf3a | /final.R | 2b653757502b2f96f621f0e31fc14055013ce38f | [] | no_license | agobeljic1/Employee-HR-attrition-IBM-dataset- | 100c651d123d4763eb9099108709f4675ab4a075 | d5aff8652f17a8c7819d48a6d51f6a4a1a8dc8dd | refs/heads/master | 2020-11-26T12:12:53.736232 | 2018-12-24T14:04:30 | 2018-12-24T14:04:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,048 | r | final.R |
library(Amelia)
library(ROCR)
# --- Load the IBM HR attrition data and inspect it ---------------------------
# Blank and " " cells are treated as missing on read.
HRA1 <- read.csv("D:/imarticus/Project/Attrition.csv",na.strings=c(""," ","NA"))
View(HRA1)
colSums(is.na(HRA1))
summary(HRA1)
names(HRA1)
# NOTE(review): HRA does not exist yet (it is created below as HRA <- HRA1),
# so this line errors on a fresh session — dim(HRA1) was probably intended.
dim(HRA)
library(ggplot2)
# Class balance of the target variable, as percentages.
ggplot(HRA1, aes(x=Attrition)) + ggtitle("Attrition") + xlab("Attrition") +
  geom_bar(aes(y = 100*(..count..)/sum(..count..)), width = 0.5) + ylab("Count") + coord_flip() + theme_minimal()
summary(HRA1)
# Ordinal codes are really categories — convert them to factors.
HRA1$Education <- as.factor(HRA1$Education)
HRA1$JobLevel <- as.factor(HRA1$JobLevel)
HRA1$PerformanceRating <- as.factor(HRA1$PerformanceRating)
HRA1$StockOptionLevel <- as.factor(HRA1$StockOptionLevel)
HRA <-HRA1
#________________________________SMOTE_______________________________
#Data Imbalanced Smote function
# Rebalance the minority "Yes" class with SMOTE oversampling.
Classcount = table(HRA1$Attrition)
# Over Sampling
over = ( (0.6 * max(Classcount)) - min(Classcount) ) / min(Classcount)
# Under Sampling
under = (0.4 * max(Classcount)) / (min(Classcount) * over)
over = round(over, 1) * 100
under = round(under, 1) * 100
# NOTE(review): the computed `over`/`under` rates are never passed to SMOTE
# below — the call hard-codes perc.over = 210 and perc.under = 100; confirm
# which was intended.
#Generate the balanced data set
library(DMwR)
HRA = SMOTE(Attrition~., HRA1, perc.over = 210, k = 5, perc.under = 100)
View(HRA)
table(HRA$Attrition)
table(HRA1$Attrition)
# let check the output of the Balancing
library(ggplot2)
ggplot(HRA, aes(x=Attrition)) + ggtitle("Attrition") + xlab("Attrition") +
  geom_bar(aes(y = 100*(..count..)/sum(..count..)), width = 0.5) + ylab("Count") + coord_flip() + theme_minimal()
summary(HRA)
#_____________________EDA____________________________________________________________
#converting numeric data into as.factors
#removing insignificant varaibles
# Constant / identifier columns carry no signal, so drop them.
HRA$EmployeeNumber<- NULL
HRA$StandardHours <- NULL
HRA$Over18 <- NULL
HRA$EmployeeCount <- NULL
summary(HRA)
#bivariate analysis
library(corrplot)
# Correlations across all columns after integer-coercing factors.
corrplot(cor(sapply(HRA,as.integer)),method = "pie")
#Feature Engineering
#adding all work related ratings
# Composite satisfaction score: sum of the five work-related ratings.
# NOTE(review): "TotlaSatisfaction" is a typo kept for compatibility with the
# rest of the analysis — rename consistently if refactoring.
HRA$TotlaSatisfaction <-
  as.numeric(HRA$EnvironmentSatisfaction)+
  as.numeric(HRA$JobInvolvement)+
  as.numeric(HRA$JobSatisfaction)+
  as.numeric(HRA$RelationshipSatisfaction)+
  as.numeric(HRA$WorkLifeBalance)
View(HRA)
# Drop the five components now folded into the composite (by position).
HRA <- HRA[,-c(9,12,15,23,27)]
# Bucket Age into three groups.
HRA$AgeGroup <- as.factor(
  ifelse(HRA$Age<=24,"Young", ifelse(
    HRA$Age<=54,"Middle-Age","Adult"
  ))
)
table(HRA$AgeGroup)
# Drop the raw Age column (first position) now that AgeGroup exists.
HRA <- HRA[,-c(1)]
View(HRA)
summary(HRA)
# Income split at the overall mean (ave() with no grouping = global mean).
HRA$Incomelevel <- as.factor(
  ifelse(HRA$MonthlyIncome<ave(HRA$MonthlyIncome),"Low","High")
)
table(HRA$Incomelevel)
# --- Exploratory plots, part 1: distributions, pay, department ---------------
# (scale_fill_canva/theme_few come from ggthemes; fct_recode from forcats)
library(magrittr)
library(ggplot2)
library(knitr)
library(ggthemes)
library(dplyr)
library(forcats)
# Correlation of the purely numeric columns.
numeric=HRA %>% dplyr::select(Age,DailyRate,DistanceFromHome,HourlyRate,MonthlyIncome,MonthlyRate,NumCompaniesWorked,PercentSalaryHike,YearsAtCompany,YearsInCurrentRole,YearsSinceLastPromotion,YearsWithCurrManager,TotalWorkingYears,TrainingTimesLastYear,StockOptionLevel)
corrplot(cor(numeric),method="circle",type="upper")
## Distribution of Age
ggplot(HRA,aes(Age))+geom_histogram(binwidth=5,aes(y=..count..),fill="green4")+theme(legend.position="none",plot.title = element_text(hjust=0.5,size=15))+labs(x="Age",y="Count",title="Distribution of Age")
#Age Distribution of people who leave
ggplot(HRA, aes(Age))+geom_histogram(binwidth=5,aes(y=round(((..count..)/sum(..count..))*100,2)),fill="red")+theme_few()+theme(legend.position="none",plot.title = element_text(hjust=0.5,size=15))+labs(x="Age",y="Percentage",title="Age distribution of people who leave")+scale_y_continuous(limits=c(0,30),breaks=seq(0,30,5))+scale_x_continuous(limits=c(15,60),breaks=seq(15,60,5))
#Age Distribution of people who stay
ggplot(HRA,aes(Age))+geom_histogram(binwidth=5,aes(y=round(((..count..)/sum(..count..))*100,2)),fill="green4")+theme_few()+theme(legend.position="none",plot.title = element_text(hjust=0.5,size=15))+labs(x="Age",y="Percentage",title="Age distribution of people who Stay")+scale_y_continuous(limits=c(0,30),breaks=seq(0,30,5))+scale_x_continuous(limits=c(15,60),breaks=seq(15,60,5))
#salary with gender
ggplot(HRA,aes(Gender,MonthlyIncome,fill=Gender))+geom_boxplot()+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=10))+labs(x="Gender",y="Salary",title="Salary with Gender")+scale_fill_canva(palette="Neon and bold")+coord_flip()
#Attrition count with gender
ggplot(HRA,aes(MaritalStatus,..count..,fill=Attrition))+geom_bar(position=position_dodge())+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16))+labs(title="Attrition Count Vs Marital Status")
#Attrition wrt change in Age income and MartialStatus
ggplot(HRA,aes(Age,MonthlyIncome,size=Age,col=factor(Attrition)))+geom_point(alpha=0.3)+theme_minimal()+facet_wrap(~MaritalStatus)+labs(x="Age",y="MonthlyIncome",title="Attrition Level Comparision ",subtitle="How attrition is observed with change in Age,Income and MaritalStatus",col="Attrition")+theme(legend.position="bottom",plot.title=element_text(size=16,hjust=0.5),plot.subtitle = element_text(size=10))+scale_color_brewer(palette="Set2")
#Attrition vs department
ggplot(HRA,aes(x=Department,group=Attrition))+geom_bar(aes(y=..prop..,fill=factor(..x..)),stat="count")+facet_grid(~Attrition)+theme(axis.text.x=element_text(angle=90,vjust=0.5),legend.position="none",plot.title=element_text(size=16,hjust=0.5))+labs(x="Department",y="Percentage",title="Attrition % Vs Department")+ geom_text(aes(label = scales::percent(..prop..), y = ..prop.. ),stat= "count",vjust =-.5)
#department vs attrition %
ggplot(HRA,aes(x=Attrition,group=Department))+geom_bar(aes(y=..prop..,fill=factor(..x..)),stat="count")+facet_grid(~Department)+theme(axis.text.x=element_text(angle=90,vjust=0.5),legend.position="none",plot.title=element_text(size=16,hjust=0.5))+labs(x="Attrition",y="Percentage",title="Department Vs Attrition %")+ geom_text(aes(label = scales::percent(..prop..), y = ..prop.. ),stat= "count",vjust =-.5)
## Attrition Vs Distance From Home:
ggplot(HRA,aes(DistanceFromHome,fill=Attrition))+geom_density(alpha=0.5)+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16))+labs(x="Distance from Home",title="Attrition Vs Distance From Home")+scale_fill_canva(palette="Bold feature colors")
## Attrition Vs Business Travel
ggplot(HRA,aes(BusinessTravel,fill=Attrition))+geom_bar(stat="count",aes(y=..count..),position=position_dodge())+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16),axis.text.x = element_text(angle=90))+labs(x="Travel Frequency",y="Count",title="Attrition Vs Business Travel")
## Attrition Vs Payrates
g1=ggplot(HRA,aes(Attrition,DailyRate,fill=Attrition))+geom_boxplot()+theme_few()+theme(plot.title=element_text(hjust=0.5),legend.position="bottom")+scale_y_continuous(limits=c(100,1500),breaks=seq(100,1500,100))+coord_flip()+labs(title="Attrition Vs Daily Wages")
g1
g2=ggplot(HRA,aes(Attrition,MonthlyIncome,fill=Attrition))+geom_boxplot()+theme_few()+theme(plot.title=element_text(hjust=0.5),legend.position="bottom")+coord_flip()+labs(title="Attrition Vs Monthly Income")
g2
g3=ggplot(HRA,aes(Attrition,HourlyRate,fill=Attrition))+geom_boxplot()+theme_few()+theme(plot.title=element_text(hjust=0.5),legend.position="bottom")+coord_flip()+labs(title="Attrition Vs Hourly Wages")
g3
### Percentage of salary hike
ggplot(HRA,aes(PercentSalaryHike,..count..,fill=Attrition))+geom_histogram(binwidth=5)+theme_few()+theme(plot.title=element_text(hjust=0.5),legend.position="none")+labs(title="Histogram of SalaryHike")+scale_y_continuous(limits=c(0,1500),breaks=seq(0,1500,150))
#percentage of hike vs Years of experience
ggplot(HRA,aes(TotalWorkingYears,PercentSalaryHike,col=factor(Attrition),size=PercentSalaryHike))+geom_point(alpha=0.6)+theme(legend.position="bottom",plot.title = element_text(size=15,hjust=0.5))+labs(title="Percentage of Hike Vs Years of Experience",col="Attrition")
# Years at company VS Percentage of hike
ggplot(HRA,aes(YearsAtCompany,PercentSalaryHike,size=PercentSalaryHike))+geom_point(color="purple",alpha=0.5)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=16))+labs(title="Years at Company Vs Percentage of Hike")
### Which role is paid more?
temp=HRA %>% group_by(JobRole) %>% summarise(salary=median(MonthlyIncome)) %>% arrange(desc(salary))
ggplot(temp,aes(factor(JobRole,levels=JobRole),salary))+geom_bar(stat="identity",fill="gold4")+coord_polar()+labs(x="Job Role",y="Median Salary",title="Who gets more??")+theme_few()+theme(axis.text.x=element_text(vjust=300),plot.title=element_text(hjust=0.5,size=16),axis.text.y=element_blank())
#Attrition by job role
ggplot(HRA,aes(x=reorder(JobRole,Attrition),y=Attrition)) + geom_bar(stat='identity',alpha=0.5,fill="red") + theme_fivethirtyeight()+coord_flip()+theme(axis.text.x=element_text(angle=0,vjust=0.5),legend.position='bottom',plot.title = element_text(size=12)) +labs(title="Attrition Rate by Job Role")
## Education,EducationField:
temp= HRA %>% mutate(Education=factor(Education)) %>% mutate(Education=fct_recode(Education,'Below College'='1','College'='2','Bachelor'='3','Master'='4','Doctor'='5'))
ggplot(temp,aes(Education,fill=Attrition))+geom_bar(stat="count",aes(y=..count..),position=position_dodge())+theme_few()+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16),axis.text.x = element_text(angle=90))+labs(x="Education Level",y="Count",title="Trend of Attrition with Education Level")+scale_fill_canva(palette="Golden afternoon")
# Education levels and field of eduaction
ggplot(temp,aes(Education,fill=Attrition))+geom_bar(stat="count",aes(y=..count..),position=position_dodge())+theme_few()+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16),axis.text.x = element_text(angle=90))+labs(x="Education Level",y="Count",title="Education levels and field of education")+scale_fill_canva(palette="Unique and striking")+facet_grid(~EducationField)
##Education Vs Satisfaction Levels Vs Attrition:
temp %>% mutate(JobSatisfaction=factor(JobSatisfaction)) %>% mutate(JobSatisfaction=fct_recode(JobSatisfaction,"Low"="1","Medium"="2","High"="3","Very High"="4")) %>% ggplot(aes(Education,fill=JobSatisfaction))+geom_bar(stat="count",position = position_dodge())+theme_few()+facet_wrap(~Attrition)+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16),axis.text.x = element_text(angle=90,hjust=0.5))+labs(x="Education",y="Satisfaction Count",title="Comparing attrition with Education")
## Number of companies worked:
# Count employees per (Attrition, NumCompaniesWorked) combination, then plot.
temp = HRA %>% group_by(Attrition,NumCompaniesWorked) %>% tally(sort=TRUE)
ggplot(temp,aes(NumCompaniesWorked,n,fill=Attrition,label=n))+geom_bar(stat="identity",position=position_dodge())+theme_few()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5,size=16))+labs(x="Number of Companies",y="Count",title="Number of Companies worked")+coord_cartesian(xlim=c(0,9))+scale_x_continuous(breaks=seq(0,9,1))
# Boxplot of total working years by attrition status.
g1=ggplot(HRA,aes(Attrition,TotalWorkingYears,fill=Attrition))+geom_boxplot()+theme(legend.position="bottom",plot.title=element_text(hjust=0.5))+labs(x="Attrition",y="Years of Experience",title="Attrition trend with number of years of experience")+coord_flip()
g1
# Experience histograms, one per attrition group (5-year bins).
g2=HRA %>% filter(Attrition=="Yes") %>% ggplot(aes(TotalWorkingYears,..count..))+geom_histogram(binwidth=5,alpha=0.8,fill="#575da9")+labs(x="Years of Experience",y="Count",title="Histogram of Years of experience",subtitle="Attrition=Yes")+theme_few()+theme(plot.title=element_text(hjust=0.5),plot.subtitle=element_text(hjust=0.3))
g2
g3=HRA %>% filter(Attrition=="No") %>% ggplot(aes(TotalWorkingYears,..count..))+geom_histogram(binwidth=5,alpha=0.8,fill="#336b87")+labs(x="Years of Experience",y="Count",title="Histogram of Years of experience",subtitle="Attrition=No")+theme_few()+theme(plot.title=element_text(hjust=0.5),plot.subtitle=element_text(hjust=0.3))
g3
# Attrition Vs Categorical Variables:
# Each section below follows the same pattern: recode a 1-4 ordinal code to
# labels, then plot within-group proportions faceted by attrition, with
# percentage labels on the bars.
#job involvement vs attrition rates
temp = HRA %>% mutate(JobInvolvement=factor(JobInvolvement)) %>% mutate(JobInvolvement=fct_recode(JobInvolvement,"Low"="1","Medium"="2","High"="3","Very High"="4"))
ggplot(temp,aes(x=JobInvolvement,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="Job Involvement",y="Percentage",title="Job Involvement Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Job Satisfaction
temp = HRA %>% mutate(JobSatisfaction=factor(JobSatisfaction)) %>% mutate(JobSatisfaction=fct_recode(JobSatisfaction,"Low"="1","Medium"="2","High"="3","Very High"="4"))
ggplot(temp,aes(x=JobSatisfaction,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="Job Satisfaction",y="Percentage",title="Job Satisfaction Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Performance Rating:
temp= HRA %>% mutate(PerformanceRating=factor(PerformanceRating)) %>% mutate(PerformanceRating=fct_recode(PerformanceRating,"Low"="1","Good"="2","Excellent"="3","Outstanding"="4"))
ggplot(temp,aes(x=PerformanceRating,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="PerformanceRating",y="Percentage",title="Performance Rating Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Relationship Satisfaction:
temp= HRA %>% mutate(RelationshipSatisfaction=factor(RelationshipSatisfaction)) %>% mutate(RelationshipSatisfaction=fct_recode(RelationshipSatisfaction,"Low"="1","Medium"="2","High"="3","Very High"="4"))
ggplot(temp,aes(x=RelationshipSatisfaction,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="RelationshipSatisfaction",y="Percentage",title="RelationshipSatisfaction Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Worklife balance:
temp= HRA %>% mutate(WorkLifeBalance=factor(WorkLifeBalance)) %>% mutate(WorkLifeBalance=fct_recode(WorkLifeBalance,"Bad"="1","Good"="2","Better"="3","Best"="4"))
ggplot(temp,aes(x=WorkLifeBalance,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="WorkLifeBalance",y="Percentage",title="Worklifebalance Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Environment Satisfaction:
temp= HRA %>% mutate(EnvironmentSatisfaction=factor(EnvironmentSatisfaction)) %>% mutate(EnvironmentSatisfaction=fct_recode(EnvironmentSatisfaction,"Low"="1","Medium"="2","High"="3","Very High"="4"))
ggplot(temp,aes(x=EnvironmentSatisfaction,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="EnvironmentSatisfaction",y="Percentage",title="Environment satisfaction Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
## Attrition Vs OverTime:
ggplot(HRA,aes(x=OverTime,group=Attrition))+geom_bar(stat="count",aes(y=..prop..,fill=factor(..x..)))+labs(x="Overtime",y="Percentage",title="Overtime Vs Attrition Rates")+facet_wrap(~Attrition)+theme_few()+theme(legend.position="none",plot.title=element_text(hjust=0.5,size=14))+geom_text(aes(label=scales::percent(..prop..),y=..prop..),stat="count",vjust=-0.5)
#_________________________________________Splitting______________________________________
library(caTools)
set.seed(010)
# Stratified 80/20 split on the response so both sets keep the class balance.
splitHRA<-sample.split(HRA$Attrition, SplitRatio = 0.80)
# Fix: use TRUE/FALSE rather than the reassignable aliases T/F.
trainHRA<-subset(HRA,splitHRA==TRUE)
testHRA<-subset(HRA,splitHRA==FALSE)
summary(HRA)
#_________________________________________DECISION TREES____________________________________
library(rpart)
# CART model on all predictors.
modelHRADT2 <- rpart(formula = Attrition ~., data=trainHRA)
plot(modelHRADT2)
text(modelHRADT2)
#validation
HRADT2_pred = predict(modelHRADT2, newdata = testHRA, type = 'class')
# Fix: the original inspected HRADT1_pred, which is never defined in this
# script; the decision-tree predictions are HRADT2_pred.
class(HRADT2_pred)
class(testHRA$Attrition)
# confusion matrix
cmHRADT2 = table(HRADT2_pred, testHRA$Attrition)
cmHRADT2
library(caret)
cfHRADT2<-confusionMatrix(HRADT2_pred,testHRA$Attrition)
cfHRADT2
# Fix: plot.roc() is from pROC, which the original script only loaded further
# down — load it before first use so this section runs top to bottom.
library(pROC)
Raw.rf.plot<- plot.roc(as.numeric(testHRA$Attrition), as.numeric(HRADT2_pred),lwd=2, type="b",print.auc=TRUE,col ="blue")
#_________________________________RANDOM FOREST_______________________________
library(data.table)
library(dplyr)
library(DT)
library(gridExtra)
library(ggplot2)
library(caret)
library(Metrics)
library(randomForest)
library(pROC)
library(e1071)
library(DMwR)
View(HRA)
summary(HRA)
#fitting random forest classification to the training set
library(randomForest)
# First column is assumed to be the response; drop it from the predictors.
randomforestHRA2 = randomForest(x = trainHRA[-1],y = trainHRA$Attrition, ntree = 50)
#predicting the test set results
randomHRA_pred2 = predict(randomforestHRA2,newdata = testHRA[-1],type="response")
#making the confusion matrix
cmHRArandom2 = table(testHRA$Attrition,randomHRA_pred2)
# Fix: the original called acc(cmHRArandom2) here, but acc() is only defined
# much further down the script, so this errored when run top to bottom.
# Overall accuracy is computed directly from the confusion matrix instead.
sum(diag(cmHRArandom2)) / sum(cmHRArandom2)
library(caret)
cfHRARF2<-confusionMatrix(randomHRA_pred2,testHRA$Attrition)
class(randomHRA_pred2)
class(testHRA$Attrition)
# Fix: the original printed cfHRARF1, which is never defined; the object
# created above is cfHRARF2.
cfHRARF2
Raw.rf.prd <- predict(randomforestHRA2, newdata = testHRA)
confusionMatrix(testHRA$Attrition, Raw.rf.prd)
Raw.rf.plot<- plot.roc(as.numeric(testHRA$Attrition), as.numeric(Raw.rf.prd),lwd=2, type="b",print.auc=TRUE,col ="blue")
varImpPlot(randomforestHRA2)
#_________________________________LOGISTIC_______________________________________
# Full logistic model on every predictor.
modelHRAL12<-glm(Attrition~.,family=binomial(link="logit"),data=trainHRA)
summary(modelHRAL12)
# Reduced model with a hand-picked subset of predictors.
# NOTE(review): 'TotlaSatisfaction' looks like a typo for 'TotalSatisfaction',
# but it is kept as-is in case the derived column really carries this name —
# confirm against the code that builds trainHRA.
modelHRAL13<-glm(Attrition~BusinessTravel+DistanceFromHome+JobRole+MaritalStatus+NumCompaniesWorked+OverTime+TotalWorkingYears+YearsAtCompany+YearsInCurrentRole+YearsSinceLastPromotion+TotlaSatisfaction,family=binomial(link="logit"),data=trainHRA)
summary(modelHRAL13)
#testing
#validation of our model using validation set
# if type = response is not mentioned it will take log(odd(probability)), its for backtransforming it to categorical variable
# Fix: the '...13' objects are now actually computed from modelHRAL13
# (the original predicted from modelHRAL12 here, a name/model mismatch).
fitted.resultsHRAL13 <- predict(modelHRAL13,newdata=testHRA[,-1],type='response')
#Thresholding
fitted.resultsHRAL13<- ifelse(fitted.resultsHRAL13 > 0.5,1,0)
#plotting auc curve
# NOTE(review): prediction()/performance() are ROCR functions; ROCR is assumed
# to be loaded earlier in the file — confirm.
# Fix: the original predicted from modelHRAL10, which is never defined.
HRALP13 <- predict(modelHRAL13, newdata=testHRA[,-1], type="response")
# Fix: build the ROC curve from the raw probabilities (HRALP13, previously
# computed but unused) rather than the 0/1 thresholded values, which would
# give a degenerate two-point ROC curve.
HRALpr13 <- prediction(HRALP13, testHRA[,1])
HRALprf13 <- performance(HRALpr13, measure = "tpr", x.measure = "fpr")
plot(HRALprf13)
HRALaucp13 <- performance(HRALpr13, measure = "auc")
HRALaucp13 <- HRALaucp13@y.values[[1]]
HRALaucp13
#___________________________SVM______________________________________
library(e1071)
#model building
# Radial-kernel SVM (e1071 default kernel) with default cost/gamma.
HRAclassifier10<-svm(formula =Attrition~.,data=trainHRA,type = 'C-classification')
summary(HRAclassifier10)
# Radial kernel with manually chosen gamma and cost.
HRAclassifier11<-svm(formula =Attrition~.,data=trainHRA,type = 'C-classification',gamma=0.5,cost=4)
summary(HRAclassifier11)
# SVM tuned via grid search over gamma and cost
tunesvmHRA=tune(svm,Attrition~.,
data=trainHRA,
ranges = list(gamma=2^(-1:1),cost=2^(2:9)))
summary(tunesvmHRA)
# Same data, three alternative kernels for comparison.
HRAclassifier12<-svm(formula =Attrition~.,data=trainHRA,type = 'C-classification',kernel="linear")
summary(HRAclassifier12)
HRAclassifier13<-svm(formula =Attrition~.,data=trainHRA,type = 'C-classification',kernel="sigmoid")
summary(HRAclassifier13)
HRAclassifier14<-svm(formula =Attrition~.,data=trainHRA,type = 'C-classification',kernel="polynomial")
summary(HRAclassifier14)
View(HRA)
#validation data
#validation of our model using validation set
# if type = response is not mentioned it will take log(odd(probability)), its for backtransforming it to categorical variable
# Class predictions for each fitted SVM on the held-out set (first column of
# testHRA is dropped — presumably the response; confirm).
fitted.resultssvmHRA10 <- predict(HRAclassifier10,newdata=testHRA[,-1])
fitted.resultssvmHRA11 <- predict(HRAclassifier11,newdata=testHRA[,-1])
fitted.resultssvmHRA12 <- predict(HRAclassifier12,newdata=testHRA[,-1])
fitted.resultssvmHRA13 <- predict(HRAclassifier13,newdata=testHRA[,-1])
fitted.resultssvmHRA14 <- predict(HRAclassifier14,newdata=testHRA[,-1])
#Confusion matrix
# Cross-tabulations: rows = predicted class, columns = testHRA[,1].
svmcfHRA10<-table(fitted.resultssvmHRA10 , testHRA[,1])
svmcfHRA11<-table(fitted.resultssvmHRA11 , testHRA[,1])
svmcfHRA12<-table(fitted.resultssvmHRA12 , testHRA[,1])
svmcfHRA13<-table(fitted.resultssvmHRA13 , testHRA[,1])
svmcfHRA14<-table(fitted.resultssvmHRA14 , testHRA[,1])
# ----------------------------------------------------------------------------
# Summary metric for a 2x2 confusion table (rows = predicted, cols = observed).
# Returns cm[2,2] / (cm[2,1] + cm[2,2]): the fraction of row-2 predictions
# that fall in column 2 — i.e. the positive predictive value of the second
# predicted class, NOT overall accuracy, despite the name (name kept for
# compatibility with the rest of the script).
# Fix: the original re-defined this identical function five times, once per
# SVM fit; a single definition is equivalent and removes the duplication.
acc<-function(cm){
Totp<-cm[2,1]+cm[2,2]
TP<-cm[2,2]
c<-TP/Totp
c
}
# radial kernel, default cost/gamma
acc(svmcfHRA10)
# radial kernel with manual cost and gamma
acc(svmcfHRA11)
# linear kernel
acc(svmcfHRA12)
# sigmoid kernel
acc(svmcfHRA13)
# polynomial kernel
acc(svmcfHRA14)
#plotting auc curve for linear
# NOTE(review): despite the comment above, this predicts from HRAclassifier10
# (the radial fit with defaults), not the linear fit HRAclassifier12 — confirm
# which model was intended.
HRAsvmp11 <- predict(HRAclassifier10, newdata=testHRA[,-1])
HRAsvmp11 <- as.numeric(HRAsvmp11)
# WARNING: this overwrites the response with its numeric codes for the rest of
# the script; factor-based calls further down (e.g. confusionMatrix) will see
# a numeric vector from here on.
testHRA$Attrition <-as.numeric(testHRA$Attrition)
# ROCR objects built from class labels coerced to numeric (assumes ROCR is
# loaded earlier — TODO confirm).
HRAsvmpr11 <- prediction(HRAsvmp11, testHRA[,1])
HRAsvmprf11 <- performance(HRAsvmpr11, measure = "tpr", x.measure = "fpr")
plot(HRAsvmprf11)
HRAaucsvmp11 <- performance(HRAsvmpr11, measure = "auc")
HRAaucsvmp11 <- HRAaucsvmp11@y.values[[1]]
HRAaucsvmp11
#_______________________NAIVE BAYES_____________________________________
library(e1071)
#model building on train data
library(e1071)
HRAnaivem11 <- naiveBayes(Attrition~., data=trainHRA)
dim(HRAnaivem11)
summary(HRAnaivem11)
#validation data
HRANaive_pred11 = predict(HRAnaivem11, newdata = testHRA)
cmHRANaive11 = table(HRANaive_pred11, testHRA$Attrition)
library(caret)
cfNaiveHRA11<-confusionMatrix(HRANaive_pred11,testHRA$Attrition)
cfNaiveHRA11
class(HRANaive_pred11)
class(testHRA$Attrition)
testHRA$Attrition <- as.numeric(testHRA$Attrition)
#__________________________XGB__________________________________
library(xgboost)
library(plyr)
library(DMwR)
fitControl <- trainControl(method="cv", number = 3,classProbs = TRUE )
xgbGrid <- expand.grid(nrounds = 500,
max_depth = 20,
eta = .03,
gamma = 0.01,
colsample_bytree = .7,
min_child_weight = 1,
subsample = 0.9
)
HRAXGBmodel <- train(Attrition~., data = trainHRA,
method = "xgbTree"
,trControl = fitControl
, verbose=0
, maximize=FALSE
,tuneGrid = xgbGrid
)
HRAXGBprd <- predict(HRAXGBmodel,testHRA)
confusionMatrix(HRAXGBprd, testHRA$Attrition)
XGB.plot <- plot.roc (as.numeric(testHRA$Attrition), as.numeric(HRAXGBprd),lwd=2, type="b", print.auc=TRUE,col ="blue")
|
257d36bbb5496b2d247899d2a7ea1d9e11ba90cd | 1712ed440489db168071b533ff8e79a1ede57df7 | /R/spatial.occupancy.R | aa97df91479fe5632d942c3b8ed31b738bb104d3 | [] | no_license | dsjohnson/stocc | 2947fc1f52e54e6e1747485fc3d905e7078bebc2 | 584c24792bb3c6083de274fab839a3f713c4adf0 | refs/heads/master | 2022-10-21T01:42:42.469014 | 2022-10-05T18:52:47 | 2022-10-05T18:52:47 | 14,853,161 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,354 | r | spatial.occupancy.R | #' Fit a spatial occupancy model using Gibbs sampling
#'
#' This function fits a spatial occupancy model where the true occupancy is a
#' function of a spatial process. An efficient Gibbs sampling algorithm is
#' used by formulating the detection and occupancy process models with a probit
#' model instead of the traditional logit based model.
#'
#' A Gibbs sampler is run to draw an MCMC sample of the spatial occupancy
#' parameters \code{beta} (detection parameters), \code{gamma} (the occupancy
#' parameters), \code{psi} (the model occupancy generating process), and the
#' realized occupancy.
#'
#' @param detection.model A formula object describing the detection portion of
#' the occupancy model. The variables described by the detection model are
#' located in the \code{visit} data frame of the \code{so.data}.
#' @param occupancy.model A formula object describing the fixed effects portion
#' of the spatial occupancy process. The variables described by the occupancy
#' model are located in the \code{site} data frame of an \code{so.data} object.
#' @param spatial.model A named list object describing the spatial component of
#' the occupancy process. Currently the only possible models are ICAR, restricted spatial regression,
#' process convolution models, and no spatial model (i.e., eta = 0). Thus, \code{spatial.model=list(model="icar",
#' threshold= )}, \code{spatial.model=list(model="rsr", threshold=, moran.cut=)},
#' \code{spatial.model=list(model="proc.conv", knots=)}, and \code{spatial.model=list(model="none")}
#' are the only forms that are accepted at present. The \code{threshold}
#' component is used the create neighborhoods in the ICAR and RSR models. All sites
#' within distance \code{threshold} of site i are considered neighbors of site
#' i. The \code{moran.cut} component is the cut-off for selecting
#' the spatial harmonics used in the restricted spatial regression model. The value must be between 1 and N and implies that
#' the eigen vectors associated with the largest \code{moran.cut} eigen values are used for the basis functions.
#' The item \code{knots} are xy locations of the discrete process
#' convolution knots.
#' @param so.data An \code{so.data} object containing the observed occupancies,
#' detection covariates, site covariates, and site coordinates. This is created
#' via the \code{\link{make.so.data}}
#' @param prior A named list that provides the parameter values for the prior
#' distributions. At the current time the elements of the list must contain
#' \code{a.tau} and \code{b.tau} which are the parameters for the gamma prior on the spatial
#' process parameter in the occupancy model. Other elements may include
#' \code{Q.b} and \code{mu.b} which are the tolerance and mean for the beta
#' vector (detection parameters). Also \code{Q.g} and \code{mu.g} which are the
#' prior parameters for the occupancy model. If the Q.b and Q.g are left out,
#' the default is Q.b = 0 and Q.g = 0 (i.e., flat priors). If mu.b and mu.g are left out,
#' the default is zero vectors.
#' @param control A named list with the control parameters for the MCMC. The
#' elements of the list must include: (1) \code{burnin} is the number of
#' iterations of burnin, (2) \code{iter} is the total number of iterations
#' retained for the MCMC sample, and (3) \code{thin} is the thining rate of the
#' chain. The real number of MCMC iterations is equal to \code{iter*thin} of
#' which \code{iter - burnin} are retained for posterior summary.
#' @param initial.values A named list that can include any or all of the following vectors or scalers
#' (1) \code{beta}, a vector of initial values for the detection parameters, (2) \code{gamma}, a vector or
#' initial values for the occupancy model, and (3) \code{tau}, an initial value for the spatial precision
#' parameter.
#' @return A list with the following elements: \item{beta}{An object of class
#' \code{mcmc}. The detection model parameters.} \item{gamma}{An object of
#' class \code{mcmc}. The occupancy model parameters.} \item{psi}{An object of
#' class \code{mcmc}. The occupancy generating process} \item{real.occ}{An
#' object of class \code{mcmc}. The realized occupancy at the time of the
#' survey} \item{tau}{An object of
#' class \code{mcmc}. The variance parameter for the spatial model}
#' \item{occupancy.df}{A data frame with the spatial
#' coordinates, site id, and posterior mean and variance of psi, eta, and real.occ}
#' \item{D.m}{The posterior predictive loss criterion of Gelfand and Ghosh
#' (1998; Biometrika 85:1-11) for model selection. The criterion is a combination of a goodness-of-fit measure, G.m, and
#' a complexity measure, P.m, similar information criteria such as AIC and BIC. D.m = G.m + P.m. Lower values of D.m imply lower
#' expected loss in predicting new data with the posterior model parameters.}
#' \item{G.m}{The
#' goodness-of-fit portion of D.m} \item{P.m}{The model complexity component of
#' D.m} \item{detection.model}{The detection model call.}
#' \item{occupancy.model}{The occupancy model call.} \item{model}{A character
#' version of the joint occupancy and detection model call. This is useful for
#' saving results.}
#' @author Devin S. Johnson <devin.johnson@@noaa.gov>
#' @export
#' @import truncnorm
#' @import coda
#' @import Matrix
#' @import fields
#' @import rARPACK
spatial.occupancy <-
function(detection.model, occupancy.model, spatial.model, so.data, prior, control, initial.values=NULL){
#Packages
#Matrix construction, etc...
## ---- Data set-up: design matrices and observed-occupancy indicators -------
site <- so.data$site
visit <- so.data$visit
site$site.idx <- factor(site[,attr(site,"site")])
visit$site.idx <- factor(visit[,attr(visit,"site")], levels=levels(site$site.idx))
xy <- as.matrix(site[,attr(site,"coords")])
# Detection design matrix (one row per visit) and occupancy design matrix
# (one row per site).
Xy <- Matrix(model.matrix(detection.model, visit))
Xz <- Matrix(model.matrix(occupancy.model, site))
n.site <- nrow(Xz)
n.knots <- nrow(spatial.model$knots)
# Visit -> site incidence matrix.
M <- Matrix(model.matrix(~site.idx-1, visit))
# z.master = 1 for sites with at least one detection, NA otherwise
# (occupancy unknown there and imputed inside the sampler).
z.master=as.vector(ifelse(table(visit$site.idx,visit[,attr(visit,"obs")])[,"1"]>0, 1, NA))
z <- as.numeric(!is.na(z.master))
z.obs <- site$site.idx %in% visit$site.idx
n.obs <- sum(z.obs)
y <- visit[,attr(visit,"obs")]
## spatial model
## ---- Spatial-model-specific precision/basis matrices ----------------------
if(spatial.model$model=="icar"){
# Intrinsic CAR precision matrix built from the distance-based neighborhood.
Q.eta <- Matrix(icar.Q(xy,spatial.model$threshold, 1))
}
# else if(spatial.model$model=="car"){
# Q.fun <- icar.Q(xy,spatial.model$threshold, fun=TRUE)
# rho<- 0.95
# Q.eta <- Matrix(Q.fun(rho))
# ln.det.Q.eta <- determinant(Q.eta, log=TRUE)$modulus
# alpha <- rnorm(n.site,0,0.01)
# #alpha <- alpha-mean(alpha)
# sigma <- 1
# eta <- sigma*alpha
# }
else if(spatial.model$model=="proc.conv"){
Q.alpha <- Diagonal(nrow(spatial.model$knots))
# NOTE(review): 'knots' is not defined in this scope, so this default errors
# when pc.scale is omitted — presumably spatial.model$knots was intended.
if(is.null(spatial.model$pc.scale)) pc.scale <- min(dist(knots))
else pc.scale <- spatial.model$pc.scale
# Gaussian kernel basis evaluated between sites and knot locations.
K <- Matrix(exp(-0.5*(rdist(xy,spatial.model$knots)/pc.scale)^2))
KtK <- crossprod(K)
a <- rep(0,n.knots)
}
else if(spatial.model$model=="rsr"){
cat("\nCreating (R)estricted (S)patial (R)egression matrices ...\n")
if(as.integer(spatial.model$moran.cut)<1 | as.integer(spatial.model$moran.cut)>n.site){
stop("Invalid value for 'moran.cut' specified. See documentation\n")
}
Q <- icar.Q(xy,spatial.model$threshold, rho=1)
Q <- Matrix(Q)
A <- Matrix(diag(diag(Q)) - Q)
# Project the (scaled) Moran operator off the fixed-effects column space and
# keep the leading 'moran.cut' eigenvectors as the spatial basis K.
P <- diag(n.site) - Xz %*% solve(crossprod(Xz), t(Xz))
Op <- (nrow(A)/sum(A))*(P %*% (A %*% P))
e <- rARPACK::eigs(Op, as.integer(spatial.model$moran.cut))
K <- e$vectors
KtK <- diag(ncol(K))
Q.alpha <- as.matrix(t(K) %*% Q %*% K)
a <- rep(0,nrow(Q.alpha))
}
## Priors
# Gamma prior for tau; flat (zero-precision) normal priors for beta/gamma
# unless Q.b/Q.g are supplied (a vector is expanded to a diagonal).
a.tau <- prior$a.tau
b.tau <- prior$b.tau
if(!is.null(prior$mu.b)) mu.b <- prior$mu.b
else mu.b <- rep(0,ncol(Xy))
Q.b = prior$Q.b
if(is.null(Q.b)) {
Q.b = matrix(0, ncol(Xy), ncol(Xy))
} else if(is.vector(Q.b)){
Q.b=diag(Q.b, ncol(Xy))
}
if(!is.null(prior$mu.g)) mu.g <- prior$mu.g
else mu.g <- rep(0,ncol(Xz))
Q.g = prior$Q.g
if(is.null(Q.g)) {
Q.g = matrix(0, ncol(Xz), ncol(Xz))
} else if(is.vector(Q.g)){
Q.g=diag(Q.g, ncol(Xz))
}
# Storage
# One row per retained MCMC draw (iter - burnin of them).
beta <- matrix(nrow=control$iter-control$burnin,ncol=ncol(Xy))
colnames(beta) <- colnames(Xy)
gamma <- matrix(nrow=control$iter-control$burnin,ncol=ncol(Xz))
colnames(gamma) <- colnames(Xz)
psi <- matrix(nrow=control$iter-control$burnin,ncol=n.site)
eta.stor <- matrix(nrow=control$iter-control$burnin,ncol=n.site)
real.occ <- matrix(nrow=control$iter-control$burnin,ncol=n.site)
tau.stor <- matrix(nrow=control$iter-control$burnin,ncol=1)
colnames(tau.stor) <- "tau"
if(spatial.model$model %in% c("proc.conv", "rsr")){
alpha <- matrix(nrow=control$iter-control$burnin,ncol=nrow(Q.alpha))
}
else alpha <- NULL
#if(spatial.model$model=="car") rho.eta <- matrix(nrow=control$iter-control$burnin,ncol=1)
#yz.ln.lik <- matrix(nrow=control$iter-control$burnin,ncol=1)
# Replicated detection data, for posterior predictive loss (D.m below).
y.rep.stor <- matrix(nrow=control$iter-control$burnin,ncol=nrow(Xy))
V.g.inv <- crossprod(Xz) + Q.g
I.n <- Matrix(diag(n.site))
# Starting values
if(is.null(initial.values$beta)) {
b <- rep(0,ncol(Xy))
} else b <- initial.values$beta
if(length(b)!=ncol(Xy)) {
stop("Error: Length of initial values for beta does not match the detection model!\n")
}
if(is.null(initial.values$gamma)) {
g <- rep(0,ncol(Xz))
} else g <- initial.values$gamma
if(length(g)!=ncol(Xz)) stop("Error: Length of initial values for gamma does not match the occupancy model!\n")
if(is.null(initial.values$tau)) {
tau <- 1
} else tau <- initial.values$tau
eta <- rep(0,n.site)
# Total Gibbs iterations actually run = iter*thin; only every thin-th draw
# after burnin is stored.
iter <- control$iter*control$thin
burnin <- control$burnin*control$thin
cat("\nBeginning MCMC routine ...\n")
st <- Sys.time()
## ---- Gibbs sampler --------------------------------------------------------
for(i in 1:iter){
Xge <- as.numeric(Xz%*%g+eta)
Xb <- as.numeric(Xy%*%b)
#Update missing z
# Impute occupancy at sites with no detections from its full conditional.
ln.qz <- pnorm(rep(0,n.obs), Xge, 1, lower.tail=FALSE, log.p=TRUE)
ln.qy <- crossprod(M,pnorm(0, Xb, 1, log.p=TRUE))
p.z <- exp(ln.qz + ln.qy)/(exp(ln.qz + ln.qy) + pnorm(rep(0,n.obs), Xge, 1))
z[is.na(z.master)] <- rbinom(sum(is.na(z.master)), 1, p.z[is.na(z.master)])
#Update y.tilde
# Truncated-normal latent variables of the probit detection model.
y.tilde <- rtruncnorm(nrow(Xy), a=ifelse(y==0,-Inf,0), b=ifelse(y==0,0,Inf), Xb, 1)
#Update z.tilde
# Truncated-normal latent variables of the probit occupancy model.
z.tilde <- rtruncnorm(n.site, a=ifelse(z==0,-Inf,0), b=ifelse(z==0,0,Inf), Xge, 1)
#Update b
# Detection coefficients use only visits to currently-occupied sites.
idx <- visit$site.idx %in% (site$site.idx[z==1])
#Q.b <- crossprod(Xy[idx,])
V.b.inv <- crossprod(Xy[idx,]) + Q.b
m.b <- solve(V.b.inv, crossprod(Xy[idx,],y.tilde[idx])+crossprod(Q.b,mu.b))
b <- m.b + solve(chol(V.b.inv), rnorm(ncol(Xy),0,1))
#Update g
m.g <- solve(V.g.inv, crossprod(Xz,z.tilde-eta)+crossprod(Q.g,mu.g))
g <- m.g + solve(chol(V.g.inv), rnorm(ncol(Xz),0,1))
#Update eta
if(spatial.model$model %in% c("car","icar")){
V.eta.inv <- I.n + tau*Q.eta
m.eta <- solve(V.eta.inv, z.tilde-Xz%*%g)
eta <- m.eta + solve(chol(V.eta.inv), rnorm(n.site,0,1), tol=1e-10)
# Sum-to-zero centering for the intrinsic (improper) CAR.
if(spatial.model$model=="icar") eta <- eta - mean(eta)
}
else if(spatial.model$model %in% c("proc.conv","rsr")){
V.alpha.inv <- KtK + tau*Q.alpha
m.alpha <- solve(V.alpha.inv, crossprod(K,z.tilde-Xz%*%g))
#cat("\n", min((diag(chol(V.alpha.inv))))," ",tau," ",length(tau),"\n")
a <- m.alpha + backsolve(chol(V.alpha.inv), rnorm(nrow(Q.alpha),0,1))
eta <- K%*%a
}
else NULL
#Update tau
# Conjugate gamma draw for the spatial precision.
if(spatial.model$model=="icar") tau <- rgamma(1, (n.site-1)/2 + a.tau, as.numeric(crossprod(eta, Q.eta %*% eta)/2) + b.tau)
else if(spatial.model$model %in% c("proc.conv","rsr")) tau <- rgamma(1, length(a)/2 + a.tau, as.numeric(crossprod(a, Q.alpha %*% a)/2) + b.tau)
else NULL
# #Update rho for "car" models
# if(spatial.model$model=="car"){
# rho.p <- rbeta(1,a.rho.tune,1)
# Q.eta.p <- Q.fun(rho.p)
# ln.det.Q.eta.p <- determinant(Q.eta.p)$modulus
# mh <- exp(
# (ln.det.Q.eta.p - crossprod(alpha, crossprod(Q.eta.p, alpha)))/2 + dbeta(rho.p, a.rho, b.rho, log=TRUE)
# - dbeta(rho.p, a.rho.tune, 1, log=TRUE)
# - (ln.det.Q.eta.p - crossprod(alpha, crossprod(Q.eta.p, alpha)))/2 + dbeta(rho.p, a.rho, b.rho, log=TRUE)
# + dbeta(rho, a.rho.tune, 1, log=TRUE)
# )
# if(runif(1,0,1)<=as.numeric(mh)){
# rho <- rho.p
# Q.eta <- Q.eta.p
# ln.det.Q.eta <- ln.det.Q.eta.p
# }
# }
#Record sample
if(i>burnin & i%%control$thin==0){
beta[(i-burnin)/control$thin,] <- as.numeric(b)
gamma[(i-burnin)/control$thin,] <- as.numeric(g)
psi[(i-burnin)/control$thin,] <- pnorm(rep(0,n.site), as.numeric(Xz%*%g+eta), 1, lower.tail=FALSE)
eta.stor[(i-burnin)/control$thin,] <- as.numeric(eta)
real.occ[(i-burnin)/control$thin,] <- as.numeric(z)
tau.stor[(i-burnin)/control$thin] <- as.numeric(tau)
if(spatial.model$model %in% c("proc.conv","rsr")){
alpha[(i-burnin)/control$thin,] <- as.numeric(a)
}
#if(spatial.model$model=="car") rho.eta[(i-burnin)/control$thin] <- rho
#ln.p.ygz <- crossprod(M, dbinom(y,1, pnorm(0, as.vector(Xy%*%b), 1, lower.tail=FALSE), log=TRUE))[z.obs]
#ln.p.z <- pnorm(rep(0,sum(z.obs)), as.numeric(Xz%*%g+eta)[z.obs], 1, lower.tail=FALSE, log.p=TRUE)
#yz.ln.lik[(i-burnin)/control$thin] <- sum(log(exp(ln.p.ygz + ln.p.z) + ifelse(is.na(z.master)[z.obs], 1-exp(ln.p.z), 0)))
##Replicated data
# Draw a replicated detection history from the current parameters.
idx.rep <- visit$site.idx %in% site$site.idx[rnorm(n.site, as.numeric(Xz%*%g + eta), sd=1)<=0]
y.rep <- 1.0*(rnorm(nrow(Xy), as.numeric(Xy%*%b), sd=1)>0)
y.rep[idx.rep] <- 0
y.rep.stor[(i-burnin)/control$thin,] <- y.rep
}
# One-off runtime estimate from the first 15 iterations.
if(i==15){
tpi <- as.numeric(difftime(Sys.time(), st, units="secs"))/15
ttc <- round((iter-15)*tpi/3600, 2)
cat("\nApproximate time till completion: ", ttc, " hours\n")
}
if(100*(i/iter) >= 10 & (100*(i/iter))%%10==0) cat("\n", 100*(i/iter), "% completed\n")
}
## ---- Post-processing: coda objects, fit criterion, summary data frame -----
beta <- mcmc(beta)
gamma <- mcmc(gamma)
psi <- mcmc(psi)
real.occ <- mcmc(real.occ)
tau <- mcmc(tau.stor)
#cross.eta <- mcmc(cross.eta)
#if(spatial.model$model=="car") rho.eta <- mcmc(rho.eta)
#else rho.eta <- NULL
#ln.prior.pred <- mean(yz.ln.lik)-log(mean(exp(-(yz.ln.lik-mean(yz.ln.lik)))))
#l.max <- mean(yz.ln.lik) + var(yz.ln.lik)
#d.hat <- 2*var(yz.ln.lik)
#BICM <- -2*l.max + ncol(Xy)*log(sum(y==0)+1) + ncol(Xz)*log(sum(!is.na(z.master))+1) + (d.hat-ncol(Xy)-ncol(Xz))*log(sum(samp)+1)
#AICM <- -2*l.max + 2*d.hat
# Gelfand-Ghosh posterior predictive loss: goodness-of-fit + complexity.
G.m <- crossprod(apply(y.rep.stor, 2, mean)-y)
P.m <- crossprod(apply(y.rep.stor, 2, sd))
D.m <- G.m + P.m
occupancy.df <- data.frame(
site[attr(site,"site")],
site[,attr(site,"coords")],
samp=as.numeric(site$site.idx %in% unique(visit$site.idx)),
psi.est=apply(psi,2,mean),
psi.se=apply(psi,2,sd),
eta.est=apply(eta.stor,2,mean),
eta.se=apply(eta.stor,2,sd),
real.occ.est=apply(real.occ,2,mean),
real.occ.se=apply(real.occ, 2, sd)
)
out <- list(
beta=beta,
gamma=gamma,
psi=psi,
real.occ=real.occ,
tau=tau,
alpha=alpha,
occupancy.df=occupancy.df,
#ln.prior.pred=ln.prior.pred, yz.ln.lik=yz.ln.lik, BICM=BICM, AICM=AICM,
D.m=D.m,
G.m=G.m,
P.m=P.m,
detection.model=detection.model,
occupancy.model=occupancy.model,
model=paste(
c(
paste(c("DET", as.character(detection.model)[-1]),collapse="~"),
paste(c("OCC", as.character(occupancy.model)[-1]),collapse="~")
),
collapse='-'
),
so.data=so.data
)
class(out) <- "spat.occ"
return(out)
}
|
59e35675a00735d7be36d156449943fcae4b29a4 | 29585dff702209dd446c0ab52ceea046c58e384e | /mixOmics/R/Mfold.R | 2151c5511ddd6f5c6a547cd468be4002e7199999 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,614 | r | Mfold.R | # Copyright (C) 2009
# Sebastien Dejean, Institut de Mathematiques, Universite de Toulouse et CNRS (UMR 5219), France
# Ignacio Gonzalez, Genopole Toulouse Midi-Pyrenees, France
# Kim-Anh Le Cao, French National Institute for Agricultural Research and
# ARC Centre of Excellence ins Bioinformatics, Institute for Molecular Bioscience, University of Queensland, Australia
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
Mfold <-
function(X, Y, lambda1, lambda2, folds, M)
{
# M-fold cross-validation score for regularized CCA (rcc).
# For each fold m: refit rcc without the held-out rows, then score the
# held-out rows on the first column of the estimated loadings. The CV score
# is the correlation between the two pooled held-out score vectors.
xscore = NULL
yscore = NULL
for (m in 1:M) {
omit = folds[[m]]
result = rcc(X[-omit, ], Y[-omit, ], 1, lambda1, lambda2,method="ridge")
# NOTE: these assignments modify X and Y themselves, so NAs zeroed for an
# earlier fold stay zeroed when later folds refit on X[-omit, ] — the
# statement order here is load-bearing.
X[omit, ][is.na(X[omit, ])] = 0
Y[omit, ][is.na(Y[omit, ])] = 0
xscore = c(xscore, X[omit, ] %*% result$loadings$X[, 1])
yscore = c(yscore, Y[omit, ] %*% result$loadings$Y[, 1])
}
# Pairwise-complete correlation of the pooled held-out scores.
cv.score = cor(xscore, yscore, use = "pairwise")
return(invisible(cv.score))
}
|
32c0f132c2b6b50833014065cc3ffd590a831585 | b3f0ad3934f871dfe1105a8168d0f91048c0420c | /R/years_to_rownames.R | 49cac28425de1ab9ed88614cfe0c4e0f79794b0c | [] | no_license | cran/dendroTools | 25739b0411af2f8a9114e46c01641fac4b40e538 | 51c3eed999c8d27b38ea7d2edd91cb0063cf2127 | refs/heads/master | 2023-07-20T07:48:19.869232 | 2023-07-17T12:50:02 | 2023-07-17T14:34:17 | 105,138,038 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | years_to_rownames.R | #'
#'
#'
#' Function returns a data frame with row names as years
#'
#' @param data a data frame to be manipulated
#' @param column_year string giving the exact name of the column with years
#'
#' @return a data frame with years as row names and the year column removed
#'
#' @export
#'
#' @examples
#' data <- data.frame(years = seq(1950, 2015), observations = rnorm(66))
#' new_data <- years_to_rownames(data = data, column_year = "years")
#'
#' data <- data.frame(observations1 = rnorm(66), years = seq(1950, 2015),
#' observations2 = rnorm(66), observations3 = rnorm(66))
#' new_data <- years_to_rownames(data = data, column_year = "years")
years_to_rownames <- function(data, column_year) {
  data <- data.frame(data) # Data needs to be of class data.frame!
  # Match the column name exactly. The previous implementation used
  # grep(column_year, colnames(data)), which treats the name as a regular
  # expression and matches partial names too (e.g. "year" would also match
  # "years" and "year_flag"), silently selecting the wrong column(s).
  year_index <- which(colnames(data) == column_year)
  if (length(year_index) != 1) {
    stop("'column_year' must match exactly one column of 'data'", call. = FALSE)
  }
  # Year values become the row names (coerced to character by `row.names<-`).
  row.names(data) <- data[[year_index]]
  # Drop the year column; drop = FALSE keeps a data frame even when only one
  # column remains.
  data <- data[, -year_index, drop = FALSE]
  return(data)
}
|
a3d6534cdc1ea2452f8ad26c190b7a0dd964c09e | 9f273727d61fea30a468dc1e5f8c7b6fd14ed0eb | /lookupNPIs_APIcall.R | ac86ec6cf07cba6aaee83ff028fc4268ec572751 | [] | no_license | checono/christine_code | 3f9ec0cb8c6bdf67800d0aca1e8fa1120f4f5d85 | 3ba222656405a8c12bbe8eb3e56cd8932606071c | refs/heads/master | 2021-01-12T00:05:27.566184 | 2017-01-11T19:57:25 | 2017-01-11T19:57:25 | 78,672,198 | 0 | 0 | null | 2017-01-11T19:32:41 | 2017-01-11T19:30:44 | null | UTF-8 | R | false | false | 2,315 | r | lookupNPIs_APIcall.R | require(xlsx)
require(jsonlite)
require(XML)
require(RCurl)
library(plyr)
data <- read.xlsx("//Econo-file1/users/Shares/ZJ5586B/R/NPIs_to_Check_20161118.xlsx", 1)
output <- data.frame(matrix( nrow= length(data$NPI), ncol= 6))
colnames(output) <- c("NPI", "credential", "desc", "license", "primary", "code")
lengths <- list()
for(i in 1:length(data$NPI)){
NPInum <- data$NPI[i]
# print(NPInum)
address <- paste("https://npiregistry.cms.hhs.gov/api/?number=", NPInum, "&enumeration_type=&taxonomy_description=&first_name=&last_name=&organization_name=&address_purpose=&city=&state=&postal_code=&country_code=&limit=&skip=&pretty=on", sep="")
npiData = fromJSON(address)
# print(npiData$results[["taxonomies"]])
# credential <- npiData$results[["basic"]][["credential"]]
# desc <- npiData$results[["taxonomies"]][[1]][["desc"]]
# license <- npiData$results[["taxonomies"]][[1]][["license"]]
# primary <- npiData$results[["taxonomies"]][[1]][["primary"]]
# code <- npiData$results[["taxonomies"]][[1]][["code"]]
if(length(npiData$results[["taxonomies"]][[1]][[1]])>0){
lengths[[i]] <- length(npiData$results[["taxonomies"]][[1]][[1]])
}
# output$NPI[i] <- NPInum
#
#
#
# if(length(credential) >0){
# output$credential[i] <- credential
# }
# if(length(desc) >0){
# output$desc[i] <- desc
# }
# if(length(license)>0){
# output$license[i] <- license
# }
# if(length(primary) >0){
# output$primary[i] <- primary
# # print(length(npiData$results[["taxonomies"]][[1]][["primary"]]))
# }
#
# if(length(code) >0){
# output$code[i] <- code
# }
#
#
# if(length(primary) > 0 & output$primary[i] == FALSE){
# print("FALSE")
# # print(length(npiData$results[["taxonomies"]][[1]][["primary"]]))
# # print(length(npiData$results[["taxonomies"]][[1]][["primary"]][[1]]))
# }
#
}
write.csv(output, "//Econo-file1/users/Shares/ZJ5586B/R/NPIs_to_Check_2016123_output.csv")
creds = count(output, "credential")
creds[order(-creds$freq),]
|
d4a13db6a1ce8b95734e3946b76dfe7e29f5440a | 78bd071a206d4f35a02520a27a6d5e0434c152c6 | /R/rotation_matrices.R | 1d4648a28dfb2a67e7b456bcbe99bd4a14e7b2b2 | [
"MIT"
] | permissive | Middleton-Lab/KinematicAnalysis | 96c1ab00ee8e24bbf970a9755c2c82c9e36219e9 | 8e1e4e4a064e8f873af4b9ed1eaadefa629ec1d3 | refs/heads/main | 2022-05-01T05:49:42.457253 | 2022-03-25T02:46:33 | 2022-03-25T02:46:33 | 240,816,100 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,195 | r | rotation_matrices.R | #' Calculate rotation matrices for a set of calibration coordinates
#'
#' @param cal_coords Coordinates of calibration object in wide format.
#'
#' @return List containing:
#' `Rx` and `Ry` Rodrigues rotation matrices to zero about the x and y axes
#' `cal_rot` Calibration points rotated about x and y
#'
#' @export
#'
rotation_matrices <- function(cal_coords) {
  # Pull one named point/vector out of the wide coordinate table as a plain
  # numeric xyz triple. Reads whatever `cal_coords` currently holds, so calls
  # made after the translation below see translated coordinates.
  grab_xyz <- function(prefix) {
    as.numeric(dplyr::select(cal_coords, starts_with(prefix)))
  }
  # Shift every calibration point so the "Intersection" point is the origin.
  cal_coords <- translate_points(cal_coords, grab_xyz("Intersection"))
  # First rotation: zero the vertical ("Top_Line") vector about the x axis,
  # using the "Right_Line" vector as the rotation axis.
  Rx <- rodrigues_rotation(v_zero = grab_xyz("Top_Line"),
                           v_axis = grab_xyz("Right_Line"))
  pts_after_x <- Rx %*% make_xyz_matrix(cal_coords)
  # Second rotation: about the y axis, lining up the x axis using columns 11
  # and 10 of the partially rotated point matrix.
  Ry <- rodrigues_rotation(v_zero = as.numeric(pts_after_x[, 11]),
                           v_axis = as.numeric(pts_after_x[, 10]))
  # Return both rotation matrices plus the fully rotated calibration points.
  list(Rx = Rx, Ry = Ry, cal_rot = Ry %*% pts_after_x)
}
|
e63e6d4fbcbb6145a34099fe0ddb419fd8b68788 | 01b1302af51d339f7c8827a620c4a5fb26c890f1 | /resource_tracking/analysis/deliverables/_DRC 2019 report analyses/report_graphs.r | 556c946e516dc8b42cc033b32a3090d0112c4dec | [] | no_license | ihmeuw/gf | 64ab90fb5a5c49694bde1596f4b20fcf107a76e3 | 29e0c530b86867d5edd85104f4fe7dcb1ed0f1ee | refs/heads/develop | 2021-08-15T02:16:59.086173 | 2021-08-03T19:52:31 | 2021-08-03T19:52:31 | 109,062,373 | 3 | 6 | null | 2019-03-21T01:48:02 | 2017-10-31T23:17:16 | R | UTF-8 | R | false | false | 6,002 | r | report_graphs.r | #------------------------------------------
# Specific graphs for the DRC report
# Updated by Emily Linebarger November 2019
#------------------------------------------
rm(list=ls())  # NOTE(review): wipes the whole workspace; avoid in shared scripts
library(data.table)
library(ggplot2)
library(RColorBrewer)
library(scales)
options(scipen=100)  # avoid scientific notation in axis/label formatting
# graphing_functions.r is expected to provide get_cumulative_absorption() used below.
source("C:/Users/elineb/Documents/gf/resource_tracking/analysis/graphing_functions.r")
#Read in data
revisions = readRDS("C:/Users/elineb/Box Sync/Global Fund Files/COD/prepped_data/budget_revisions.rds")
absorption = readRDS("C:/Users/elineb/Box Sync/Global Fund Files/COD/prepped_data/absorption_cod.rds")
all_mods = readRDS("J:/Project/Evaluation/GF/resource_tracking/modular_framework_mapping/all_interventions.rds")
# Harmonize English module/intervention column names with the absorption data.
setnames(all_mods, c('module_eng', 'intervention_eng', 'abbrev_mod_eng', 'abbrev_int_eng'), c('gf_module', 'gf_intervention', 'abbrev_mod', 'abbrev_int'))
all_mods = unique(all_mods[, .(gf_module, gf_intervention, disease, abbrev_mod, abbrev_int)])
# Attach abbreviated module/intervention labels; allow.cartesian because the
# mapping can be one-to-many.
absorption = merge(absorption, all_mods, by=c('gf_module', 'gf_intervention', 'disease'), allow.cartesian=TRUE)
save_loc = "J:/Project/Evaluation/GF/resource_tracking/visualizations/deliverables/_DRC 2019 annual report/"
#make sure this merge worked correctly.
stopifnot(nrow(absorption[is.na(abbrev_int)])==0)
#Create a cumulative dataset
# 1. Bar graph that shows 18-month cumulative absorption by grant.
by_grant = get_cumulative_absorption(countrySubset="COD", byVars='grant')
by_grant = melt(by_grant, id.vars=c('grant', 'absorption'), value.name="amount")
by_grant[variable=="budget", label:=""] #Don't display the expenditure amount on the budget bar.
by_grant[variable=="expenditure", label:=paste0(dollar(amount), " (", absorption, "%)")]
by_grant[variable=="budget", variable:="Budget"]
by_grant[variable=="expenditure", variable:="Expenditure"]
p1 = ggplot(by_grant, aes(x=grant, y=amount, fill=variable, label=label)) +
  geom_bar(stat="identity") +
  geom_text(hjust=0) +
  theme_bw(base_size=14) +
  coord_flip() +
  scale_y_continuous(labels=scales::dollar) +
  labs(title="Absorption by grant", subtitle="January 2018-June 2019", x="Grant",
       y="Absorption (%)", fill="", caption="*Labels show expenditure amounts and absorption percentages")
ggsave(paste0(save_loc, "absorption_by_grant.png"), p1, height=8, width=11)
# 2. Show absorption by grant over the last 18 months compared to the full 3-year grant budget (most recent version)
# NOTE(review): `cumulative_absorption` is never created in this script --
# presumably it is defined in graphing_functions.r; confirm before running.
plot_data = cumulative_absorption[, .(budget=sum(cumulative_budget, na.rm=T), expenditure=sum(cumulative_expenditure, na.rm=T)), by=c('grant', 'gf_module', 'abbrev_mod')]
plot_data[, absorption:=round((expenditure/budget)*100, 1)]
# NOTE(review): the name `melt` shadows data.table::melt from here on.
melt = melt(plot_data, id.vars=c('grant', 'gf_module', 'abbrev_mod', 'absorption'))
melt[variable=="expenditure", label:=paste0(dollar(value), " (", absorption, "%)")]
melt[is.na(label), label:=""]
melt[variable=="budget", variable:="Budget"]
melt[variable=="expenditure", variable:="Expenditure"]
p2 = ggplot(melt[grant=='COD-C-CORDAID'], aes(x=abbrev_mod, y=value, fill=variable, label=label)) +
  geom_bar(stat="identity", position="identity") +
  geom_text(hjust=0) +
  theme_bw(base_size=18) +
  coord_flip() +
  scale_y_continuous(labels=scales::dollar) +
  labs(title="Cumulative absorption for COD-C-CORDAID", subtitle="Jan 2018-June 2019", x="Module", y="Budget (USD)",
       fill="", caption="*Labels show expenditure amounts and absorption percentages")
p3 = ggplot(melt[grant=='COD-M-MOH'], aes(x=abbrev_mod, y=value, fill=variable, label=label)) +
  geom_bar(stat="identity", position="identity") +
  geom_text(hjust=0) +
  theme_bw(base_size=18) +
  coord_flip() +
  scale_y_continuous(labels=scales::dollar) +
  labs(title="Cumulative absorption for COD-M-MOH", subtitle="Jan 2018-June 2019", x="Module", y="Budget (USD)",
       fill="", caption="*Labels show expenditure amounts and absorption percentages")
p4 = ggplot(melt[grant=='COD-M-SANRU'], aes(x=abbrev_mod, y=value, fill=variable, label=label)) +
  geom_bar(stat="identity", position="identity") +
  geom_text(hjust=0) +
  theme_bw(base_size=18) +
  coord_flip() +
  scale_y_continuous(labels=scales::dollar) +
  labs(title="Cumulative absorption for COD-M-SANRU", subtitle="Jan 2018-June 2019", x="Module", y="Budget (USD)",
       fill="", caption="*Labels show expenditure amounts and absorption percentages")
# 3. Show a line graph of absorption for catalytic funds in DRC
# First, tag catalytic modules/interventions.
catalytic_ints = data.table(abbrev_mod=c("Care & prevention", "MDR-TB"),
                            abbrev_int=c("Case detection and diagnosis", "Case detection and diagnosis"))
catalytic_mods = c("Human rights barriers", "Info systems & M&E")
# Flag catalytic module/intervention pairs row by row.
for (i in 1:nrow(catalytic_ints)){
  absorption[abbrev_mod==catalytic_ints$abbrev_mod[i] & abbrev_int==catalytic_ints$abbrev_int[i], catalytic:=TRUE]
}
absorption[abbrev_mod%in%catalytic_mods, catalytic:=TRUE]
absorption[is.na(catalytic), catalytic:=FALSE]
plot_data = absorption[grant_period=="2018-2020" & catalytic==TRUE, .(budget=sum(budget, na.rm=T), expenditure=sum(expenditure, na.rm=T)), by=c('abbrev_mod', 'grant', 'semester')]
plot_data[, absorption:=round((expenditure/budget)*100, 1)]
#Drop grants where the catalytic module was not applied
plot_data = plot_data[!(abbrev_mod=="Info systems & M&E" & grant!="COD-M-MOH")]
# Show absorption for matching funds in DRC.
plot_data[, concat:=paste0(grant, ", ", abbrev_mod)]
p5 = ggplot(plot_data, aes(x=semester, y=absorption, color=concat, group=concat, label=paste0(absorption, "%"))) +
  geom_point() +
  geom_line() +
  geom_text(position="jitter") +
  theme_bw(base_size=14) +
  labs(title="Absorption for catalytic funds, over time", subtitle="January 2018-June 2019", x="PUDR Semester", y="Absorption (%)", color="")
# Bundle all five figures into one PDF for the report.
pdf("J:/Project/Evaluation/GF/resource_tracking/visualizations/deliverables/_DRC 2019 annual report/report_graphs.pdf", height=8, width=11)
p1
p2
p3
p4
p5
dev.off()
|
49381f04f6b63dfab0378f2c1d29a26b4ffee231 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Maeswrap/examples/plotuspar.Rd.R | 9dcaef28ec61897b61764c9de8d9a82d2d4fa336 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 419 | r | plotuspar.Rd.R | library(Maeswrap)
### Name: plotuspar
### Title: Plot the understorey PAR points
### Aliases: plotuspar readuspar
### ** Examples
## Not run:
##D
##D # Plot one hour of the first day, showing incident PAR on understorey:
##D plotuspar("ipar", day=1,hour=12,makepdf=FALSE)
##D
##D # Make pdf of the whole day, plotting beam radiation:
##D plotuspar("beam", day=1, outputfile="beam uspar")
##D
## End(Not run)
|
5a79bf3041bd9f8754027526ed58743ff2f98bdf | 46cc65e160ccf5e0189f893127f0229807842f96 | /adhoc/old.R | 2bf723c22d23b3a411ea71c18eb949038d6bff7e | [] | no_license | minghao2016/filearray | dfc3dfc97724ae1285e9cb4a7b28ee2ccc0194e7 | 9f49882c508b83c6b95f3e71513472bb88f824aa | refs/heads/main | 2023-08-04T03:36:40.130475 | 2021-09-20T10:07:32 | 2021-09-20T10:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,599 | r | old.R | # previous implementations in R
# Build a coercion function ("caster") for one of the supported storage
# modes.  The returned closure converts its argument to a plain vector of
# that mode.  Logicals are encoded as raw bytes (0x00 = FALSE, 0x01 = TRUE,
# 0x02 = NA) so that missing values survive a raw round-trip on disk.
caster <- function(type = c('double', 'integer', 'logical', 'raw')){
    type <- match.arg(type)
    if (type == 'double') {
        return(function(x) {
            if (typeof(x) == "double") as.vector(x) else as.double(x)
        })
    }
    if (type == 'integer') {
        return(function(x) {
            if (typeof(x) == "integer") as.vector(x) else as.integer(x)
        })
    }
    if (type == 'logical') {
        return(function(x) {
            flags <- if (typeof(x) == "logical") as.vector(x) else as.logical(x)
            # Assigning 2L promotes `flags` to integer; NA becomes byte 0x02.
            flags[is.na(flags)] <- 2L
            as.raw(flags)
        })
    }
    if (type == 'raw') {
        return(function(x) {
            if (typeof(x) == "raw") as.vector(x) else as.raw(x)
        })
    }
    stop("Unknown data type: ", type)  # unreachable after match.arg()
}
# Write the data of one full partition to `file`.
#
# `value` must contain exactly prod(dimension) elements; the data are written
# through write_seq() starting at offset 0 (just past the header), and the
# element count is recorded as a double in the last 8 bytes of the header so
# readers know how much data is present.
#
# `ensure_partition()` (project helper) creates/validates the partition file
# and returns its header, including `unit_bytes` (bytes per element) and
# `endianness`.  HEADER_SIZE is a package-level constant -- the fixed byte
# length of the partition header.
write_partition <- function(
    file, partition, dimension, value, 
    type = c("double","integer","logical","raw"),
    size = NULL
){
    stopifnot(length(value) == prod(dimension))
    type <- match.arg(type)
    header <- ensure_partition(file, partition, dimension, type, size)
    fid <- file(description = file, open = "r+b")
    # Guarantee the connection is closed even if a write below fails.
    on.exit({
        try({ close(fid) }, silent = TRUE)
    }, after = FALSE, add = TRUE)
    write_seq(fid, 0L, value, length(value), header$unit_bytes, type)
    # Record the element count in the final 8 bytes of the header.
    seek(con = fid, where = HEADER_SIZE - 8L, rw = "write")
    writeBin(con = fid, object = as.double(length(value)), size = 8L, 
             endian = header$endianness)
    close(fid)
    return(invisible())
}
# Write `value` into the open binary connection `fid`, starting `start`
# elements past the header.  `total_len` bounds the write, `size` is the
# per-element byte width and `type` selects the caster() used to coerce
# `value` before writing.  HEADER_SIZE and ENDIANNESS are package-level
# constants -- TODO confirm they match the header written by ensure_partition().
write_seq = function(fid, start, value, total_len, size, type){
    stopifnot( start >= 0L )
    stopifnot( start+length(value) <= total_len )
    # Position the write head at the requested element offset.
    seek(con = fid, where = (start)*size + HEADER_SIZE, rw = "write")
    f_caster <- caster(type = type)
    # Writing data of non-native size is slow in R. (Why?)
    # This is solved by writing RAW data after using
    # writeBin to convert it into memory vector.
    if( ((size!=8) && (type=="double")) ||
        ((size!=4) && (type=="integer")) ){
        # Non-native width: serialize to an in-memory raw vector first, then
        # dump the raw bytes to the connection in one call.
        addwrite = function(value){
            tmp = writeBin(
                con = raw(),
                object = f_caster(value),
                size = size,
                endian = ENDIANNESS)
            writeBin(con = fid, object = tmp)
        }
    } else {
        # Native width: write directly to the connection.
        addwrite = function(value){
            writeBin(
                con = fid,
                object = f_caster(value),
                size = size,
                endian = ENDIANNESS)
        }
    }
    # Writing long vectors is currently NOT supported
    # (as of R 3.2.2, 3.3.0).
    # Thus write in pieces of 128 MB or less.
    if(length(value)*as.numeric(size) < 134217728){
        addwrite(value)
    } else {
        step1 = 134217728 %/% size      # elements per 128 MB chunk
        mm = length(value)
        nsteps = ceiling(mm/step1)
        for( part in 1:nsteps ){ # part = 1
            # cat( part, "of", nsteps, "\n")
            fr = (part-1)*step1 + 1
            to = min(part*step1, mm)
            addwrite(value[fr:to])
        }
        rm(part, step1, mm, nsteps, fr, to)
    }
    # Instead of flush:
    seek(con = fid, where = 0, rw = "write")
    return(invisible())
}
#' @describeIn S3-filearray get element by position
#' @export
#' @param x A `FileArray` object.
#' @param ... Subscripts: either empty (`x[]`, whole array) or exactly one
#'   index per dimension (`x[i, j, ...]`).
#' @param drop Drop length-one dimensions of the result, as in base `[`.
#' @param reshape Optional target dimension for the result; when supplied,
#'   dimension names are not copied.
#' @param strict Forwarded to the reader `FARR_subset()`.
`[.FileArray` <- function(x, ..., drop = TRUE, reshape = NULL, strict = TRUE) {
    if(!x$valid()){
        stop("Invalid file array")
    }
    drop <- isTRUE(drop)
    # file <- tempfile(); x <- filearray_create(file, c(300, 400, 100, 1))
    filebase <- paste0(x$.filebase, x$.sep)
    arglen <- ...length()
    elem_size <- x$element_size()
    dim <- x$dimension()
    # Per-dimension index list; stays empty for x[] (read everything).
    listOrEnv <- list()
    if(arglen == 1){
        # A single subscript is only legal when it is missing (i.e. x[]).
        tmp <- tryCatch({
            ...elt(1)
        }, error = function(e){
            NULL
        })
        if(length(tmp)){
            stop("Subset FileArray only allows x[] or x[i,j,...] (single index like x[i] is not allowed, use x[[i]] instead)")
        }
    } else if(arglen > 1){
        if(arglen != length(dim)){
            stop("Subset FileArray dimension mismatch.")
        }
        missing_args <- check_missing_dots(environment())
        for(ii in seq_len(arglen)){
            if( missing_args[[ii]] ){
                # Missing subscript: take the whole extent of that dimension.
                listOrEnv[[ii]] <- seq_len(dim[[ii]])
            } else {
                tmp <- ...elt(ii)
                if(!length(tmp)){
                    tmp <- integer(0L)
                } else if(is.logical(tmp)){
                    # Logical subscripts are recycled to the dimension length
                    # and converted to integer positions, as in base R.
                    if(length(tmp) > dim[[ii]]){
                        stop("(subscript) logical subscript too long")
                    }
                    tmp <- rep(tmp, ceiling(dim[[ii]] / length(tmp)))
                    tmp <- tmp[seq_len(dim[[ii]])]
                    tmp <- seq_along(tmp)[tmp]
                }
                listOrEnv[[ii]] <- tmp
            }
        }
    }
    # guess split dim: per-dimension span of the requested indices (1 when a
    # dimension is empty/all-NA), used to estimate read cost below.
    max_buffer <- max_buffer_size() / elem_size
    if(length(listOrEnv) == length(dim)){
        idxrange <- sapply(listOrEnv, function(x){
            if(!length(x) || all(is.na(x))){ return(1L) }
            rg <- range(x, na.rm = TRUE)
            return(rg[2] - rg[1] + 1)
        })
    } else {
        idxrange <- dim
    }
    # sapply(seq_len(length(dim) - 1), function(split_dim){
    #     idx1dim <- dim[seq_len(split_dim)]
    #     idx1dim[[split_dim]] <- idxrange[[split_dim]]
    #     idx1len <- prod(idx1dim)
    #     idx2len <- prod(dim[-seq_len(split_dim)])
    #     nloops <- ceiling(idx1len / max_buffer)
    #     (idx1len * nloops) * idx2len
    # })
    # worst-case time-complexity: pick the split dimension that minimizes the
    # estimated cost of buffered reads.
    time_complexity <-
        sapply(seq_len(length(dim) - 1), function(split_dim) {
            dim[[length(dim)]] <- 1
            idx1dim <- dim[seq_len(split_dim)]
            idx1dim[[split_dim]] <- idxrange[[split_dim]]
            idx1len <- prod(idx1dim)
            idx2len <- prod(dim[-seq_len(split_dim)])
            buffer_sz <-
                ifelse(idx1len > max_buffer, max_buffer, idx1len)
            nloops <- ceiling(idx1len / buffer_sz)
            (idx1len * nloops + idx2len) * idx2len
        })
    split_dim <- which.min(time_complexity)
    split_dim <- split_dim[[length(split_dim)]]   # break ties toward the last
    # set buffer size (in bytes, capped by the global maximum); the previous
    # value is restored on exit.
    idx1len <- prod(dim[seq_len(split_dim)])
    buffer_sz <- idx1len * elem_size
    buffer_sz <- ifelse(buffer_sz > max_buffer, max_buffer, buffer_sz)
    current_bsz <- get_buffer_size()
    on.exit({
        set_buffer_size(current_bsz)
    })
    set_buffer_size(buffer_sz)
    # Subset the dimension names to match the requested indices (only when no
    # reshape was asked for).
    dnames <- NULL
    if(is.null(reshape)){
        dnames <- x$dimnames()
        if(length(dnames)){
            dnames <- structure(
                lapply(seq_along(dnames), function(ii){
                    if(length(dnames[[ii]])){
                        dnames[[ii]][listOrEnv[[ii]]]
                    } else {
                        NULL
                    }
                }),
                names = names(dnames)
            )
        }
    }
    # Delegate the actual read; the assignment is the last expression, so the
    # result is returned (invisibly, as with any `<-` return value).
    re <- FARR_subset(
        filebase = filebase,
        type = x$sexp_type(),
        listOrEnv = listOrEnv,
        dim = dim,
        cum_part_sizes = x$.partition_info[, 3],
        reshape = reshape,
        drop = drop,
        strict = strict,
        split_dim = split_dim,
        dimnames = dnames
    )
}
77626b92e080c969be8196a1efdba03c1a024280 | 67e58cd3324dbdb981002e680488e5aa88266111 | /STAT_636/R Scripts/Data-Specific/Textbook/T5-8.r | c558bd5c046320b5c277675cbbf1a7caebf0a6db | [] | no_license | mauliasavana/Statistics-Masters | b8a152870e528cb653dfb921cf1fd53202ecfe78 | c89c2f36d05d5936404d5f460b1a2bdb01a93f3a | refs/heads/master | 2021-08-27T17:18:16.994407 | 2017-05-13T12:09:28 | 2017-05-13T12:09:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,226 | r | T5-8.r | ####
#### For each of n = 16 time periods, we have the number of overtime hours of 5 types in
#### the Madison, Wisconsin police department. The overtime types are (1) legal appear-
#### ances, (2) extraordinary event, (3) holdover, (4) compensatory overtime allowed
#### (COA), and (5) meetings.
####
library(plotrix)
##
## Input data.
##
X <- as.matrix(read.delim("T5-8.DAT", header = FALSE, sep = ""))
n <- nrow(X)
p <- ncol(X)
colnames(X) <- c("legal", "extra", "holdover", "coa", "meeting")
## Summary statistics.
X_bar <- colMeans(X)
S <- var(X)
## X-bar charts for legal appearances and extraordinary event overtime types.
## Control limits are the mean +/- 3 standard deviations.
pdf("figures/overtime_legal.pdf")
plot(X[, 1], ylim = c(1500, 5500), xlab = "Observation Number", ylab = "Overtime Hours",
  main = "Legal Appearances", type = "b")
abline(X_bar[1], 0)
abline(X_bar[1] - 3 * sqrt(S[1, 1]), 0)
abline(X_bar[1] + 3 * sqrt(S[1, 1]), 0)
dev.off()
pdf("figures/overtime_extra.pdf")
plot(X[, 2], ylim = c(-3000, 6000), xlab = "Observation Number", ylab = "Overtime Hours",
  main = "Extraordinary Event", type = "b")
points(11, X[11, 2], pch = 20, col = "red")   # highlight observation 11
abline(X_bar[2], 0)
abline(X_bar[2] - 3 * sqrt(S[2, 2]), 0)
abline(X_bar[2] + 3 * sqrt(S[2, 2]), 0)
dev.off()
## An ellipse format chart for legal appearances and extraordinary event overtime types.
## d2 holds the squared (Mahalanobis-type) generalized distances T^2.
d2 <- rep(NA, n)
for(i in 1:n)
  d2[i] <- t(X[i, 1:2] - X_bar[1:2]) %*% solve(S[1:2, 1:2]) %*% (X[i, 1:2] - X_bar[1:2])
c2 <- qchisq(0.99, 2)          # 99% chi-square quantile, 2 df (control limit)
ee <- eigen(S[1:2, 1:2])
lambda <- ee$values
ee <- ee$vectors
theta <- atan(ee[2, 1] / ee[1, 1]) * 57.2957795  # 57.2957795 = 180 / pi
pdf("figures/overtime_ellipse.pdf")
plot(c(1500, 5500), c(-2000, 5500), xlab = "Legal Appearances",
  ylab = "Extraordinary Event", asp = 1, type = "n")
draw.ellipse(X_bar[1], X_bar[2], sqrt(c2 * lambda[1]), sqrt(c2 * lambda[2]),
  angle = theta, lwd = 2)
points(X[, 1:2], pch = 20)
points(X[11, 1], X[11, 2], pch = 20, col = "red")
dev.off()
## A T2 chart for legal appearances and extraordinary event overtime types. This could
## also be done for > 2 variables.
pdf("figures/overtime_T2.pdf")
plot(d2, xlab = "Observation Number", ylab = expression(T^2), type = "b")
points(11, d2[11], pch = 20, col = "red")
abline(c2, 0)
dev.off()
|
4fcfe51396d8b9a67105c50e81e0d462503692ac | 3c5deaa7540688e7332a4b838ddb442bed8f1101 | /fonctions/functions.R | 8653aacad8a66811c7d586594b10ad3ea605f0e6 | [] | no_license | gaohan0416/sy09-p2 | 69842986954333661f6fba616f1c3eb3ea426861 | 86ad9c6913335891f15c493ba02d63f72792552f | refs/heads/master | 2020-03-19T06:23:38.431459 | 2018-06-29T19:51:26 | 2018-06-29T19:51:26 | 136,015,294 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,261 | r | functions.R | library(Rlab)
library(MASS)
library(rpart)
#calcul du taux d'erreur pour l'analyse discriminante quadratique (ADQ)
# Quadratic discriminant analysis (ADQ): randomly split `donn` (class label
# in the last column) into train/test with separ1(), fit on the training
# part and return the test misclassification rate.
calculTauxErreurADQnoSep <- function(donn){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  param.adq <- adq.app(Xapp, zapp)     # fit QDA parameters (project helper)
  adq.val <- ad.val(param.adq, Xtst)   # predict the test set
  z.res <- adq.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# QDA test-error rate for an already-separated train/test split.
calculTauxErreurADQ <- function(Xapp, zapp, Xtst, ztst){
  param.adq <- adq.app(Xapp, zapp)     # fit QDA parameters
  adq.val <- ad.val(param.adq, Xtst)   # predict the test set
  z.res <- adq.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
#calcul du taux d'erreur pour l'analyse discriminante lineaire (ADL)
# Linear discriminant analysis (ADL): randomly split `donn` (class label in
# the last column) into train/test with separ1(), fit on the training part
# and return the test misclassification rate.
calculTauxErreurADLnoSep <- function(donn){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  param.adl <- adl.app(Xapp, zapp)     # fit LDA parameters (project helper)
  adl.val <- ad.val(param.adl, Xtst)   # predict the test set
  z.res <- adl.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# LDA test-error rate for an already-separated train/test split.
calculTauxErreurADL <- function(Xapp, zapp, Xtst, ztst){
  param.adl <- adl.app(Xapp, zapp)     # fit LDA parameters
  adl.val <- ad.val(param.adl, Xtst)   # predict the test set
  z.res <- adl.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
#calcul du taux d'erreur pour le classifieur bayésien naif (NBA)
# Naive Bayes classifier (NBA): randomly split `donn` (class label in the
# last column) into train/test with separ1(), fit on the training part and
# return the test misclassification rate.
calculTauxErreurNBAnoSep <- function(donn){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  param.nba <- nba.app(Xapp, zapp)     # fit naive Bayes parameters
  nba.val <- ad.val(param.nba, Xtst)   # predict the test set
  z.res <- nba.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# Naive Bayes test-error rate for an already-separated train/test split.
calculTauxErreurNBA <- function(Xapp, zapp, Xtst, ztst){
  param.nba <- nba.app(Xapp, zapp)     # fit naive Bayes parameters
  nba.val <- ad.val(param.nba, Xtst)   # predict the test set
  z.res <- nba.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
#calcul du taux d'erreur pour la regression logistique (RL)
# Logistic regression (RL): randomly split `donn` (class label in the last
# column) into train/test with separ1(), fit on the training part and return
# the test misclassification rate.
# `intercept` toggles the intercept term; `epsi` is the convergence tolerance
# forwarded to log.app() (project helper).
calculTauxErreurRLnoSep <- function(donn, intercept, epsi = 1*(10^-5)){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  res.app <- log.app(Xapp, zapp, intercept, epsi)   # fit logistic regression
  res.val <- log.val(res.app$beta, Xtst)            # predict the test set
  z.res <- res.val$pred
  # Fix: the original computed the two lines below twice in a row.
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# Logistic regression test-error rate for an already-separated train/test
# split.  The convergence tolerance passed to log.app() is fixed at 1e-5.
calculTauxErreurRL <- function(Xapp, zapp, Xtst, ztst, intercept){
  res.app <- log.app(Xapp, zapp, intercept, 1*(10^-5))  # fit logistic regression
  res.val <- log.val(res.app$beta, Xtst)                # predict the test set
  z.res <- res.val$pred
  # Fix: the original computed the two lines below twice in a row.
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# Run calculTauxErreurRLnoSep() `nbIter` times (each call draws a fresh
# random train/test split) and return the individual error rates together
# with their mean.
scriptCalculTauxErreurRL <- function(donn, intercept, nbIter){
  tauxErreur <- rep(0, nbIter)       # preallocated result vector
  for(i in 1:nbIter){
    tauxErreur[i] <- calculTauxErreurRLnoSep(donn, intercept)
  }
  moy <- sum(tauxErreur) / nbIter    # mean error rate over all runs
  res <- list(tauxErreur, moy)
  names(res) <- c("Vector", "Mean")
  return(res)
}
#calcul du taux erreur avec la méthode des arbres binaires de décison
# Binary decision tree (ABD, via rpart): fit on the training split, prune to
# the complexity parameter minimising the cross-validated error, and return
# the test misclassification rate.
calculTauxErreurABD <- function(Xapp, zapp, Xtst, ztst){
  # Fit the decision tree.
  tree <- rpart(zapp~.,data=Xapp)
  # Prune to the optimal tree: cptable column 4 is the cross-validated error
  # ("xerror"), column 1 the corresponding cp value.
  treeOptimal <- prune(tree,cp=tree$cptable[which.min(tree$cptable[,4]),1])
  # Predict the test set; round() maps the numeric predictions back to the
  # class codes (assumes zapp is numeric, not a factor -- TODO confirm).
  pred <- predict(treeOptimal, Xtst)
  z.res <- round(pred)
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# Quadratic logistic regression (RLQ): augment the design matrices with all
# pairwise products and squares (transformDataForRLQ), fit a logistic
# regression and return the test misclassification rate.
calculTauxErreurRLQ <- function(Xapp, zapp, Xtst, ztst, intercept){
  donn <- transformDataForRLQ(Xapp, Xtst)
  Xapp2 <- donn$Xapp2
  Xtst2 <- donn$Xtst2
  rlq.app <- log.app(Xapp2, zapp, intercept, 1*(10^-5))
  # Bug fix: the original referenced the undefined variable `test.app`;
  # the fitted model is stored in `rlq.app`.
  rlq.val <- log.val(rlq.app$beta, Xtst2)
  z.res <- rlq.val$pred
  nbSim.tst <- length(which(z.res==ztst))        # correct predictions
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)   # misclassification rate
  return(tauxErreur.tst)
}
# Compare the test-error rates of five classifiers (LDA, QDA, naive Bayes,
# logistic regression, decision tree) over `nbIter` random train/test splits
# of `donn` (class labels in the last column).  Splits where any model
# degenerates (error rate exactly 1) are discarded and redrawn.
# Returns a list with the nbIter x 5 error matrix, the per-model mean and
# variance of the errors, and the number of discarded splits.
compareModelsError <- function(donn, nbIter, intercept = TRUE){
  matErr <- matrix(0, nrow = nbIter, ncol = 5)
  # Bug fix: the original started `i` at 0, so the first successful split was
  # silently discarded (`matErr[0, ] <- ...` is a no-op) and nbIter + 1
  # successful iterations were run instead of nbIter.
  i <- 1
  nbError <- 0
  while(i <= nbIter){
    n <- ncol(donn)
    X <- donn[,1:(n-1)]        # predictors
    z <- donn[,n]              # class labels (last column)
    donn.sep <- separ1(X, z)   # fresh random train/test split
    Xapp <- donn.sep$Xapp
    zapp <- donn.sep$zapp
    Xtst <- donn.sep$Xtst
    ztst <- donn.sep$ztst
    errADL = calculTauxErreurADL(Xapp, zapp, Xtst, ztst)
    errADQ = calculTauxErreurADQ(Xapp, zapp, Xtst, ztst)
    errNBA = calculTauxErreurNBA(Xapp, zapp, Xtst, ztst)
    errRL = calculTauxErreurRL(Xapp, zapp, Xtst, ztst, intercept)
    errABD = calculTauxErreurABD(Xapp, zapp, Xtst, ztst)
    if(errADL != 1 && errADQ != 1 && errNBA != 1 && errRL != 1 && errABD != 1){
      matErr[i,] <- c(errADL, errADQ, errNBA, errRL, errABD)
      i <- i + 1
    }
    else {
      # Degenerate split: count it and redraw.
      nbError <- nbError + 1
    }
  }
  colnames(matErr) <- c("errADL", "errADQ", "errNBA", "errRL", "errABD")
  moy <- colSums(matErr) / nbIter
  var <- apply(matErr, 2, var)
  res <- NULL
  res$matErr <- matErr
  res$moy <- moy
  res$var <- var
  res$nbError <- nbError   # new (backward-compatible): discarded-split count
  return(res)
}
# Same comparison as compareModelsError() but without logistic regression
# (four models: LDA, QDA, naive Bayes, decision tree) -- intended for the
# Sonar data where RL is not run.  Degenerate splits (error rate exactly 1)
# are discarded and redrawn.
compareModelsErrorOnSonar <- function(donn, nbIter){
  matErr <- matrix(0, nrow = nbIter, ncol = 4)
  # Bug fix: the original started `i` at 0, so the first successful split was
  # silently discarded (`matErr[0, ] <- ...` is a no-op) and nbIter + 1
  # successful iterations were run instead of nbIter.
  i <- 1
  nbError <- 0
  while(i <= nbIter){
    n <- ncol(donn)
    X <- donn[,1:(n-1)]        # predictors
    z <- donn[,n]              # class labels (last column)
    donn.sep <- separ1(X, z)   # fresh random train/test split
    Xapp <- donn.sep$Xapp
    zapp <- donn.sep$zapp
    Xtst <- donn.sep$Xtst
    ztst <- donn.sep$ztst
    errADL = calculTauxErreurADL(Xapp, zapp, Xtst, ztst)
    errADQ = calculTauxErreurADQ(Xapp, zapp, Xtst, ztst)
    errNBA = calculTauxErreurNBA(Xapp, zapp, Xtst, ztst)
    errABD = calculTauxErreurABD(Xapp, zapp, Xtst, ztst)
    if(errADL != 1 && errADQ != 1 && errNBA != 1 && errABD != 1){
      matErr[i,] <- c(errADL, errADQ, errNBA, errABD)
      i <- i + 1
    }
    else {
      # Degenerate split: count it and redraw.
      nbError <- nbError + 1
    }
  }
  colnames(matErr) <- c("errADL", "errADQ", "errNBA", "errABD")
  moy <- colSums(matErr) / nbIter
  var <- apply(matErr, 2, var)
  res <- NULL
  res$matErr <- matErr
  res$moy <- moy
  res$var <- var
  res$nbError <- nbError   # new (backward-compatible): discarded-split count
  return(res)
}
#script pour tester les résultats de l'ADL, ADQ et le NBA un nombre nbIter de fois
# Compare the test-error rates of the three discriminant classifiers (LDA,
# QDA, naive Bayes) over `nbIter` random train/test splits.  Unlike
# compareModelsError(), degenerate splits are not filtered out here.
compareModelsErrorClassDiscri <- function(donn, nbIter){
  matErr <- matrix(0, nrow = nbIter, ncol = 3)
  for(i in 1:nbIter){
    n <- ncol(donn)
    X <- donn[,1:(n-1)]        # predictors
    z <- donn[,n]              # class labels (last column)
    donn.sep <- separ1(X, z)   # fresh random train/test split
    Xapp <- donn.sep$Xapp
    zapp <- donn.sep$zapp
    Xtst <- donn.sep$Xtst
    ztst <- donn.sep$ztst
    errADL = calculTauxErreurADL(Xapp, zapp, Xtst, ztst)
    errADQ = calculTauxErreurADQ(Xapp, zapp, Xtst, ztst)
    errNBA = calculTauxErreurNBA(Xapp, zapp, Xtst, ztst)
    matErr[i,] <- c(errADL, errADQ, errNBA)
  }
  colnames(matErr) <- c("errADL", "errADQ", "errNBA")
  moy <- colSums(matErr) / nbIter   # per-model mean error
  res <- NULL
  res$matErr <- matErr
  res$moy <- moy
  return(res)
}
# Feature expansion for quadratic logistic regression (RLQ): appends to both
# the training and the test design matrices all pairwise products
# X[,p] * X[,q] (p < q) followed by all squared columns X[,p]^2.
# Returns a list with the two augmented matrices, $Xapp2 and $Xtst2.
# Note: columns are grown with cbind() inside loops, which is quadratic in
# the number of added columns -- acceptable for the small p used here.
transformDataForRLQ <- function(Xapp, Xtst){
  Xapp2 <- Xapp
  Xtst2 <- Xtst
  # Cross-product terms for every pair of distinct columns.
  for (p in 1:(dim(Xapp)[2]-1))
  {
    for (q in (p+1):dim(Xapp)[2])
    {
      Xapp2 <- cbind(Xapp2, Xapp[,p]*Xapp[,q])
      Xtst2 <- cbind(Xtst2, Xtst[,p]*Xtst[,q])
    }
  }
  # Squared terms for every column.
  for (p in 1:dim(Xapp)[2])
  {
    Xapp2 <- cbind(Xapp2, Xapp[,p]^2)
    Xtst2 <- cbind(Xtst2, Xtst[,p]^2)
  }
  res <- NULL
  res$Xapp2 <- Xapp2
  res$Xtst2 <- Xtst2
  return(res)
}
#fonction pour calculer LDA avec utilisation de fct de R
# LDA via MASS::lda(): splits `donn` (class labels in the last column) with
# separ1(), fits on the training part and returns the fitted model together
# with predictions, accuracies and error rates on both the training and the
# test set.
ldaR <- function(donn){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  lda.model <- lda(Xapp, grouping = zapp)
  predmodel.lda.app <- predict(lda.model, Xapp)
  predmodel.lda.tst <- predict(lda.model, Xtst)
  # Confusion matrices (predicted x true) for both sets.
  table.lda.app <- table(Predicted=predmodel.lda.app$class, z=zapp)
  table.lda.tst <- table(Predicted=predmodel.lda.tst$class, z=ztst)
  # Accuracy from the diagonal of the 2x2 confusion matrix (assumes exactly
  # two classes -- TODO confirm for other datasets).
  accuracy.app <- (table.lda.app[1,1] + table.lda.app[2,2])/sum(colSums(table.lda.app))
  accuracy.tst <- (table.lda.tst[1,1] + table.lda.tst[2,2])/sum(colSums(table.lda.tst))
  z.res.app <- predmodel.lda.app$class
  nbSim.app <- length(which(z.res.app==zapp))     # correct on training set
  tauxErreur.app <- 1 - nbSim.app/length(zapp)    # training error rate
  z.res.tst <- predmodel.lda.tst$class
  nbSim.tst <- length(which(z.res.tst==ztst))     # correct on test set
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)    # test error rate
  err <- list(tauxErreur.app, tauxErreur.tst)
  names(err) <- c("Training","Test")
  acc <- list(accuracy.app, accuracy.tst)
  names(acc) <- c("Training","Test")
  pred <- list(predmodel.lda.app,predmodel.lda.tst)
  names(pred) <- c("Training", "Test")
  res <- list(lda.model, pred, acc, err)
  names(res) <- c("Modele", "Prediction", "Accuracy", "Error")
  return (res)
}
# QDA via MASS::qda() (the original comment said "LDA" -- copy/paste slip):
# splits `donn` (class labels in the last column) with separ1(), fits on the
# training part and returns the fitted model together with predictions,
# accuracies and error rates on both the training and the test set.
qdaR <- function(donn){
  n <- ncol(donn)
  X <- donn[,1:(n-1)]        # predictors
  z <- donn[,n]              # class labels (last column)
  donn.sep <- separ1(X, z)   # project helper: random train/test split
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  qda.model <- qda(Xapp, grouping = zapp)
  predmodel.qda.app <- predict(qda.model, Xapp)
  predmodel.qda.tst <- predict(qda.model, Xtst)
  # Confusion matrices (predicted x true) for both sets.
  table.qda.app <- table(Predicted=predmodel.qda.app$class, z=zapp)
  table.qda.tst <- table(Predicted=predmodel.qda.tst$class, z=ztst)
  # Accuracy from the diagonal of the 2x2 confusion matrix (assumes exactly
  # two classes -- TODO confirm for other datasets).
  accuracy.app <- (table.qda.app[1,1] + table.qda.app[2,2])/sum(colSums(table.qda.app))
  accuracy.tst <- (table.qda.tst[1,1] + table.qda.tst[2,2])/sum(colSums(table.qda.tst))
  z.res.app <- predmodel.qda.app$class
  nbSim.app <- length(which(z.res.app==zapp))     # correct on training set
  tauxErreur.app <- 1 - nbSim.app/length(zapp)    # training error rate
  z.res.tst <- predmodel.qda.tst$class
  nbSim.tst <- length(which(z.res.tst==ztst))     # correct on test set
  tauxErreur.tst <- 1 - nbSim.tst/length(ztst)    # test error rate
  err <- list(tauxErreur.app, tauxErreur.tst)
  names(err) <- c("Training","Test")
  acc <- list(accuracy.app, accuracy.tst)
  names(acc) <- c("Training","Test")
  pred <- list(predmodel.qda.app,predmodel.qda.tst)
  names(pred) <- c("Training", "Test")
  res <- list(qda.model, pred, acc, err)
  names(res) <- c("Modele", "Prediction", "Accuracy", "Error")
  return (res)
}
|
ff1b90b4a0cf7d2d81e7b9076e502650b1a7fbe3 | a4d1f5e97734429a77a8ab1585ee4ec2175fb48f | /trainingGGPLOT2.R | 5bc4bbe32b7bb3dcbdb1b681bce5115acd24eabb | [] | no_license | obaidM/Advance-R-programming-DataVisulization | c2e4e8f0b13229e1b63e5f6b8ca5cf40e18ea3cb | 7a6a85f92062a0861786eea72e21e6e0e86033cc | refs/heads/master | 2021-01-01T06:23:27.285463 | 2017-07-17T00:22:37 | 2017-07-17T00:22:37 | 97,419,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 448 | r | trainingGGPLOT2.R | library(ggplot2)
library(ggthemes)
# Practice data: ggplot2's built-in fuel-economy and Texas housing datasets.
df <- mpg
df1 <- txhousing
#q1
#pl <- ggplot(df,aes(x=hwy))
#pl <-pl + geom_histogram(fill='red', alpha = 0.6,bins=20)
#q2
#pl <- ggplot(df,aes(x=manufacturer))
#pl <- pl + geom_bar(aes(fill= factor(cyl) ),alpha = 0.6,position='dodge')
#q3
# Scatter plot of sales vs volume with a smoothed trend line.
pl <- ggplot(df1, aes(x = sales,y = volume))
pl <- pl + geom_point(color='blue',alpha = 0.3)
pl <- pl + geom_smooth(color='red')
print(pl)
f963c835aaf8ef3b497a5ec4541b045fc6b4fdc5 | 283ee52a10131297b23c885a1de3c221ed8af25f | /man/adeproLogo.Rd | d14199449665d257b654d25ccd7c1b7dac218c44 | [] | no_license | cran/adepro | 0cda24b9c32be406d302b7ee42d05160802e79b4 | 22a3fee9fbeca27351e187c4c206ef40e710932a | refs/heads/master | 2021-07-25T19:19:09.707141 | 2021-07-06T09:20:02 | 2021-07-06T09:20:02 | 96,978,803 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 554 | rd | adeproLogo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adeproLogo.R
\name{adeproLogo}
\alias{adeproLogo}
\title{adeproLogo Function that creates a div container for the AdEPro Logo}
\usage{
adeproLogo(height = 230, width = 160, align = "right")
}
\arguments{
\item{height}{height of the Logo (numeric)}
\item{width}{width of the Logo (numeric)}
\item{align}{Alignment of the Logo ("center"/"left"/"right")}
}
\description{
creates a div container with AdEPro Logo for the shiny app adepro
}
\keyword{internal}
|
3513c38e52a4751b74884f5e898a4874efcf4bed | 1eaecdaf4971803f97238b9d78cc0caa080aace6 | /man/BayesSAE.Rd | ca58b4e65be702c85a9b6a26f8d059d3db48305d | [] | no_license | ssoro411/bayesae | 250995dec082e02dc64a92d518b040107782b08d | 2de4f32926c3d1c6c81471a8af35c1c00780d070 | refs/heads/master | 2021-06-29T04:10:48.474966 | 2019-12-03T02:36:14 | 2019-12-03T02:36:14 | 102,888,086 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,436 | rd | BayesSAE.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BayesSAE.R
\name{BayesSAE}
\alias{BayesSAE}
\title{Univariate hierarchical Bayes approach to small area estimation.}
\arguments{
\item{formula}{Model formula relating the direct estimate to the auxiliary variables.}
\item{data}{Data frame with direct estimate and auxiliary variables.}
\item{Di}{\code{m} vector with sampling variance.}
\item{domain}{Vector with Domain names.}
\item{model}{There are three possible models. "FH" for Fay-Herriot model, "CAR" for conditional auto-regressive model and "SAR" for simultaneous auto-regressive model.}
\item{W}{Spatial matrix. If \code{model}="SAR", each row sum should equal 1.}
\item{logit.trans}{If \code{TRUE}, the direct estimate is transformed to the logit scale and the sampling variance is approximated by the delta method. Simulation results are returned on the original proportion scale.}
\item{pars}{Parameters to be monitored.}
\item{iter}{Total number of iterations.}
\item{warmup}{Warm up. Default is "\code{floor}(\code{iter}/2)".}
\item{chains}{Number of chains. Default is 4.}
\item{control}{See the \code{rstan} package document.}
\item{open.progress}{Progress of the chains will be presented if it is \code{TRUE}.}
}
\value{
Simulated posterior sample from the \code{rstan}.
}
\description{
Hierarchical Bayes approach to small area estimation using \code{stan}.
}
\references{
\insertRef{carpenter2016stan}{bayesae}
\insertRef{guo2016rstan}{bayesae}
\insertRef{vehtari2014waic}{bayesae}
}
|
3930fdee8104f632a2d676b905e76e002221c244 | 405814ab45ea197f8d5cb3576545c1526b44b1ed | /pml-prediction.R | a5669c69dd2dcad620857df88345dcd3d70895f6 | [] | no_license | kisscool62/Machine-Learning | 41991d45b19ee1120ec984615e31fd13159ce806 | 2a0ba55e49a5ab811551b4899ed09f96872dcaee | refs/heads/master | 2021-01-25T12:13:28.828622 | 2015-06-21T21:12:57 | 2015-06-21T21:12:57 | 37,784,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,898 | r | pml-prediction.R | ###################################
#
# prediction model for pml
# coursera machine leaning project
#
###################################
# Load the Weight Lifting Exercise data.  Blank cells, "NA" and Excel
# division errors ("#DIV/0!") are treated as missing in the training set;
# note the test file does NOT list "#DIV/0!" among its NA strings.
training_raw <- read.csv('pml-training.csv', dec = '.', na.strings=c(""," ","NA", "#DIV/0!"))
testing_raw <- read.csv('pml-testing.csv', dec = '.', na.strings=c(""," ","NA"))
library(plyr)
library(caret)
library(rpart)
library(randomForest)
library(doParallel)
###################################
#
# functions
#
###################################
# Apply FUN (with any extra arguments in `...`) to every column of `df` and
# return the results as a vector named after the columns.
# The result is preallocated as logical, but R's assignment coercion lets
# FUN return other scalar types (e.g. var()), promoting the vector.
filterColumns <- function(df, FUN, ...){
  df.dim <- ncol(df)
  res <- vector(mode="logical", df.dim)
  # Fix: seq_len() instead of 1:df.dim, which breaks on zero-column input
  # (1:0 yields c(1, 0) and indexes a nonexistent column).
  for(i in seq_len(df.dim)){
    res[i] <- FUN(df[,i], ...)
  }
  names(res) <- names(df)
  res
}
# Does `vector` have (sample) variance no greater than `threshold`?
# Returns NA when var() is NA (e.g. length-one input).
isToFewVariance <- function(vector, threshold = 0){
  threshold >= var(vector)
}
# TRUE for each class of `vector` that appears in `whatIsANumber`
# (one result per entry of class(vector), as with %in%).
isClassANumber <- function(vector, whatIsANumber){
  is.element(class(vector), whatIsANumber)
}
# TRUE when the fraction of NA entries in `vector` reaches `threshold`
# (default 1, i.e. entirely missing).
isNa <- function(vector, threshold = 1){
  n_total <- length(vector)
  na_fraction <- 1 - (n_total - sum(is.na(vector))) / n_total
  na_fraction >= threshold
}
# Randomly split the rows of `df` into a training part (fraction `p`, size
# floor(p * nrow)) and a test part (the remainder).  Sampling is without
# replacement, so the result depends on the RNG state.
createTrainAndTestPartition <- function(df, p){
  n_rows <- dim(df)[1]
  idx <- sample(1:n_rows, size = floor(p*n_rows), replace = FALSE)
  list(train = df[idx,], test = df[-idx,])
}
# write answers
# Write one plain-text file per prediction (problem_id_<i>.txt), the format
# required for the Coursera submission.  Side effects only: files are
# created in the current working directory; nothing is returned.
pml_write_files <- function(x){
  n <- length(x)
  # Fix: seq_len() instead of 1:n, which would loop over c(1, 0) for empty x.
  for(i in seq_len(n)){
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file=filename, quote=FALSE, row.names=FALSE, col.names=FALSE)
  }
}
#which(filterColumns(training_without_NA, isToFewVariance, 0.03))
#sum(filterColumns(training_without_NA, isToFewVariance, 0.03))
#filterColumns(training_without_NA, var)
#numericColumns <- filterColumns(training, isClassANumber, c("numeric", "integer"))
# remove all columns with 97% NA because shouldn't impact the model
filtered_columns <- !filterColumns(training_raw, isNa, 0.97)
training_without_NA <- subset(training_raw, select = filtered_columns)
training_without_NA <- subset(training_without_NA, select = -c(1))
training_without_NA <- mutate(training_without_NA, cvtd_timestamp = as.numeric(as.POSIXlt(as.character(cvtd_timestamp)), format = "%d/%m/%Y %H:%M"))
testing_without_NA <- subset(testing_raw, select = filtered_columns)
testing_without_NA <- subset(testing_without_NA, select = -c(1))
testing_without_NA <- mutate(testing_without_NA, cvtd_timestamp = as.numeric(as.POSIXlt(as.character(cvtd_timestamp)), format = "%d/%m/%Y %H:%M"))
levels(testing_without_NA$new_window) <- levels(training_without_NA$new_window)
## predict with removed problem_id
#to_test is the final data to predict classes. Not for sample error analysis because classe is not known
to_test <- subset(testing_without_NA, select = -c(59))
to_test <- mutate(to_test,
magnet_dumbbell_z = as.numeric(magnet_dumbbell_z),
magnet_forearm_y = as.numeric(magnet_forearm_y),
magnet_forearm_z = as.numeric(magnet_forearm_z))
## building data to build the model
# data grouped by classe
data_A <- training_without_NA[training_without_NA$classe == "A",]
data_B <- training_without_NA[training_without_NA$classe == "B",]
data_C <- training_without_NA[training_without_NA$classe == "C",]
data_D <- training_without_NA[training_without_NA$classe == "D",]
data_E <- training_without_NA[training_without_NA$classe == "E",]
## createTrainAndTestPartition takes randomly 60% in each group to build train data, remaining 40% are test data
data <- createTrainAndTestPartition(data_A, 0.6)
training_A_1 <- data$train
testing_A_1 <- data$test
data <- createTrainAndTestPartition(data_B, 0.6)
training_B_1 <- data$train
testing_B_1 <- data$test
data <- createTrainAndTestPartition(data_C, 0.6)
training_C_1 <- data$train
testing_C_1 <- data$test
data <- createTrainAndTestPartition(data_D, 0.6)
training_D_1 <- data$train
testing_D_1 <- data$test
data <- createTrainAndTestPartition(data_E, 0.6)
training_E_1 <- data$train
testing_E_1 <- data$test
## build a training data
training <- rbind(training_A_1, training_B_1, training_C_1, training_D_1, training_E_1)
testing <- rbind(testing_E_1, testing_D_1, testing_B_1, testing_C_1, testing_A_1)
#too long method...
#model_caret <- train(form = classe ~ ., data = training, method="rf")
# build the model with all remaining variables
model_rf <- randomForest(formula = classe ~ ., data = training)
## predict without classe (not needed) variable
prediction <- predict(model_rf, subset(testing, select = -c(59)))
## out of sample error
table(prediction, testing$classe)
sum(prediction == testing$classe)/length(prediction) * 100
## apply the model to the final test data
final_prediction <- predict(model_rf, newdata = to_test)
pml_write_files(final_prediction)
|
5a4b85a8b7abc438db71b8fc175a3122bd5e2775 | 35c741fe1331be9fd2214c7b2cc3e5bcaddba833 | /simmen/man/getParameterMatrix.simmen.Rd | 316972484e4bbe5ec2f1970eb1b50dfb9cc7d18e | [] | no_license | ctszkin/simmen | 717d73f06572ee3cc78fb82670966098b61fa330 | 3a08eae0a3385f2b810991cff575d19722302289 | refs/heads/master | 2020-05-17T05:44:58.805273 | 2014-06-17T11:04:11 | 2014-06-17T11:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 539 | rd | getParameterMatrix.simmen.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{getParameterMatrix.simmen}
\alias{getParameterMatrix.simmen}
\title{getParameterMatrix.simmen}
\usage{
\method{getParameterMatrix}{simmen}(x, tail, ...)
}
\arguments{
\item{x}{x}
\item{tail}{tail}
\item{...}{can specify the name of one parameter(e.g. getParameterMatrix(x,tail=50,a="phi")). All parameter if missing.}
}
\value{
value
}
\description{
Get a matrix of all parameters
}
\details{
getParameterMatrix.simmen
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
|
4a091d3285efb5798b3bd89bc1b1df10f3f4e7ca | b949481ed26efb37c36ccee00c0f4418ad269a6b | /man/tpptrTidyUpESets.Rd | 625f5f6498d28996ac923a86ed425d54dd22941c | [] | no_license | andreaschrader/TPP | 207a302363f2a26db884ae190f3eead27475fc48 | f2cab6cf7b152f0801581aeca569fd4c5f8a6cba | refs/heads/master | 2023-08-27T20:31:27.139608 | 2021-10-29T21:56:55 | 2021-10-29T21:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,209 | rd | tpptrTidyUpESets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpptrTidyUpESets.R
\name{tpptrTidyUpESets}
\alias{tpptrTidyUpESets}
\title{Tidy up expressionSets}
\usage{
tpptrTidyUpESets(tppESetList, returnType = "exprs")
}
\arguments{
\item{tppESetList}{A list of expressionSets, returned
by most TPP-TR functions.}
\item{returnType}{A string with two possible values: "exprs", "featureData".}
}
\value{
Either the fold changes per protein across all experiments
(if \code{returnType = "exprs"}), or the
additional annotation per protein and experiment (if \code{returnType = "featureData"}). For example, the
peptide counts per identified protein can be found here.
}
\description{
Convert list of expressionSets (intermediate output of several
TPP-TR functions) to tidy tables.
}
\details{
expressionSet lists are for example produced by
\code{\link{tpptrImport}}, \code{\link{tpptrNormalize}},
\code{\link{tpptrCurveFit}}.
}
\examples{
data(hdacTR_smallExample)
tpptrData <- tpptrImport(configTable = hdacTR_config, data = hdacTR_data)
concentrations <- tpptrTidyUpESets(tpptrData)
additionalInfos <- tpptrTidyUpESets(tpptrData, returnType = "featureData")
summary(concentrations)
}
|
b86e5183b17721898d9cdb5a879b9cf444fe5ecf | 2a8bd79984eefe94fcb2619bcb3bef6f70f3e1eb | /driverslicenses.R | 4ced4169e97afc1dc648da88ed8a514ae385b09a | [] | no_license | paulgp/driverslicenses | 5c2198251dd8632e6067d65a74dc7dc7463d3d78 | d13ec4d6a8ac0cf8976887bca2f4b8afde64419b | refs/heads/master | 2020-09-17T08:15:47.207727 | 2019-11-26T00:49:29 | 2019-11-26T00:49:29 | 224,049,987 | 0 | 2 | null | 2019-11-26T00:49:30 | 2019-11-25T22:03:11 | R | UTF-8 | R | false | false | 2,735 | r | driverslicenses.R | library(readxl)
library(tidycensus)
library(tidyverse)
library(ggrepel)
state_youth_pop <- read_excel("state_youth_pop.xlsx") %>%
gather(year, pop, -Geography) %>%
mutate(year = as.numeric(year)) %>%
rename(STATE = Geography)
driverslicenses <- read_excel("driverslicenses.xlsx", sheet = "Sheet2") %>%
gather(age, num_licenses, -STATE, -year) %>%
mutate(num_licenses = as.numeric(num_licenses)) %>%
mutate(STATE = str_replace(STATE, "1/", "")) %>%
mutate(STATE = str_replace(STATE, "2/", "")) %>%
mutate(STATE = str_replace(STATE, "3/", "")) %>%
mutate(STATE = str_trim(STATE)) %>%
inner_join(state_youth_pop) %>%
mutate(share = num_licenses/ pop) %>%
select(-num_licenses, -pop) %>%
spread(year, share) %>% mutate(class = case_when(`2016` - `1999` > 0 ~ "Up",
STATE == "Total" ~ "Total",
TRUE ~ "Down"),
class2 = case_when(`2016` - `1999` > 0 ~ "Up",
TRUE ~ "Down")) %>%
filter(STATE != "Dist. of Col." & STATE !="Rhode Island") %>%
filter(age == 17)
left_label <- paste(driverslicenses$STATE, round(driverslicenses$`1999`,2),sep=", ")
right_label <- paste(driverslicenses$STATE, round(driverslicenses$`2016`,2),sep=", ")
library(scales)
p <- ggplot(driverslicenses) +
geom_segment(aes(x=1, xend=2, y=`1999`, yend=`2016`, col=class), size=.75, show.legend=F) +
geom_vline(xintercept=1, linetype="dashed", size=.1) +
geom_vline(xintercept=2, linetype="dashed", size=.1) +
#scale_color_manual(values = c("green"="#00ba38", "red"="#f8766d", "black")) + # color of lines
labs(x="", y="", subtitle="Number of 17-year-old Drivers Licenses per 15-19 year old") + # Axis labels
xlim(.5, 2.5) +
facet_wrap(~class2) +
ylim(0.03,(1.06*(max(driverslicenses$`1999`, driverslicenses$`2016`)))) # X and Y axis limits
p <- p + geom_text_repel(label=left_label, y=driverslicenses$`1999`, x=rep(1, NROW(driverslicenses)), hjust=1.1, size=3.5)
p <- p + geom_text_repel(label=right_label, y=driverslicenses$`2016`, x=rep(2, NROW(driverslicenses)), hjust=-0.1, size=3.5)
p <- p + geom_text(label="1999", x=1, y=1.05*(max(driverslicenses$`1999`, driverslicenses$`2016`)), hjust=1.2, size=5) # title
p <- p + geom_text(label="2016", x=2, y=1.05*(max(driverslicenses$`1999`, driverslicenses$`2016`)), hjust=-0.1, size=5) # title
p + theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.ticks = element_blank(),
axis.text.x = element_blank(),
panel.border = element_blank(),
plot.margin = unit(c(1,2,1,2), "cm"))
|
904ae86a360b41cd1d5eb9c7742b8b9fe0297d7d | 492effa10aab9a468e67eafb124a38630ae9673b | /Kokemaenjoki/Rsrc/1.6_Comp_NEP_rh.r | 9414dbbcc8cd1ee25a3e96e7a8bf90bfccb81295 | [] | no_license | ficusvirens/IBCcarbon_runs | fad930f476aeb57af6a9de6ea00e81f50055b995 | 0f395bfe0c60bd7200857065c290c31de7c6de17 | refs/heads/master | 2023-05-08T03:06:03.324978 | 2021-05-27T08:56:51 | 2021-05-27T08:56:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,385 | r | 1.6_Comp_NEP_rh.r | .libPaths(c("/projappl/project_2000994/project_rpackages", .libPaths()))
libpath <- .libPaths()[1]
devtools::source_url("https://raw.githubusercontent.com/ForModLabUHel/IBCcarbon_runs/master/Kokemaenjoki/Rsrc/settings.r")
source_url("https://raw.githubusercontent.com/ForModLabUHel/IBCcarbon_runs/master/general/functions.r")
###Settings
#Harvest scenarios
ststScen <- "Base"
runScens <- c("MaxSust","Low","Base") # "MaxSust" "Low" "Base"
###Process Yasso weather
load("weatherYassoStstCurClim.rdata")
load("weatherYassoAnnual.rdata")
weatherYasso <- array(0.,dim=c(3829, 84, 3))
for(i in 1:3829) weatherYasso[i,,] <- as.matrix(weatherYassoAnnual[id==i & year %in% 1:84,3:5])
set.seed(1)
ops <- split(data.all, sample(1:115, nrow(data.all), replace=T))
sampleID=8
runScen ="MaxSust"
####Load steady state soilC
load(file=paste0("outSoil/InitSoilCstst_",ststScen,".rdata"))
for(runScen in runScens){
####load model Outputs
soilTotC <- rh <- data.table()
for(sampleID in 1:115){
sampleX <- ops[[sampleID]]
load(paste0("output/CurrClim.rdata",runScen,"_sample",sampleID,".rdata"))
nSites <- dim(out$annual)[1]
nLayers <- dim(out$annual)[4]
nYears <- dim(out$annual)[2]
nSp <- 3
litterSize <- matrix(0,3,nSp)
litterSize[2,] <- 2
litterSize[1,] <- c(30,30,10)
species <- out$annual[,1,4,]
nClimID <- 3829
climIDs <- sampleX$CurrClimID
Lnw <- out$annual[,,26,] + out$annual[,,27,]
Lfw <- out$annual[,,28,]
Lw <- out$annual[,,29,]
litter <- array(0.,dim=c(nSites, nYears, nLayers, 3))
litter[,,,1] <- Lnw
litter[,,,2] <- Lfw
litter[,,,3] <- Lw
litter[which(is.na(litter))] <- 0.
soilC <- array(0.,dim=c(nSites,(nYears+1),5,3,nLayers))
soilC[,1,,,] <- soilCststXX[[sampleID]]$soilC
soilCsites <- .Fortran("runYasso",
litter=as.array(litter),
litterSize=as.array(litterSize),
nYears=as.integer(nYears),
nLayers=as.integer(nLayers),
nSites=as.integer(nSites),
nSp=as.integer(nSp),
species=as.matrix(species),
nClimID=as.integer(nClimID),
climIDs=as.integer(climIDs),
pAWEN=as.matrix(parsAWEN),
pYasso=as.double(pYAS),
weatherYasso=as.matrix(weatherYasso),
soilC=as.array(soilC))
soilC <- soilCsites[c(1,13)]
lit <- data.table(apply(soilC$litter,1:2,sum,na.rm=T))
soilX <- data.table(apply(soilC$soilC,1:2,sum,na.rm=T))
soilTotC <- rbind(soilTotC,soilX)
rh <- rbind(rh,(soilX[,1:84] - soilX[,2:85] + lit[,1:84])/10)
print(sampleID)
}
print(runScen)
save(soilTotC,rh,file=paste0("outSoil/DTsoilC_rh_",runScen,"_rh.rdata"))
print(paste(runScen, "saved"))
load(paste0("outSoil/DTsoilC_rh_",runScen,"_rh.rdata"))
soilC <- soilTotC
save(soilC,file=paste0("outputDT/soilC_",runScen,"_CurrClim.rdata"))
Rh <- rh
save(Rh,file=paste0("outputDT/Rh_",runScen,"_CurrClim.rdata"))
load(paste0("outputDT/npp_",runScen,"_CurrClim.rdata"))
print("dim npp")
print(dim(npp))
print("dim Rh")
print(dim(Rh))
NEP <- npp-Rh
save(NEP,file=paste0("outputDT/NEP_",runScen,"_CurrClim.rdata"))
print(paste(runScen, "all done"))
rm(Rh,rh,NEP,soilC,npp)
gc()
} |
7c67d84cccbc529c6134eaf4775fcbe811a9b0a7 | 7ebe128fc17cdc0e2f534dbe5940774e98da4ce8 | /R/optimizers.R | cb0a5001fd20d0ad5f023c231cb5314a3f72e06b | [] | no_license | cran/bamlss | 89f8d08be4599c03120acb9ed097c31916d1ef21 | 5535fb038104cdd3df08eccb92863a778cd56e75 | refs/heads/master | 2023-07-17T04:24:23.253981 | 2023-07-04T06:30:02 | 2023-07-04T06:30:02 | 82,776,249 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 221,915 | r | optimizers.R | ##################################################
## (1) Generic setup function for smooth terms. ##
##################################################
## For each s() term, an additional list() named "state" must be supplied,
## within the list of specifications returned by smooth.construct(). This list
## contains the following objects:
##
## - fitted.values: numeric, vector containing the current fitted values.
## - parameters: numeric, vector containing the current parameters.
## Can also contain smoothing variances/parameters,
## which should be named with "tau2", regression coefficients
## should be named with "b1", "b2", "b3", ..., "b"k.
## - edf: numeric, the current degrees of freedom of the term.
## - do.optim: NULL or logical, if NULL then per default the backfitting
## algorithm is allowed to optimize possible variance
## parameters within one update step of a term.
## - interval: numeric, if optimization is allowed, specifies the min and max
## of the search space for optimizing variances. This can also
## be supplied with the xt argument of s().
## - grid: integer, the grid length for searching variance parameters, if needed.
## Can also be supplied within the xt argument in s().
##
## 4 additional functions must be supplied using the provided backfitting functions.
##
## - get.mu(X, gamma): computes the fitted values.
## - prior(parameters): computes the log prior using the parameters.
## - edf(x, weights): computes degrees of freedom of the smooth term.
## - grad(score, parameters, ...): function that computes the gradient.
##
## NOTE: model.matrix effects are also added to the smooth term list with
## appropriate penalty structure. The name of the object in the
## list is "model.matrix", for later identifyng the pure model.matrix
## modeled effects.
##
## bamlss.engine.setup() sets up the basic structure, i.e., adds
## possible model.matrix terms to the smooth term list in x, also
## adds model.matrix terms of a random effect presentation of smooth
## terms to the "model.matrix" term. It calls the generic function
## bamlss.engine.setup.smooth(), which adds additional parts to the
## state list, as this could vary for special terms. A default
## method is provided.
bamlss.engine.setup <- function(x, update = "iwls", propose = "iwlsC_gp",
do.optim = NULL, df = NULL, parametric2smooth = TRUE, ...)
{
if(!is.null(attr(x, "bamlss.engine.setup"))) return(x)
foo <- function(x, id = NULL) {
if(!any(c("formula", "fake.formula") %in% names(x))) {
for(j in names(x))
x[[j]] <- foo(x[[j]], id = c(id, j))
} else {
if(is.null(id)) id <- ""
if(!is.null(dim(x$model.matrix)) & parametric2smooth) {
if(nrow(x$model.matrix) > 0 & !is.na(mean(unlist(x$model.matrix), na.rm = TRUE))) {
if(is.null(x$smooth.construct)) x$smooth.construct <- list()
label <- if(is.null(colnames(x$model.matrix))) {
paste("b", 1:ncol(x$model.matrix), sep = "", collapse = "+")
} else paste(colnames(x$model.matrix), collapse = "+")
x$smooth.construct <- c(list("model.matrix" = list(
"X" = x$model.matrix,
"S" = list(diag(0, ncol(x$model.matrix))),
"rank" = ncol(x$model.matrix),
"term" = label,
"label" = label,
"bs.dim" = ncol(x$model.matrix),
"fixed" = TRUE,
"is.model.matrix" = TRUE,
"by" = "NA",
"xt" = list("binning" = x$binning)
)), x$smooth.construct)
if(!is.null(attr(x$model.matrix, "binning"))) {
x$smooth.construct[["model.matrix"]]$binning <- attr(x$model.matrix, "binning")
x$smooth.construct[["model.matrix"]]$xt$binning <- TRUE
}
class(x$smooth.construct[["model.matrix"]]) <- c(class(x$smooth.construct[["model.matrix"]]),
"no.mgcv", "model.matrix")
x$model.matrix <- NULL
}
}
if(length(x$smooth.construct)) {
for(j in seq_along(x$smooth.construct)) {
x$smooth.construct[[j]] <- bamlss.engine.setup.smooth(x$smooth.construct[[j]], ...)
tdf <- NULL
if(!is.null(df)) {
if(!is.null(names(df))) {
if((x$smooth.construct[[j]]$label %in% names(df)))
tdf <- df[x$smooth.construct[[j]]$label]
} else tdf <- df[1]
}
if(is.null(list(...)$nodf))
x$smooth.construct[[j]] <- assign.df(x$smooth.construct[[j]], tdf, do.part = TRUE)
if(!is.null(x$smooth.construct[[j]]$xt[["update"]]))
x$smooth.construct[[j]]$update <- x$smooth.construct[[j]]$xt[["update"]]
if(is.null(x$smooth.construct[[j]]$update)) {
if(is.character(update)) {
if(!grepl("bfit_", update))
update <- paste("bfit", update, sep = "_")
update <- get(update)
}
if(is.null(x$smooth.construct[[j]]$is.model.matrix))
x$smooth.construct[[j]]$update <- update
else
x$smooth.construct[[j]]$update <- bfit_iwls
}
if(is.null(x$smooth.construct[[j]]$propose)) {
if(is.character(propose)) {
if(!grepl("GMCMC", propose))
propose <- paste("GMCMC", propose, sep = "_")
propose <- get(propose)
}
x$smooth.construct[[j]]$propose <- propose
}
if(is.null(do.optim))
x$smooth.construct[[j]]$state$do.optim <- TRUE
else
x$smooth.construct[[j]]$state$do.optim <- do.optim
if(!is.null(x$smooth.construct[[j]]$rank))
x$smooth.construct[[j]]$rank <- as.numeric(x$smooth.construct[[j]]$rank)
if(!is.null(x$smooth.construct[[j]]$Xf)) {
x$smooth.construct[[j]]$Xfcn <- paste(paste(paste(x$smooth.construct[[j]]$term, collapse = "."),
"Xf", sep = "."), 1:ncol(x$smooth.construct[[j]]$Xf), sep = ".")
colnames(x$smooth.construct[[j]]$Xf) <- x$smooth.construct[[j]]$Xfcn
if(is.null(x$smooth.construct[["model.matrix"]])) {
label <- paste(x$smooth.construct[[j]]$Xfcn, collapse = "+")
x$smooth.construct[["model.matrix"]] <- list(
"X" = x$smooth.construct[[j]]$Xf,
"S" = list(diag(0, ncol(x$Xf))),
"rank" = ncol(x$smooth.construct[[j]]$Xf),
"term" = label,
"label" = label,
"bs.dim" = ncol(x$smooth.construct[[j]]$Xf),
"fixed" = TRUE,
"is.model.matrix" = TRUE,
"by" = "NA"
)
x$smooth.construct <- c(x$smooth.construct, "model.matrix")
} else {
x$smooth.construct[["model.matrix"]]$X <- cbind(x$smooth.construct[["model.matrix"]]$X, x$smooth.construct[[j]]$Xf)
x$smooth.construct[["model.matrix"]]$S <- list(diag(0, ncol(x$smooth.construct[["model.matrix"]]$X)))
x$smooth.construct[["model.matrix"]]$bs.dim <- list(diag(0, ncol(x$smooth.construct[["model.matrix"]]$X)))
}
}
}
}
}
if(length(x$smooth.construct)) {
if("model.matrix" %in% names(x$smooth.construct)) {
if(length(nsc <- names(x$smooth.construct)) > 1) {
nsc <- c(nsc[nsc != "model.matrix"], "model.matrix")
x$smooth.construct <- x$smooth.construct[nsc]
}
}
if(any(is.nnet <- sapply(x$smooth.construct, function(z) inherits(z, "nnet.smooth")))) {
nsc <- names(x$smooth.construct)
nsc <- c(nsc[-which(is.nnet)], nsc[which(is.nnet)])
x$smooth.construct <- x$smooth.construct[nsc]
}
}
x
}
x <- foo(x)
attr(x, "bamlss.engine.setup") <- TRUE
x
}
## Generic additional setup function for smooth terms.
bamlss.engine.setup.smooth <- function(x, ...) {
UseMethod("bamlss.engine.setup.smooth")
}
## Simple extractor function.
get.state <- function(x, what = NULL) {
if(is.null(what)) return(x$state)
if(what %in% c("par", "parameters")) {
return(x$state$parameters)
} else {
if(what %in% c("tau2", "lambda")) {
p <- x$state$parameters
return(p[grep("tau2", names(p))])
} else {
if(what %in% "b") {
p <- x$state$parameters
return(p[!grepl("tau2", names(p)) & !grepl("edf", names(p)) & !grepl("lasso", names(p))])
} else return(x$state[[what]])
}
}
}
get.par <- function(x, what = NULL) {
if(is.null(what) | is.null(names(x))) return(x)
if(what %in% c("tau2", "lambda")) {
return(x[grep("tau2", names(x))])
} else {
if(what %in% "b") {
return(x[!grepl("tau2", names(x)) & !grepl("edf", names(x)) & !grepl("lasso", names(x)) & !grepl("bw", names(x))])
} else return(x[what])
}
}
set.par <- function(x, replacement, what) {
if(is.null(replacement))
return(x)
if(what %in% c("tau2", "lambda")) {
x[grep("tau2", names(x))] <- replacement
} else {
if(what %in% "b") {
if(as.integer(sum(!grepl("tau2", names(x)) & !grepl("edf", names(x)) & !grepl("lasso", names(x)) & !grepl("bw", names(x)))) != length(replacement)) {
stop("here")
}
x[!grepl("tau2", names(x)) & !grepl("edf", names(x)) & !grepl("lasso", names(x)) & !grepl("bw", names(x))] <- replacement
} else x[what] <- replacement
}
x
}
## The default method.
bamlss.engine.setup.smooth.default <- function(x, Matrix = FALSE, ...)
{
if(inherits(x, "special"))
return(x)
if(!is.null(x$margin)) {
x$xt <- c(x$xt, x$margin[[1]]$xt)
x$xt <- x$xt[unique(names(x$xt))]
x$fixed <- x$margin[[1]]$fixed
}
if(is.null(x$binning) & !is.null(x$xt[["binning"]])) {
if(is.logical(x$xt[["binning"]])) {
if(x$xt[["binning"]]) {
x$binning <- match.index(x$X)
x$binning$order <- order(x$binning$match.index)
x$binning$sorted.index <- x$binning$match.index[x$binning$order]
assign <- attr(x$X, "assign")
x$X <- x$X[x$binning$nodups, , drop = FALSE]
attr(x$X, "assign") <- assign
}
} else {
x$binning <- match.index(x$X)
x$binning$order <- order(x$binning$match.index)
x$binning$sorted.index <- x$binning$match.index[x$binning$order]
assign <- attr(x$X, "assign")
x$X <- x$X[x$binning$nodups, , drop = FALSE]
attr(x$X, "assign") <- assign
}
}
if(!is.null(x$binning)) {
if(nrow(x$X) != length(x$binning$nodups)) {
assign <- attr(x$X, "assign")
x$X <- x$X[x$binning$nodups, , drop = FALSE]
attr(x$X, "assign") <- assign
}
}
if(is.null(x$binning)) {
nr <- nrow(x$X)
x$binning <- list(
"match.index" = 1:nr,
"nodups" = 1:nr,
"order" = 1:nr,
"sorted.index" = 1:nr
)
}
x$nobs <- length(x$binning$match.index)
k <- length(x$binning$nodups)
x$weights <- rep(0, length = k)
x$rres <- rep(0, length = k)
x$fit.reduced <- rep(0, length = k)
state <- if(is.null(x$xt[["state"]])) list() else x$xt[["state"]]
if(is.null(x$fixed))
x$fixed <- if(!is.null(x$fx)) x$fx[1] else FALSE
if(!x$fixed & is.null(state$interval))
state$interval <- if(is.null(x$xt[["interval"]])) tau2interval(x) else x$xt[["interval"]]
if(!is.null(x$xt[["pSa"]])) {
x$S <- c(x$S, list("pSa" = x$xt[["pSa"]]))
priors <- make.prior(x)
x$prior <- priors$prior
x$grad <- priors$grad
x$hess <- priors$hess
}
ntau2 <- length(x$S)
if(length(ntau2) < 1) {
if(x$fixed) {
x$sp <- 1e+20
ntau2 <- 1
x$S <- list(diag(ncol(x$X)))
} else {
x$sp <- NULL
}
}
if(!is.null(x$xt[["sp"]])) {
x$sp <- x$xt[["sp"]]
for(j in seq_along(x$sp))
if(x$sp[j] == 0) x$sp[j] <- .Machine$double.eps^0.5
x$xt[["tau2"]] <- 1 / x$sp
}
if(!is.null(x$sp)) {
if(all(is.numeric(x$sp))) {
x$sp <- rep(x$sp, length.out = ntau2)
for(j in seq_along(x$sp))
if(x$sp[j] == 0) x$sp[j] <- .Machine$double.eps^0.5
x$fxsp <- TRUE
} else x$fxsp <- FALSE
} else x$fxsp <- FALSE
if(is.null(state$parameters)) {
state$parameters <- rep(0, ncol(x$X))
names(state$parameters) <- if(is.null(colnames(x$X))) {
paste("b", 1:length(state$parameters), sep = "")
} else colnames(x$X)
if(is.null(x$is.model.matrix)) {
if(ntau2 > 0) {
tau2 <- if(is.null(x$sp)) {
if(x$fixed) {
rep(1e+20, length.out = ntau2)
} else {
rep(if(!is.null(x$xt[["tau2"]])) {
x$xt[["tau2"]]
} else {
if(!is.null(x$xt[["lambda"]])) 1 / x$xt[["lambda"]] else 1000
}, length.out = ntau2)
}
} else rep(x$sp, length.out = ntau2)
if(all(is.logical(tau2)))
tau2 <- rep(0.0001, length(tau2))
names(tau2) <- paste("tau2", 1:ntau2, sep = "")
state$parameters <- c(state$parameters, tau2)
}
}
}
if((ntau2 > 0) & !any(grepl("tau2", names(state$parameters))) & is.null(x$is.model.matrix)) {
tau2 <- if(is.null(x$sp)) {
if(x$fixed) {
rep(1e+20, length.out = ntau2)
} else {
rep(if(!is.null(x$xt[["tau2"]])) {
x$xt[["tau2"]]
} else {
if(!is.null(x$xt[["lambda"]])) 1 / x$xt[["lambda"]] else 100
}, length.out = ntau2)
}
} else rep(x$sp, length.out = ntau2)
names(tau2) <- paste("tau2", 1:ntau2, sep = "")
state$parameters <- c(state$parameters, tau2)
}
x$a <- if(is.null(x$xt[["a"]])) 1e-04 else x$xt[["a"]]
x$b <- if(is.null(x$xt[["b"]])) 1e-04 else x$xt[["b"]]
if(is.null(x$edf)) {
x$edf <- function(x) {
tau2 <- get.state(x, "tau2")
if(x$fixed | !length(tau2)) return(ncol(x$X))
if(is.null(x$state$XX))
x$state$XX <- crossprod(x$X)
S <- 0
for(j in seq_along(tau2))
S <- S + 1 / tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](get.state(x, "b")) else x$S[[j]]
P <- matrix_inv(x$state$XX + S, index = x$sparse.setup)
edf <- try(sum_diag(x$state$XX %*% P), silent = TRUE)
if(inherits(edf, "try-error"))
edf <- ncol(x$X)
return(edf)
}
}
ng <- length(get.par(state$parameters, "b"))
x$lower <- c(rep(-Inf, ng),
if(is.list(state$interval)) {
unlist(sapply(state$interval, function(x) { x[1] }))
} else state$interval[1])
x$upper <- c(rep(Inf, ng),
if(is.list(state$interval)) {
unlist(sapply(state$interval, function(x) { x[2] }))
} else state$interval[2])
names(x$lower) <- names(x$upper) <- names(state$parameters)[1:length(x$upper)]
if(!is.null(x$sp)) {
if(length(x$sp) < 1)
x$sp <- NULL
if(is.logical(x$sp))
x[["sp"]] <- NULL
}
state$interval <- NULL
x$state <- state
if(!is.null(x$xt[["do.optim"]]))
x$state$do.optim <- x$xt[["do.optim"]]
x$sparse.setup <- sparse.setup(x$X, S = x$S)
x$added <- c("nobs", "weights", "rres", "state", "a", "b", "prior", "edf",
"grad", "hess", "lower", "upper")
args <- list(...)
force.Matrix <- if(is.null(args$force.Matrix)) FALSE else args$force.Matrix
if(!is.null(x$xt$force.Matrix))
force.Matrix <- x$xt$force.Matrix
if(!is.null(x$sparse.setup$crossprod)) {
if((ncol(x$sparse.setup$crossprod) < ncol(x$X) * 0.5) & force.Matrix)
Matrix <- TRUE
if(Matrix) {
x$X <- Matrix(x$X, sparse = TRUE)
for(j in seq_along(x$S))
x$S[[j]] <- Matrix(if(is.function(x$S[[j]])) x$S[[j]](c("b" = rep(0, attr(x$S[[j]], "npar")))) else x$S[[j]], sparse = TRUE)
if(force.Matrix)
x$update <- bfit_iwls_Matrix
priors <- make.prior(x)
x$prior <- priors$prior
x$grad <- priors$grad
x$hess <- priors$hess
}
}
if(ntau2 > 0) {
tau2 <- NULL
if(length(x$margin)) {
for(j in seq_along(x$margin)) {
if(!is.null(x$margin[[j]]$xt$tau2))
tau2 <- c(tau2, x$margin[[j]]$xt$tau2)
}
} else {
if(!is.null(x$xt$tau2))
tau2 <- x$xt$tau2
if(!is.null(x$xt[["lambda"]])) {
tau2 <- 1 / x$xt[["lambda"]]
}
}
if(!is.null(tau2)) {
tau2 <- rep(tau2, length.out = ntau2)
x$state$parameters <- set.par(x$state$parameters, tau2, "tau2")
}
}
pid <- !grepl("tau2", names(x$state$parameters)) & !grepl("edf", names(x$state$parameters))
x$pid <- list("b" = which(pid), "tau2" = which(!pid))
if(!length(x$pid$tau2))
x$pid$tau2 <- NULL
if(is.null(x$prior)) {
if(!is.null(x$xt[["prior"]]))
x$prior <- x$xt[["prior"]]
if(is.null(x$prior) | !is.function(x$prior)) {
priors <- make.prior(x)
x$prior <- priors$prior
x$grad <- priors$grad
x$hess <- priors$hess
}
}
x$fit.fun <- make.fit.fun(x)
x$state$fitted.values <- x$fit.fun(x$X, get.par(x$state$parameters, "b"))
x$state$edf <- x$edf(x)
x
}
## Function to find tau2 interval according to the
## effective degrees of freedom
tau2interval <- function(x, lower = .Machine$double.eps^0.8, upper = 1e+10)
{
if(length(x$S) < 2) {
return(c(lower, upper))
} else {
return(rep(list(c(lower, upper)), length.out = length(x$S)))
}
}
## Assign degrees of freedom.
assign.df <- function(x, df, do.part = FALSE, ret.tau2 = FALSE)
{
if(inherits(x, "special"))
return(x)
if(!is.null(x$is.model.matrix)) {
if(x$is.model.matrix)
return(x)
}
if(is.null(x$S))
return(x)
tau2 <- get.par(x$state$parameters, "tau2")
if(x$fixed | !length(tau2))
return(x)
if(!is.null(x$fxsp)) {
if(x$fxsp)
return(x)
}
if(!is.null(x$no.assign.df))
return(x)
df <- if(is.null(x$xt$df)) df else x$xt$df
if(is.null(df)) {
nc <- ncol(x$X)
df <- ceiling(nc * 0.5)
}
if(df > ncol(x$X))
df <- ncol(x$X)
XX <- crossprod(x$X)
if(length(tau2) > 1) {
objfun <- function(tau2, ret.edf = FALSE) {
S <- 0
for(i in seq_along(x$S))
S <- S + 1 / tau2[i] * (if(is.function(x$S[[i]])) x$S[[i]](c("b" = rep(0, attr(x$S[[i]], "npar")))) else x$S[[i]])
edf <- sum_diag(XX %*% matrix_inv(XX + S, index = x$sparse.setup))
if(ret.edf)
return(edf)
else
return((df - edf)^2)
}
if(do.part) {
opt <- tau2.optim(objfun, start = tau2, maxit = 1000, scale = 100,
add = FALSE, force.stop = FALSE, eps = .Machine$double.eps^0.8)
if(!inherits(opt, "try-error"))
tau2 <- opt
}
} else {
objfun <- function(tau2, ret.edf = FALSE) {
edf <- sum_diag(XX %*% matrix_inv(XX + 1 / tau2 * (if(is.function(x$S[[1]])) {
x$S[[1]](c("b" = rep(0, attr(x$S[[1]], "npar")), x$fixed.hyper))
} else x$S[[1]]), index = x$sparse.setup))
if(ret.edf)
return(edf)
else
return((df - edf)^2)
}
tau2 <- tau2.optim(objfun, start = tau2, maxit = 1000, scale = 10,
add = FALSE, force.stop = FALSE, eps = .Machine$double.eps^0.8)
if(inherits(tau2, "try-error"))
return(x)
}
if(ret.tau2)
return(tau2)
x$state$parameters <- set.par(x$state$parameters, tau2, "tau2")
x$state$edf <- objfun(tau2, ret.edf = TRUE)
return(x)
}
get.eta <- function(x, expand = TRUE)
{
nx <- names(x)
np <- length(nx)
eta <- vector(mode = "list", length = np)
names(eta) <- nx
for(j in 1:np) {
eta[[j]] <- 0
for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
par <- x[[nx[j]]]$smooth.construct[[sj]]$state$parameters
par <- if(!is.null(x[[nx[j]]]$smooth.construct[[sj]]$pid)) {
par[x[[nx[j]]]$smooth.construct[[sj]]$pid$b]
} else get.state(x[[nx[j]]]$smooth.construct[[sj]], "b")
fit <- x[[nx[j]]]$smooth.construct[[sj]]$fit.fun(x[[nx[j]]]$smooth.construct[[sj]]$X, par, expand)
eta[[j]] <- eta[[j]] + fit
}
}
eta
}
ffdf_eval <- function(x, FUN)
{
# res <- NULL
# for(i in bamlss_chunk(x)) {
# res <- ffappend(res, FUN(x[i, ]))
# }
# res
## FIXME: ff support!
FUN(x)
}
ffdf_eval_sh <- function(y, par, FUN)
{
# res <- NULL
# for(i in bamlss_chunk(y)) {
# tpar <- list()
# for(j in names(par))
# tpar[[j]] <- par[[j]][i]
# res <- ffappend(res, FUN(y[i, ], tpar))
# }
# res
## FIXME: ff support!
FUN(y, par)
}
ff_eval <- function(x, FUN, lower = NULL, upper = NULL)
{
# res <- NULL
# for(i in bamlss_chunk(x)) {
# tres <- FUN(x[i])
# if(!is.null(lower)) {
# if(any(jj <- tres == lower[1]))
# tres[jj] <- lower[2]
# }
# if(!is.null(upper)) {
# if(any(jj <- tres == upper[1]))
# tres[jj] <- upper[2]
# }
# res <- ffappend(res, tres)
# }
# res
## FIXME: ff support!
FUN(x)
}
## Initialze.
init.eta <- function(eta, y, family, nobs)
{
if(is.null(family$initialize))
return(eta)
for(j in family$names) {
if(!is.null(family$initialize[[j]])) {
linkfun <- make.link2(family$links[j])$linkfun
if(inherits(y, "ffdf")) {
eta[[j]] <- ffdf_eval(y, function(x) { linkfun(family$initialize[[j]](x)) })
} else {
eta[[j]] <- linkfun(family$initialize[[j]](y))
eta[[j]] <- rep(eta[[j]], length.out = nobs)
}
}
}
return(eta)
}
get.edf <- function(x, type = 1)
{
nx <- names(x)
np <- length(nx)
edf <- 0
for(j in 1:np) {
for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
edf <- edf + if(type < 2) {
x[[nx[j]]]$smooth.construct[[sj]]$edf(x[[nx[j]]]$smooth.construct[[sj]])
} else x[[nx[j]]]$smooth.construct[[sj]]$state$edf
}
}
edf
}
get.log.prior <- function(x, type = 1)
{
nx <- names(x)
np <- length(nx)
lp <- 0
for(j in 1:np) {
for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
lp <- lp + if(type < 2) {
x[[nx[j]]]$smooth.construct[[sj]]$prior(x[[nx[j]]]$smooth.construct[[sj]]$state$parameters)
} else x[[nx[j]]]$smooth.construct[[sj]]$state$log.prior
}
}
lp
}
get.all.par <- function(x, drop = FALSE, list = TRUE)
{
nx <- names(x)
np <- length(nx)
par <- list()
for(i in nx) {
par[[i]] <- list()
if(!all(c("formula", "fake.formula") %in% names(x[[i]]))) {
for(k in names(x[[i]])) {
if(!is.null(x[[i]][[k]]$smooth.construct)) {
par[[i]][[k]]$s <- list()
for(j in names(x[[i]][[k]]$smooth.construct)) {
if(j == "model.matrix") {
par[[i]][[k]]$p <- x[[i]][[k]]$smooth.construct[[j]]$state$parameters
} else {
if(is.null(par[[i]][[k]]$s))
par[[i]][[k]]$s <- list()
par[[i]][[k]]$s[[j]] <- x[[i]][[k]]$smooth.construct[[j]]$state$parameters
if(!is.null(edf <- x[[i]][[k]]$smooth.construct[[j]]$state$edf))
par[[i]][[k]]$s[[j]] <- c(par[[i]][[k]]$s[[j]], "edf" = edf)
}
}
}
}
} else {
if(!is.null(x[[i]]$smooth.construct)) {
for(j in names(x[[i]]$smooth.construct)) {
if(j == "model.matrix") {
par[[i]]$p <- x[[i]]$smooth.construct[[j]]$state$parameters
} else {
if(is.null(par[[i]]$s))
par[[i]]$s <- list()
par[[i]]$s[[j]] <- x[[i]]$smooth.construct[[j]]$state$parameters
if(!is.null(edf <- x[[i]]$smooth.construct[[j]]$state$edf))
par[[i]]$s[[j]] <- c(par[[i]]$s[[j]], "edf" = edf)
}
}
}
}
}
if(!list) {
par <- unlist(par)
if(drop) {
for(j in c(".edf", ".tau2", ".alpha"))
par <- par[!grepl(j, names(par), fixed = TRUE)]
}
}
par
}
## Assemble the (negative) block-diagonal hessian of all model terms,
## ordered and named consistently with get.all.par(x, list = FALSE,
## drop = TRUE).  Terms without a stored hessian get a small ridge
## (diag 1e-07) placeholder so the result stays invertible.
##
## @param x Named list of model-term constructs (flat layout with
##          $smooth.construct per distribution parameter).
## @return Dense symmetric matrix, rows/cols named "<param>.<part>.<coef>"
##         and subset/reordered to match the flattened parameter vector.
get.hessian <- function(x)
{
  npar <- names(get.all.par(x, list = FALSE, drop = TRUE))
  hessian <- list(); nh <- NULL
  for(i in names(x)) {
    for(j in names(x[[i]]$smooth.construct)) {
      ## Name prefix matches the flattening convention of get.all.par().
      pn <- if(j == "model.matrix") paste(i, "p", sep = ".") else paste(i, "s", j, sep = ".")
      ## Fallback ridge for terms that never stored a hessian.
      if(is.null(x[[i]]$smooth.construct[[j]]$state$hessian))
        x[[i]]$smooth.construct[[j]]$state$hessian <- diag(1e-07, ncol(x[[i]]$smooth.construct[[j]]$X))
      hessian[[pn]] <- as.matrix(x[[i]]$smooth.construct[[j]]$state$hessian)
      if(is.null(colnames(hessian[[pn]]))) {
        ## Derive coefficient names from the design matrix, or synthesize
        ## "b1", "b2", ... when the design matrix is unnamed too.
        cn <- colnames(x[[i]]$smooth.construct[[j]]$X)
        if(is.null(cn))
          cn <- paste("b", 1:ncol(x[[i]]$smooth.construct[[j]]$X), sep = "")
      } else cn <- colnames(hessian[[pn]])
      pn <- paste(pn, cn, sep = ".")
      nh <- c(nh, pn)
    }
  }
  ## Stack per-term blocks; negate so the result is the observed-information
  ## orientation expected by callers.
  hessian <- -1 * as.matrix(do.call("bdiag", hessian))
  rownames(hessian) <- colnames(hessian) <- nh
  hessian <- hessian[npar, npar]
  return(hessian)
}
## Formatting for printing.
## Format a number for fixed-width console output: round to 'digits',
## pad with formatC() to 'width', and hard-truncate the string when the
## padded representation still exceeds 'width' characters.
fmt <- Vectorize(function(x, width = 8, digits = 2) {
  out <- formatC(round(x, digits), format = "f", digits = digits, width = width)
  if(nchar(out) > width)
    out <- substr(out, 1L, width)
  out
})
## Backfitting optimizer ("bfit") for bamlss models.  Repeatedly cycles over
## all distribution parameters and their model terms, updating each term via
## its $update() function (by default IWLS-based) until the relative change
## in the predictors drops below 'eps' or 'maxit' is reached.
##
## @param x         List of model-term constructs (one per family parameter).
## @param y         Response (vector, data.frame, or ffdf for big data).
## @param family    bamlss family with $names, $loglik, $score, $hess, $map2par.
## @param start     Optional starting values, set via set.starting.values().
## @param weights   Optional per-parameter weights (coerced to data.frame).
## @param offset    Optional per-parameter offsets added to the predictors.
## @param update    Term-updating engine passed to bamlss.engine.setup().
## @param criterion Information criterion steering smoothing-parameter
##                  selection ("AICc", "BIC" or "AIC").
## @param eps,maxit Convergence tolerance / maximum number of iterations.
## @param outer     If TRUE, recompute working observations in every
##                  iteration (outer loop); NULL enables a single pass.
## @param inner     If TRUE, fully backfit all terms of one parameter before
##                  moving on (inner backfitting loop).
## @param mgcv      If TRUE, delegate inner updates to mgcv::gam().
## @param nu        Step-length control: TRUE searches nu in (0, 1) via
##                  optimize(), a numeric value is halved until the
##                  log-likelihood improves, NULL/negative disables it.
## @param stop.nu   Iteration after which step-length control is switched off.
## @return List with fitted values, parameters, edf, logLik/logPost and the
##         final information criterion.
opt_bfit <- bfit <- function(x, y, family, start = NULL, weights = NULL, offset = NULL,
  update = "iwls", criterion = c("AICc", "BIC", "AIC"),
  eps = .Machine$double.eps^0.25, maxit = 400,
  outer = NULL, inner = FALSE, mgcv = FALSE,
  verbose = TRUE, digits = 4, flush = TRUE, nu = TRUE, stop.nu = NULL, ...)
{
  ## Basic consistency checks and one-time engine setup.
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("design construct names mismatch with family names!")
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, update = update, ...)
  plot <- if(is.null(list(...)$plot)) {
    FALSE
  } else {
    list(...)$plot
  }
  criterion <- match.arg(criterion)
  np <- length(nx)
  ## A negative numeric 'nu' disables step-length control entirely.
  if(!is.null(nu)) {
    if(!is.logical(nu)) {
      if(nu < 0)
        nu <- NULL
    }
  }
  no_ff <- !inherits(y, "ffdf")
  nobs <- nrow(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  if(!is.null(start))
    x <- set.starting.values(x, start)
  eta <- get.eta(x)
  if(!is.null(weights))
    weights <- as.data.frame(weights)
  ## Offsets are added once to the corresponding predictors; without offsets
  ## and starting values the predictors are initialized from the family.
  if(!is.null(offset)) {
    offset <- as.data.frame(offset)
    for(j in nx) {
      if(!is.null(offset[[j]]))
        eta[[j]] <- eta[[j]] + offset[[j]]
    }
  } else {
    if(is.null(start))
      eta <- init.eta(eta, y, family, nobs)
  }
  ia <- if(flush) interactive() else FALSE
  ## 'max_outer' controls for how many iterations new working observations
  ## are computed: once (outer = NULL), never (outer = FALSE) or always.
  if(is.null(outer)) {
    outer <- FALSE
    max_outer <- 1
  } else {
    max_outer <- if(outer) {
      maxit + 1
    } else {
      0
    }
  }
  if(mgcv) {
    outer <- TRUE
    inner <- TRUE
    max_outer <- maxit + 1
  }
  ## Inner backfitting engine: either the native term-wise updater (with
  ## optional step-length control), or an mgcv::gam() based update.
  inner_bf <- if(!mgcv) {
    function(x, y, eta, family, edf, id, nu, logprior, ...) {
      eps0 <- eps + 1; iter <- 1
      while(eps0 > eps & iter < maxit) {
        eta0 <- eta
        for(sj in seq_along(x)) {
          ## Get updated parameters.
          p.state <- x[[sj]]$update(x[[sj]], family, y, eta, id, edf = edf, ...)
          if(!is.null(nu)) {
            ## Step-length control: only accept a shrunken step if it does
            ## not decrease the log-likelihood.
            lpost0 <- family$loglik(y, family$map2par(eta))## + logprior
            ##lp <- logprior - x[[sj]]$prior(x[[sj]]$state$parameters)
            eta2 <- eta
            eta2[[id]] <- eta2[[id]] - x[[sj]]$state$fitted.values
            b0 <- get.par(x[[sj]]$state$parameters, "b")
            b1 <- get.par(p.state$parameters, "b")
            objfun <- function(nu, diff = TRUE) {
              p.state$parameters <- set.par(p.state$parameters, nu * b1 + (1 - nu) * b0, "b")
              eta2[[id]] <- eta2[[id]] + x[[sj]]$fit.fun(x[[sj]]$X,
                get.par(p.state$parameters, "b"))
              lp2 <- family$loglik(y, family$map2par(eta2)) ##+ lp + x[[sj]]$prior(p.state$parameters)
              if(diff) {
                return(-1 * (lp2 - lpost0))
              } else
                return(lp2)
            }
            lpost1 <- objfun(1, diff = FALSE)
            if(lpost1 < lpost0) {
              ## Full step decreased the log-likelihood: search a better nu,
              ## either by optimize() or by successive halving.
              if(!is.numeric(nu)) {
                nuo <- optimize(f = objfun, interval = c(0, 1))$minimum
              } else {
                nuo <- nu
                while((objfun(nuo, diff = FALSE) < lpost0) & (.Machine$double.eps < nuo)) {
                  nuo <- nuo / 2
                }
              }
              if(!(objfun(nuo, diff = FALSE) < lpost0)) {
                p.state$parameters <- set.par(p.state$parameters, nuo * b1 + (1 - nuo) * b0, "b")
                p.state$fitted.values <- x[[sj]]$fit.fun(x[[sj]]$X,
                  get.par(p.state$parameters, "b"))
                eta2[[id]] <- eta2[[id]] + p.state$fitted.values
                lpost1 <- family$loglik(y, family$map2par(eta2)) ##+ lp + x[[sj]]$prior(p.state$parameters)
              } else {
                ## No improving step length found: keep the old state.
                next
              }
            }
          }
          ## Compute equivalent degrees of freedom.
          edf <- edf - x[[sj]]$state$edf + p.state$edf
          ## Update log priors.
          logprior <- logprior - x[[sj]]$prior(x[[sj]]$state$parameters) + x[[sj]]$prior(p.state$parameters)
          ## Update predictor and smooth fit.
          eta[[id]] <- eta[[id]] - fitted(x[[sj]]$state) + fitted(p.state)
          x[[sj]]$state <- p.state
        }
        ## Relative change of the predictors as convergence measure.
        eps0 <- do.call("cbind", eta)
        eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
        if(is.na(eps0) | !is.finite(eps0)) eps0 <- eps + 1
        iter <- iter + 1
      }
      return(list("x" = x, "eta" = eta, "edf" = edf))
    }
  } else {
    ## mgcv variant: fit all terms of one parameter jointly with gam(),
    ## using the working observations 'z' and weights 'hess' (penalized
    ## regression via paraPen).
    function(x, y, eta, family, edf, id, z, hess, weights, ...) {
      X <- lapply(x, function(x) { x$X })
      S <- lapply(x, function(x) { x$S })
      nt <- nt0 <- names(X)
      nt <- rmf(nt)
      names(X) <- names(S) <- nt
      if("modelmatrix" %in% nt)
        S <- S[!(nt %in% "modelmatrix")]
      X$z <- z
      f <- paste("z", paste(c("-1", nt), collapse = " + "), sep = " ~ ")
      f <- as.formula(f)
      if(!is.null(weights))
        hess <- hess * weights
      b <- gam(f, data = X, weights = hess, paraPen = S)
      cb <- coef(b)
      ncb <- names(cb)
      tau2 <- if(length(b$sp)) 1 / b$sp else NULL
      fitted <- 0
      ## Distribute the joint gam() fit back onto the individual terms.
      for(sj in seq_along(x)) {
        tn <- rmf(nt0[sj])
        par <- cb[grep(tn, ncb, fixed = TRUE)]
        tedf <- sum(b$edf[grep(tn, ncb, fixed = TRUE)])
        names(par) <- paste("b", 1:length(par), sep = "")
        if(!is.null(tau2) & (tn != "modelmatrix")) {
          ## Clamp smoothing variances into the term's allowed range.
          ttau2 <- tau2[grep(tn, names(tau2), fixed = TRUE)]
          names(ttau2) <- paste("tau2", 1:length(ttau2), sep = "")
          lo <- x[[sj]]$lower[grep("tau2", names(x[[sj]]$lower), fixed = TRUE)]
          up <- x[[sj]]$upper[grep("tau2", names(x[[sj]]$upper), fixed = TRUE)]
          if(any(j <- ttau2 < lo))
            ttau2[j] <- lo[j]
          if(any(j <- ttau2 > up))
            ttau2[j] <- up[j]
          par <- c(par, ttau2)
        } else {
          names(par) <- colnames(x[[sj]]$X)
          par <- c(par, "tau21" = 1e+20)
        }
        x[[sj]]$state$parameters <- par
        x[[sj]]$state$fitted.values <- x[[sj]]$fit.fun(x[[sj]]$X, par)
        fitted <- fitted + x[[sj]]$state$fitted.values
        edf <- edf - x[[sj]]$state$edf + tedf
        x[[sj]]$state$edf <- tedf
        x[[sj]]$state$prior <- x[[sj]]$prior(par)
      }
      eta[[id]] <- fitted
      return(list("x" = x, "eta" = eta, "edf" = edf))
    }
  }
  ## Backfitting main function.
  backfit <- function(verbose = TRUE) {
    eps0 <- eps + 1; iter <- 0
    edf <- get.edf(x, type = 2)
    ll_save <- NULL
    ptm <- proc.time()
    while(eps0 > eps & iter < maxit) {
      eta0 <- eta
      ## Cycle through all parameters
      for(j in 1:np) {
        if(iter < max_outer) {
          peta <- family$map2par(eta)
          if(no_ff) {
            ## Compute weights.
            hess <- process.derivs(family$hess[[nx[j]]](y, peta, id = nx[j]), is.weight = TRUE)
            ## Score.
            score <- process.derivs(family$score[[nx[j]]](y, peta, id = nx[j]), is.weight = FALSE)
            if(length(score) != nobs) {
              stop("something wrong in processing the family $score() function! More elements in return value of $score() than the response!")
            }
            if(length(hess) != nobs) {
              stop("something wrong in processing the family $hess() function! More elements in return value of $hess() than the response!")
            }
          } else {
            ## Same for large files.
            hess <- ffdf_eval_sh(y, peta, FUN = function(y, par) {
              process.derivs(family$hess[[nx[j]]](y, par, id = nx[j]), is.weight = TRUE)
            })
            score <- ffdf_eval_sh(y, peta, FUN = function(y, par) {
              process.derivs(family$score[[nx[j]]](y, par, id = nx[j]), is.weight = FALSE)
            })
          }
          ## Compute working observations.
          z <- eta[[nx[j]]] + 1 / hess * score
        } else z <- hess <- score <- NULL
        ## Re-sync the predictor with the term states in early iterations.
        if(iter < 2) {
          eta[[nx[j]]] <- get.eta(x)[[nx[j]]]
          if(!is.null(offset)) {
            if(!is.null(offset[[nx[j]]]))
              eta[[nx[j]]] <- eta[[nx[j]]] + offset[[nx[j]]]
          }
        }
        ## And all terms.
        if(inner) {
          tbf <- inner_bf(x[[nx[j]]]$smooth.construct, y, eta, family,
            edf = edf, id = nx[j], z = z, hess = hess, weights = weights[[nx[j]]],
            criterion = criterion, iteration = iter, nu = nu, score = score,
            logprior = get.log.prior(x))
          x[[nx[j]]]$smooth.construct <- tbf$x
          edf <- tbf$edf
          eta <- tbf$eta
          rm(tbf)
        } else {
          for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
            ## Get updated parameters.
            p.state <- x[[nx[j]]]$smooth.construct[[sj]]$update(x[[nx[j]]]$smooth.construct[[sj]],
              family, y, eta, nx[j], edf = edf, z = z, hess = hess, weights = weights[[nx[j]]],
              iteration = iter, criterion = criterion, score = score)
            ## Update predictor and smooth fit.
            ## Step-length control (same scheme as in inner_bf above);
            ## skipped for neural-network terms.
            if(!is.null(nu) & !inherits(x[[nx[j]]]$smooth.construct[[sj]], "nnet0.smooth")) {
              lp0 <- get.log.prior(x)
              lpost0 <- family$loglik(y, family$map2par(eta)) ##+ lp0
              ##lp <- lp0 - x[[nx[j]]]$smooth.construct[[sj]]$prior(x[[nx[j]]]$smooth.construct[[sj]]$state$parameters)
              eta2 <- eta
              eta2[[nx[j]]] <- eta2[[nx[j]]] - x[[nx[j]]]$smooth.construct[[sj]]$state$fitted.values
              b0 <- get.par(x[[nx[j]]]$smooth.construct[[sj]]$state$parameters, "b")
              b1 <- get.par(p.state$parameters, "b")
              objfun <- function(nu, diff = TRUE) {
                p.state$parameters <- set.par(p.state$parameters, nu * b1 + (1 - nu) * b0, "b")
                eta2[[nx[j]]] <- eta2[[nx[j]]] + x[[nx[j]]]$smooth.construct[[sj]]$fit.fun(x[[nx[j]]]$smooth.construct[[sj]]$X,
                  get.par(p.state$parameters, "b"))
                lp2 <- family$loglik(y, family$map2par(eta2)) ##+ lp + x[[nx[j]]]$smooth.construct[[sj]]$prior(p.state$parameters)
                if(diff) {
                  return(-1 * (lp2 - lpost0))
                } else
                  return(lp2)
              }
              lpost1 <- objfun(1, diff = FALSE)
              if(lpost1 < lpost0) {
                if(!is.numeric(nu)) {
                  nuo <- optimize(f = objfun, interval = c(0, 1))$minimum
                } else {
                  nuo <- nu
                  while((objfun(nuo, diff = FALSE) < lpost0) & (.Machine$double.eps < nuo)) {
                    nuo <- nuo / 2
                  }
                }
                if(!(objfun(nuo, diff = FALSE) < lpost0)) {
                  p.state$parameters <- set.par(p.state$parameters, nuo * b1 + (1 - nuo) * b0, "b")
                  p.state$fitted.values <- x[[nx[j]]]$smooth.construct[[sj]]$fit.fun(x[[nx[j]]]$smooth.construct[[sj]]$X, get.par(p.state$parameters, "b"))
                  eta2[[nx[j]]] <- eta2[[nx[j]]] + p.state$fitted.values
                  lpost1 <- family$loglik(y, family$map2par(eta2)) ##+ lp + x[[nx[j]]]
                } else {
                  next
                }
              }
            }
            ## Compute equivalent degrees of freedom.
            edf <- edf - x[[nx[j]]]$smooth.construct[[sj]]$state$edf + p.state$edf
            eta[[nx[j]]] <- eta[[nx[j]]] - fitted(x[[nx[j]]]$smooth.construct[[sj]]$state) + fitted(p.state)
            x[[nx[j]]]$smooth.construct[[sj]]$state <- p.state
          }
        }
      }
      ## Optionally disable step-length control after 'stop.nu' iterations.
      if(!is.null(stop.nu)) {
        if(iter > stop.nu)
          nu <- NULL
      }
      ## Relative change of the predictors as convergence measure.
      eps0 <- do.call("cbind", eta)
      eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
      if(is.na(eps0) | !is.finite(eps0)) eps0 <- eps + 1
      peta <- family$map2par(eta)
      IC <- get.ic(family, y, peta, edf, nobs, criterion)
      iter <- iter + 1
      logLik <- family$loglik(y, peta)
      if(verbose) {
        cat(if(ia) "\r" else if(iter > 1) "\n" else NULL)
        vtxt <- paste(criterion, " ", fmt(IC, width = 8, digits = digits),
          " logPost ", fmt(family$loglik(y, peta) + get.log.prior(x), width = 8, digits = digits),
          " logLik ", fmt(logLik, width = 8, digits = digits),
          " edf ", fmt(edf, width = 6, digits = digits),
          " eps ", fmt(eps0, width = 6, digits = digits + 2),
          " iteration ", formatC(iter, width = nchar(maxit)), sep = "")
        cat(vtxt)
        if(.Platform$OS.type != "unix" & ia) flush.console()
      }
      ll_save <- c(ll_save, logLik)
      if(iter > 2)
        slope <- ll_save[length(ll_save)] - ll_save[length(ll_save) - 1L]
      else
        slope <- NA
      if(plot) {
        plot(ll_save, xlab = "Iteration", ylab = "logLik",
          main = paste("Slope", fmt(slope, width = 6, digits = digits + 2)),
          type = "l", ylim = c(0.9 * max(ll_save), max(ll_save)))
      }
    }
    ## Final summary statistics after convergence (or maxit).
    elapsed <- c(proc.time() - ptm)[3]
    IC <- get.ic(family, y, peta, edf, nobs, criterion)
    logLik <- family$loglik(y, peta)
    logPost <- as.numeric(logLik + get.log.prior(x))
    ll_save <- c(ll_save, logLik)
    if(iter > 2)
      slope <- ll_save[length(ll_save)] - ll_save[length(ll_save) - 1L]
    else
      slope <- NA
    if(verbose) {
      cat(if(ia) "\r" else "\n")
      vtxt <- paste(criterion, " ", fmt(IC, width = 8, digits = digits),
        " logPost ", fmt(logPost, width = 8, digits = digits),
        " logLik ", fmt(family$loglik(y, peta), width = 8, digits = digits),
        " edf ", fmt(edf, width = 6, digits = digits),
        " eps ", fmt(eps0, width = 6, digits = digits + 2),
        " iteration ", formatC(iter, width = nchar(maxit)), sep = "")
      cat(vtxt)
      if(.Platform$OS.type != "unix" & ia) flush.console()
      et <- if(elapsed > 60) {
        paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
      } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
      cat("\nelapsed time: ", et, "\n", sep = "")
    }
    if(plot) {
      plot(ll_save, xlab = "Iteration", ylab = "logLik",
        main = paste("Slope", fmt(slope, width = 6, digits = digits + 2)),
        type = "l", ylim = c(0.9 * max(ll_save), max(ll_save)))
    }
    if(iter == maxit)
      warning("the backfitting algorithm did not converge!")
    names(IC) <- criterion
    rval <- list("fitted.values" = eta, "parameters" = get.all.par(x), "edf" = edf,
      "logLik" = logLik, "logPost" = logPost, "nobs" = nobs,
      "converged" = iter < maxit, "runtime" = elapsed)
    rval[[names(IC)]] <- IC
    rval
  }
  backfit(verbose = verbose)
}
## Extract information criteria.
## Evaluate an information criterion for the current fit.
##
## @param family bamlss family providing $loglik(y, par).
## @param y      Response.
## @param par    Parameter list on the distribution scale.
## @param edf    Equivalent degrees of freedom; NA falls back to n - 1.
## @param n      Number of observations.
## @param type   One of "AIC", "BIC", "AICc", "MP".
## @return The (penalized) criterion value; smaller is better.
get.ic <- function(family, y, par, edf, n, type = c("AIC", "BIC", "AICc", "MP"), ...)
{
  type <- match.arg(type)
  ll <- family$loglik(y, par)
  if(is.na(edf))
    edf <- n - 1
  ## AICc small-sample correction; dropped for degenerate denominators.
  denom <- n - edf - 1
  add <- 0
  if(!is.na(denom) && denom >= 1e-10)
    add <- (2 * edf * (edf + 1)) / denom
  switch(type,
    "AIC"  = -2 * ll + 2 * edf,
    "BIC"  = -2 * ll + edf * log(n),
    "AICc" = -2 * ll + 2 * edf + add,
    "MP"   = -(ll + edf)
  )
}
## Evaluate an information criterion directly from a log-likelihood value
## (companion of get.ic(), which takes a family/response instead).
##
## @param logLik Log-likelihood of the fit.
## @param edf    Equivalent degrees of freedom.
## @param n      Number of observations.
## @param type   One of "AIC", "BIC", "AICc", "MP".
## @return The (penalized) criterion value; smaller is better.
get.ic2 <- function(logLik, edf, n, type = c("AIC", "BIC", "AICc", "MP"), ...)
{
  type <- match.arg(type)
  denom <- (n - edf - 1)
  ## Guard against NA/degenerate denominators so the AICc correction never
  ## errors on NA edf -- consistent with get.ic() above, which already
  ## carried this check.
  if(is.na(denom) || denom < 1e-10) {
    add <- 0
  } else {
    add <- (2 * edf * (edf + 1)) / denom
  }
  pen <- switch(type,
    "AIC" = -2 * logLik + 2 * edf,
    "BIC" = -2 * logLik + edf * log(n),
    "AICc" = -2 * logLik + 2 * edf + add,
    "MP" = -1 * (logLik + edf)
  )
  return(pen)
}
## Conditional rounding helper: round 'x' to a number of digits that adapts
## to the magnitude of each value (more digits for values close to zero).
## NOTE: the unconditional return(x) on the first line currently DISABLES
## the adaptive rounding -- the function is an identity.  The code below is
## unreachable and is apparently kept for easy re-activation.
cround <- function(x, digits = 2)
{
  ## Short-circuit: rounding switched off, 'x' is returned unchanged.
  return(x)
  ## --- unreachable below this line ---
  ## Count the leading zero-decimals of |x| < 1 (via its string
  ## representation, with scientific notation temporarily suppressed).
  cdigits <- Vectorize(function(x) {
    if(abs(x) >= 1)
      return(0)
    scipen <- getOption("scipen")
    on.exit(options("scipen" = scipen))
    options("scipen" = 100)
    x <- strsplit(paste(x), "")[[1]]
    x <- x[which(x == "."):length(x)][-1]
    i <- which(x != "0")
    x <- x[1:(i[1] - 1)]
    n <- length(x)
    if(n < 2) {
      if(x != "0")
        return(1)
      else return(n + 1)
    } else return(n + 1)
  })
  round(x, digits = cdigits(x) + digits)
}
## Naive smoothing parameter optimization.
## Naive coordinate-wise smoothing-parameter optimization.  Minimizes the
## objective f() over each coordinate of 'start' in turn using optimize()
## on the interval [start/scale, start*scale (+ 1 if add)], accepting a new
## value only when it improves the best objective seen so far.
##
## @param f          Objective function; called as f(start, ...).
## @param start      Numeric vector of starting values (tau2's).
## @param scale      Factor defining the search interval per coordinate.
## @param eps, maxit Convergence tolerance / max. sweeps over coordinates.
## @param add        Add 1 to the upper interval bound?
## @param force.stop Stop after one sweep for a single coordinate.
## @param optim      If TRUE, use box-constrained L-BFGS-B instead.
## @return The (possibly improved) parameter vector.
tau2.optim <- function(f, start, ..., scale = 10, eps = .Machine$double.eps^0.5,
  maxit = 1, add = TRUE, force.stop = TRUE, optim = FALSE)
{
  ## Alternative path: joint box-constrained optimization.
  if(optim) {
    return(optim(start, fn = f, method = "L-BFGS-B",
      lower = start / scale,
      upper = start * scale + if(add) 1 else 0)$par)
  }
  ## Evaluate f with only the k-th coordinate replaced by 'par'.
  eval_coord <- function(par, start, k) {
    start[k] <- cround(par)
    f(start, ...)
  }
  start <- cround(start)
  best <- f(start, ...)
  iter <- 0
  change <- eps + 1
  while((change > eps) & (iter < maxit)) {
    prev <- start
    for(k in seq_along(start)) {
      bounds <- c(start[k] / scale, start[k] * scale + if(add) 1 else 0)
      opt <- try(optimize(eval_coord, interval = bounds, start = start, k = k, tol = eps),
        silent = TRUE)
      ## Accept the coordinate update only on a strict improvement.
      if(!inherits(opt, "try-error") && (opt$objective < best)) {
        start[k] <- opt$minimum
        best <- opt$objective
      }
    }
    if((length(start) < 2) & force.stop)
      break
    change <- mean(abs((start - prev) / prev))
    iter <- iter + 1
  }
  start
}
## Function to create full parameter vector.
## Function to create full parameter vector.
## Flattens coefficients (and optionally smoothing variances) of all terms
## into one named vector, together with matching lower/upper bound vectors.
## type < 2 uses positional names ("p<j>.t<sj>.<par>"), otherwise
## label-based names ("<param>:h1:<label>.<k>").
make_par <- function(x, type = 1, add.tau2 = FALSE) {
  family <- attr(x, "family")
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("parameter names mismatch with family names!")
  np <- length(nx)
  par <- lower <- upper <- NULL
  for(j in 1:np) {
    for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
      tpar <- x[[nx[j]]]$smooth.construct[[sj]]$state$parameters
      tlower <- x[[nx[j]]]$smooth.construct[[sj]]$lower
      tupper <- x[[nx[j]]]$smooth.construct[[sj]]$upper
      ## Drop smoothing variances unless explicitly requested.
      if(!add.tau2) {
        tlower <- tlower[!grepl("tau2", names(tlower))]
        tupper <- tupper[!grepl("tau2", names(tupper))]
        tpar <- tpar[!grepl("tau2", names(tpar))]
      }
      g <- get.par(tpar, "b")
      ## Label-based names for coefficients and, if present, tau2's.
      npar <- paste(paste(nx[j], "h1", x[[nx[j]]]$smooth.construct[[sj]]$label, sep = ":"), 1:length(g), sep = ".")
      if(length(tau2 <- get.par(tpar, "tau2"))) {
        npar <- c(npar, paste(nx[j], "h1", paste(x[[nx[j]]]$smooth.construct[[sj]]$label,
          paste("tau2", 1:length(tau2), sep = ""), sep = "."), sep = ":"))
      }
      names(tpar) <- names(tlower) <- names(tupper) <- if(type < 2) {
        paste("p", j, ".t", sj, ".", names(tpar), sep = "")
      } else npar
      par <- c(par, tpar)
      lower <- c(lower, tlower)
      upper <- c(upper, tupper)
    }
  }
  return(list("par" = par, "lower" = lower, "upper" = upper))
}
## Backfitting updating functions.
## Backfitting updating functions.
## Newton-step updater for one model term: computes gradient and hessian of
## the log-posterior (numerically via grad()/hess(), optionally using
## family-supplied analytic derivatives) and takes a single damped Newton
## step of length 'nu' (default 0.1, overridable via x$nu).
bfit_newton <- function(x, family, y, eta, id, ...)
{
  args <- list(...)
  ## Remove this term's current contribution from the predictor.
  eta[[id]] <- eta[[id]] - fitted(x$state)
  tau2 <- if(!x$fixed) get.par(x$state$parameters, "tau2") else NULL
  ## Log-posterior as a function of the term's coefficients.
  lp <- function(g) {
    eta[[id]] <- eta[[id]] + x$fit.fun(x$X, g)
    family$loglik(y, family$map2par(eta)) + x$prior(c(g, tau2))
  }
  ## Analytic gradient, if the family provides one; the term's own prior
  ## gradient x$grad() is added when available.
  if(is.null(family$gradient[[id]])) {
    gfun <- NULL
  } else {
    gfun <- list()
    gfun[[id]] <- function(g, y, eta, x, ...) {
      gg <- family$gradient[[id]](g, y, eta, x, ...)
      if(!is.null(x$grad)) {
        gg <- gg + x$grad(score = NULL, c(g, tau2), full = FALSE)
      }
      drop(gg)
    }
  }
  ## Analytic hessian, analogous to the gradient above.
  if(is.null(family$hessian[[id]])) {
    hfun <- NULL
  } else {
    hfun <- list()
    hfun[[id]] <- function(g, y, eta, x, ...) {
      hg <- family$hessian[[id]](g, y, eta, x, ...)
      if(!is.null(x$hess)) {
        hg <- hg + x$hess(score = NULL, c(g, tau2), full = FALSE)
      }
      hg
    }
  }
  g <- get.par(x$state$parameters, "b")
  ## Step length; 0.1 is a conservative default damping factor.
  nu <- if(is.null(x$nu)) 0.1 else x$nu
  g.grad <- grad(fun = lp, theta = g, id = id, prior = NULL,
    args = list("gradient" = gfun, "x" = x, "y" = y, "eta" = eta))
  g.hess <- hess(fun = lp, theta = g, id = id, prior = NULL,
    args = list("gradient" = gfun, "hessian" = hfun, "x" = x, "y" = y, "eta" = eta))
  Sigma <- matrix_inv(g.hess, index = x$sparse.setup)
  ## Damped Newton step: b_new = b + nu * H^{-1} grad.
  g <- drop(g + nu * Sigma %*% g.grad)
  x$state$parameters <- set.par(x$state$parameters, g, "b")
  x$state$fitted.values <- x$fit.fun(x$X, get.state(x, "b"))
  x$state$hessian <- Sigma
  return(x$state)
}
## Boosting-style updater: takes one shrunken (step length 'nu')
## penalized-least-squares step for a single term, optionally optimizing
## the smoothing variances tau2 by minimizing 'stop.criterion' first.
## Note: xbin.fun() updates x$weights / x$rres in place (binning machinery).
boostm_fit0 <- function(x, grad, hess, z, nu, stop.criterion, family, y, eta, edf, id, do.optim, ...)
{
  b0 <- get.par(x$state$parameters, "b")
  ## Aggregate hess into binned weights; residual slot is zeroed here.
  xbin.fun(x$binning$sorted.index, hess, rep(0, length(grad)),
    x$weights, x$rres, x$binning$order, x$binning$uind)
  XWX <- do.XWX(x$X, 1 / x$weights, x$sparse.setup$matrix)
  if(x$fixed) {
    ## Unpenalized term: zero penalty matrix.
    k <- length(b0)
    S <- matrix(0, k, k)
  } else {
    if(do.optim) {
      ## Select tau2 by minimizing the stopping criterion for the
      ## resulting (shrunken) boosting step.
      tpar <- x$state$parameters
      edf <- edf - x$state$edf
      eta[[id]] <- eta[[id]] - fitted(x$state)
      objfun <- function(tau2) {
        tpar2 <- set.par(tpar, tau2, "tau2")
        S <- 0
        for(j in seq_along(tau2))
          S <- S + (1 / tau2[j]) * if(is.function(x$S[[j]])) x$S[[j]](c(tpar2, x$fixed.hyper)) else x$S[[j]]
        xgrad <- t(x$X) %*% grad + S %*% b0
        xhess <- XWX + S
        Sigma <- matrix_inv(xhess, index = x$sparse.setup)
        b1 <- b0 + drop(nu * Sigma %*% xgrad)
        eta[[id]] <- eta[[id]] + x$fit.fun(x$X, b1)
        edf <- edf + sum_diag(XWX %*% Sigma)
        return(get.ic(family, y, family$map2par(eta), edf, length(eta[[1]]), type = stop.criterion))
      }
      tau2 <- tau2.optim(objfun, start = get.par(x$state$parameters, "tau2"), scale = 10, maxit = 10, force.stop = FALSE, add = FALSE)
      x$state$parameters <- set.par(x$state$parameters, tau2, "tau2")
    }
    ## Build the penalty matrix from the (possibly updated) tau2's.
    tau2 <- get.par(x$state$parameters, "tau2")
    S <- 0
    for(j in seq_along(tau2))
      S <- S + (1 / tau2[j]) * if(is.function(x$S[[j]])) x$S[[j]](c(x$state$parameters, x$fixed.hyper)) else x$S[[j]]
  }
  ## Final shrunken Newton/boosting step with the selected penalty.
  xgrad <- t(x$X) %*% grad + S %*% b0
  xhess <- XWX + S
  Sigma <- matrix_inv(xhess, index = x$sparse.setup)
  b1 <- b0 + drop(nu * Sigma %*% xgrad)
  x$state$parameters <- set.par(x$state$parameters, b1, "b")
  x$state$fitted.values <- x$fit.fun(x$X, b1)
  x$state$hessian <- Sigma
  x$state$edf <- sum_diag(XWX %*% Sigma)
  return(x$state)
}
## Boosting wrapper around bfit_iwls(): performs one IWLS update of the
## term and then shrinks the step, keeping the convex combination
## nu * b_new + (1 - nu) * b_old of the coefficients.
boostm_fit <- function(x, grad, hess, z, nu, stop.criterion, family, y, eta, edf, id, do.optim, ...)
{
  ## Remember the optimization flag and the current coefficients.
  x$state$do.optim <- do.optim
  coef_old <- get.par(x$state$parameters, "b")
  ## One IWLS update for this term.
  state <- bfit_iwls(x = x, family = family, y = y, eta = eta, id = id,
    criterion = stop.criterion, grad = grad, hess = hess, z = z, edf = edf, ...)
  coef_new <- get.par(state$parameters, "b")
  ## Shrunken step and refreshed fitted values.
  state$parameters <- set.par(state$parameters, nu * coef_new + (1 - nu) * coef_old, "b")
  state$fitted.values <- x$fit.fun(x$X, get.par(state$parameters, "b"))
  state
}
#x smooth construct selbst x$X design matrix x$S als Funktion
## Weighted-least-squares updater for one term: builds IWLS working
## observations from the family's score/hess and solves with lm.wfit().
## 'x' is the smooth construct itself (x$X design matrix, x$S penalties).
bfit_lm <- function(x, family, y, eta, id, weights, criterion, ...)
{
  args <- list(...)
  peta <- family$map2par(eta)
  hess <- family$hess[[id]](y, peta, id = id, ...)
  ## Score.
  score <- family$score[[id]](y, peta, id = id, ...)
  ## Compute working observations.
  z <- eta[[id]] + 1 / hess * score
  ## Compute reduced residuals.
  e <- z - eta[[id]] + fitted(x$state)
  if(!is.null(weights))
    hess <- hess * weights
  if(x$fixed | x$fxsp) {
    ## Unpenalized / fixed smoothing parameter: plain weighted LS.
    b <- lm.wfit(x$X, e, hess)
  } else {
    ## Penalized fit via row augmentation with the summed penalty matrix.
    ## NOTE(review): the augmented rows get weight 0 (and response 1), so
    ## lm.wfit() drops them and the penalty appears to have no effect --
    ## verify against the intended augmented-regression scheme (which
    ## usually appends sqrt-penalty rows with zero response and unit
    ## weight).
    tau2 <- get.par(x$state$parameters, "tau2")
    S <- 0
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * x$S[[j]]
    n <- nrow(S)
    w <- c(hess, rep(0, n))
    e <- c(e, rep(1, n))
    b <- lm.wfit(rbind(x$X, S), e, w)
  }
  x$state$parameters <- set.par(x$state$parameters, coef(b), "b")
  x$state$fitted.values <- x$X %*% coef(b)
  x$state
}
## Core IWLS updater for a single model term.  Computes working
## observations and weights from the family's score/hess (unless passed via
## ...), then solves the penalized weighted least-squares problem; when
## smoothing-variance optimization is active, tau2 is chosen by minimizing
## 'criterion' via tau2.optim().  Binning (xbin.fun) updates x$weights and
## x$rres in place.
bfit_iwls <- function(x, family, y, eta, id, weights, criterion, ...)
{
  args <- list(...)
  no_ff <- !inherits(y, "ff")
  peta <- family$map2par(eta)
  ## Optional elastic-net-type extra ridge, switched on via x$xt$enet2.
  enet2 <- x$xt$enet2
  if(is.null(enet2))
    enet2 <- FALSE
  nobs <- length(eta[[1L]])
  if(is.null(args$hess)) {
    ## Compute weights.
    if(no_ff) {
      hess <- process.derivs(family$hess[[id]](y, peta, id = id, ...), is.weight = TRUE)
    } else {
      ## Chunked evaluation for ff-backed (large) responses.
      hess <- ffdf_eval_sh(y, peta, FUN = function(y, par) {
        process.derivs(family$hess[[id]](y, par, id = id), is.weight = TRUE)
      })
    }
    if(length(hess) != nobs) {
      stop("something wrong in processing the family $hess() function! More elements in return value of $hess() than the response!")
    }
  } else hess <- args$hess
  if(!is.null(weights))
    hess <- hess * weights
  if(is.null(args$z)) {
    ## Score.
    if(no_ff) {
      score <- process.derivs(family$score[[id]](y, peta, id = id, ...), is.weight = FALSE)
    } else {
      score <- ffdf_eval_sh(y, peta, FUN = function(y, par) {
        process.derivs(family$score[[id]](y, par, id = id), is.weight = FALSE)
      })
    }
    if(length(score) != nobs) {
      stop("something wrong in processing the family $score() function! More elements in return value of $score() than the response!")
    }
    ## Compute working observations.
    z <- eta[[id]] + 1 / hess * score
  } else z <- args$z
  ## Compute partial predictor.
  eta[[id]] <- eta[[id]] - fitted(x$state)
  ## Compute reduced residuals.
  e <- z - eta[[id]]
  ## In-place binning of weights/residuals (fills x$weights, x$rres).
  xbin.fun(x$binning$sorted.index, hess, e, x$weights, x$rres, x$binning$order, x$binning$uind)
  ## Old parameters.
  g0 <- get.state(x, "b")
  ## Compute mean and precision.
  XWX <- do.XWX(x$X, 1 / x$weights, x$sparse.setup$matrix)
  if(!x$state$do.optim | x$fixed | x$fxsp) {
    ## Fixed smoothing parameters (or optimization disabled): single solve.
    if(x$fixed) {
      P <- matrix_inv(XWX + if(!is.null(x$xt[["pS"]])) x$xt[["pS"]] else 0, index = x$sparse.setup)
    } else {
      S <- 0
      tau2 <- get.state(x, "tau2")
      for(j in seq_along(x$S))
        S <- S + 1 / tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](c(g0, x$fixed.hyper)) else x$S[[j]]
      if(enet2)
        S <- S + diag(1, ncol(S)) * (1 - 1 / tau2[1L]) / 2
      P <- matrix_inv(XWX + S + if(!is.null(x$xt[["pS"]])) x$xt[["pS"]] else 0, index = x$sparse.setup)
    }
    if(is.null(x$xt[["pm"]])) {
      x$state$parameters <- set.par(x$state$parameters, drop(P %*% crossprod(x$X, x$rres)), "b")
    } else {
      ## Informative normal prior mean 'pm' with precision 'pS'/'pSa'.
      ## NOTE(review): in the x$fixed branch above 'tau2' is never assigned,
      ## so the 1/tau2[length(tau2)] * pSa fallback would fail there --
      ## presumably pm/pSa are only used for non-fixed terms; confirm.
      pS <- if(!is.null(x$xt[["pS"]])) {
        x$xt[["pS"]]
      } else {
        if(!is.null(x$xt[["pSa"]])) {
          1 / tau2[length(tau2)] * x$xt[["pSa"]]
        } else 0
      }
      x$state$parameters <- set.par(x$state$parameters, drop(P %*% (crossprod(x$X, x$rres) + pS %*% x$xt[["pm"]])), "b")
    }
  } else {
    ## Smoothing-variance optimization: minimize the information criterion
    ## over tau2; the best state found is cached in 'env' as a side channel.
    args <- list(...)
    edf0 <- args$edf - x$state$edf
    eta2 <- eta
    env <- new.env()
    objfun <- function(tau2, ...) {
      S <- 0
      for(j in seq_along(x$S))
        S <- S + 1 / tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](c(g0, x$fixed.hyper)) else x$S[[j]]
      if(enet2)
        S <- S + diag(1, ncol(S)) * (1 - 1 / tau2[1L]) / 2
      P <- matrix_inv(XWX + S + if(!is.null(x$xt[["pS"]])) x$xt[["pS"]] else 0, index = x$sparse.setup)
      if(inherits(P, "try-error")) return(NA)
      if(is.null(x$xt[["pm"]])) {
        g <- drop(P %*% crossprod(x$X, x$rres))
      } else {
        pS <- if(!is.null(x$xt[["pS"]])) {
          x$xt[["pS"]]
        } else {
          if(!is.null(x$xt[["pSa"]])) {
            1 / tau2[length(tau2)] * x$xt[["pSa"]]
          } else 0
        }
        g <- drop(P %*% (crossprod(x$X, x$rres) + pS %*% x$xt[["pm"]]))
      }
      ## Linear constraints C g = 0, enforced by projection.
      if(!is.null(x$doCmat)) {
        V <- P %*% t(x$C)
        W <- x$C %*% V
        U <- chol2inv(chol(W)) %*% t(V)
        g <- drop(g - t(U) %*% x$C %*% g)
      }
      if(any(is.na(g)) | any(g %in% c(-Inf, Inf))) g <- rep(0, length(g))
      fit <- x$fit.fun(x$X, g)
      if(!is.null(x$doCmat))
        fit <- fit - mean(fit, na.rm = TRUE)
      edf <- sum_diag(XWX %*% P)
      eta2[[id]] <- eta2[[id]] + fit
      ic <- get.ic(family, y, family$map2par(eta2), edf0 + edf, length(z), criterion, ...)
      ## Cache the best state seen so far in 'env'.
      if(!is.null(env$ic_val)) {
        if((ic < env$ic_val) & (ic < env$ic00_val)) {
          par <- c(g, tau2)
          names(par) <- names(x$state$parameters)
          x$state$parameters <- par
          x$state$fitted.values <- fit
          x$state$edf <- edf
          if(!is.null(x$prior)) {
            if(is.function(x$prior))
              x$state$log.prior <- x$prior(par)
          }
          assign("state", x$state, envir = env)
          assign("ic_val", ic, envir = env)
        }
      } else assign("ic_val", ic, envir = env)
      return(ic)
    }
    ## Baseline criterion at the current tau2 (also seeds the cache).
    assign("ic00_val", objfun(tau2 <- get.state(x, "tau2")), envir = env)
    tau2 <- tau2.optim(objfun, start = tau2)
    ## If a better state was cached during the search, return it directly.
    if(!is.null(env$state))
      return(env$state)
    ## Otherwise re-solve once with the final tau2.
    x$state$parameters <- set.par(x$state$parameters, tau2, "tau2")
    S <- 0
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](c(x$state$parameters, x$fixed.hyper)) else x$S[[j]]
    P <- matrix_inv(XWX + S + if(!is.null(x$xt[["pS"]])) x$xt[["pS"]] else 0, index = x$sparse.setup)
    if(is.null(x$xt[["pm"]])) {
      g <- drop(P %*% crossprod(x$X, x$rres))
    } else {
      pS <- if(!is.null(x$xt[["pS"]])) {
        x$xt[["pS"]]
      } else {
        if(!is.null(x$xt[["pSa"]])) {
          1 / tau2[length(tau2)] * x$xt[["pSa"]]
        } else 0
      }
      g <- drop(P %*% (crossprod(x$X, x$rres) + pS %*% x$xt[["pm"]]))
    }
    if(!is.null(x$doCmat)) {
      V <- P %*% t(x$C)
      W <- x$C %*% V
      U <- chol2inv(chol(W)) %*% t(V)
      g <- drop(g - t(U) %*% x$C %*% g)
    }
    x$state$parameters <- set.par(x$state$parameters, g, "b")
  }
  ## Compute fitted values.
  g <- get.state(x, "b")
  ## Guard against numerically broken solves: reset coefficients to zero.
  if(any(is.na(g)) | any(g %in% c(-Inf, Inf))) {
    x$state$parameters <- set.par(x$state$parameters, rep(0, length(get.state(x, "b"))), "b")
  }
  x$state$fitted.values <- x$fit.fun(x$X, get.state(x, "b"))
  if(!is.null(x$doCmat))
    x$state$fitted.values <- x$state$fitted.values - mean(x$state$fitted.values, na.rm = TRUE)
  x$state$edf <- sum_diag(XWX %*% P)
  if(!is.null(x$prior)) {
    if(is.function(x$prior))
      x$state$log.prior <- x$prior(x$state$parameters)
  }
  return(x$state)# returing!
}
## Sparse-matrix variant of bfit_iwls(): same IWLS term update, but using
## Matrix-package classes (Diagonal, sparse penalties) and Cholesky solves
## instead of matrix_inv().  tau2 optimization again caches the best state
## in a local environment.
bfit_iwls_Matrix <- function(x, family, y, eta, id, weights, criterion, ...)
{
  args <- list(...)
  peta <- family$map2par(eta)
  if(is.null(args$hess)) {
    ## Compute weights.
    hess <- family$hess[[id]](y, peta, id = id, ...)
  } else hess <- args$hess
  if(!is.null(weights))
    hess <- hess * weights
  hess <- process.derivs(hess, is.weight = TRUE)
  if(is.null(args$z)) {
    ## Score.
    score <- process.derivs(family$score[[id]](y, peta, id = id, ...), is.weight = FALSE)
    ## Compute working observations.
    z <- eta[[id]] + 1 / hess * score
  } else z <- args$z
  ## Compute partial predictor.
  eta[[id]] <- eta[[id]] - fitted(x$state)
  ## Compute reduced residuals.
  e <- z - eta[[id]]
  ## In-place binning of weights/residuals (fills x$weights, x$rres).
  xbin.fun(x$binning$sorted.index, hess, e, x$weights, x$rres, x$binning$order)
  ## Compute mean and precision.
  XWX <- crossprod(Diagonal(x = x$weights) %*% x$X, x$X)
  Xr <- crossprod(x$X, x$rres)
  if(!x$state$do.optim | x$fixed | x$fxsp) {
    ## Fixed smoothing parameters: single Cholesky solve.
    if(!x$fixed) {
      tau2 <- get.state(x, "tau2")
      S <- Matrix(0, ncol(x$X), ncol(x$X))
      for(j in seq_along(x$S))
        S <- S + 1 / tau2[j] * x$S[[j]]
      U <- chol(XWX + S)
    } else {
      U <- chol(XWX)
    }
    P <- chol2inv(U)
    b <- P %*% Xr
    x$state$parameters <- set.par(x$state$parameters, as.numeric(b), "b")
  } else {
    ## Smoothing-variance optimization via tau2.optim(); best state is
    ## cached in 'env' as a side channel.
    args <- list(...)
    edf0 <- args$edf - x$state$edf
    eta2 <- eta
    env <- new.env()
    objfun <- function(tau2, ...) {
      S <- Matrix(0, ncol(x$X), ncol(x$X))
      for(j in seq_along(x$S))
        S <- S + 1 / tau2[j] * x$S[[j]]
      U <- chol(XWX + S)
      P <- chol2inv(U)
      b <- P %*% Xr
      fit <- x$fit.fun(x$X, b)
      edf <- sum_diag(XWX %*% P)
      eta2[[id]] <- eta2[[id]] + fit
      ic <- get.ic(family, y, family$map2par(eta2), edf0 + edf, length(z), criterion, ...)
      if(!is.null(env$ic_val)) {
        if((ic < env$ic_val) & (ic < env$ic00_val)) {
          par <- c(as.numeric(b), tau2)
          names(par) <- names(x$state$parameters)
          x$state$parameters <- par
          x$state$fitted.values <- fit
          x$state$edf <- edf
          if(!is.null(x$prior)) {
            if(is.function(x$prior))
              x$state$log.prior <- x$prior(par)
          }
          assign("state", x$state, envir = env)
          assign("ic_val", ic, envir = env)
        }
      } else assign("ic_val", ic, envir = env)
      return(ic)
    }
    ## Baseline criterion at the current tau2 (also seeds the cache).
    assign("ic00_val", objfun(get.state(x, "tau2")), envir = env)
    tau2 <- tau2.optim(objfun, start = get.state(x, "tau2"))
    if(!is.null(env$state))
      return(env$state)
    ## Otherwise re-solve once with the final tau2.
    S <- Matrix(0, ncol(x$X), ncol(x$X))
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * x$S[[j]]
    U <- chol(XWX + S)
    P <- chol2inv(U)
    b <- P %*% Xr
    x$state$parameters <- set.par(x$state$parameters, as.numeric(b), "b")
    x$state$parameters <- set.par(x$state$parameters, tau2, "tau2")
  }
  ## Compute fitted values.
  x$state$fitted.values <- x$fit.fun(x$X, get.state(x, "b"))
  x$state$edf <- sum_diag(XWX %*% P)
  if(!is.null(x$prior)) {
    if(is.function(x$prior))
      x$state$log.prior <- x$prior(x$state$parameters)
  }
  return(x$state)
}
## Elastic-net updater: fits the IWLS working residuals with
## glmnet::glmnet() and selects the lambda on the computed path that
## minimizes the requested information criterion.  The chosen lambda is
## stored in the "tau2" slot of the term's parameters.
bfit_glmnet <- function(x, family, y, eta, id, weights, criterion, ...)
{
  requireNamespace("glmnet")
  args <- list(...)
  peta <- family$map2par(eta)
  hess <- if(is.null(args$hess)) {
    hess <- process.derivs(family$hess[[id]](y, peta, id = id, ...), is.weight = TRUE)
  } else args$hess
  if(!is.null(weights))
    hess <- hess * weights
  if(is.null(args$z)) {
    score <- process.derivs(family$score[[id]](y, peta, id = id, ...), is.weight = FALSE)
    ## Compute working observations.
    z <- eta[[id]] + 1 / hess * score
  } else z <- args$z
  ## Compute partial predictor.
  eta[[id]] <- eta[[id]] - fitted(x$state)
  ## Compute residuals.
  e <- z - eta[[id]]
  ## Defaults for the glmnet path, overridable via x$xt.
  if(is.null(x$xt$alpha))
    x$xt$alpha <- 1
  if(is.null(x$xt$nlambda))
    x$xt$nlambda <- 100
  if(is.null(x$xt$lambda.min.ratio))
    x$xt$lambda.min.ratio <- 1e-20
  b <- glmnet::glmnet(x$X, e, alpha = x$xt$alpha,
    nlambda = x$xt$nlambda, standardize = FALSE, intercept = FALSE,
    lambda.min.ratio = x$xt$lambda.min.ratio,
    weights = hess)
  ## Evaluate the requested IC along the whole lambda path and pick the
  ## minimizer; tLL is the likelihood-ratio statistic vs. the null model.
  tLL <- b$nulldev - deviance(b)
  k <- b$df
  n <- b$nobs
  IC <- switch(criterion,
    "AICc" = -tLL + 2*k + 2*k*(k + 1)/(n - k - 1),
    "BIC" = -tLL + k * log(n),
    "AIC" = -tLL + 2 * k
  )
  i <- which.min(IC)
  x$state$parameters <- set.par(x$state$parameters, as.numeric(b$lambda[i]), "tau2")
  ## Drop the intercept row of coef() (fitted with intercept = FALSE).
  cb <- as.numeric(coef(b, s = b$lambda[i])[-1])
  x$state$parameters <- set.par(x$state$parameters, cb, "b")
  x$state$fitted.values <- x$X %*% cb
  x$state$edf <- b$df[i]
  return(x$state)
}
## Updating based on optim.
## Updating based on optim.
## General-purpose updater: maximizes the term's log-posterior over the
## regression coefficients with optim(BFGS), using an analytic gradient
## when both the family score and the term's x$grad() are available.  The
## state is only replaced when optim() succeeds.
bfit_optim <- function(x, family, y, eta, id, weights, criterion, ...)
{
  ## Compute partial predictor.
  eta[[id]] <- eta[[id]] - fitted(x$state)
  eta2 <- eta
  tpar <- x$state$parameters
  ## Objective for regression coefficients.
  ## Returns the negative log-posterior (optim minimizes); NA for
  ## non-finite values so optim can recover.
  objfun <- function(b, tau2 = NULL) {
    tpar <- set.par(tpar, b, "b")
    if(!is.null(tau2) & !x$fixed)
      tpar <- set.par(tpar, tau2, "tau2")
    fit <- x$fit.fun(x$X, b)
    eta2[[id]] <- eta[[id]] + fit
    ll <- if(is.null(weights[[id]])) {
      family$loglik(y, family$map2par(eta2))
    } else {
      sum(family$d(y, family$map2par(eta2)) * weights[[id]], na.rm = TRUE)
    }
    lp <- x$prior(tpar)
    val <- -1 * (ll + lp)
    if(!is.finite(val)) val <- NA
    val
  }
  ## Gradient function.
  grad <- if(!is.null(family$score[[id]]) & is.function(x$grad)) {
    function(gamma, tau2 = NULL) {
      tpar <- set.par(tpar, gamma, "b")
      if(!is.null(tau2) & !x$fixed)
        tpar <- set.par(tpar, tau2, "tau2")
      eta2[[id]] <- eta[[id]] + x$fit.fun(x$X, tpar)
      peta <- family$map2par(eta2)
      score <- drop(family$score[[id]](y, peta))
      grad <- x$grad(score, tpar, full = FALSE)
      return(drop(-1 * grad))
    }
  } else NULL
  ## NOTE(review): 'lower' is only honored by method = "L-BFGS-B"; with
  ## "BFGS" the bound for force.positive terms is silently ignored --
  ## confirm intended behavior.
  suppressWarnings(opt <- try(optim(get.par(tpar, "b"), fn = objfun, gr = grad,
    method = "BFGS", control = list(), tau2 = get.par(tpar, "tau2"), hessian = TRUE,
    lower = if(!is.null(x$force.positive)) 1e-10 else -Inf),
    silent = TRUE))
  if(!inherits(opt, "try-error")) {
    tpar <- set.par(tpar, opt$par, "b")
    x$state$fitted.values <- x$fit.fun(x$X, tpar)
    x$state$parameters <- tpar
    x$state$hessian <- opt$hessian
  }
  return(x$state)
}
## Compute fitted.values from set of parameters.
## Compute fitted.values from set of parameters.
## Rebuilds all predictors from a flat named parameter vector 'par':
## coefficients are matched to terms by their "<param>.s.<label>" /
## "<param>.p.<column>" name prefixes, written into the term states, and
## summed into one predictor per distribution parameter.
get.eta.par <- function(par, x)
{
  nx <- names(x)
  eta <- vector(mode = "list", length = length(nx))
  names(eta) <- nx
  for(j in nx) {
    eta[[j]] <- 0.0
    for(sj in names(x[[j]]$smooth.construct)) {
      ## Name prefix for this term's entries in 'par'.
      xl <- if(sj != "model.matrix") {
        paste(j, "s", x[[j]]$smooth.construct[[sj]]$label, sep = ".")
      } else {
        paste(j, "p", strsplit(x[[j]]$smooth.construct[[sj]]$label, "+", fixed = TRUE)[[1]], sep = ".")
      }
      tpar <- par[grep2(xl, names(par), fixed = TRUE)]
      x[[j]]$smooth.construct[[sj]]$state$parameters <- set.par(x[[j]]$smooth.construct[[sj]]$state$parameters, tpar, "b")
      x[[j]]$smooth.construct[[sj]]$state$fitted.values <- x[[j]]$smooth.construct[[sj]]$fit.fun(x[[j]]$smooth.construct[[sj]]$X,
        get.par(tpar, "b"))
      eta[[j]] <- eta[[j]] + fitted(x[[j]]$smooth.construct[[sj]]$state)
    }
    ## Separate parametric model matrix, if the construct carries one.
    if(!is.null(x[[j]]$model.matrix)) {
      xl <- paste(j, "p", colnames(x[[j]]$model.matrix), sep = ".")
      tpar <- par[grep(xl, names(par), fixed = TRUE)]
      eta[[j]] <- eta[[j]] + drop(x[[j]]$model.matrix %*% tpar)
    }
  }
  return(eta)
}
## The log-posterior.
## Objective function handed to optim() by opt_optim(): evaluates
## log-likelihood + log-prior for a flat parameter vector 'par'.
## Arguments:
##   par     named parameter vector (coefficients "b" and variances "tau2").
##   x       design list (per distribution parameter).
##   y       response.
##   family  bamlss family providing $loglik() and $map2par().
##   verbose print a one-line progress trace per evaluation.
##   scale   optional multiplier applied to the return value
##           (e.g. -1 to turn this into a minimization target).
##   ienv    environment holding the evaluation counter used for printing;
##           only accessed when verbose = TRUE.
## Returns the (optionally scaled) scalar log-posterior.
log_posterior <- function(par, x, y, family, verbose = TRUE, digits = 3, scale = NULL, ienv = NULL)
{
  nx <- names(x)
  eta <- vector(mode = "list", length = length(nx))
  names(eta) <- nx
  lprior <- 0.0
  for(j in nx) {
    eta[[j]] <- 0.0
    for(sj in names(x[[j]]$smooth.construct)) {
      ## Prefix pattern matching coefficients of this term in 'par',
      ## see get.eta.par() for the naming scheme.
      xl <- if(sj != "model.matrix") {
        paste(j, "s", x[[j]]$smooth.construct[[sj]]$label, sep = ".")
      } else {
        paste(j, "p", strsplit(x[[j]]$smooth.construct[[sj]]$label, "+", fixed = TRUE)[[1]], sep = ".")
      }
      tpar <- par[grep2(xl, names(par), fixed = TRUE)]
      ## For terms without a by-variable, drop interaction-named entries
      ## that the prefix match may have picked up.
      if(x[[j]]$smooth.construct[[sj]]$by == "NA") {
        tpar <- tpar[!grepl(":", names(tpar), fixed = TRUE)]
      }
      bb <- get.par(tpar, "b")
      x[[j]]$smooth.construct[[sj]]$state$parameters <- set.par(x[[j]]$smooth.construct[[sj]]$state$parameters, bb, "b")
      x[[j]]$smooth.construct[[sj]]$state$fitted.values <- x[[j]]$smooth.construct[[sj]]$fit.fun(x[[j]]$smooth.construct[[sj]]$X, bb)
      eta[[j]] <- eta[[j]] + fitted(x[[j]]$smooth.construct[[sj]]$state)
    }
  }
  ll <- family$loglik(y, family$map2par(eta))
  lp <- as.numeric(ll + lprior)
  if(verbose) {
    cat(if(interactive()) "\r" else "\n")
    vtxt <- paste("logLik ", fmt(ll, width = 8, digits = digits),
      " logPost ", fmt(lp, width = 8, digits = digits),
      " iteration ", formatC(ienv$bamlss_log_posterior_iteration, width = 4), sep = "")
    cat(vtxt)
    if(.Platform$OS.type != "unix" & interactive()) flush.console()
    ## Bump the shared evaluation counter kept in 'ienv'.
    bamlss_log_posterior_iteration <- ienv$bamlss_log_posterior_iteration + 1
    assign("bamlss_log_posterior_iteration", bamlss_log_posterior_iteration, envir = ienv)
  }
  if(!is.null(scale))
    lp <- lp * scale
  return(lp)
}
## Gradient vector of the log-posterior.
## Companion to log_posterior(); passed as 'gr' to optim() in opt_optim().
## First pass rebuilds all predictors from 'par', second pass collects
## each term's gradient contribution via its $grad() function.
## Returns the concatenated gradient over all terms of all parameters.
grad_posterior <- function(par, x, y, family, ...)
{
  nx <- names(x)
  eta <- vector(mode = "list", length = length(nx))
  names(eta) <- nx
  grad <- NULL
  for(j in nx) {
    eta[[j]] <- 0
    for(sj in names(x[[j]]$smooth.construct)) {
      ## Same coefficient-name matching scheme as in log_posterior().
      xl <- if(sj != "model.matrix") {
        paste(j, "s", x[[j]]$smooth.construct[[sj]]$label, sep = ".")
      } else {
        paste(j, "p", strsplit(x[[j]]$smooth.construct[[sj]]$label, "+", fixed = TRUE)[[1]], sep = ".")
      }
      tpar <- par[grep2(xl, names(par), fixed = TRUE)]
      if((x[[j]]$smooth.construct[[sj]]$by == "NA") & (sj != "model.matrix")) {
        tpar <- tpar[!grepl(":", names(tpar), fixed = TRUE)]
      }
      x[[j]]$smooth.construct[[sj]]$state$parameters <- set.par(x[[j]]$smooth.construct[[sj]]$state$parameters, tpar, "b")
      x[[j]]$smooth.construct[[sj]]$state$fitted.values <- x[[j]]$smooth.construct[[sj]]$fit.fun(x[[j]]$smooth.construct[[sj]]$X,
        get.par(tpar, "b"))
      eta[[j]] <- eta[[j]] + fitted(x[[j]]$smooth.construct[[sj]]$state)
    }
  }
  ## Second pass: chain rule, score of the likelihood times each term's
  ## derivative w.r.t. its coefficients (term-level $grad()).
  for(j in nx) {
    score <- family$score[[j]](y, family$map2par(eta), id = j)
    for(sj in names(x[[j]]$smooth.construct)) {
      tgrad <- x[[j]]$smooth.construct[[sj]]$grad(score, c(x[[j]]$smooth.construct[[sj]]$state$parameters, x[[j]]$smooth.construct[[sj]]$fixed.hyper), full = FALSE)
      grad <- c(grad, tgrad)
    }
  }
  return(grad)
}
## Optimizer based on optim().
## Maximizes the log-posterior (log_posterior()) over all model coefficients
## with BFGS, optionally using the analytic gradient (grad_posterior()).
## Arguments:
##   x, y, family  design list, response, bamlss family.
##   start         optional starting values, set via set.starting.values().
##   gradient      use grad_posterior() if every term supplies a compatible
##                 $grad(score, parameters, full) function.
##   hessian       if TRUE, only compute the Hessian at 'par' via optimHess()
##                 and return it; no optimization is performed.
##   eps, maxit    passed to optim() as reltol / maxit.
## Returns a list with parameters, fitted values, logPost/logLik, nobs,
## hessian and a convergence flag (or the optimHess() result).
opt_optim <- function(x, y, family, start = NULL, verbose = TRUE, digits = 3,
  gradient = TRUE, hessian = FALSE, eps = .Machine$double.eps^0.5, maxit = 100, ...)
{
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("design construct names mismatch with family names!")
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, ...)
  if(!is.null(start))
    x <- set.starting.values(x, start)
  nobs <- nrow(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  ## Analytic gradients are only usable when every single term provides a
  ## $grad() function with the expected formals; otherwise fall back to
  ## optim()'s numerical differencing.
  for(i in names(x)) {
    for(j in seq_along(x[[i]]$smooth.construct)) {
      if(is.null(x[[i]]$smooth.construct[[j]]$grad)) {
        gradient <- FALSE
      } else {
        if(!all(c("score", "parameters", "full") %in% names(formals(x[[i]]$smooth.construct[[j]]$grad))))
          gradient <- FALSE
      }
    }
  }
  par <- get.all.par(x, list = FALSE, drop = TRUE)
  ## Counter environment for the per-evaluation progress line printed by
  ## log_posterior(); only created when tracing is requested.
  ienv <- NULL
  if(verbose) {
    ienv <- new.env()
    bamlss_log_posterior_iteration <- 1
    assign("bamlss_log_posterior_iteration", bamlss_log_posterior_iteration, envir = ienv)
  }
  if(!hessian) {
    ## fnscale = -1 turns optim()'s minimization into maximization.
    opt <- optim(par, fn = log_posterior,
      gr = if(!is.null(family$score) & gradient) grad_posterior else NULL,
      x = x, y = y, family = family, method = "BFGS", verbose = verbose,
      digits = digits, ienv = ienv, control = list(fnscale = -1, reltol = eps, maxit = maxit),
      hessian = TRUE)
    if(verbose) {
      cat("\n")
      rm(ienv)
    }
    eta <- get.eta.par(opt$par, x)
    return(list("parameters" = opt$par, "fitted.values" = eta,
      "logPost" = opt$value, "logLik" = family$loglik(y, family$map2par(eta)),
      "nobs" = nobs, "hessian" = opt$hessian, "converged" = opt$convergence < 1))
  } else {
    ## Hessian-only mode; if the family supplies a direct parameter density
    ## $p2d(), use it instead of the full log-posterior.
    fn <- if(is.null(family$p2d)) {
      log_posterior
    } else function(par, ...) { sum(family$p2d(par, log = TRUE), na.rm = TRUE) }
    opt <- optimHess(par, fn = fn,
      gr = if(!is.null(family$score) & gradient & is.null(family$p2d)) grad_posterior else NULL,
      x = x, y = y, family = family, verbose = verbose, digits = digits, ienv = ienv,
      control = list(fnscale = -1, reltol = eps, maxit = maxit))
    if(verbose) {
      cat("\n")
      rm(ienv)
    }
    return(opt)
  }
}
## Fast computation of weights and residuals when binning.
## Thin dispatcher around the compiled "xbin_fun" routine; the C code
## receives the bin index vector plus working weights/residuals and the
## ordering index. Called for its side effects only ('uind' is unused here).
xbin.fun <- function(ind, weights, e, xweights, xrres, oind, uind = NULL)
{
  ## Guard: "ff"-backed index vectors are not handled by the C routine.
  if(inherits(ind, "ff"))
    stop("ff support stops here!")
  .Call("xbin_fun",
    as.integer(ind), as.numeric(weights), as.numeric(e),
    as.numeric(xweights), as.numeric(xrres), as.integer(oind),
    PACKAGE = "bamlss")
  invisible(NULL)
}
## Center a numeric vector using the compiled "xcenter" routine
## (C implementation in the bamlss package).
xcenter <- function(x)
{
  .Call("xcenter", as.numeric(x), PACKAGE = "bamlss")
}
## Modified likelihood based boosting.
## Componentwise boosting optimizer: in each iteration every model term of
## every distribution parameter proposes a Newton-type update fitted to the
## current working response; the single best term (by log-likelihood gain or
## an information criterion) is updated. Stops at 'maxit'/'mstop' or when the
## information criterion starts to increase (force.stop).
## Key arguments:
##   nu             step length of each boosting update.
##   df             degrees of freedom assigned to each base learner.
##   stop.criterion "AIC"/"BIC" (or NULL for pure log-likelihood gain).
##   always         also update intercepts each iteration; "best" restricts
##                  this to the selected distribution parameter.
## Returns a list with the parameter paths, fitted values, nobs, a
## boost_summary object and the runtime.
## BUGFIX: the Hessian used for the working response was previously computed
## from family$score[[i]] (identical to the gradient), which degenerates the
## working response z = eta + grad/hess to eta + 1. It now uses
## family$hess[[i]] with is.weight = TRUE, consistent with the other
## bamlss engines.
opt_boostm <- boostm <- function(x, y, family, offset = NULL,
  nu = 0.1, df = 3, maxit = 400, mstop = NULL,
  verbose = TRUE, digits = 4, flush = TRUE,
  eps = .Machine$double.eps^0.25, plot = TRUE,
  initialize = TRUE, stop.criterion = "BIC",
  force.stop = !is.null(stop.criterion),
  do.optim = TRUE, always = FALSE, ...)
{
  ## FIXME: hard coded!
  offset <- weights <- NULL
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("parameter names mismatch with family names!")
  if(!is.null(mstop))
    maxit <- mstop
  if(!is.null(stop.criterion))
    stop.criterion <- toupper(stop.criterion)
  if(is.null(maxit))
    stop("please set either argument 'maxit' or 'mstop'!")
  ## 'always' may be logical or "best": always2 marks the "best" variant,
  ## i.e. intercept updates only for the selected distribution parameter.
  always2 <- FALSE
  if(!is.logical(always)) {
    if(is.character(always)) {
      if(!is.na(pmatch(always, "best"))) {
        always2 <- TRUE
        always <- TRUE
      } else {
        always <- FALSE
      }
    }
  }
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, df = NULL, ...)
  np <- length(nx)
  nobs <- nrow(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  ## Setup boosting structure, i.e, all parametric
  ## terms get an entry in $smooth.construct object.
  ## Intercepts are initalized.
  x <- boost_transform(x = x, y = y, df = df, family = family,
    maxit = maxit, eps = eps, initialize = initialize, offset = offset,
    weights = weights)
  for(i in nx) {
    for(j in names(x[[i]]$smooth.construct))
      x[[i]]$smooth.construct[[j]]$criterion <- x[[i]]$smooth.construct[[j]]$loglik
  }
  ## Create a list() that saves the states for
  ## all parameters and model terms.
  states <- make.state.list(x)
  ## Matrix of all parameters.
  parm <- make.par.list(x, iter = maxit)
  ## Term selector help vectors.
  select <- rep(NA, length = length(nx))
  names(select) <- nx
  loglik <- select
  ## Criterion used.
  sic <- if(is.null(stop.criterion)) "BIC" else stop.criterion
  ## Save criterion in list().
  crit <- ll.contrib <- make.state.list(x, type = 2)
  ## Extract actual predictor.
  eta <- get.eta(x)
  if(!is.null(offset)) {
    offset <- as.data.frame(offset)
    for(j in nx) {
      if(!is.null(offset[[j]]))
        eta[[j]] <- eta[[j]] + offset[[j]]
    }
  }
  ## Print stuff.
  ia <- if(flush) interactive() else FALSE
  ## Save edf and IC?
  edf <- save.ic <- rep(NA, maxit)
  ## Env for C.
  rho <- new.env()
  ## Start boosting.
  eps0 <- 1; iter <- if(initialize) 2 else 1
  save.ll <- NULL; stopped <- FALSE
  ll <- family$loglik(y, family$map2par(eta))
  medf <- get.edf(x, type = 2) + if(initialize) length(nx) else 0
  ic0 <- -2 * ll + medf * (if(tolower(sic) == "aic") 2 else log(nobs))
  ptm <- proc.time()
  while(iter <= maxit) {
    eta0 <- eta
    ## Actual parameters.
    peta <- family$map2par(eta)
    ## Cycle through all parameters and terms.
    for(i in nx) {
      ## Actual gradient.
      grad <- process.derivs(family$score[[i]](y, peta, id = i), is.weight = FALSE)
      ## Actual hessian (fixed: was family$score, which made hess == grad).
      hess <- process.derivs(family$hess[[i]](y, peta, id = i), is.weight = TRUE)
      ## Working response.
      z <- eta[[i]] + 1 / hess * grad
      for(j in names(x[[i]]$smooth.construct)) {
        ## With 'always', intercepts are handled separately below, so
        ## exclude them from the regular selection step.
        if(always) {
          if(j == "(Intercept)") {
            crit[[i]][j] <- -Inf
            next
          }
        }
        ## Get update.
        states[[i]][[j]] <- if(is.null(x[[i]]$smooth.construct[[j]][["boostm.fit"]])) {
          boostm_fit(x[[i]]$smooth.construct[[j]], grad, hess, z, nu, stop.criterion,
            family, y, eta, medf, id = i, do.optim = do.optim, ...)
        } else {
          x[[i]]$smooth.construct[[j]][["boostm.fit"]](x[[i]]$smooth.construct[[j]],
            grad = grad, hess = hess, z = z, nu = nu, criterion = stop.criterion,
            family = family, y = y, eta = eta, edf = medf, id = i,
            do.optim = do.optim, iteration = iter, ...)
        }
        ## Get contribution: evaluate the candidate update, then restore eta.
        eta[[i]] <- eta[[i]] + fitted(states[[i]][[j]]) - fitted(x[[i]]$smooth.construct[[j]]$state)
        tll <- family$loglik(y, family$map2par(eta))
        if(is.null(stop.criterion)) {
          crit[[i]][j] <- -1 * (ll - tll)
        } else {
          tedf <- medf - x[[i]]$smooth.construct[[j]]$state$edf + states[[i]][[j]]$edf
          ic1 <- -2 * tll + tedf * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
          crit[[i]][j] <- ic1
        }
        ll.contrib[[i]][j] <- tll - ll
        eta[[i]] <- eta0[[i]]
      }
      ## Which one is best?
      select[i] <- which.min(crit[[i]])
    }
    i <- which.min(sapply(crit, function(x) { min(x) }))
    ## Which term to update.
    take <- c(nx[i], names(crit[[i]])[select[i]])
    ## Update selected term.
    eta[[take[1]]] <- eta[[take[1]]] + fitted(states[[take[1]]][[take[2]]]) - fitted(x[[take[1]]]$smooth.construct[[take[2]]]$state)
    ## Save parameters (stored as increments relative to the previous state).
    parm[[take[1]]][[take[2]]][iter, ] <- get.par(states[[take[1]]][[take[2]]]$parameters, "b") - get.par(x[[take[1]]]$smooth.construct[[take[2]]]$state$parameters, "b")
    medf <- medf - x[[take[1]]]$smooth.construct[[take[2]]]$state$edf + states[[take[1]]][[take[2]]]$edf
    ## Write to x.
    x[[take[1]]]$smooth.construct[[take[2]]]$state <- states[[take[1]]][[take[2]]]
    x[[take[1]]]$smooth.construct[[take[2]]]$selected[iter] <- 1
    x[[take[1]]]$smooth.construct[[take[2]]]$loglik[iter] <- ll.contrib[[take[1]]][take[2]]
    x[[take[1]]]$smooth.construct[[take[2]]]$criterion[iter] <- -1 * crit[[take[1]]][take[2]]
    ## Intercept updating.
    if(always) {
      nxa <- if(always2) take[1] else nx
      for(ii in nxa) {
        if("(Intercept)" %in% names(x[[ii]]$smooth.construct)) {
          ## Actual gradient.
          grad <- process.derivs(family$score[[ii]](y, peta, id = ii), is.weight = FALSE)
          ## Actual hessian (fixed: was family$score, see header).
          hess <- process.derivs(family$hess[[ii]](y, peta, id = ii), is.weight = TRUE)
          ## Working response.
          z <- eta[[ii]] + 1 / hess * grad
          ## Get update.
          ## NOTE(review): 'id = i' passes the numeric index from the
          ## selection step above rather than the parameter name 'ii';
          ## positional indexing works but looks unintended — confirm.
          states[[ii]][["(Intercept)"]] <- boostm_fit(x[[ii]]$smooth.construct[["(Intercept)"]],
            grad, hess, z, nu, stop.criterion, family, y, eta, medf, id = i, do.optim = do.optim, ...)
          ll <- family$loglik(y, family$map2par(eta))
          ## Update predictor.
          eta[[ii]] <- eta[[ii]] + fitted(states[[ii]][["(Intercept)"]]) - fitted(x[[ii]]$smooth.construct[["(Intercept)"]]$state)
          ## Save parameters.
          parm[[ii]][["(Intercept)"]][iter, ] <- get.par(states[[ii]][["(Intercept)"]]$parameters, "b") - get.par(x[[ii]]$smooth.construct[["(Intercept)"]]$state$parameters, "b")
          medf <- medf - x[[ii]]$smooth.construct[["(Intercept)"]]$state$edf + states[[ii]][["(Intercept)"]]$edf
          tll <- family$loglik(y, family$map2par(eta))
          ll.contrib[[ii]]["(Intercept)"] <- tll - ll
          ## Write to x.
          x[[ii]]$smooth.construct[["(Intercept)"]]$state <- states[[ii]][["(Intercept)"]]
          x[[ii]]$smooth.construct[["(Intercept)"]]$selected[iter] <- 1
          x[[ii]]$smooth.construct[["(Intercept)"]]$loglik[iter] <- ll.contrib[[ii]]["(Intercept)"]
        }
      }
    }
    edf[iter] <- medf
    ## Change: mean relative change of all predictors, used for the trace.
    eps0 <- do.call("cbind", eta)
    eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
    if(is.na(eps0) | !is.finite(eps0)) eps0 <- eps + 1
    ll <- family$loglik(y, family$map2par(eta))
    ic0 <- -2 * ll + medf * (if(tolower(sic) == "aic") 2 else log(nobs))
    save.ll <- c(save.ll, ll)
    save.ic[iter] <- ic0
    qsel <- get.qsel(x, iter)
    if(verbose) {
      cat(if(ia) "\r" else "\n")
      vtxt <- paste(
        paste(sic, " ", fmt(save.ic[iter], width = 8, digits = digits), " ", sep = ""),
        "logLik ", fmt(ll, width = 8, digits = digits),
        " edf ", fmt(edf[iter], width = 4, digits = digits), " ",
        " eps ", fmt(eps0, width = 6, digits = digits + 2),
        " iteration ", formatC(iter, width = nchar(maxit)),
        " qsel ", qsel, sep = "")
      cat(vtxt)
      if(.Platform$OS.type != "unix" & ia) flush.console()
    }
    ## Early stopping once the information criterion increases.
    if(!is.null(stop.criterion)) {
      if(iter > 2) {
        if(!is.na(save.ic[iter - 1]) & force.stop) {
          if(save.ic[iter - 1] < save.ic[iter]) {
            stopped <- TRUE
            break
          }
        }
      }
    }
    iter <- iter + 1
  }
  elapsed <- c(proc.time() - ptm)[3]
  if(verbose) {
    cat("\n")
    et <- if(elapsed > 60) {
      paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
    } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
    cat("\n elapsed time: ", et, "\n", sep = "")
  }
  itr <- if(!stopped) maxit else (iter - 1)
  bsum <- make.boost_summary(x, itr, save.ll, edf, FALSE, nobs)
  bsum$criterion <- list(
    "bic" = -2 * save.ll[1:itr] + edf[1:itr] * log(nobs),
    "aic" = -2 * save.ll[1:itr] + edf[1:itr] * 2,
    "edf" = edf[1:itr]
  )
  if(plot)
    plot.boost_summary(bsum)
  return(list("parameters" = parm2mat(parm, itr),
    "fitted.values" = eta, "nobs" = nobs, "boost_summary" = bsum,
    "runtime" = elapsed))
}
## Gradient boosting.
## Componentwise gradient boosting optimizer for bamlss models: each
## iteration fits every base learner to the current score (gradient) of
## its distribution parameter, selects the best one (log-likelihood gain,
## RSS, or an information criterion depending on 'stop.criterion' /
## 'select.type' / 'selectfun') and takes a step of length 'nu'.
## Notable options visible in this body:
##   nu.adapt     line-search over the step length via optimize().
##   hatmatrix    track hat matrices for exact edf-based AIC/BIC stopping.
##   reverse.edf / approx.edf  cheaper edf approximations (disable hatmatrix).
##   always       "best"/"yes"/TRUE: extra intercept updates per iteration.
##   maxq         stop once this many base learners have been selected.
##   ... may carry boost.light, start, crps/CRPS, selectfun, selectmodel,
##   nthreads, ret.x.
## Returns a list with the parameter path matrix, fitted values, nobs,
## a boost_summary object and the runtime.
opt_boost <- boost <- function(x, y, family, weights = NULL, offset = NULL,
  nu = 0.1, nu.adapt = TRUE, df = 4, maxit = 400, mstop = NULL,
  maxq = NULL, qsel.splitfactor = FALSE,
  verbose = TRUE, digits = 4, flush = TRUE,
  eps = .Machine$double.eps^0.25, nback = NULL, plot = TRUE,
  initialize = TRUE, stop.criterion = NULL, select.type = 1, force.stop = TRUE,
  hatmatrix = !is.null(stop.criterion), reverse.edf = FALSE, approx.edf = FALSE,
  always = FALSE, ...)
{
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("parameter names mismatch with family names!")
  if(reverse.edf | approx.edf)
    hatmatrix <- FALSE
  if(!is.null(mstop))
    maxit <- mstop
  ## "light" mode stores only accumulated coefficients (one row) instead of
  ## the full per-iteration path.
  light <- list(...)$boost.light
  if(is.null(light))
    light <- FALSE
  if(!is.null(nback)) {
    if(is.null(maxit))
      maxit <- 10000
  }
  ## One step length per distribution parameter.
  nu <- rep(nu, length.out = length(nx))
  names(nu) <- nx
  ## 'always' may be logical, "best" (always2) or "yes" (always3).
  always2 <- always3 <- FALSE
  if(!is.logical(always)) {
    if(is.character(always)) {
      if(!is.na(pmatch(always, "best"))) {
        always2 <- TRUE
        always <- TRUE
      } else {
        if(!is.na(pmatch(always, "yes"))) {
          always3 <- TRUE
          always <- TRUE
        } else {
          always <- FALSE
        }
      }
    }
  }
  if(is.null(maxit))
    stop("please set either argument 'maxit' or 'mstop'!")
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, df = NULL, nodf = TRUE, ...)
  start <- list(...)$start
  if(!is.null(start))
    x <- set.starting.values(x, start)
  np <- length(nx)
  nobs <- nrow(y)
  ## Select by CRPS instead of log-likelihood when requested and supported.
  CRPS <- !is.null(list(...)$crps) | !is.null(list(...)$CRPS)
  yname <- names(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  if(!is.null(offset))
    initialize <- FALSE
  ## Setup boosting structure, i.e, all parametric
  ## terms get an entry in $smooth.construct object.
  ## Intercepts are initalized.
  x <- boost_transform(x = x, y = y, df = df, family = family,
    maxit = maxit, eps = eps, initialize = initialize, offset = offset,
    weights = weights, always3 = always3, ...)
  ## Debug hook: return the transformed design without fitting.
  if(!is.null(list(...)$ret.x)) {
    if(list(...)$ret.x)
      return(x)
  }
  ## Create a list() that saves the states for
  ## all parameters and model terms.
  states <- make.state.list(x)
  ## Matrix of all parameters.
  parm <- make.par.list(x, iter = if(light) 1L else maxit)
  ## Term selector help vectors.
  select <- rep(NA, length = length(nx))
  names(select) <- nx
  loglik <- select
  ## Save rss in list().
  rss <- make.state.list(x, type = 2)
  ## Extract actual predictor.
  eta <- get.eta(x)
  if(!is.null(offset)) {
    offset <- as.data.frame(offset)
    for(j in nx) {
      if(!is.null(offset[[j]]))
        eta[[j]] <- eta[[j]] + offset[[j]]
    }
  }
  ## If the same weights apply to all parameters, keep them as a plain
  ## vector W for weighted log-likelihood evaluation.
  W <- NULL
  if(!is.null(weights)) {
    if(attr(weights, "identical"))
      W <- as.numeric(weights[, 1])
  }
  ## Print stuff.
  ia <- if(flush) interactive() else FALSE
  ## Hat matrix?
  HatMat <- list()
  edf <- Imat <- save.ic <- NULL
  if(hatmatrix) {
    for(i in nx)
      HatMat[[i]] <- diag(length(eta[[1]]))
    edf <- rep(0, maxit)
    if(!is.null(stop.criterion))
      save.ic <- rep(NA, maxit)
    Imat <- diag(nobs)
  }
  if(reverse.edf | approx.edf) {
    edf <- rep(0, maxit)
    if(!is.null(stop.criterion))
      save.ic <- rep(NA, maxit)
  }
  ## User supplied selection function (see selfun()).
  selectfun <- list(...)$selectfun
  selectmodel <- list(...)$selectmodel
  nthreads <- list(...)$nthreads
  if(is.null(selectmodel))
    selectmodel <- TRUE
  if(!is.null(selectfun))
    save.ic <- rep(NA, maxit)
  if(!is.null(selectfun))
    stop.criterion <- "userIC"
  ## Env for C.
  rho <- new.env()
  if(is.null(maxq))
    maxq <- Inf
  qsel <- 0
  ## Start boosting.
  eps0 <- 1; iter <- if(initialize) 2 else 1
  save.ll <- NULL
  ## NOTE(review): the weighted branch here omits log = TRUE while all later
  ## weighted evaluations use family$d(..., log = TRUE); confirm intended.
  ll <- if(is.null(W)) {
    family$loglik(y, family$map2par(eta))
  } else {
    sum(family$d(y, family$map2par(eta)) * W)
  }
  redf <- if(initialize) length(nx) else 0
  loglik <- loglik2 <- NULL
  iter_ll2 <- 0
  nu0 <- nu
  ptm <- proc.time()
  while(iter <= maxit & qsel < maxq) {
    if(iter > 2)
      loglik2 <- loglik
    eta0 <- eta
    ## Cycle through all parameters
    for(i in nx) {
      peta <- family$map2par(eta)
      ## Actual gradient.
      grad <- process.derivs(family$score[[i]](y, peta, id = i), is.weight = FALSE)
      if(length(grad) != nobs)
        stop("something wrong in processing the family $score() function! More elements in return value of $score() than the response!")
      ## Fit to gradient.
      for(j in names(x[[i]]$smooth.construct)) {
        ## With 'always', intercepts are updated separately below.
        if(always) {
          if(j == "(Intercept)") {
            rss[[i]][j] <- Inf
            next
          }
        }
        ## Get updated parameters.
        ## NOTE(review): both branches of this conditional yield nu[i] —
        ## dead code, possibly a remnant of a special nnet step length.
        nu2 <- if(inherits(x[[i]]$smooth.construct[[j]], "nnet.boost")) nu[i] else nu[i]
        states[[i]][[j]] <- if(is.null(x[[i]]$smooth.construct[[j]][["boost.fit"]])) {
          if(hatmatrix) {
            boost_fit(x[[i]]$smooth.construct[[j]], grad, nu2,
              hatmatrix = hatmatrix, weights = if(!is.null(weights)) weights[, i] else NULL,
              nthreads = nthreads)
          } else {
            try(.Call("boost_fit", x[[i]]$smooth.construct[[j]], grad, nu2,
              if(!is.null(weights)) as.numeric(weights[, i]) else numeric(0), rho), silent = TRUE)
          }
        } else {
          x[[i]]$smooth.construct[[j]][["boost.fit"]](x = x[[i]]$smooth.construct[[j]],
            y = grad, nu = nu2, hatmatrix = hatmatrix,
            weights = if(!is.null(weights)) weights[, i] else NULL,
            rho = rho, nthreads = nthreads, always3 = always3)
        }
        ## Get rss (or an information criterion, depending on settings).
        if(is.null(selectfun)) {
          if(is.null(stop.criterion)) {
            rss[[i]][j] <- states[[i]][[j]]$rss
          } else {
            if(select.type == 1) {
              rss[[i]][j] <- states[[i]][[j]]$rss
            } else {
              ## select.type == 2: evaluate candidate IC with the updated
              ## predictor and an edf estimate.
              teta <- eta
              teta[[i]] <- teta[[i]] + fitted(states[[i]][[j]])
              if(is.null(W))
                tll <- family$loglik(y, family$map2par(teta))
              else
                tll <- sum(family$d(y, family$map2par(teta), log = TRUE) * W)
              if(!light) {
                if(approx.edf) {
                  ## Approximate edf: +1 per newly active linear/nnet term,
                  ## active-set size changes for lasso/nnet smooths,
                  ## nu-scaled initial edf otherwise.
                  tredf <- redf
                  if(!is.null(x[[i]]$smooth.construct[[j]]$is.model.matrix) | inherits(x[[i]]$smooth.construct[[j]], "nnet.boost")) {
                    if(x[[i]]$smooth.construct[[j]]$state$init.edf < 1)
                      tredf <- tredf + 1
                  } else {
                    if(inherits(x[[i]]$smooth.construct[[j]], "lasso.smooth") | inherits(x[[i]]$smooth.construct[[j]], "nnet.smooth")) {
                      if(iter < 2) {
                        aset <- if(x[[i]]$smooth.construct[[j]]$fuse) {
                          sum(abs(unique_fuse(get.par(states[[i]][[j]]$parameters, "b"))) > 1e-10)
                        } else {
                          sum(abs(get.par(states[[i]][[j]]$parameters, "b")) > 1e-10)
                        }
                        tredf <- tredf + aset
                      } else {
                        aset0 <- apply(parm[[i]][[j]][1:(iter - 1L), , drop = FALSE], 2, sum)
                        aset1 <- apply(rbind(parm[[i]][[j]][1:(iter - 1L), , drop = FALSE],
                          get.par(states[[i]][[j]]$parameters, "b")), 2, sum)
                        if(x[[i]]$smooth.construct[[j]]$fuse) {
                          aset0 <- sum(abs(unique_fuse(aset0)) > 1e-10)
                          aset1 <- sum(abs(unique_fuse(aset1)) > 1e-10)
                        } else {
                          aset0 <- sum(abs(aset0) > 1e-10)
                          aset1 <- sum(abs(aset1) > 1e-10)
                        }
                        aset <- aset1 - aset0
                        tredf <- tredf + aset
                      }
                    } else {
                      tredf <- tredf + nu[i] * x[[i]]$smooth.construct[[j]]$state$init.edf
                    }
                  }
                  rss[[i]][j] <- -2 * tll + tredf * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
                } else {
                  if(reverse.edf) {
                    states[[i]][[j]]$redf <- reverse_edf(x = x[[i]]$smooth.construct[[j]], bn = get.par(states[[i]][[j]]$parameters, "b"),
                      bmat = parm[[i]][[j]][1:iter, , drop = FALSE], nobs, grad, teta[[i]])
                    tredf <- redf + states[[i]][[j]]$redf$edf
                    rss[[i]][j] <- -2 * tll + tredf * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
                  } else {
                    ## Exact edf via hat-matrix updates.
                    ## tedf0 <- sum(diag(Imat - HatMat[[i]] %*% (Imat - states[[i]][[j]]$hat)))
                    tedf <- hatmat_trace(HatMat[[i]], states[[i]][[j]]$hat)
                    if(length(nxr <- nx[nx != i])) {
                      for(ii in nxr)
                        tedf <- tedf + hatmat_sumdiag(HatMat[[i]])
                    }
                    rss[[i]][j] <- -2 * tll + tedf * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
                  }
                }
              }
            }
          }
        } else {
          rss[[i]][j] <- selfun(iter = iter, i = i, j = j, state = states[[i]][[j]],
            parm = parm, x = x, family = family, sfun = selectfun, yname = yname, weights = weights,
            selectmodel = selectmodel)
        }
      }
      ## Which one is best?
      ## If the only learner of this parameter is the intercept (handled by
      ## the 'always' branch), skip selection for it entirely.
      if(always & (length(rss[[i]]) < 2)) {
        if(names(rss[[i]]) == "(Intercept)")
          next
      }
      select[i] <- which.min(rss[[i]])
      ## Optional line search over the step length in (0, 1].
      if(nu.adapt) {
        tbeta <- get.par(states[[i]][[select[i]]]$parameters, "b") * 1 / nu[i]
        fv <- function(v) {
          beta <- v * tbeta
          eta[[i]] <- eta[[i]] + x[[i]]$smooth.construct[[select[i]]]$fit.fun(x[[i]]$smooth.construct[[select[i]]]$X, beta)
          family$loglik(y, family$map2par(eta))
        }
        v <- optimize(fv, interval = c(.Machine$double.eps^0.5, 1), maximum = TRUE)$maximum
        beta <- nu[i] * v * tbeta
        states[[i]][[select[i]]]$parameters <- set.par(states[[i]][[select[i]]]$parameters, beta, "b")
        states[[i]][[select[i]]]$fitted.values <- x[[i]]$smooth.construct[[select[i]]]$fit.fun(x[[i]]$smooth.construct[[select[i]]]$X, beta)
      }
      ## Compute likelihood contribution.
      eta[[i]] <- eta[[i]] + fitted(states[[i]][[select[i]]])
      llf <- if(is.null(W)) {
        if(CRPS & !is.null(family$crps)) {
          -1 * family$crps(y, family$map2par(eta))
        } else {
          family$loglik(y, family$map2par(eta))
        }
      } else {
        sum(family$d(y, family$map2par(eta), log = TRUE) * W)
      }
      loglik[i] <- -1 * (ll - llf)
      eta[[i]] <- eta0[[i]]
    }
    ## Pick the distribution parameter whose best learner wins.
    if(is.null(stop.criterion) & is.null(selectfun)) {
      i <- which.max(loglik)
    } else {
      i <- if(select.type == 1) {
        which.max(loglik)
      } else {
        which.min(sapply(rss, function(x) { min(x) }))
      }
    }
    ## Which term to update.
    take <- c(nx[i], names(rss[[i]])[select[i]])
    ## Update selected base learner.
    eta[[take[1]]] <- eta[[take[1]]] + states[[take[1]]][[take[2]]]$fitted.values
    ## Write to x.
    x[[take[1]]]$smooth.construct[[take[2]]]$state <- increase(x[[take[1]]]$smooth.construct[[take[2]]]$state,
      states[[take[1]]][[take[2]]])
  } else {
    x[[take[1]]]$smooth.construct[[take[2]]]$state <- x[[take[1]]]$smooth.construct[[take[2]]][["increase"]](x[[take[1]]]$smooth.construct[[take[2]]]$state,
      states[[take[1]]][[take[2]]])
  }
    x[[take[1]]]$smooth.construct[[take[2]]]$selected[iter] <- 1
    x[[take[1]]]$smooth.construct[[take[2]]]$loglik[iter] <- loglik[i]
    ## Save parameters. With always3, the first coefficient is the
    ## intercept carried inside the learner and is split off.
    if(always3) {
      tpar <- get.par(states[[take[1]]][[take[2]]]$parameters, "b")
      x[[take[1]]]$smooth.construct[["(Intercept)"]]$selected[iter] <- 1
      ##parm[[take[1]]][["(Intercept)"]][iter, ] <- tpar[1]
      if(light) {
        parm[[take[1]]][[take[2]]] <- parm[[take[1]]][[take[2]]] + tpar[-1]
      } else {
        parm[[take[1]]][[take[2]]][iter, ] <- tpar[-1]
      }
    } else {
      if(light) {
        parm[[take[1]]][[take[2]]] <- parm[[take[1]]][[take[2]]] + get.par(states[[take[1]]][[take[2]]]$parameters, "b")
      } else {
        parm[[take[1]]][[take[2]]][iter, ] <- get.par(states[[take[1]]][[take[2]]]$parameters, "b")
      }
    }
    ## Intercept updating.
    if(always) {
      ll <- if(is.null(W)) {
        family$loglik(y, family$map2par(eta))
      } else {
        sum(family$d(y, family$map2par(eta), log = TRUE) * W)
      }
      nxa <- if(always2) take[1] else nx
      for(ii in nxa) {
        if("(Intercept)" %in% names(x[[ii]]$smooth.construct)) {
          if(always3) {
            if(ii == take[1])
              next
          }
          peta <- family$map2par(eta)
          ## Actual gradient.
          grad <- process.derivs(family$score[[ii]](y, peta, id = ii), is.weight = FALSE)
          ## Update.
          states[[ii]][["(Intercept)"]] <- if(hatmatrix) {
            boost_fit(x[[ii]]$smooth.construct[["(Intercept)"]], grad, nu[ii],
              hatmatrix = hatmatrix, weights = if(!is.null(weights)) weights[, ii] else NULL)
          } else {
            .Call("boost_fit", x[[ii]]$smooth.construct[["(Intercept)"]], grad, nu[ii],
              if(!is.null(weights)) as.numeric(weights[, ii]) else numeric(0), rho, PACKAGE = "bamlss")
          }
          eta[[ii]] <- eta[[ii]] + fitted(states[[ii]][["(Intercept)"]])
          x[[ii]]$smooth.construct[["(Intercept)"]]$state <- increase(x[[ii]]$smooth.construct[["(Intercept)"]]$state,
            states[[ii]][["(Intercept)"]])
          x[[ii]]$smooth.construct[["(Intercept)"]]$selected[iter] <- 1
          x[[ii]]$smooth.construct[["(Intercept)"]]$loglik[iter] <- -1 * (ll - family$loglik(y, family$map2par(eta)))
          if(light) {
            parm[[ii]][["(Intercept)"]] <- parm[[ii]][["(Intercept)"]] + get.par(states[[ii]][["(Intercept)"]]$parameters, "b")
          } else {
            parm[[ii]][["(Intercept)"]][iter, ] <- get.par(states[[ii]][["(Intercept)"]]$parameters, "b")
          }
          if(approx.edf) {
            if(x[[ii]]$smooth.construct[["(Intercept)"]]$state$init.edf < 1) {
              redf <- redf + 1
              x[[ii]]$smooth.construct[["(Intercept)"]]$state$init.edf <- 1
            }
          }
        }
      }
    }
    ## Mean relative change of the predictors (progress measure only).
    eps0 <- do.call("cbind", eta)
    eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
    if(is.na(eps0) | !is.finite(eps0)) eps0 <- eps + 1
    peta <- family$map2par(eta)
    ll <- if(is.null(W)) {
      if(CRPS & !is.null(family$crps)) {
        -1 * family$crps(y, peta)
      } else {
        family$loglik(y, peta)
      }
    } else {
      sum(family$d(y, peta, log = TRUE) * W)
    }
    save.ll <- c(save.ll, ll)
    ## Exact edf bookkeeping and IC-based early stopping.
    if(hatmatrix) {
      HatMat[[take[1]]] <- HatMat[[take[1]]] %*% (Imat - x[[take[1]]]$smooth.construct[[take[2]]]$state$hat)
      for(i in nx)
        edf[iter] <- edf[iter] + hatmat_sumdiag(HatMat[[i]])
      if(!is.null(stop.criterion)) {
        save.ic[iter] <- -2 * ll + edf[iter] * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
        if(iter > (if(initialize) 2 else 1)) {
          if(!is.na(save.ic[iter - 1]) & force.stop) {
            if(save.ic[iter - 1] < save.ic[iter]) {
              nback <- TRUE
              break
            }
          }
        }
      }
    }
    ## Approximate/reverse edf bookkeeping for the selected learner.
    if(reverse.edf | approx.edf) {
      if(approx.edf) {
        if(!is.null(x[[take[1]]]$smooth.construct[[take[2]]]$is.model.matrix) | inherits(x[[take[1]]]$smooth.construct[[take[2]]], "nnet.boost")) {
          if(x[[take[1]]]$smooth.construct[[take[2]]]$state$init.edf < 1) {
            redf <- redf + 1
            x[[take[1]]]$smooth.construct[[take[2]]]$state$init.edf <- 1
          }
        } else {
          if(inherits(x[[take[1]]]$smooth.construct[[take[2]]], "lasso.smooth") | inherits(x[[take[1]]]$smooth.construct[[take[2]]], "nnet.smooth")) {
            if(iter < 2) {
              aset <- if(x[[take[1]]]$smooth.construct[[take[2]]]$fuse) {
                sum(abs(unique_fuse(parm[[take[1]]][[take[2]]][if(light) 1L else iter, ])) > 1e-10)
              } else {
                sum(abs(parm[[take[1]]][[take[2]]][if(light) 1L else iter, ]) > 1e-10)
              }
              redf <- redf + aset
            } else {
              aset0 <- apply(parm[[take[1]]][[take[2]]][if(light) 1L else 1:(iter - 1L), , drop = FALSE], 2, sum)
              aset1 <- apply(parm[[take[1]]][[take[2]]][if(light) 1L else 1:iter, , drop = FALSE], 2, sum)
              if(x[[take[1]]]$smooth.construct[[take[2]]]$fuse) {
                aset0 <- sum(abs(unique_fuse(aset0)) > 1e-10)
                aset1 <- sum(abs(unique_fuse(aset1)) > 1e-10)
              } else {
                aset0 <- sum(abs(aset0) > 1e-10)
                aset1 <- sum(abs(aset1) > 1e-10)
              }
              aset <- aset1 - aset0
              redf <- redf + aset
            }
          } else {
            redf <- redf + nu[take[1]] * x[[take[1]]]$smooth.construct[[take[2]]]$state$init.edf
          }
        }
      } else {
        if(is.null(stop.criterion))
          stop("reverse.edf not implemented!")
        redf <- redf + states[[take[1]]][[take[2]]]$redf$edf
      }
      edf[iter] <- redf
      if(!is.null(stop.criterion)) {
        save.ic[iter] <- -2 * ll + edf[iter] * (if(tolower(stop.criterion) == "aic") 2 else log(nobs))
        if(iter > ((if(initialize) 2 else 1) * 100)) {
          if(!is.na(save.ic[iter - 1]) & force.stop) {
            if(save.ic[iter - 1] < save.ic[iter]) {
              nback <- TRUE
              break
            }
          }
        }
      }
    }
    ## User-supplied criterion: stop when it increases.
    if(!is.null(selectfun)) {
      save.ic[iter] <- min(unlist(rss))
      if(force.stop & (iter > (if(initialize) 2 else 1))) {
        if(save.ic[iter - 1] < save.ic[iter]) {
          nback <- TRUE
          break
        }
      }
    }
    ## Compute number of selected base learners.
    qsel <- get.qsel(x, if(light) 1L else iter, qsel.splitfactor = qsel.splitfactor)
    if(verbose) {
      cat(if(ia) "\r" else "\n")
      vtxt <- paste(
        if(!is.null(stop.criterion)) paste(stop.criterion, " ", fmt(save.ic[iter], width = 8, digits = digits), " ", sep = "") else NULL,
        if(CRPS & !is.null(family$crps)) "CRPS" else "logLik ", fmt(ll, width = 8, digits = digits),
        if(hatmatrix | reverse.edf | approx.edf) paste(" edf ", fmt(edf[iter], width = 4, digits = digits), " ", sep = "") else NULL,
        " eps ", fmt(eps0, width = 6, digits = digits + 2),
        " iteration ", formatC(iter, width = nchar(maxit)),
        " qsel ", qsel, sep = "")
      cat(vtxt)
      if(.Platform$OS.type != "unix" & ia) flush.console()
    }
    ## Stall detection: if no parameter improved the likelihood, count the
    ## stalled iterations and eventually stop.
    if((iter > 2) & all(loglik2 == loglik)) {
      warning("no more improvements in the log-likelihood, setting nu = nu * 0.9!")
      ## nu[take[1]] <- nu[take[1]] * 0.9
      iter_ll2 <- iter_ll2 + 1
    }
    if(all(nu < .Machine$double.eps^0.5) & (iter_ll2 > 10)) {
      nback <- TRUE
      warning(paste("no more improvements after", iter_ll2, "iterations in the log-likelihood, stopped!"))
      break
    }
    iter <- iter + 1
    ## nback: stop once the last 'nback' log-likelihoods barely change.
    if(!is.null(nback)) {
      if(iter > nback) {
        dll <- abs(diff(tail(save.ll, nback)))
        if(any(!is.finite(dll)) | any(is.na(dll)))
          break
        if(all(dll < eps))
          break
      }
    }
  }
  elapsed <- c(proc.time() - ptm)[3]
  if(verbose) {
    cat("\n")
    et <- if(elapsed > 60) {
      paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
    } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
    cat("\n elapsed time: ", et, "\n", sep = "")
  }
  bsum <- make.boost_summary(x, if(is.null(nback)) maxit else (iter - 1), save.ll, edf,
    (hatmatrix | approx.edf | reverse.edf), length(eta[[1]]))
  if(plot)
    plot.boost_summary(bsum)
  if(!is.null(selectfun)) {
    if(is.null(bsum$criterion))
      bsum$criterion <- list()
    bsum$criterion$userIC <- save.ic[1:(if(is.null(nback)) maxit else (iter - 1))]
  }
  return(list("parameters" = parm2mat(parm, if(light) { 1L} else { if(is.null(nback)) maxit else (iter - 1) }),
    "fitted.values" = eta, "nobs" = nobs, "boost_summary" = bsum, "runtime" = elapsed))
}
## "Reverse" effective degrees of freedom for a boosted term.
## Given the accumulated coefficients (rows of 'bmat') plus a candidate
## increment 'bn', find the ridge/smoothing variance tau2 whose penalized
## fit best reproduces the current boosted fit, then report the edf of
## that penalized fit relative to the term's stored state edf.
## Arguments:
##   x     smooth.construct entry (provides $X, $S, $fixed, $state, ...).
##   bn    candidate coefficient increment of the current iteration.
##   bmat  matrix of per-iteration coefficient increments so far.
##   nobs, y, eta  passed by opt_boost (y is the current gradient here).
##   approx  currently unused in this body — TODO confirm before removal.
## Returns list(edf = edf difference, tau2, fedf = full edf).
reverse_edf <- function(x, bn, bmat, nobs, y, eta, approx = TRUE)
{
  ## Total coefficients accumulated over all boosting iterations.
  beta <- bn + apply(bmat, 2, sum)
  fit <- x$X %*% beta
  y <- y + fit
  tX <- t(x$X)
  XX <- crossprod(x$X)
  ## Squared distance between the boosted fit and the penalized LS fit
  ## implied by tau2; minimized over tau2 below.
  objfun <- function(tau2) {
    if(!x$fixed) {
      S <- 0
      for(j in seq_along(x$S))
        S <- S + 1/tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](beta) else x$S[[j]]
    } else {
      S <- 1/tau2 * diag(1, ncol(x$X))
    }
    beta2 <- matrix_inv(XX + S, index = x$sparse.setup) %*% tX %*% y
    mean((fit - x$X %*% beta2)^2)
  }
  tau2 <- tau2.optim(objfun, start = x$boost.tau2, maxit = 100)
  ## Rebuild the penalty at the optimal tau2 and read off the edf
  ## as trace(X'X (X'X + S)^-1).
  if(!x$fixed) {
    S <- 0
    for(j in seq_along(x$S))
      S <- S + 1/tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](beta) else x$S[[j]]
  } else {
    I <- diag(1, ncol(x$X))
    S <- 1/tau2 * I
  }
  P <- matrix_inv(XX + S, index = x$sparse.setup)
  edf <- sum_diag(XX %*% P)
  return(list("edf" = edf - x$state$edf, "tau2" = tau2, "fedf" = edf))
}
## Collapse numerically fused coefficients: values that agree up to
## 'digits' decimal places are treated as one level.
unique_fuse <- function(x, digits = 4) {
  rounded <- round(x, digits = digits)
  unique(rounded)
}
## Disabled developer example: never executed (guarded by if(FALSE)).
## Kept as a manual smoke test comparing opt_boost with reverse edf
## on/off against a backfitted bamlss fit on simulated sine data.
if(FALSE) {
  n <- 1000
  d <- data.frame("x" = runif(n, -3, 3))
  d$y <- 1.2 + sin(d$x) + rnorm(n, sd = 0.3)
  plot(d)
  b1 <- bamlss(y ~ s(x,k=50), data = d, sampler = FALSE, optimizer = boost, stop.criterion = "AIC", reverse = TRUE)
  b1 <- bamlss(y ~ s(x,k=50), data = d, sampler = FALSE, optimizer = boost, stop.criterion = "AIC", reverse = FALSE)
  d$p1 <- predict(b1, model = "mu")
  d$p2 <- predict(b1, model = "mu")
  plot2d(p1 ~ x, data = d)
  plot2d(p2 ~ x, data = d, add = TRUE, col.lines = 4)
  plot2d(I(1.2 + sin(x)) ~ x, data = d, add = TRUE, col.lines = 2)
  b1 <- bamlss(y ~ s(x,k=50), data = d, sampler = FALSE)
}
## Wrapper used by opt_boost() for user supplied selection functions
## ('selectfun'): records the candidate coefficients for term 'j' of
## parameter 'i' and hands either the flattened parameter matrix or a
## minimal "bamlss" object to the user's criterion function 'sfun'.
selfun <- function(iter, i, j, state, parm, x, family, sfun, yname, weights, selectmodel = TRUE)
{
  if(is.null(selectmodel))
    selectmodel <- FALSE
  ## Insert the candidate coefficients and flatten the parameter list
  ## up to the current iteration.
  parm[[i]][[j]][iter, ] <- get.par(state$parameters, "b")
  parm <- parm2mat(parm, mstop = iter, fixed = iter)
  ## In "model" mode the criterion receives only the parameter matrix.
  if(selectmodel)
    return(sfun(parm))
  ## Otherwise assemble a lightweight "bamlss" object around the design.
  formula <- list()
  for(id in names(x))
    formula[[id]] <- x[[id]][c("formula", "fake.formula")]
  class(formula) <- c("bamlss.formula", "list")
  environment(formula) <- environment(formula[[1]]$formula)
  attr(formula, "response.name") <- yname
  m <- list("formula" = formula, "x" = x, "family" = family, "parameters" = parm)
  class(m) <- c("bamlss", "bamlss.frame", "list")
  sfun(m)
}
## Build a boosted model on 'train' whose design matrices are evaluated
## on 'test': sets up a bamlss.frame from the training data, swaps every
## term's design matrix X for its prediction matrix on the test data,
## then runs opt_boost(..., ret.x = TRUE) and wraps the result as a
## "boost_frame" object (see predict.boost_frame()).
boost_frame <- function(formula, train, test, family = "gaussian", ...)
{
  if(!all(names(test) == names(train)))
    stop("test and training data must contain the same variables!")
  bf <- bamlss.frame(formula, data = train, family = family, ...)
  for(i in names(bf$x)) {
    for(j in seq_along(bf$x[[i]]$smooth.construct)) {
      if(!inherits(bf$x[[i]]$smooth.construct[[j]], "no.mgcv") & !inherits(bf$x[[i]]$smooth.construct[[j]], "special")) {
        if(!is.null(bf$x[[i]]$smooth.construct[[j]]$is.refund)) {
          ## refund-style terms: re-evaluate the stored constructor call
          ## on the test data and rebuild X via mgcv::smoothCon().
          rfcall <- bf$x[[i]]$smooth.construct[[j]]$refund.call
          tfm <- eval(parse(text = rfcall), envir = test)
          tfme <- eval(tfm$call, envir = tfm$data)
          bf$x[[i]]$smooth.construct[[j]]$X <- smoothCon(tfme, data = tfm$data, n = nrow(tfm$data[[1L]]),
            knots = NULL, absorb.cons = TRUE)[[1]]$X
          rm(tfm)
          rm(tfme)
        } else {
          bf$x[[i]]$smooth.construct[[j]]$X <- PredictMat(bf$x[[i]]$smooth.construct[[j]], test)
        }
      } else {
        ## Special terms may ship their own PredictMat(); fall back to
        ## mgcv's otherwise.
        if(is.null(bf$x[[i]]$smooth.construct[[j]]$PredictMat)) {
          bf$x[[i]]$smooth.construct[[j]]$X <- PredictMat(bf$x[[i]]$smooth.construct[[j]], test)
        } else {
          bf$x[[i]]$smooth.construct[[j]]$X <- bf$x[[i]]$smooth.construct[[j]]$PredictMat(bf$x[[i]]$smooth.construct[[j]], test)
        }
      }
    }
    ## Rebuild the parametric model matrix on the test data, re-applying
    ## any centering/scaling recorded on the training matrix.
    if(!is.null(bf$x[[i]]$model.matrix)) {
      sc <- attr(bf$x[[i]]$model.matrix, "scale")
      bf$x[[i]]$model.matrix <- model.matrix(drop.terms.bamlss(bf$x[[i]]$terms,
        sterms = FALSE, keep.response = FALSE, data = test), data = test)
      if(ncol(bf$x[[i]]$model.matrix) > 0) {
        if(!is.null(sc)) {
          for(name in unique(unlist(lapply(sc, names)))) {
            bf$x[[i]]$model.matrix[,name] <- (bf$x[[i]]$model.matrix[,name] - sc$center[name] ) / sc$scale[name]
          }
        }
      } else bf$x[[i]]$model.matrix <- NULL
    }
  }
  yname <- names(bf$y)
  family <- bf$family
  ## ret.x = TRUE: opt_boost() returns the fitted design list, not a fit.
  bf <- opt_boost(x = bf$x, y = bf$y, family = bf$family,
    weights = model.weights(bf$model.frame),
    offset = model.offset(bf$model.frame), ret.x = TRUE, initialize = FALSE, ...)
  formula <- list()
  for(i in names(bf))
    formula[[i]] <- bf[[i]][c("formula", "fake.formula")]
  class(formula) <- c("bamlss.formula", "list")
  environment(formula) <- environment(formula[[1]]$formula)
  attr(formula, "response.name") <- yname
  bf <- list("formula" = formula, "x" = bf, "family" = family)
  class(bf) <- c("boost_frame", "list")
  bf
}
## Predict method for "boost_frame" objects: returns the predictors on
## the link scale, or mapped through the family's link inverses when
## type = "parameter". Extra arguments are ignored.
predict.boost_frame <- function(object, type = c("link", "parameter"), ...)
{
  type <- match.arg(type)
  ## Plug the estimated coefficients back into the design objects.
  object$x <- set.starting.values(object$x, object$parameters)
  preds <- get.eta(object$x, expand = TRUE)
  if(identical(type, "parameter"))
    preds <- object$family$map2par(preds)
  preds
}
## Updating the hat-matrix.
## Thin wrapper around the compiled "hatmat_trace" routine in the
## bamlss package. Presumably updates/accumulates H0 using H1 and
## trace-related quantities of the boosting hat matrix — see the C
## sources for the exact contract (TODO confirm).
hatmat_trace <- function(H0, H1)
{
  .Call("hatmat_trace", H0, H1, PACKAGE = "bamlss")
}
## Thin wrapper around the compiled "hatmat_sumdiag" routine;
## presumably returns the sum of the diagonal (i.e. the trace) of the
## hat matrix H — TODO confirm against the C sources.
hatmat_sumdiag <- function(H)
{
  .Call("hatmat_sumdiag", H, PACKAGE = "bamlss")
}
## Boost setup.
## Prepares the model term list 'x' for gradient boosting:
##   * assigns equal degrees of freedom ('df') to all regular base
##     learners and freezes their smoothing parameters,
##   * splits a parametric model matrix into one single-column base
##     learner per coefficient,
##   * attaches per-term bookkeeping (selection counters, per-iteration
##     log-likelihood contributions, boosting tau2),
##   * optionally initializes the intercepts of all predictors by an
##     iterative weighted-mean fit (with step halving on decreasing
##     log-likelihood).
## Returns the transformed term list.
boost_transform <- function(x, y, df = NULL, family,
  weights = NULL, offset = NULL, maxit = 100,
  eps = .Machine$double.eps^0.25, initialize = TRUE,
  nu = 0.1, nu.adapt = TRUE, ...)
{
  np <- length(x)
  nx <- names(x)
  ## Initialize select indicator and intercepts.
  for(j in 1:np) {
    nid <- NULL
    for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
      ## Give every regular smooth term the same df; special learner
      ## classes (randombits/nnet/nnet2) keep their own setup.
      if(!is.null(df) & !inherits(x[[nx[j]]]$smooth.construct[[sj]], "randombits.smooth") & !inherits(x[[nx[j]]]$smooth.construct[[sj]], "nnet.smooth") & !inherits(x[[nx[j]]]$smooth.construct[[sj]], "nnet2.smooth")) {
        if(inherits(x[[nx[j]]]$smooth.construct[[sj]], "lasso.smooth"))
          x[[nx[j]]]$smooth.construct[[sj]]$xt$df <- df
        x[[nx[j]]]$smooth.construct[[sj]] <- assign.df(x[[nx[j]]]$smooth.construct[[sj]], df, do.part = TRUE)
      }
      ## Disable smoothing parameter optimization during boosting for
      ## terms with a non-fixed smoothing parameter; the old setting is
      ## kept in 'old.optimize'.
      if(!is.null(x[[nx[j]]]$smooth.construct[[sj]]$fxsp)) {
        if(!x[[nx[j]]]$smooth.construct[[sj]]$fxsp & !x[[nx[j]]]$smooth.construct[[sj]]$fixed) {
          x[[nx[j]]]$smooth.construct[[sj]]$old.optimize <- x[[nx[j]]]$smooth.construct[[sj]]$state$do.optim
          x[[nx[j]]]$smooth.construct[[sj]]$state$do.optim <- FALSE
          x[[nx[j]]]$smooth.construct[[sj]]$do.optim <- FALSE
        }
      }
    }
    ## Split the parametric model matrix into one base learner per
    ## column, so each linear coefficient can be selected separately.
    if(has_pterms(x[[nx[j]]]$terms)) {
      ii <- which(names(x[[nx[j]]]$smooth.construct) == "model.matrix")
      model.matrix <- list()
      cn <- colnames(x[[nx[j]]]$smooth.construct[[ii]]$X)
      g0 <- get.par(x[[nx[j]]]$smooth.construct[[ii]]$state$parameters, "b")
      nm <- NULL
      assign <- attr(x[[nx[j]]]$smooth.construct[[ii]]$X, "assign")
      for(pj in 1:ncol(x[[nx[j]]]$smooth.construct[[ii]]$X)) {
        model.matrix[[pj]] <- list()
        model.matrix[[pj]]$label <- cn[pj]
        model.matrix[[pj]]$term <- cn[pj]
        model.matrix[[pj]]$X <- x[[nx[j]]]$smooth.construct[[ii]]$X[, pj, drop = FALSE]
        model.matrix[[pj]]$binning <- x[[nx[j]]]$smooth.construct[[ii]]$binning
        model.matrix[[pj]]$nobs <- x[[nx[j]]]$smooth.construct[[ii]]$nobs
        model.matrix[[pj]]$fixed <- TRUE
        model.matrix[[pj]]$fxsp <- FALSE
        model.matrix[[pj]]$weights <- x[[nx[j]]]$smooth.construct[[ii]]$weights
        model.matrix[[pj]]$rres <- x[[nx[j]]]$smooth.construct[[ii]]$rres
        model.matrix[[pj]]$fit.reduced <- x[[nx[j]]]$smooth.construct[[ii]]$fit.reduced
        model.matrix[[pj]]$fit.fun <- x[[nx[j]]]$smooth.construct[[ii]]$fit.fun
        model.matrix[[pj]]$state <- list("parameters" = g0[pj])
        model.matrix[[pj]]$state$fitted.values <- drop(model.matrix[[pj]]$X %*% g0[pj])
        ## Expand binned fitted values back to observation level.
        if(!is.null(model.matrix[[pj]]$binning$match.index))
          model.matrix[[pj]]$state$fitted.values <- model.matrix[[pj]]$state$fitted.values[model.matrix[[pj]]$binning$match.index]
        model.matrix[[pj]]$state$edf <- 0
        model.matrix[[pj]]$state$rss <- 0
        model.matrix[[pj]]$state$do.optim <- FALSE
        model.matrix[[pj]]$is.model.matrix <- TRUE
        ## One selection counter slot per boosting iteration.
        model.matrix[[pj]]$selected <- rep(0, length = maxit)
        model.matrix[[pj]]$sparse.setup <- sparse.setup(model.matrix[[pj]]$X, S = model.matrix[[pj]]$S)
        model.matrix[[pj]]$upper <- Inf
        model.matrix[[pj]]$lower <- -Inf
        model.matrix[[pj]]$assign <- assign[pj]
        class(model.matrix[[pj]]) <- class(x[[nx[j]]]$smooth.construct[[ii]])
      }
      names(model.matrix) <- cn
      x[[nx[j]]]$smooth.construct[[ii]] <- NULL
      x[[nx[j]]]$smooth.construct <- c(model.matrix, x[[nx[j]]]$smooth.construct)
      attr(x[[nx[j]]], "assign") <- assign
    }
  }
  always3 <- list(...)$always3
  if(is.null(always3))
    always3 <- FALSE
  ## Save more info.
  for(j in 1:np) {
    for(sj in seq_along(x[[nx[j]]]$smooth.construct)) {
      ## Optionally augment every non-intercept learner with its own
      ## intercept column ("always3" setup).
      if(always3 & (x[[nx[j]]]$smooth.construct[[sj]]$label != "(Intercept)")) {
        x[[nx[j]]]$smooth.construct[[sj]]$X <- cbind(1, x[[nx[j]]]$smooth.construct[[sj]]$X)
        x[[nx[j]]]$smooth.construct[[sj]]$with.itcpt <- TRUE
        x[[nx[j]]]$smooth.construct[[sj]]$state$parameters <- c("b0" = 0, x[[nx[j]]]$smooth.construct[[sj]]$state$parameters)
      }
      x[[nx[j]]]$smooth.construct[[sj]]$state$init.edf <- x[[nx[j]]]$smooth.construct[[sj]]$state$edf
      x[[nx[j]]]$smooth.construct[[sj]]$state$edf <- 0
      nc <- ncol(x[[nx[j]]]$smooth.construct[[sj]]$X)
      nr <- nrow(x[[nx[j]]]$smooth.construct[[sj]]$X)
      ## Pre-allocated work matrices and per-iteration bookkeeping.
      x[[nx[j]]]$smooth.construct[[sj]]$XWX <- matrix(0, nc, nc)
      x[[nx[j]]]$smooth.construct[[sj]]$XW <- matrix(0, nc, nr)
      x[[nx[j]]]$smooth.construct[[sj]]$selected <- rep(0, length = maxit)
      x[[nx[j]]]$smooth.construct[[sj]]$loglik <- rep(0, length = maxit)
      x[[nx[j]]]$smooth.construct[[sj]]$state$rss <- 0
      if(is.null(x[[nx[j]]]$smooth.construct[[sj]]$is.model.matrix))
        x[[nx[j]]]$smooth.construct[[sj]]$boost.tau2 <- get.par(x[[nx[j]]]$smooth.construct[[sj]]$state$parameters, "tau2")
      else
        x[[nx[j]]]$smooth.construct[[sj]]$boost.tau2 <- 1000
      ## Flag functional penalties (evaluated per update).
      if(!is.null(x[[nx[j]]]$smooth.construct[[sj]]$S))
        x[[nx[j]]]$smooth.construct[[sj]]$penaltyFunction <- as.integer(sapply(x[[nx[j]]]$smooth.construct[[sj]]$S, is.function))
      else
        x[[nx[j]]]$smooth.construct[[sj]]$penaltyFunction <- 0L
      if(inherits(x[[nx[j]]]$smooth.construct[[sj]], "nnet.smooth") | inherits(x[[nx[j]]]$smooth.construct[[sj]], "nnet2.smooth"))
        x[[nx[j]]]$smooth.construct[[sj]]$fuse <- FALSE
    }
  }
  ## Iteratively fit intercept-only predictors as starting values.
  if(initialize) {
    nobs <- if(is.null(dim(y))) length(y) else nrow(y)
    eta <- get.eta(x)
    eta <- init.eta(eta, y, family, nobs)
    if(!is.null(offset)) {
      offset <- as.data.frame(offset)
      for(j in nx) {
        if(!is.null(offset[[j]]))
          eta[[j]] <- eta[[j]] + offset[[j]]
      }
    }
    start <- unlist(lapply(eta, mean, na.rm = TRUE))
    par <- rep(0, length(nx))
    names(par) <- nx
    eps0 <- eps + 1L
    k2 <- 0
    ## Backfitting-type loop over the parameters until the predictors
    ## stabilize (relative change below eps) or 100 sweeps.
    while((eps < eps0) & (k2 < 100)) {
      eta0 <- eta
      for(i in nx) {
        ll0 <- family$loglik(y, family$map2par(eta))
        peta <- family$map2par(eta)
        hess <- process.derivs(family$hess[[i]](y, peta, id = i), is.weight = TRUE)
        score <- process.derivs(family$score[[i]](y, peta, id = i), is.weight = FALSE)
        ## IWLS working response for the intercept update.
        z <- eta[[i]] + 1 / hess * score
        b0 <- par[i]
        if(!is.null(weights)) {
          if(attr(weights, "identical"))
            hess <- hess * as.numeric(weights[, 1])
        }
        par[i] <- 1 / (sum(hess) + 1e-20) * sum(hess * z, na.rm = TRUE)
        eta[[i]] <- rep(par[i], nobs)
        ll1 <- family$loglik(y, family$map2par(eta))
        ## Step halving if the update decreased the log-likelihood.
        if(ll1 < ll0) {
          fnu <- function(nu2) {
            b <- nu2 * par[i] + (1 - nu2) * b0
            eta[[i]] <- rep(b, nobs)
            return(family$loglik(y, family$map2par(eta)))
          }
          nu2 <- 1
          while((fnu(nu2) < ll0) & (.Machine$double.eps < nu2)) {
            nu2 <- nu2 / 2
          }
          par[i] <- nu2 * par[i] + (1 - nu2) * b0
          eta[[i]] <- rep(par[i], nobs)
        }
      }
      eps0 <- do.call("cbind", eta)
      eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
      k2 <- k2 + 1
    }
    ## Store the fitted intercepts in the term states.
    for(i in nx) {
      if(!is.null(x[[i]]$smooth.construct[["(Intercept)"]])) {
        x[[i]]$smooth.construct[["(Intercept)"]]$state$parameters[1] <- par[i]
        x[[i]]$smooth.construct[["(Intercept)"]]$state$fitted.values <- rep(par[i], length = nobs)
        x[[i]]$smooth.construct[["(Intercept)"]]$state$edf <- 1
        x[[i]]$smooth.construct[["(Intercept)"]]$state$init.edf <- 1
      }
    }
  }
  return(x)
}
## Thin wrapper around the compiled "boost_fit_nnet" routine.
## nthreads = NULL falls back to single-threaded evaluation.
boost_fit_nnet <- function(nu, X, N, y, ind, nthreads = NULL)
{
  threads <- if(is.null(nthreads)) 1L else as.integer(nthreads)
  .Call("boost_fit_nnet", nu, X, N, y, ind, threads)
}
## Simple list() generator for saving states of model terms.
## Builds a (possibly nested) template list with one NA entry per model
## term. A leaf node is recognized by containing both "formula" and
## "fake.formula"; otherwise the function recurses over all parameters.
## The "(Intercept)" entry is only included when intercept = TRUE, and
## type > 1 flattens the leaf template via unlist().
make.state.list <- function(x, type = 1, intercept = TRUE)
{
  is.leaf <- all(c("formula", "fake.formula") %in% names(x))
  if(!is.leaf) {
    ## Recurse over all model parameters.
    out <- list()
    for(id in names(x))
      out[[id]] <- make.state.list(x[[id]], type, intercept = intercept)
    return(out)
  }
  out <- list()
  if(!is.null(x$model.matrix))
    out$model.matrix <- NA
  if(!is.null(x$smooth.construct)) {
    for(tn in names(x$smooth.construct)) {
      ## The intercept slot is optional, every other term is kept.
      if(tn != "(Intercept)" || intercept)
        out[[tn]] <- NA
    }
  }
  if(type > 1)
    out <- unlist(out)
  out
}
## Create per-term coefficient path matrices with 'iter' rows; row one
## holds the current coefficients, the remaining rows are filled during
## boosting. Mirrors the nesting of make.state.list(): a leaf is
## detected via the "formula"/"fake.formula" entries.
make.par.list <- function(x, iter)
{
  if(!all(c("formula", "fake.formula") %in% names(x))) {
    ## Not a leaf: recurse over the model parameters.
    out <- list()
    for(id in names(x))
      out[[id]] <- make.par.list(x[[id]], iter)
    return(out)
  }
  out <- list()
  if(!is.null(x$smooth.construct)) {
    for(tn in names(x$smooth.construct)) {
      term <- x$smooth.construct[[tn]]
      ## Number of coefficients: the design matrix columns, or an
      ## explicitly stored count for special terms.
      k <- if(is.null(term$special.npar)) ncol(term$X) else term$special.npar
      m <- matrix(0, nrow = iter, ncol = k)
      b0 <- get.par(term$state$parameters, "b")
      colnames(m) <- names(b0)
      m[1, ] <- b0
      if(!is.null(term$with.itcpt)) {
        ## Drop the internal extra intercept column if present.
        drop.i <- grep("b0", colnames(m))
        if(length(drop.i))
          m <- m[, -drop.i, drop = FALSE]
      }
      if(!is.null(term$is.model.matrix))
        attr(m, "is.model.matrix") <- TRUE
      if(inherits(term, "nnet.smooth"))
        class(m) <- c(class(m), "nnet.smooth")
      out[[tn]] <- m
    }
  }
  return(out)
}
## Convert per-term coefficient paths (one matrix per term, one row per
## boosting iteration) into a single coefficient matrix with columns
## named "<parameter>.<p|s>.<term>.<coef>". Paths of non-"nnet.smooth"
## terms hold per-iteration increments and are accumulated via cumsum();
## linear (model matrix) terms are collapsed into a "p" block, all
## remaining smooth terms into an "s" block. If 'fixed' is given, only
## those rows are returned.
parm2mat <- function(x, mstop, fixed = NULL)
{
  pn <- names(x)
  for(ii in seq_along(x)) {
    plist <- x[[ii]]
    mm.terms <- NULL
    for(tn in names(plist)) {
      if(!is.null(attr(plist[[tn]], "is.model.matrix")))
        mm.terms <- c(mm.terms, tn)
      cn <- colnames(plist[[tn]])
      path <- plist[[tn]][1:mstop, , drop = FALSE]
      ## Increments are accumulated; nnet paths already store levels.
      if(!inherits(plist[[tn]], "nnet.smooth"))
        path <- apply(path, 2, cumsum)
      if(!is.matrix(path))
        path <- matrix(path, ncol = length(cn))
      colnames(path) <- cn
      plist[[tn]] <- path
    }
    if(!is.null(mm.terms)) {
      ## Collapse all linear terms into a single "p" block.
      plist[["p"]] <- do.call("cbind", plist[mm.terms])
      colnames(plist[["p"]]) <- mm.terms
      plist[mm.terms[mm.terms != "p"]] <- NULL
    }
    sm.terms <- names(plist)
    sm.terms <- sm.terms[sm.terms != "p"]
    if(length(sm.terms)) {
      ## Collapse all smooth terms into a single "s" block.
      plist[["s"]] <- plist[sm.terms]
      plist[sm.terms[sm.terms != "s"]] <- NULL
    }
    ## Prefix the column names with parameter and block identifiers.
    for(bn in names(plist)) {
      if(bn == "s") {
        for(tn in names(plist[[bn]]))
          colnames(plist[[bn]][[tn]]) <- paste(pn[ii], bn, tn, colnames(plist[[bn]][[tn]]), sep = ".")
        plist[[bn]] <- do.call("cbind", plist[[bn]])
      } else {
        colnames(plist[[bn]]) <- paste(pn[ii], bn, colnames(plist[[bn]]), sep = ".")
      }
    }
    x[[ii]] <- do.call("cbind", plist)
  }
  x <- do.call("cbind", x)
  if(!is.null(fixed))
    x <- x[fixed, ]
  return(x)
}
## Retransform 'x' to 'bamlss.frame' structure: merge the single-column
## linear base learners created by boost_transform() back into one
## combined "model.matrix" term per distribution parameter.
boost.retransform <- function(x) {
  for(id in names(x)) {
    if(!has_pterms(x[[id]]$terms))
      next
    state <- list()
    X <- NULL
    mm.names <- NULL
    ## Collect design columns and coefficients of all linear learners.
    for(tn in names(x[[id]]$smooth.construct)) {
      term <- x[[id]]$smooth.construct[[tn]]
      if(inherits(term, "model.matrix")) {
        mm.names <- c(mm.names, tn)
        X <- cbind(X, term$X)
        state$parameters <- c(state$parameters, get.par(term$state$parameters, "b"))
      }
    }
    label <- paste(mm.names, collapse = "+")
    binning <- x[[id]]$smooth.construct[[mm.names[1]]]$binning
    state$fitted.values <- drop(X %*% state$parameters)
    ## Replace the single-column learners by one combined term.
    x[[id]]$smooth.construct[mm.names] <- NULL
    x[[id]]$smooth.construct$model.matrix <- list(
      "X" = X,
      "S" = list(diag(0, ncol(X))),
      "rank" = ncol(X),
      "term" = label,
      "label" = label,
      "bs.dim" = ncol(X),
      "fixed" = TRUE,
      "is.model.matrix" = TRUE,
      "by" = "NA",
      "xt" = list("binning" = binning),
      "state" = state
    )
    x[[id]]$smooth.construct$model.matrix$fit.fun <- make.fit.fun(x[[id]]$smooth.construct$model.matrix)
  }
  return(x)
}
## Boosting iwls.
## One IWLS-type update of a single base learner 'x' during boosting:
## fits the learner to the working residuals 'resids' with weights
## 'hess', shrinks the coefficient update by the step length 'nu', and
## afterwards determines the smoothing parameter / edf of the updated
## fit. Returns the updated term state.
boost_iwls <- function(x, hess, resids, nu)
{
  ## Initial parameters and fit.
  g0 <- get.par(x$state$parameters, "b")
  fit0 <- fitted(x$state)
  ## Compute reduced residuals.
  xbin.fun(x$binning$sorted.index, hess, resids, x$weights, x$rres, x$binning$order)
  ## Compute mean and precision.
  XWX <- do.XWX(x$X, 1 / x$weights, x$sparse.setup$matrix)
  if(x$fixed) {
    P <- matrix_inv(XWX, index = x$sparse.setup)
  } else {
    ## Penalized precision: add 1/tau2-scaled penalty matrices.
    S <- 0
    tau2 <- get.state(x, "tau2")
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * x$S[[j]]
    P <- matrix_inv(XWX + S, index = x$sparse.setup)
  }
  ## New parameters, shrunken by the boosting step length nu.
  g <- nu * drop(P %*% crossprod(x$X, x$rres))
  ## Finalize.
  x$state$parameters <- set.par(x$state$parameters, g, "b")
  x$state$fitted.values <- x$fit.fun(x$X, get.state(x, "b"))
  ## Find edf.
  ## Re-aggregate using the full fit (residuals + old + new fit) and
  ## search for the smoothing parameter reproducing the cumulated
  ## coefficients g0 + g.
  xbin.fun(x$binning$sorted.index, hess, resids + fit0 + fitted(x$state), x$weights, x$rres, x$binning$order)
  XWX <- do.XWX(x$X, 1 / x$weights, x$sparse.setup$matrix)
  if(x$fixed) {
    P <- matrix_inv(XWX, index = x$sparse.setup)
  } else {
    g0 <- g0 + g
    ## Objective: squared distance between the penalized fit for a
    ## candidate tau2 and the accumulated coefficients.
    objfun <- function(tau2) {
      S <- 0
      for(j in seq_along(x$S))
        S <- S + 1 / tau2[j] * x$S[[j]]
      P <- matrix_inv(XWX + S, index = x$sparse.setup)
      g1 <- drop(P %*% crossprod(x$X, x$rres))
      sum((g1 - g0)^2)
    }
    if(length(get.state(x, "tau2")) < 2) {
      ## Single smoothing variance: 1-d golden-section search.
      tau2 <- optimize(objfun, interval = x$state$interval)$minimum
    } else {
      ## Multiple smoothing variances: box-constrained quasi-Newton.
      i <- grep("tau2", names(x$lower))
      tau2 <- if(!is.null(x$state$true.tau2)) x$state$true.tau2 else get.state(x, "tau2")
      opt <- try(optim(tau2, fn = objfun, method = "L-BFGS-B",
        lower = x$lower[i], upper = x$upper[i]), silent = TRUE)
      if(!inherits(opt, "try-error"))
        tau2 <- opt$par
    }
    if(inherits(tau2, "try-error"))
      stop(paste("problem in finding optimum smoothing parameter for term ", x$label, "!", sep = ""))
    attr(x$state$parameters, "true.tau2") <- tau2
    S <- 0
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * x$S[[j]]
    P <- matrix_inv(XWX + S, index = x$sparse.setup)
  }
  ## Assign degrees of freedom.
  x$state$edf <- sum_diag(XWX %*% P)
  attr(x$state$parameters, "edf") <- x$state$edf
  return(x$state)
}
## Boosting gradient fit.
## Weighted least-squares fit of a single base learner to the current
## working residuals 'y' (e.g. the negative gradient), shrunken by the
## boosting step length 'nu'.
##
## Arguments:
##   x         - a smooth term object (design matrix, penalties, state).
##   y         - working response/residual vector.
##   nu        - boosting step length applied to the coefficient update.
##   hatmatrix - if TRUE, also store the smoother (hat) matrix.
##   weights   - optional observation weights (default: all 1).
##
## Returns the updated term state (parameters, fitted values, rss, hat).
boost_fit <- function(x, y, nu, hatmatrix = TRUE, weights = NULL, ...)
{
  ## Process weights.
  if(is.null(weights))
    weights <- rep(1, length = length(y))
  ## Compute reduced residuals (binning-aware aggregation).
  xbin.fun(x$binning$sorted.index, weights, y, x$weights, x$rres, x$binning$order)
  ## Compute mean and precision.
  XWX <- do.XWX(x$X, 1 / x$weights, x$sparse.setup$matrix)
  if(x$fixed) {
    P <- matrix_inv(XWX, index = x$sparse.setup)
  } else {
    ## Penalized precision: add 1/tau2-scaled (possibly functional)
    ## penalty matrices.
    S <- 0
    tau2 <- get.state(x, "tau2")
    for(j in seq_along(x$S))
      S <- S + 1 / tau2[j] * if(is.function(x$S[[j]])) x$S[[j]](x$state$parameters) else x$S[[j]]
    P <- matrix_inv(XWX + S, index = x$sparse.setup)
  }
  ## New parameters, shrunken by the step length nu.
  g <- nu * drop(P %*% crossprod(x$X, x$rres))
  ## Finalize.
  x$state$parameters <- set.par(x$state$parameters, g, "b")
  x$state$fitted.values <- x$fit.fun(x$X, get.state(x, "b"))
  ## Fail loudly with an informative message instead of the former
  ## debugging placeholder stop("why?").
  if(any(is.na(x$state$fitted.values))) {
    stop(paste0("NA fitted values in boost_fit() for term ", x$label,
      ", please check the data and smoothing setup!"), call. = FALSE)
  }
  x$state$rss <- sum((x$state$fitted.values - y)^2 * weights)
  if(hatmatrix)
    x$state$hat <- nu * x$X %*% P %*% t(x$X)
  return(x$state)
}
## Increase coefficients.
## Accumulate the update in 'state1' onto 'state0': coefficients and
## fitted values are summed, while edf, smoothing variances and
## auxiliary information are taken over from the new state.
increase <- function(state0, state1)
{
  ## Accumulate fitted values and regression coefficients.
  state0$fitted.values <- fitted(state0) + fitted(state1)
  coef.sum <- get.par(state0$parameters, "b") + get.par(state1$parameters, "b")
  state0$parameters <- set.par(state0$parameters, coef.sum, "b")
  ## Carry over edf, smoothing variances and attributes.
  state0$edf <- state1$edf
  state0$parameters <- set.par(state0$parameters, get.par(state1$parameters, "tau2"), "tau2")
  attr(state0$parameters, "true.tau2") <- attr(state1$parameters, "true.tau2")
  attr(state0$parameters, "edf") <- attr(state1$parameters, "edf")
  state0$special <- state1$special
  state0$hat <- state1$hat
  if(!is.null(state1$redf)) {
    ## Reduced-edf information overrides tau2/edf when available.
    state0$boost.tau2 <- state1$redf$tau2
    state0$edf <- state1$redf$fedf
  }
  state0
}
## Extract number of selected base learners.
## Counts, up to iteration 'iter', how many base learners were selected.
## Linear/randombits/nnet2 learners contribute the number of non-zero
## coefficients; all other terms count once if ever selected (the
## intercept never counts). Unless qsel.splitfactor = TRUE, learners
## stemming from the same factor (identified via the "assign" attribute)
## are counted as one joint term. Without assign information a count of
## 1 is returned.
get.qsel <- function(x, iter, qsel.splitfactor = FALSE)
{
  count <- 0
  for(id in names(x)) {
    assign <- as.character(attr(x[[id]], "assign"))
    ## No assign information available: fall back to one selected term.
    if(!length(assign))
      return(1)
    uassign <- unique(assign)
    ## Keep only assign groups spanning more than one column (factors).
    fac.groups <- uassign[sapply(uassign, function(a) { sum(assign == a) > 1 })]
    grp.count <- list()
    for(g in fac.groups)
      grp.count[[g]] <- 0
    for(tn in names(x[[id]]$smooth.construct)) {
      term <- x[[id]]$smooth.construct[[tn]]
      if(inherits(term, "linear.smooth") | inherits(term, "randombits.smooth") | inherits(term, "nnet2.smooth")) {
        ## These learners count per non-zero coefficient.
        pn <- names(term$state$parameters)
        count <- count + sum(abs(term$state$parameters[grep("b", pn)]) > 1e-10)
        next
      }
      sel <- any(term$selected[1:iter] > 0)
      count <- count + 1 * (sel & tn != "(Intercept)")
      if(!is.null(term$assign)) {
        g <- as.character(term$assign)
        if(g %in% fac.groups)
          grp.count[[g]] <- grp.count[[g]] + 1 * sel
      }
    }
    if(!qsel.splitfactor) {
      ## Count each factor only once, not per dummy column.
      for(g in fac.groups)
        count <- count - max(0, grp.count[[g]] - 1)
    }
    rm(grp.count)
  }
  count
}
## Total number of base learners over all distribution parameters.
get.maxq <- function(x)
{
  total <- 0
  for(id in names(x))
    total <- total + length(names(x[[id]]$smooth.construct))
  total
}
## Extract summary for boosting.
## Collects boosting results up to iteration 'mstop' into a
## "boost_summary" object: per-term selection frequencies (in percent)
## and log-likelihood contributions, the IC path, and (if a hat matrix
## was computed) AIC/BIC/edf paths.
make.boost_summary <- function(x, mstop, save.ic, edf, hatmatrix, nobs)
{
  pn <- names(x)
  labels <- NULL
  ll.contrib <- crit.contrib <- NULL
  tabs <- ll.sums <- list()
  for(id in pn) {
    term.labels <- NULL
    for(tn in names(x[[id]]$smooth.construct)) {
      term <- x[[id]]$smooth.construct[[tn]]
      labels <- c(labels, paste(term$label, id, sep = "."))
      term.labels <- c(term.labels, term$label)
      ## Percentage of iterations in which the term was selected.
      tabs[[id]] <- rbind(tabs[[id]], sum(term$selected[1:mstop]) / mstop * 100)
      ## Total log-likelihood contribution of the term.
      ll.sums[[id]] <- rbind(ll.sums[[id]], sum(term$loglik[1:mstop]))
      ll.contrib <- cbind(ll.contrib, cumsum(term$loglik[1:mstop]))
      if(!is.null(term$criterion))
        crit.contrib <- cbind(crit.contrib, cumsum(term$criterion[1:mstop]))
    }
    if(!is.matrix(tabs[[id]])) tabs[[id]] <- matrix(tabs[[id]], nrow = 1)
    tabs[[id]] <- cbind(tabs[[id]], ll.sums[[id]])
    if(!is.matrix(tabs[[id]])) tabs[[id]] <- matrix(tabs[[id]], nrow = 1)
    colnames(tabs[[id]]) <- c(paste(id, "% selected"), "LogLik contrib.")
    rownames(tabs[[id]]) <- rownames(ll.sums[[id]]) <- term.labels
    ## Order the terms by their log-likelihood contribution.
    tabs[[id]] <- tabs[[id]][order(tabs[[id]][, 2], decreasing = TRUE), , drop = FALSE]
  }
  colnames(ll.contrib) <- labels
  if(!is.null(crit.contrib))
    colnames(crit.contrib) <- labels
  names(tabs) <- pn
  rval <- list("summary" = tabs, "mstop" = mstop,
    "ic" = save.ic[1:mstop], "loglik" = ll.contrib)
  if(hatmatrix) {
    ## Hat-matrix based information criteria along the boosting path.
    rval$criterion <- list(
      "bic" = -2 * rval$ic + edf[1:mstop] * log(nobs),
      "aic" = -2 * rval$ic + edf[1:mstop] * 2,
      "edf" = edf[1:mstop]
    )
  }
  if(!is.null(crit.contrib))
    rval$crit.contrib <- crit.contrib
  class(rval) <- "boost_summary"
  return(rval)
}
## Extract (and print) the boosting summary stored in a fitted model.
## Returns the "boost_summary" object invisibly.
boost_summary <- function(object, ...)
{
  bs <- object$model.stats$optimizer$boost_summary
  if(!is.null(bs))
    print.boost_summary(bs, ...)
  invisible(bs)
}
## Smallish print function for boost summaries.
## Prints per-parameter selection tables and optionally draws
## diagnostic plots: the log-likelihood path ("loglik"), per-term
## log-likelihood contributions ("loglik.contrib") and, if available,
## information criterion paths ("aic"/"bic"/"user").
print.boost_summary <- function(x, summary = TRUE, plot = TRUE,
  which = c("loglik", "loglik.contrib"), intercept = TRUE,
  spar = TRUE, ...)
{
  ## Accept a full fitted model as well.
  if(inherits(x, "bamlss"))
    x <- x$model.stats$optimizer$boost_summary
  if(is.null(x))
    stop("no summary for boosted model available")
  if(summary) {
    np <- length(x$summary)
    cat("\n")
    cat("logLik. =", if(is.na(x$ic[x$mstop])) x$ic[x$mstop - 1] else x$ic[x$mstop], "-> at mstop =", x$mstop, "\n---\n")
    for(j in 1:np) {
      if(length(x$summary[[j]]) < 2) {
        print(round(x$summary[[j]], digits = 4))
      } else printCoefmat(x$summary[[j]], digits = 4)
      if(j != np)
        cat("---\n")
    }
    cat("\n")
  }
  if(plot) {
    ## Numeric 'which' is mapped onto the plot names.
    if(!is.character(which)) {
      which <- c("loglik", "loglik.contrib", "parameters", "aic", "bic", "user")[as.integer(which)]
    } else {
      which <- tolower(which)
      which <- match.arg(which, c("loglik", "loglik.contrib", "parameters", "aic", "bic", "user"), several.ok = TRUE)
    }
    if(spar) {
      ## Restore graphics parameters on exit.
      op <- par(no.readonly = TRUE)
      on.exit(par(op))
      par(mfrow = c(1, length(which)))
    }
    for(w in which) {
      if(w == "loglik") {
        if(spar)
          par(mar = c(5.1, 4.1, 2.1, 2.1))
        plot(x$ic, type = "l", xlab = "Iteration", ylab = "logLik", ...)
        abline(v = x$mstop, lwd = 3, col = "lightgray")
        axis(3, at = x$mstop, labels = paste("mstop =", x$mstop))
      }
      if(w == "loglik.contrib") {
        if(spar)
          par(mar = c(5.1, 4.1, 2.1, 10.1))
        if(!intercept) {
          j <- grep("(Intercept)", colnames(x$loglik), fixed = TRUE)
          x$loglik <- x$loglik[, -j]
        }
        args <- list(...)
        ## Optionally restrict to terms matching 'name'.
        if(!is.null(args$name)) {
          x$loglik <- x$loglik[, grep2(args$name, colnames(x$loglik), fixed = TRUE), drop = FALSE]
        }
        ## Color the curves by parameter name (last label component).
        xn <- sapply(strsplit(colnames(x$loglik), ".", fixed = TRUE), function(x) { x[length(x)] })
        if(is.null(cols <- args$mcol)) {
          cols <- rainbow_hcl(length(unique(xn)))
        } else {
          cols <- rep(cols, length.out = length(unique(xn)))
        }
        matplot(x$loglik, type = "l", lty = 1,
          xlab = "Iteration", ylab = "LogLik contribution", col = cols[as.factor(xn)],
          lwd = args$lwd, axes = FALSE, main = args$main)
        box()
        axis(2)
        at <- pretty(1:nrow(x$loglik))
        at[1L] <- 1
        axis(1, at = at)
        cn <- colnames(x$loglik)
        if(!is.null(args$drop)) {
          for(dn in args$drop)
            cn <- gsub(dn, "", cn, fixed = TRUE)
        }
        if(!is.null(args$name)) {
          for(n in args$name)
            cn <- gsub(n, "", cn, fixed = TRUE)
        }
        at <- x$loglik[nrow(x$loglik), ]
        if(!is.null(args$showzero)) {
          if(!args$showzero) {
            cn <- cn[at != 0]
            at <- at[at != 0]
          }
        }
        ## Merge axis labels that would overlap (closer than 'dthres'
        ## relative to the label range).
        labs <- labs0 <- cn
        plab <- at
        o <- order(plab, decreasing = TRUE)
        labs <- labs[o]
        plab <- plab[o]
        rplab <- diff(range(plab))
        dthres <- args$dthres
        if(is.null(dthres))
          dthres <- 0.02
        for(i in 1:(length(plab) - 1)) {
          dp <- abs(plab[i] - plab[i + 1]) / rplab
          if(dp <= dthres) {
            labs[i + 1] <- paste(c(labs[i], labs[i + 1]), collapse = ",")
            labs[i] <- ""
          }
        }
        labs <- labs[order(o)]
        at <- at[labs != ""]
        labs <- labs[labs != ""]
        axis(4, at = at, labels = labs, las = 1)
        if(!isFALSE(args$mstop)) {
          abline(v = x$mstop, lwd = 3, col = "lightgray")
          axis(3, at = x$mstop, labels = paste("mstop =", x$mstop))
        }
      }
      if(w %in% c("aic", "bic", "user")) {
        if(!is.null(x$criterion)) {
          if(spar)
            par(mar = c(5.1, 4.1, 2.1, 2.1))
          args <- list()
          if(is.null(args$xlab))
            args$xlab <- "Iteration"
          if(is.null(args$ylab))
            args$ylab <- if(w == "user") "User IC" else toupper(w)
          ## NOTE(review): for w == "user" the curve is taken from
          ## x$criterion[["userIC"]] but which.min() below looks up
          ## x$criterion[["user"]] — verify this mismatch is intended.
          plot(x$criterion[[if(w == "user") "userIC" else w]], type = "l", xlab = args$xlab, ylab = args$ylab)
          i <- which.min(x$criterion[[w]])
          abline(v = i, lwd = 3, col = "lightgray")
          if(!is.null(x$criterion$edf))
            axis(3, at = i, labels = paste("mstop = ", i, ", edf = ", round(x$criterion$edf[i], digits = 2), sep = ""))
        }
      }
    }
  }
  return(invisible(x))
}
## Plot method for "boost_summary" objects; delegates to
## print.boost_summary() with the printed summary tables disabled.
plot.boost_summary <- function(x, ...)
{
  print.boost_summary(x, summary = FALSE, plot = TRUE, ...)
}
## Diagnostic plots for a boosted model 'x' (a fitted object holding a
## boost_summary): log-likelihood path, per-term contributions,
## coefficient paths ("parameters") and IC paths, truncated at 'mstop'.
boost_plot <- function(x, which = c("loglik", "loglik.contrib", "parameters", "aic", "bic", "user"),
  intercept = TRUE, spar = TRUE, mstop = NULL, name = NULL, drop = NULL, labels = NULL, color = NULL, ...)
{
  if(!is.character(which)) {
    ## NOTE(review): the numeric mapping only covers the first three
    ## plot types, so aic/bic/user cannot be addressed by number here.
    which <- c("loglik", "loglik.contrib", "parameters")[as.integer(which)]
  } else {
    which <- tolower(which)
    which <- match.arg(which, several.ok = TRUE)
  }
  if(spar) {
    ## Restore graphics parameters on exit.
    op <- par(no.readonly = TRUE)
    on.exit(par(op))
    par(mfrow = c(1, length(which)))
  }
  ## Truncate the stored summary at the requested stopping iteration.
  if(is.null(mstop))
    mstop <- x$model.stats$optimizer$boost_summary$mstop
  x$model.stats$optimizer$boost_summary$mstop <- mstop
  x$model.stats$optimizer$boost_summary$ic <- x$model.stats$optimizer$boost_summary$ic[1:mstop]
  x$model.stats$optimizer$boost_summary$loglik <- x$model.stats$optimizer$boost_summary$loglik[1:mstop, , drop = FALSE]
  for(w in which) {
    if(w %in% c("loglik", "loglik.contrib", "aic", "bic", "user")) {
      ## These plot types are handled by plot.boost_summary().
      if((w == "loglik") & spar)
        par(mar = c(5.1, 4.1, 2.1, 2.1))
      if((w == "loglik.contrib") & spar)
        par(mar = c(5.1, 4.1, 2.1, 10.1))
      plot.boost_summary(x, which = w, spar = FALSE, intercept = intercept, name = name, ...)
    }
    if(w == "parameters") {
      ## Coefficient paths over the boosting iterations.
      if(spar)
        par(mar = c(5.1, 4.1, 2.1, 10.1))
      if(!is.null(drop)) {
        x$parameters <- x$parameters[, -grep2(drop, colnames(x$parameters), fixed = TRUE), drop = FALSE]
      }
      if(!is.null(name)) {
        x$parameters <- x$parameters[, grep2(name, colnames(x$parameters), fixed = TRUE), drop = FALSE]
      }
      p <- x$parameters[1:mstop, , drop = FALSE]
      if(!intercept)
        p <- p[, -grep("(Intercept)", colnames(p), fixed = TRUE), drop = FALSE]
      ## Color curves by parameter name (first label component, or the
      ## third if only one parameter is present).
      xn <- sapply(strsplit(colnames(x$parameters), ".", fixed = TRUE), function(x) { x[1] })
      if(length(unique(xn)) < 2)
        xn <- sapply(strsplit(colnames(x$parameters), ".", fixed = TRUE), function(x) { x[3] })
      cols <- if(is.null(color)) {
        if(length(unique(xn)) < 2) "black" else rainbow_hcl(length(unique(xn)))
      } else {
        if(is.function(color)) {
          color(length(unique(xn)))
        } else {
          rep(color, length.out = length(unique(xn)))
        }
      }
      if(is.null(labels)) {
        ## Merge right-axis labels that would overlap (closer than 2%
        ## of the value range at the last iteration).
        labs <- labs0 <- colnames(p)
        plab <- p[nrow(p), ]
        o <- order(plab, decreasing = TRUE)
        labs <- labs[o]
        plab <- plab[o]
        rplab <- diff(range(plab))
        for(i in 1:(length(plab) - 1)) {
          dp <- abs(plab[i] - plab[i + 1]) / rplab
          if(length(dp) < 1)
            dp <- 0
          if(is.na(dp))
            dp <- 0
          if(dp <= 0.02) {
            labs[i + 1] <- paste(c(labs[i], labs[i + 1]), collapse = ",")
            labs[i] <- ""
          }
        }
        labs <- labs[order(o)]
        if(!is.null(name)) {
          for(j in seq_along(name))
            labs <- gsub(name[j], "", labs, fixed = TRUE)
        }
      } else labs <- rep(labels, length.out = ncol(p))
      at <- p[nrow(p), ]
      at <- at[labs != ""]
      labs <- labs[labs != ""]
      matplot(p, type = "l", lty = 1, col = cols[as.factor(xn)], xlab = "Iteration", ...)
      abline(v = mstop, lwd = 3, col = "lightgray")
      axis(4, at = at, labels = labs, las = 1)
      axis(3, at = mstop, labels = paste("mstop =", mstop))
    }
  }
}
## Assign starting values.
## Writes the (named) starting values in 'start' into the term states
## of the model term list 'x'. 'start' may be a list (optionally with a
## "parameters" element), a named vector, or a matrix whose last row is
## used. Names are matched against the "<parameter>.<p|s>.<term>.<coef>"
## scheme; ".edf"/".accepted"/".alpha" entries are ignored. Fitted
## values are recomputed for every updated term. Returns the updated
## term list.
set.starting.values <- function(x, start)
{
  if(!is.null(start)) {
    ## Normalize 'start' to a named numeric vector.
    if(is.list(start)) {
      if("parameters" %in% names(start))
        start <- start$parameters
    }
    if(is.list(start))
      start <- unlist(start)
    if(is.matrix(start)) {
      ## Use the last row of a coefficient path matrix.
      nstart <- colnames(start)
      start <- as.vector(start[nrow(start), , drop = TRUE])
      names(start) <- nstart
    }
    nstart <- names(start)
    ## First name component = distribution parameter id.
    tns <- sapply(strsplit(nstart, ".", fixed = TRUE), function(x) { x[1] })
    nx <- names(x)
    for(id in nx) {
      if(!is.null(x[[id]]$smooth.construct)) {
        ## Linear (model matrix) coefficients: "<id>.p.<coef>".
        if(!is.null(x[[id]]$smooth.construct$model.matrix)) {
          if(length(take <- grep(paste(id, "p", sep = "."), nstart[tns %in% id], fixed = TRUE, value = TRUE))) {
            cn <- paste(id, "p", colnames(x[[id]]$smooth.construct$model.matrix$X), sep = ".")
            i <- grep2(take, cn, fixed = TRUE)
            if(length(i)) {
              tpar <- start[take[i]]
              ## Drop bookkeeping entries, keep only coefficients.
              i <- grep2(c(".edf", ".accepted", ".alpha"), names(tpar), fixed = TRUE)
              if(length(i))
                tpar <- tpar[-i]
              names(tpar) <- gsub(paste(id, "p.", sep = "."), "", names(tpar), fixed = TRUE)
              if(any(l <- grepl("tau2", take))) {
                tau2 <- start[take[l]]
                names(tau2) <- gsub(paste(id, "p.", sep = "."), "", names(tau2), fixed = TRUE)
                tpar <- c(tpar, tau2)
              }
              ## Only assign if all names are known to the state.
              if(all(names(tpar) %in% names(x[[id]]$smooth.construct$model.matrix$state$parameters))) {
                x[[id]]$smooth.construct$model.matrix$state$parameters[names(tpar)] <- tpar
                x[[id]]$smooth.construct$model.matrix$state$fitted.values <- x[[id]]$smooth.construct$model.matrix$fit.fun(x[[id]]$smooth.construct$model.matrix$X, x[[id]]$smooth.construct$model.matrix$state$parameters)
              }
            }
          }
        }
        ## Smooth term coefficients: "<id>.s.<label>.<coef>".
        for(j in seq_along(x[[id]]$smooth.construct)) {
          tl <- x[[id]]$smooth.construct[[j]]$label
          tl <- paste(id, "s", tl, sep = ".")
          if(inherits(x[[id]]$smooth.construct[[j]], "nnet.boost")) {
            take <- tl
          } else {
            take <- grep(paste0(tl, "."), nstart[tns %in% id], fixed = TRUE, value = TRUE)
          }
          if(is.null(x[[id]]$smooth.construct[[j]]$by))
            x[[id]]$smooth.construct[[j]]$by <- "NA"
          if(x[[id]]$smooth.construct[[j]]$by == "NA") {
            ## Exclude entries of by-interaction terms ("<label>:...").
            take <- take[!grepl(paste(tl, ":", sep = ""), take, fixed = TRUE)]
          }
          if(length(take)) {
            tpar <- start[take]
            i <- grep2(c(".edf", ".accepted", ".alpha"), names(tpar), fixed = TRUE)
            tpar <- if(length(i)) tpar[-i] else tpar
            names(tpar) <- gsub(paste(tl, ".", sep = ""), "", names(tpar), fixed = TRUE)
            if(inherits(x[[id]]$smooth.construct[[j]], "nnet0.smooth")) {
              ## nnet0 terms take the raw parameter vector.
              spar <- tpar
            } else {
              spar <- x[[id]]$smooth.construct[[j]]$state$parameters
              if(length(get.par(tpar, "b")))
                spar <- set.par(spar, get.par(tpar, "b"), "b")
              if(any(grepl("tau2", names(tpar)))) {
                spar <- set.par(spar, get.par(tpar, "tau2"), "tau2")
              }
            }
            x[[id]]$smooth.construct[[j]]$state$parameters <- spar
            x[[id]]$smooth.construct[[j]]$state$fitted.values <- x[[id]]$smooth.construct[[j]]$fit.fun(x[[id]]$smooth.construct[[j]]$X, x[[id]]$smooth.construct[[j]]$state$parameters)
          }
        }
      }
    }
  }
  return(x)
}
## Lasso optimizer: fits the model along a (decreasing) grid of lasso
## penalty parameters lambda, optionally with adaptive weights obtained
## from an unpenalized (ridge) "zero model". For every lambda the model
## is refitted (warm-started from the previous solution) and the
## information criterion path is recorded. Returns the coefficient
## paths, a "lasso.stats" matrix and the number of observations.
opt_lasso <- lasso <- function(x, y, start = NULL, adaptive = TRUE,
  lower = 0.001, upper = 1000, nlambda = 100, lambda = NULL, multiple = FALSE,
  verbose = TRUE, digits = 4, flush = TRUE,
  nu = NULL, stop.nu = NULL, ridge = .Machine$double.eps^0.5,
  zeromodel = NULL, ...)
{
  ## method = 1 uses opt_bfit, anything else opt_optim.
  method <- list(...)$method
  if(is.null(method))
    method <- 1
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, update = bfit_iwls, ...)
  start2 <- start
  if(lower < 1e-20)
    lower <- 1e-20
  ## Log-spaced lambda grid from 'upper' down to 'lower'.
  lambdas <- if(is.null(lambda)) {
    exp(seq(log(upper), log(lower), length = nlambda))
  } else lambda
  lambdas <- rep(list(lambdas), length = length(x))
  names(lambdas) <- names(x)
  ## multiple = TRUE crosses the grids of all parameters.
  lambdas <- as.matrix(do.call(if(multiple) "expand.grid" else "cbind", lambdas))
  if(length(verbose) < 2)
    verbose <- c(verbose, FALSE)
  ia <- if(flush) interactive() else FALSE
  par <- list(); ic <- NULL
  ptm <- proc.time()
  fuse <- NULL
  ## Freeze smoothing parameter optimization for all lasso terms; with
  ## adaptive = TRUE replace the lasso penalty by a ridge penalty for
  ## the zero model used to compute adaptive weights.
  for(i in names(x)) {
    for(j in names(x[[i]]$smooth.construct)) {
      if(inherits(x[[i]]$smooth.construct[[j]], "lasso.smooth")) {
        x[[i]]$smooth.construct[[j]]$state$do.optim <- FALSE
        x[[i]]$smooth.construct[[j]]$fxsp <- TRUE
        fuse <- c(fuse, x[[i]]$smooth.construct[[j]]$fuse)
        if(adaptive) {
          tau2 <- get.par(x[[i]]$smooth.construct[[j]]$state$parameters, "tau2")
          tau2 <- rep(1/ridge, length.out = length(tau2))
          x[[i]]$smooth.construct[[j]]$state$parameters <- set.par(x[[i]]$smooth.construct[[j]]$state$parameters, tau2, "tau2")
          ## Keep the original lasso penalty in LAPEN for later.
          x[[i]]$smooth.construct[[j]]$LAPEN <- x[[i]]$smooth.construct[[j]]$S
          x[[i]]$smooth.construct[[j]]$S <- list(diag(length(get.par(x[[i]]$smooth.construct[[j]]$state$parameters, "b"))))
        }
      }
    }
  }
  fuse <- if(is.null(fuse)) FALSE else any(fuse)
  if(!is.null(nu))
    nu <- rep(nu, length.out = 2)
  if(!is.null(stop.nu))
    stop.nu <- rep(stop.nu, length.out = 2)
  ## Fit the zero model and derive adaptive weights (fused lasso only).
  if(adaptive & fuse) {
    if(verbose[1] & is.null(zeromodel))
      cat("Estimating adaptive weights\n---\n")
    if(is.null(zeromodel)) {
      if(method == 1) {
        zeromodel <- opt_bfit(x = x, y = y, start = start, verbose = verbose[1], nu = nu[2], stop.nu = stop.nu[2], ...)
      } else {
        zeromodel <- opt_optim(x = x, y = y, start = start, verbose = verbose[1], ...)
      }
    }
    x <- lasso_transform(x, zeromodel, nobs = nrow(y))
  } else {
    if(!is.null(zeromodel)) {
      x <- lasso_transform(x, zeromodel, nobs = nrow(y))
    }
  }
  ## Main loop over the lambda grid.
  for(l in 1:nrow(lambdas)) {
    ## Warm start from the previous solution.
    if(l > 1)
      start <- unlist(par[[l - 1]])
    tau2 <- NULL
    ## Set tau2 = 1/lambda for all lasso terms in the starting values.
    for(i in names(x)) {
      for(j in names(x[[i]]$smooth.construct)) {
        if(inherits(x[[i]]$smooth.construct[[j]], "lasso.smooth")) {
          tau2 <- get.par(x[[i]]$smooth.construct[[j]]$state$parameters, "tau2")
          nt <- names(tau2)
          tau2 <- rep(1 / lambdas[l, i], length.out = length(tau2))
          names(tau2) <- paste(i, "s", x[[i]]$smooth.construct[[j]]$label, nt, sep = ".")
          if(!is.null(start) & (l > 1)) {
            if(all(names(tau2) %in% names(start))) {
              start[names(tau2)] <- tau2
            } else {
              start <- c(start, tau2)
            }
          } else {
            start <- c(start, tau2)
          }
        }
      }
    }
    ## Merge user-supplied starting values in the first iteration.
    if((l < 2) & !is.null(start2)) {
      start <- c(start, start2)
      start <- start[!duplicated(names(start))]
    }
    if(method == 1) {
      b <- opt_bfit(x = x, y = y, start = start, verbose = verbose[2], nu = nu[2], stop.nu = stop.nu[2], ...)
    } else {
      b <- opt_optim(x = x, y = y, start = start, verbose = verbose[2], ...)
    }
    ## Fall back to a BIC based on the number of non-zero coefficients
    ## if the optimizer did not return an information criterion.
    nic <- grep("ic", names(b), value = TRUE, ignore.case = TRUE)
    if(!length(nic)) {
      b$edf <- sum(abs(unlist(b$parameters)) > .Machine$double.eps^0.25)
      b$BIC <- -2 * b$logLik + b$edf * log(nrow(y))
    }
    nic <- grep("ic", names(b), value = TRUE, ignore.case = TRUE)
    par[[l]] <- unlist(b$parameters)
    mstats <- c(b$logLik, b$logPost, b[[nic]], b[["edf"]])
    names(mstats) <- c("logLik", "logPost", nic, "edf")
    ic <- rbind(ic, mstats)
    ## Optional live plot of the IC path.
    if(!is.null(list(...)$track)) {
      plot(ic[, nic] ~ c(1:l), type = "l", xlab = "Iteration", ylab = nic)
    }
    ## Drop step-length control after stop.nu iterations.
    if(!is.null(stop.nu)) {
      if(l > stop.nu)
        nu <- NULL
    }
    if(verbose[1]) {
      cat(if(ia) "\r" else if(l > 1) "\n" else NULL)
      vtxt <- paste(nic, " ", fmt(b[[nic]], width = 8, digits = digits),
        " edf ", fmt(mstats["edf"], width = 6, digits = digits),
        " lambda ", paste(fmt(if(!multiple) lambdas[l, 1] else lambdas[l, ], width = 6, digits = digits), collapse = ","),
        " iteration ", formatC(l, width = nchar(nlambda)), sep = "")
      cat(vtxt)
      if(.Platform$OS.type != "unix" & ia) flush.console()
    }
  }
  elapsed <- c(proc.time() - ptm)[3]
  if(verbose[1]) {
    et <- if(elapsed > 60) {
      paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
    } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
    cat("\nelapsed time: ", et, "\n", sep = "")
  }
  ## Collect the statistics path including the lambda grid.
  colnames(lambdas) <- paste("lambda", names(x), sep = ".")
  ic <- cbind(ic, "lambda" = lambdas)
  rownames(ic) <- NULL
  attr(ic, "multiple") <- multiple
  class(ic) <- c("lasso.stats", "matrix")
  list("parameters" = do.call("rbind", par), "lasso.stats" = ic, "nobs" = nrow(y))
}
## Replace the penalties of 'lasso.smooth' terms by adaptive weights derived
## from an initial unpenalized/"zero" model fit (adaptive and fused lasso).
##
## Arguments:
##   x         - a 'bamlss.frame' (its $x design part is used) or the design
##               list itself.
##   zeromodel - fitted model supplying the coefficients the adaptive weights
##               are computed from; parameters may be stored as a list, a
##               matrix (last row is used) or a plain named vector.
##   nobs      - optional number of observations used for the standardization
##               factors; defaults to nrow of the term's design matrix.
## Returns: the modified design list, wrapped in list("x" = ...) when the
## input was a 'bamlss.frame'.
lasso_transform <- function(x, zeromodel, nobs = NULL, ...)
{
  ## Unwrap the design part of a 'bamlss.frame'; remember for the return value.
  if(bframe <- inherits(x, "bamlss.frame")) {
    if(is.null(x$x))
      stop("no 'x' object in 'bamlss.frame'!")
    x <- x$x
  }
  for(i in names(x)) {
    for(j in names(x[[i]]$smooth.construct)) {
      if(inherits(x[[i]]$smooth.construct[[j]], "lasso.smooth")) {
        ## Restore the original lasso penalty from the backup slot, if any.
        if(!is.null(x[[i]]$smooth.construct[[j]]$LAPEN)) {
          x[[i]]$smooth.construct[[j]]$S <- x[[i]]$smooth.construct[[j]]$LAPEN
          x[[i]]$smooth.construct[[j]]$LAPEN <- NULL
        }
        if(x[[i]]$smooth.construct[[j]]$fuse) {
          ## Fused lasso: extract this term's coefficients from the zero model.
          if(is.list(zeromodel$parameters)) {
            beta <- get.par(zeromodel$parameters[[i]]$s[[j]], "b")
          } else {
            if(is.matrix(zeromodel$parameters)) {
              ## Matrix of parameter paths: use the last row.
              beta <- grep(paste(i, ".s.", j, ".", sep = ""), colnames(zeromodel$parameters), fixed = TRUE)
              beta <- get.par(zeromodel$parameters[nrow(zeromodel$parameters), beta], "b")
            } else {
              beta <- grep(paste(i, ".s.", j, ".", sep = ""), names(zeromodel$parameters), fixed = TRUE)
              beta <- get.par(zeromodel$parameters[beta], "b")
            }
          }
          df <- x[[i]]$smooth.construct[[j]]$lasso$df
          ## Af maps coefficient pairs to fusion differences; one weight per
          ## column (i.e. per fused difference).
          Af <- x[[i]]$smooth.construct[[j]]$Af
          w <- rep(0, ncol(Af))
          fuse_type <- x[[i]]$smooth.construct[[j]]$fuse_type
          k <- ncol(x[[i]]$smooth.construct[[j]]$X)
          if(is.null(nobs))
            nobs <- nrow(x[[i]]$smooth.construct[[j]]$X)
          if(x[[i]]$smooth.construct[[j]]$xt$gfx) {
            ## Plain adaptive weights: inverse absolute fused differences.
            w <- NULL
            for(ff in 1:ncol(Af))
              w <- c(w, 1/abs(t(Af[, ff]) %*% beta))
          } else {
            ## Standardized adaptive weights including group-size factors;
            ## nref covers the reference category not present in df.
            nref <- nobs - sum(df)
            for(ff in 1:ncol(Af)) {
              ok <- which(Af[, ff] != 0)
              w[ff] <- if(fuse_type == "nominal") {
                if(length(ok) < 2) {
                  2 / (k + 1) * sqrt((df[ok[1]] + nref) / nobs)
                } else {
                  2 / (k + 1) * sqrt((df[ok[1]] + df[ok[2]]) / nobs)
                }
              } else {
                if(length(ok) < 2) {
                  sqrt((df[ok[1]] + nref) / nobs)
                } else {
                  sqrt((df[ok[1]] + df[ok[2]]) / nobs)
                }
              }
              w[ff] <- w[ff] * 1 / abs(t(Af[, ff]) %*% beta)
            }
          }
          names(w) <- paste("lasso", 1:length(w), sep = "")
          ## Guard against division by (numerically) zero differences.
          w[!is.finite(w)] <- 1e10
          x[[i]]$smooth.construct[[j]]$fixed.hyper <- w
        } else {
          ## Plain adaptive lasso: weights are the zero-model coefficients.
          w <- get.par(zeromodel$parameters[[i]]$s[[j]], "b")
          names(w) <- paste("lasso", 1:length(w), sep = "")
          w[!is.finite(w)] <- 1e10
          x[[i]]$smooth.construct[[j]]$fixed.hyper <- w
        }
      }
    }
  }
  if(bframe) {
    return(list("x" = x))
  } else {
    return(x)
  }
}
## Print method for "lasso.stats" objects: reports the information criterion
## and remaining statistics at the stopping iteration found by lasso_stop().
## Returns NULL invisibly.
print.lasso.stats <- function(x, digits = 4, ...)
{
  stats <- attr(lasso_stop(x), "stats")
  crit <- grep("ic", names(stats), ignore.case = TRUE, value = TRUE)
  cat(crit, "=", stats[crit], "-> at lambda =", stats[grep("lambda", names(stats))], "\n")
  ## Drop the lambda entries, then print the rest as "name = value" pairs.
  stats <- stats[!grepl("lambda", names(stats))]
  txt <- paste(names(stats), "=", round(stats, digits = digits), collapse = " ")
  cat(txt, "\n---\n")
  return(invisible(NULL))
}
## Back-transform coefficients of lasso terms to the original covariate
## scale, undoing the column-wise (or block-wise) standardization that was
## applied when the design matrices were set up.
## x is a fitted bamlss object; ... is passed to coef.bamlss().
## Returns the coefficient vector/matrix with lasso coefficients rescaled.
lasso_coef <- function(x, ...) {
  cx <- coef.bamlss(x, ...)
  ## NOTE(review): 'ncx' is computed but never used below.
  ncx <- if(!is.null(dim(cx))) colnames(cx) else names(cx)
  if(is.null(x$x))
    x$x <- smooth.construct(x)
  for(i in names(x$x)) {
    for(j in names(x$x[[i]]$smooth.construct)) {
      if(inherits(x$x[[i]]$smooth.construct[[j]], "lasso.smooth")) {
        for(jj in names(x$x[[i]]$smooth.construct[[j]]$lasso$trans)) {
          ## Full coefficient names of this transformed sub-block.
          cid <- paste(i, ".s.", x$x[[i]]$smooth.construct[[j]]$label, ".",
            x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$colnames, sep = "")
          if(is.null(x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$blockscale)) {
            ## Simple scaling: divide by the stored per-column scale factors.
            if(is.null(dim(cx))) {
              cx[cid] <- cx[cid] / x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$scale
            } else {
              ## Samples in rows: recycle the scale vector column-wise.
              cx[, cid] <- cx[, cid, drop = FALSE] / x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$scale
            }
          } else {
            ## Block standardization: invert the stored transformation matrix.
            if(is.null(dim(cx))) {
              cx[cid] <- solve(x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$blockscale, cx[cid])
            } else {
              for(ii in 1:nrow(cx)) {
                cx[ii, cid] <- solve(x$x[[i]]$smooth.construct[[j]]$lasso$trans[[jj]]$blockscale, cx[ii, cid])
              }
            }
          }
        }
      }
    }
  }
  cx
}
## Plot lasso paths of a fitted bamlss model: either the information
## criterion along the lambda grid ("criterion") and/or the coefficient
## paths ("parameters").
##
## Arguments:
##   x           - fitted bamlss object containing $model.stats$optimizer$lasso.stats.
##   which       - which plot(s) to draw, by name or index.
##   spar        - set up par() (mfrow/mar) automatically and restore on exit.
##   model       - distributional parameter name(s) to plot (partial matching).
##   name        - restrict coefficient paths to terms matching these names.
##   mstop       - iterations to display (defaults to all).
##   retrans     - back-transform coefficients via lasso_coef() first.
##   color       - color vector or palette function for the paths.
##   show.lambda - mark the criterion-optimal lambda.
##   labels      - custom right-margin labels for the coefficient paths.
##   digits      - digits for the log(lambda) axis labels.
lasso_plot <- function(x, which = c("criterion", "parameters"), spar = TRUE, model = NULL, name = NULL,
  mstop = NULL, retrans = FALSE, color = NULL, show.lambda = TRUE, labels = NULL,
  digits = 2, ...)
{
  ## Resolve requested model names (partial matching against family names).
  if(is.null(model))
    model <- x$family$names
  model <- x$family$names[pmatch(model, x$family$names)]
  if(any(is.na(model))) {
    model <- model[!is.na(model)]
    if(!length(model))
      stop("argument model is spcified wrong")
    else
      warning("argument model is spcified wrong")
  }
  ## 'which' may be given by index or (partial) name.
  if(!is.character(which)) {
    which <- c("criterion", "parameters")[as.integer(which)]
  } else {
    which <- tolower(which)
    which <- match.arg(which, several.ok = TRUE)
  }
  if(is.null(mstop))
    mstop <- 1:nrow(x$parameters)
  if(retrans)
    x$parameters <- lasso_coef(x)
  ## Keep only regression coefficients (drop intercepts and hyperparameters).
  npar <- colnames(x$parameters)
  for(j in c("Intercept", ".edf", ".lambda", ".tau"))
    npar <- npar[!grepl(j, npar, fixed = TRUE)]
  x$parameters <- x$parameters[, npar, drop = FALSE]
  ic <- x$model.stats$optimizer$lasso.stats
  ## 'multiple' indicates separate lambda grids per distributional parameter.
  multiple <- attr(ic, "multiple")
  log_lambda <- log(ic[, grep("lambda", colnames(ic)), drop = FALSE])
  nic <- grep("ic", colnames(ic), value = TRUE, ignore.case = TRUE)
  if(spar) {
    op <- par(no.readonly = TRUE)
    on.exit(par(op))
    ## Count the panels needed for the requested plots.
    n <- if("criterion" %in% which) {
      if(multiple) length(model) else 1
    } else 0
    if("parameters" %in% which)
      n <- n + length(model)
    par(mfrow = n2mfrow(n), mar = c(5.1, 5.1, 4.1, 1.1))
  }
  at <- pretty(1:nrow(ic))
  at[at == 0] <- 1
  if("criterion" %in% which) {
    if(!multiple) {
      ## Single lambda grid: one criterion curve.
      plot(ic[, nic], type = "l",
        xlab = expression(log(lambda[, 1])), ylab = nic, axes = FALSE, lwd = list(...)$lwd)
      at <- pretty(mstop)
      at[at == 0] <- 1
      axis(1, at = at, labels = as.numeric(fmt(log_lambda[, 1][mstop][at], digits)))
      axis(2)
      if(show.lambda) {
        ## Mark the criterion minimum and annotate its lambda value.
        i <- which.min(ic[, nic])
        abline(v = i, col = "lightgray", lwd = 2, lty = 2)
        val <- round(ic[i, grep("lambda", colnames(ic))[1]], 4)
        axis(3, at = i, labels = substitute(paste(lambda, '=', val)))
      }
      if(!is.null(main <- list(...)$main))
        mtext(main, side = 3, line = 2.5, cex = 1.2, font = 2)
      box()
    } else {
      ## One criterion curve per distributional parameter; for each, fix the
      ## other parameters' lambdas at their jointly optimal values.
      main <- list(...)$main
      if(is.null(main))
        main <- model
      main <- rep(main, length.out = length(model))
      k <- 1
      for(m in model) {
        imin <- which.min(ic[, nic])
        lambda_min <- ic[imin, grep("lambda", colnames(ic))]
        tlambda <- names(lambda_min)
        tlambda <- tlambda[!grepl(m, tlambda)]
        take <- NULL
        for(j in tlambda)
          take <- cbind(take, ic[, j] == lambda_min[j])
        take <- apply(take, 1, all)
        tic <- ic[take, nic]
        plot(tic, type = "l", xlab = expression(log(lambda[, 1])), ylab = nic, axes = FALSE, lwd = list(...)$lwd)
        at <- pretty(1:length(tic))
        at[at == 0] <- 1
        axis(1, at = at, labels = as.numeric(fmt(log_lambda[take, paste("lambda", m, sep = ".")][at], digits)))
        axis(2)
        if(show.lambda) {
          i <- which.min(tic)
          abline(v = i, col = "lightgray", lwd = 2, lty = 2)
          val <- lambda_min[paste("lambda", m, sep = ".")]
          axis(3, at = i, labels = substitute(paste(lambda, '=', val)))
        }
        box()
        if(!is.expression(main[k])) {
          if(main[k] != "")
            mtext(main[k], side = 3, line = 2.5, cex = 1.2, font = 2)
        } else {
          mtext(main[k], side = 3, line = 2.5, cex = 1.2, font = 2)
        }
        k <- k + 1
      }
    }
  }
  if("parameters" %in% which) {
    ## Coefficient paths, one panel per distributional parameter.
    main <- list(...)$main
    if(is.null(main))
      main <- model
    main <- rep(main, length.out = length(model))
    imin <- which.min(ic[, nic])
    lambda_min <- ic[imin, grep("lambda", colnames(ic))]
    k <- 1
    for(m in model) {
      if(spar)
        par(mar = c(5.1, 5.1, 4.1, 10.1))  # wide right margin for path labels
      tpar <- x$parameters[, grep(paste(m, ".", sep = ""), colnames(x$parameters), fixed = TRUE), drop = FALSE]
      if(multiple) {
        ## Restrict to rows where the other parameters' lambdas are optimal.
        tlambda <- names(lambda_min)
        tlambda <- tlambda[!grepl(m, tlambda)]
        take <- NULL
        for(j in tlambda)
          take <- cbind(take, ic[, j] == lambda_min[j])
        take <- apply(take, 1, all)
        tpar <- tpar[take, , drop = FALSE]
      } else {
        take <- 1:nrow(tpar)
      }
      if(!is.null(name))
        tpar <- tpar[, grep2(name, colnames(tpar), fixed = TRUE), drop = FALSE]
      if(max(mstop) > nrow(tpar))
        mstop <- nrow(tpar)
      tpar <- tpar[if(length(mstop) < 2) 1:mstop else mstop, , drop = FALSE]
      ## Group paths by term name for coloring.
      xn <- sapply(strsplit(colnames(tpar), ".", fixed = TRUE), function(x) { x[1] })
      if(length(unique(xn)) < 2)
        xn <- sapply(strsplit(colnames(tpar), ".", fixed = TRUE), function(x) { x[3] })
      cols <- if(is.null(color)) {
        if(length(unique(xn)) < 2) "black" else rainbow_hcl(length(unique(xn)))
      } else {
        if(is.function(color)) {
          color(length(unique(xn)))
        } else {
          rep(color, length.out = length(unique(xn)))
        }
      }
      add <- if(is.null(list(...)$add)) FALSE else list(...)$add
      nolabels <- if(is.null(list(...)$nolabels)) FALSE else list(...)$nolabels
      matplot(tpar, type = "l", lty = 1, col = cols[as.factor(xn)],
        xlab = expression(log(lambda)), ylab = expression(beta[j]), axes = FALSE, add = add,
        lwd = list(...)$lwd)
      if(!nolabels) {
        if(is.null(labels)) {
          ## Auto-generate right-margin labels; merge labels of paths that
          ## end too close together (within 2% of the value range).
          labs <- labs0 <- colnames(tpar)
          plab <- tpar[nrow(tpar), ]
          o <- order(plab, decreasing = TRUE)
          labs <- labs[o]
          plab <- plab[o]
          rplab <- diff(range(plab))
          for(i in 1:(length(plab) - 1)) {
            dp <- abs(plab[i] - plab[i + 1]) / rplab
            if(dp <= 0.02) {
              labs[i + 1] <- paste(c(labs[i], labs[i + 1]), collapse = ",")
              labs[i] <- ""
            }
          }
          labs <- labs[order(o)]
          if(!is.null(name)) {
            for(j in seq_along(name))
              labs <- gsub(name[j], "", labs, fixed = TRUE)
          }
        } else labs <- rep(labels, length.out = ncol(tpar))
        at <- tpar[nrow(tpar), ]
        at <- at[labs != ""]
        labs <- labs[labs != ""]
        axis(4, at = at, labels = labs, las = 1, cex.axis = list(...)$labcex)
      }
      at <- pretty(1:nrow(tpar))
      at[at == 0] <- 1
      axis(1, at = at, labels = as.numeric(fmt(log_lambda[take, paste("lambda", m, sep = ".")][at], digits)))
      axis(2)
      if(show.lambda) {
        i <- which.min(ic[take, nic])
        abline(v = i, col = "lightgray", lwd = 1, lty = 2)
        ## Print names of coefficients still active at the optimal lambda.
        cat(colnames(tpar)[abs(tpar[i, ]) > 0.009], "\n")
        val <- round(lambda_min[paste("lambda", m, sep = ".")], digits)
        if(multiple) {
          lval <- parse(text = paste('paste(lambda[', m, '], "=", ', val, ')', sep = ''))
          axis(3, at = i, labels = lval)
        } else {
          axis(3, at = i, labels = substitute(paste(lambda, '=', val)))
        }
      }
      box()
      if(!is.expression(main[k])) {
        if(main[k] != "")
          mtext(main[k], side = 3, line = 2.5, cex = 1.2, font = 2)
      } else {
        mtext(main[k], side = 3, line = 2.5, cex = 1.2, font = 2)
      }
      k <- k + 1
    }
  }
  return(invisible(NULL))
}
## Find the stopping iteration of a lasso path: the row of the statistics
## matrix that minimizes the information criterion column (matched by "ic",
## case-insensitively). Accepts either a "lasso.stats" matrix directly or a
## fitted bamlss object holding one. The selected row is attached to the
## returned index as attribute "stats".
lasso_stop <- function(x)
{
  stats <- if(inherits(x, "lasso.stats")) x else x$model.stats$optimizer$lasso.stats
  crit <- grep("ic", colnames(stats), ignore.case = TRUE, value = TRUE)
  best <- which.min(stats[, crit])
  attr(best, "stats") <- stats[best, ]
  best
}
## Deep learning bamlss.
## Fit a deep-learning bamlss model: one feed-forward keras sub-network per
## distributional parameter, whose outputs are concatenated and trained
## jointly with the family's negative log-likelihood as loss.
##
## Arguments:
##   object     - a bamlss formula/object; anything else is passed through
##                bamlss.frame() first.
##   optimizer  - keras optimizer name or object; NULL selects rmsprop.
##   epochs, batch_size - passed to keras::fit().
##   nlayers, units, activation - hidden-layer architecture (recycled to
##                nlayers entries).
##   l1, l2     - optional per-layer kernel regularization strengths.
##   verbose    - print fit progress and elapsed time.
## Returns: the bamlss frame with $dnn (keras model), $fitted.values,
## $history and $elapsed added; class c("dl.bamlss", "bamlss.frame").
dl.bamlss <- function(object,
  optimizer = "adam", epochs = 30, batch_size = NULL,
  nlayers = 2, units = 100, activation = "sigmoid", l1 = NULL, l2 = NULL,
  verbose = TRUE, ...)
{
  stopifnot(requireNamespace("keras"))
  stopifnot(requireNamespace("tensorflow"))
  if(!inherits(object, "bamlss")) {
    object <- bamlss.frame(object, ...)
  }
  y <- object$y
  nobs <- nrow(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  ## One input matrix per distributional parameter: parametric part plus
  ## design matrices of all smooth terms.
  nx <- names(object$formula)
  X <- list()
  for(i in nx) {
    X[[i]] <- object$x[[i]]$model.matrix
    if(!is.null(object$x[[i]]$smooth.construct)) {
      for(j in seq_along(object$x[[i]]$smooth.construct))
        X[[i]] <- cbind(X[[i]], object$x[[i]]$smooth.construct[[j]]$X)
    }
  }
  family <- family(object)
  if(is.null(family$keras))
    family$keras <- keras_loss(family$family)
  if(is.null(family$keras$nloglik))
    stop("no keras negative loglik() function is specified!")
  nll <- family$keras$nloglik
  if(is.null(optimizer))
    optimizer <- keras::optimizer_rmsprop(lr = 0.0001)
  if(is.character(optimizer)) {
    optimizer <- match.arg(optimizer, c("adam", "sgd", "rmsprop",
      "adagrad", "adadelta", "adamax", "nadam"))
  }
  models <- inputs <- outputs <- list()
  ## Recycle per-layer settings.
  units <- rep(units, length.out = nlayers)
  activation <- rep(activation, length.out = nlayers)
  if(!is.null(l1))
    l1 <- rep(l1, length.out = nlayers)
  if(!is.null(l2))
    l2 <- rep(l2, length.out = nlayers)
  pen <- NULL
  if(!is.null(l1) & is.null(l2))
    pen <- paste0('regularizer_l1(', l1, ')')
  if(is.null(l1) & !is.null(l2))
    pen <- paste0('regularizer_l2(', l2, ')')
  if(!is.null(l1) & !is.null(l2))
    pen <- paste0('regularizer_l1_l2(', l1, ', ', l2, ')')
  for(j in seq_along(X)) {
    inputs[[j]] <- keras::layer_input(shape = ncol(X[[j]]))
    itcpt <- FALSE
    if(ncol(X[[j]]) < 2) {
      ## Bug fix: test the current matrix X[[j]], not X[[i]] ('i' was a
      ## stale loop variable from the design-construction loop above).
      if(colnames(X[[j]]) == "(Intercept)")
        itcpt <- TRUE
    }
    if(itcpt) {
      ## Intercept-only predictor: a single bias-free linear unit.
      opts <- c('outputs[[j]] <- inputs[[j]] %>%',
        'layer_dense(units = 1, use_bias = FALSE)')
    } else {
      ## Stack 'nlayers' dense layers (one string per layer) plus a final
      ## linear output unit; the pipeline is assembled as text because the
      ## number of layers is variable.
      opts <- c('outputs[[j]] <- inputs[[j]] %>%',
        paste0('layer_dense(units = ', units,
          if(!is.null(pen)) paste0(', kernel_regularizer = ', pen) else NULL,
          ', activation = "', activation, '") %>%'),
        'layer_dense(units = 1)')
    }
    opts <- paste(opts, collapse = " ")
    eval(parse(text = opts))
  }
  final_output <- keras::layer_concatenate(outputs)
  model <- keras::keras_model(inputs, final_output)
  model <- keras::compile(model,
    loss = nll,
    optimizer = optimizer
  )
  names(X) <- NULL
  ## Pad the response with constant columns so it matches the number of
  ## network outputs (the loss only reads the true response columns).
  if(is.null(dim(y))) {
    if(length(X) > 1) {
      Y <- matrix(y, ncol = 1)
      for(j in seq_len(length(nx) - 1))
        Y <- cbind(Y, 1)
    } else {
      Y <- matrix(y, ncol = 1)
    }
    if(length(X) < 2)
      X <- X[[1L]]
  } else {
    nc <- ncol(y)
    Y <- y
    ## seq_len() guards the edge case ncol(y) == length(nx) (1:0 would
    ## otherwise wrongly append two columns).
    for(j in seq_len(length(nx) - nc))
      Y <- cbind(Y, 1)
  }
  ptm <- proc.time()
  history <- keras::fit(model,
    x = X,
    y = Y,
    epochs = epochs, batch_size = batch_size,
    verbose = as.integer(verbose)
  )
  elapsed <- c(proc.time() - ptm)[3]
  if(verbose) {
    cat("\n")
    et <- if(elapsed > 60) {
      paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
    } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
    cat("\n elapsed time: ", et, "\n", sep = "")
  }
  object$dnn <- model
  object$fitted.values <- as.data.frame(predict(model, X))
  colnames(object$fitted.values) <- nx
  object$elapsed <- elapsed
  object$history <- history
  class(object) <- c("dl.bamlss", "bamlss.frame")
  return(object)
}
## Extractor functions.
## Extract the fitted predictor values stored on a "dl.bamlss" object.
fitted.dl.bamlss <- function(object, ...) {
  object$fitted.values
}
## Extract the model family stored on a "dl.bamlss" object.
family.dl.bamlss <- function(object, ...) {
  object$family
}
## Residuals for "dl.bamlss" objects: delegate to the bamlss method.
residuals.dl.bamlss <- function(object, ...) {
  residuals.bamlss(object, ...)
}
## Plot method for "dl.bamlss" objects: plot the keras training history.
plot.dl.bamlss <- function(x, ...) {
  plot(x$history, ...)
}
## Log-likelihood of a "dl.bamlss" model, optionally on new data
## (pass via logLik(object, newdata = ...)). The fitted family density is
## evaluated at the predicted distributional parameters; NAs are dropped.
## The degrees of freedom of the network are unknown, hence df = NA.
logLik.dl.bamlss <- function(object, ...)
{
  newdata <- list(...)$newdata
  yname <- response_name(object)
  if(is.null(newdata)) {
    ## No new data: use the stored model frame.
    newdata <- model.frame(object)
    y <- newdata[[yname]]
  } else {
    ## Evaluate the (possibly expression-valued) response in the new data.
    y <- eval(parse(text = yname), envir = newdata)
  }
  par <- predict(object, newdata = newdata, type = "parameter")
  ll <- sum(family(object)$d(y, par, log = TRUE), na.rm = TRUE)
  attr(ll, "df") <- NA
  class(ll) <- "logLik"
  return(ll)
}
## Predict function.
## Predict method for "dl.bamlss" models.
##
## Arguments:
##   object  - fitted "dl.bamlss" object (needs $dnn, $formula, $family).
##   newdata - data frame / matrix / list, or a file path readable by
##             read.table(); missing/NULL uses the stored model frame.
##   model   - restrict predictions to selected distributional parameters
##             (by name or index).
##   type    - "link" returns predictors, "parameter" applies the inverse
##             link functions.
##   drop    - return a vector instead of a one-column data frame.
predict.dl.bamlss <- function(object, newdata, model = NULL,
  type = c("link", "parameter"), drop = TRUE, ...)
{
  ## If data have been scaled (scale.d = TRUE), apply the stored
  ## centering/scaling to the new data as well.
  if(!missing(newdata) & ! is.null(attr(object$model.frame, 'scale')) ) {
    sc <- attr(object$model.frame, 'scale')
    for( name in unique(unlist(lapply(sc,names))) ) {
      newdata[,name] <- (newdata[,name] - sc$center[name] ) / sc$scale[name]
    }
  }
  if(missing(newdata))
    newdata <- NULL
  if(is.null(newdata)) {
    newdata <- model.frame.bamlss.frame(object)
  } else {
    ## Allow a file path or matrix/list input; coerce to data frame.
    if(is.character(newdata)) {
      if(file.exists(newdata <- path.expand(newdata)))
        newdata <- read.table(newdata, header = TRUE, ...)
      else stop("cannot find newdata")
    }
    if(is.matrix(newdata) || is.list(newdata))
      newdata <- as.data.frame(newdata)
  }
  type <- match.arg(type)
  ## Rebuild the per-parameter input matrices exactly as in dl.bamlss():
  ## parametric model matrix plus prediction matrices of all smooth terms.
  nx <- names(object$formula)
  X <- list()
  for(i in seq_along(nx)) {
    tfi <- drop.terms.bamlss(object$x[[i]]$terms,
      sterms = FALSE, keep.response = FALSE, data = newdata, specials = NULL)
    X[[i]] <- model.matrix(tfi, data = newdata)
    if(!is.null(object$x[[i]]$smooth.construct)) {
      for(j in seq_along(object$x[[i]]$smooth.construct)) {
        X[[i]] <- cbind(X[[i]], PredictMat(object$x[[i]]$smooth.construct[[j]], newdata))
      }
    }
  }
  ## keras expects a single matrix when there is only one input branch.
  if(length(X) < 2)
    X <- X[[1L]]
  pred <- as.data.frame(predict(object$dnn, X))
  colnames(pred) <- nx
  ## Optionally subset to the requested distributional parameter(s).
  if(!is.null(model)) {
    if(is.character(model))
      nx <- nx[grep(model, nx)[1]]
    else
      nx <- nx[as.integer(model)]
  }
  pred <- pred[nx]
  if(type == "parameter") {
    ## Map predictors to parameter scale via the inverse link functions.
    links <- object$family$links[nx]
    for(j in seq_along(links)) {
      if(links[j] != "identity") {
        linkinv <- make.link2(links[j])$linkinv
        pred[[j]] <- linkinv(pred[[j]])
      }
    }
  }
  if((length(pred) < 2) & drop)
    pred <- pred[[1L]]
  return(pred)
}
## Most likely transformations.
## Most likely transformations optimizer -- WORK IN PROGRESS.
## NOTE(review): this function is unfinished development code; it ends in
## stop("here!") after the first backfitting sweep and never returns a model.
## Do not call in production.
opt_mlt <- function(x, y, family, start = NULL, weights = NULL, offset = NULL,
  criterion = c("AICc", "BIC", "AIC"),
  eps = .Machine$double.eps^0.25, maxit = 400,
  verbose = TRUE, digits = 4, flush = TRUE, nu = NULL, stop.nu = NULL, ...)
{
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("design construct names mismatch with family names!")
  if(is.null(attr(x, "bamlss.engine.setup")))
    x <- bamlss.engine.setup(x, ...)
  nobs <- nrow(y)
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  ## Pseudo-response: transform y through its empirical cdf and the model
  ## distribution's quantile function.
  ## NOTE(review): the clipping thresholds look inconsistent (values > 0.9999
  ## mapped to 0.999, values < 0.0001 mapped to 0.001) -- confirm intent.
  Fy <- ecdf(y)
  Fy <- Fy(y)
  Fy[Fy > 0.9999] <- 0.999
  Fy[Fy < 0.0001] <- 0.001
  Yhat <- family$distr$q(Fy)
  ## Initial fit of the pseudo-response with a Gaussian backfitting model.
  opt <- opt_bfit(x = x, y = data.frame("y" = Yhat),
    family = complete.bamlss.family(Gaussian_bamlss()),
    eps = eps, maxit = maxit, nu = nu, update = bfit_optim())
  criterion <- match.arg(criterion)
  np <- length(nx)
  if(!is.null(nu)) {
    if(nu < 0)
      nu <- NULL
  }
  if(!is.null(start))
    x <- set.starting.values(x, start)
  else
    x <- set.starting.values(x, opt$parameters)
  eta <- get.eta(x)
  if(!is.null(weights))
    weights <- as.data.frame(weights)
  if(!is.null(offset)) {
    offset <- as.data.frame(offset)
    for(j in nx) {
      if(!is.null(offset[[j]]))
        eta[[j]] <- eta[[j]] + offset[[j]]
    }
  } else {
    if(is.null(start))
      eta <- init.eta(eta, y, family, nobs)
  }
  ia <- if(flush) interactive() else FALSE
  eps0 <- eps + 1; iter <- 0
  edf <- get.edf(x, type = 2)
  ptm <- proc.time()
  ## Backfitting loop (incomplete: eta/eps0/iter are never updated, and
  ## p.state is discarded).
  while(eps0 > eps & iter < maxit) {
    eta0 <- eta
    ## Cycle through all terms.
    for(sj in seq_along(x$mu$smooth.construct)) {
      ## Get updated parameters.
      p.state <- mlt_update(x$mu$smooth.construct[[sj]],
        family$distr, y, eta, edf = edf, weights = weights$mu,
        iteration = iter, criterion = criterion)
    }
  }
  stop("here!")
}
## One Newton-type coefficient update for a single term in the MLT optimizer.
## NOTE(review): development stub -- prints the coefficients before/after the
## update and aborts with stop("yess!\n"); never returns a state.
mlt_update <- function(x, distr, y, eta, edf, weights, iteration, criterion)
{
  beta <- get.par(x$state$parameters, "b")
  ## Score of the log-density w.r.t. the predictor (d'/d evaluated at mu).
  score <- t(x$X) %*% (distr$dd(eta$mu) / distr$d(eta$mu))
  ## Additional term from the log-determinant of the transformation
  ## derivative for monotone ("mlt.smooth") terms; 1e-10 avoids division
  ## by zero.
  if(inherits(x, "mlt.smooth"))
    score <- score + t(x$dX) %*% as.numeric((1 / (x$dX %*% beta + 1e-10)))
  ## Observation weights of the (negative) Hessian.
  w <- distr$ddd(eta$mu) / distr$d(eta$mu) - (distr$dd(eta$mu) / distr$d(eta$mu))^2
  hess <- crossprod(x$X * w, x$X)
  if(inherits(x, "mlt.smooth"))
    hess <- hess - crossprod(x$dX * as.numeric(1 / (x$dX %*% beta + 1e-10)^2), x$dX)
  print(beta)
  ## NOTE(review): a Newton step would use solve(hess) %*% score; the plain
  ## product below looks like a placeholder -- confirm before use.
  beta <- beta + hess %*% score
  print(beta)
  stop("yess!\n")
}
## Gradient boosting for bamlss distributional regression with randomized
## single-hidden-layer neural networks ("extreme learning machine" style) as
## base learners. In each iteration and for each distributional parameter,
## random hidden units are drawn, a ridge-penalized least-squares fit to the
## score (gradient) is computed, and a damped step (step length nu) is added
## to the predictor.
##
## Key arguments:
##   nu, nodes, lambda, dropout - recycled per distributional parameter.
##   df         - target degrees of freedom for the base learner (used to
##                choose the ridge parameter when lambda is NULL).
##   activation - one or more activation types; r/s give the sampling ranges
##                for the random hidden-layer weights per type.
##   select     - if TRUE, only the best-performing parameter is updated per
##                iteration (component-wise boosting).
## Returns an object of class "boost.net" with coefficient paths, fitted
## predictors, log-likelihood path and the scaling info needed for predict.
boost.net <- function(formula, maxit = 1000, nu = 1, nodes = 10, df = 4,
  lambda = NULL, dropout = NULL, flush = TRUE, initialize = TRUE,
  eps = .Machine$double.eps^0.25, verbose = TRUE, digits = 4,
  activation = "sigmoid",
  r = list("sigmoid" = 0.01, "gauss" = 0.01, "sin" = 0.01, "cos" = 0.01),
  s = list("sigmoid" = 10000, "gauss" = 20, "sin" = 20, "cos" = 20),
  select = FALSE, ...)
{
  bf <- bamlss.frame(formula, ...)
  y <- bf$y
  has_offset <- any(grepl("(offset)", names(bf$model.frame), fixed = TRUE))
  nx <- names(bf$x)
  np <- length(nx)
  nobs <- nrow(y)
  ## Recycle per-parameter settings and attach parameter names.
  nu <- rep(nu, length.out = np)
  names(nu) <- nx
  nodes <- rep(nodes, length.out = np)
  names(nodes) <- nx
  if(!is.null(lambda)) {
    lambda <- rep(lambda, length.out = np)
    names(lambda) <- nx
  }
  if(is.data.frame(y)) {
    if(ncol(y) < 2)
      y <- y[[1]]
  }
  if(!is.null(dropout)) {
    dropout <- rep(dropout, length.out = np)
    if(any(dropout > 1) | any(dropout < 0))
      stop("argument dropout must be between [0,1]!")
    names(dropout) <- nx
  }
  ## ntake: number of covariate columns fed to each random network
  ## (all non-intercept columns, or a dropout-reduced subset).
  ntake <- rep(NA, length.out = np)
  names(ntake) <- nx
  Xn <- s01 <- list()
  beta <- taken <- list()
  for(i in nx) {
    k <- ncol(bf$x[[i]]$model.matrix)
    if(!is.null(dropout))
      ntake[i] <- ceiling(k * (1 - dropout[i]))
    else
      ntake[i] <- k - 1L
    if(k > 1) {
      ## Draw dummy weights once only to size the coefficient matrix.
      w <- list()
      for(j in activation)
        w[[j]] <- n.weights(nodes[i], k = ntake[i], type = j)
      w <- unlist(w)
      if(!is.null(dropout))
        taken[[i]] <- matrix(0L, nrow = maxit, ncol = ntake[i])
      beta[[i]] <- matrix(0, nrow = maxit, ncol = k + (nodes[i] * length(activation)) + length(w))
      colnames(beta[[i]]) <- c(colnames(bf$x[[i]]$model.matrix),
        paste0("b", 1:(nodes[i] * length(activation))), names(w))
      ## Rescale covariate columns (not the intercept) to [0,1], storing the
      ## ranges for predict.boost.net().
      Xn[[i]] <- bf$x[[i]]$model.matrix
      for(j in 2:k) {
        ## Bug fix: removed a stray '2' argument (apply() MARGIN leftover)
        ## that was included in the min/max computation and distorted the
        ## [0,1] rescaling.
        xmin <- min(Xn[[i]][, j], na.rm = TRUE)
        xmax <- max(Xn[[i]][, j], na.rm = TRUE)
        if((xmax - xmin) < sqrt(.Machine$double.eps)) {
          ## (Near-)constant column: avoid division by zero.
          xmin <- 0
          xmax <- 1
        }
        Xn[[i]][, j] <- (Xn[[i]][, j] - xmin) / (xmax - xmin)
        s01[[i]]$xmin <- c(s01[[i]]$xmin, xmin)
        s01[[i]]$xmax <- c(s01[[i]]$xmax, xmax)
      }
    } else {
      ## Intercept-only parameter: no network, plain mean updates.
      beta[[i]] <- matrix(0, nrow = maxit, ncol = 1)
      colnames(beta[[i]]) <- "(Intercept)"
      nodes[i] <- -1
    }
  }
  if(initialize & !has_offset) {
    ## Initialize all predictors with intercepts maximizing the joint
    ## log-likelihood (BFGS with analytic mean-score gradient).
    objfun <- function(par) {
      eta <- list()
      for(i in seq_along(nx))
        eta[[nx[i]]] <- rep(par[i], length = nobs)
      ll <- bf$family$loglik(y, bf$family$map2par(eta))
      return(ll)
    }
    gradfun <- function(par) {
      eta <- list()
      for(i in seq_along(nx))
        eta[[nx[i]]] <- rep(par[i], length = nobs)
      peta <- bf$family$map2par(eta)
      grad <- par
      for(j in nx) {
        score <- process.derivs(bf$family$score[[j]](y, peta, id = j), is.weight = FALSE)
        ## Bug fix: index by the loop variable 'j' ('grad[i]' used the stale
        ## index of an earlier loop, so only the last gradient element was
        ## ever filled in).
        grad[j] <- mean(score)
      }
      return(grad)
    }
    start <- init.eta(get.eta(bf$x), y, bf$family, nobs)
    start <- unlist(lapply(start, mean, na.rm = TRUE))
    opt <- optim(start, fn = objfun, gr = gradfun, method = "BFGS", control = list(fnscale = -1))
    eta <- list()
    for(i in nx) {
      beta[[i]][1, "(Intercept)"] <- as.numeric(opt$par[i])
      eta[[i]] <- rep(as.numeric(opt$par[i]), length = nobs)
    }
  } else {
    eta <- get.eta(bf$x)
  }
  if(has_offset) {
    for(i in nx) {
      eta[[i]] <- eta[[i]] + bf$model.frame[["(offset)"]][, i]
    }
  }
  logLik <- rep(0, maxit)
  logLik[1] <- bf$family$loglik(y, bf$family$map2par(eta))
  ia <- if(flush) interactive() else FALSE
  iter <- 2
  eps0 <- eps + 1
  ll_contrib <- rep(NA, np)
  names(ll_contrib) <- nx
  ll_contrib_save <- list()
  for(i in nx)
    ll_contrib_save[[i]] <- rep(0, maxit)
  par <- bpar <- Z <- list()
  tau2o <- rep(0.1, np)
  names(tau2o) <- nx
  edfn <- rep(NA, np)
  names(edfn) <- nx
  ptm <- proc.time()
  ## Main boosting loop: stop at maxit or when relative predictor change
  ## drops below eps.
  while(iter <= maxit & eps0 > eps) {
    eta0 <- eta
    ll0 <- bf$family$loglik(y, bf$family$map2par(eta))
    for(i in nx) {
      peta <- bf$family$map2par(eta)
      grad <- process.derivs(bf$family$score[[i]](y, peta, id = i), is.weight = FALSE)
      if(nodes[i] > 0) {
        ## Build the random hidden-layer design Z for this iteration.
        Z[[i]] <- NULL
        w <- list()
        k <- ncol(bf$x[[i]]$model.matrix)
        for(j in activation) {
          if(is.null(dropout)) {
            w[[j]] <- n.weights(nodes[i], k = ntake[i],
              rint = r[[j]], sint = s[[j]], type = j,
              x = Xn[[i]][sample(1:nobs, size = nodes[i], replace = FALSE), -1, drop = FALSE])
            Z[[i]] <- cbind(Z[[i]], nnet2Zmat(Xn[[i]], w[[j]], j))
          } else {
            ## Dropout: random subset of covariate columns per iteration.
            taken[[i]][iter, ] <- sample(2:k, size = ntake[i], replace = FALSE)
            w[[j]] <- n.weights(nodes[i], k = ntake[i],
              rint = r[[j]], sint = s[[j]], type = j,
              x = Xn[[i]][sample(1:nobs, size = nodes[i], replace = FALSE), taken[[i]][iter, ], drop = FALSE])
            Z[[i]] <- cbind(Z[[i]], nnet2Zmat(Xn[[i]][, c(1, taken[[i]][iter, ]), drop = FALSE], w[[j]], j))
          }
        }
        ## Prepend the (unpenalized) parametric part; penalize only the
        ## hidden-unit columns.
        Z[[i]] <- cbind(bf$x[[i]]$model.matrix, Z[[i]])
        S <- diag(c(rep(0, k), rep(1, ncol(Z[[i]]) - k)))
        ZZ <- crossprod(Z[[i]])
        if(is.null(lambda)) {
          ## Choose the ridge variance tau2 either by a gradient-fit AIC or
          ## by matching the target degrees of freedom df.
          fn <- function(tau2) {
            Si <- 1 / tau2 * S
            P <- matrix_inv(ZZ + Si)
            b <- drop(P %*% crossprod(Z[[i]], grad))
            fit <- Z[[i]] %*% b
            edf <- sum_diag(ZZ %*% P) - k
            ic <- if(is.null(df)) {
              sum((grad - fit)^2) + 2 * edf
            } else {
              (df - edf)^2
            }
            return(ic)
          }
          tau2o[i] <- tau2.optim(fn, tau2o[i], maxit = 1e+04, force.stop = FALSE)
        } else {
          tau2o[i] <- 1/lambda[i]
        }
        ## Damped ridge fit of the base learner to the gradient.
        S <- 1 / tau2o[i] * S
        P <- matrix_inv(ZZ + S)
        b <- nu[i] * drop(P %*% crossprod(Z[[i]], grad))
        par[[i]] <- c(b, unlist(w))
        bpar[[i]] <- b
        eta[[i]] <- eta[[i]] + Z[[i]] %*% b
        edfn[i] <- sum_diag(ZZ %*% P) - k
      } else {
        ## Intercept-only parameter: step towards the mean score.
        mgrad <- nu[i] * mean(grad)
        eta[[i]] <- eta[[i]] + mgrad
        par[[i]] <- bpar[[i]] <- mgrad
        Z[[i]] <- matrix(1, nrow = length(grad), ncol = 1)
      }
      ll1 <- bf$family$loglik(y, bf$family$map2par(eta))
      ## NOTE(review): if the step decreased the likelihood, the step length
      ## is damped for future iterations but the update itself is kept in
      ## eta -- confirm this is intended.
      if(ll1 < ll0) {
        nu[i] <- nu[i] * 0.9
        next
      }
      ll_contrib[i] <- ll1 - ll0
      if(select) {
        ## Component-wise selection: undo the update for now; the best
        ## parameter is re-applied below.
        eta[[i]] <- eta0[[i]]
      } else {
        ll_contrib_save[[i]][iter] <- ll_contrib[i]
        beta[[i]][iter, ] <- par[[i]]
      }
    }
    if(select) {
      i <- nx[which.max(ll_contrib)]
      beta[[i]][iter, ] <- par[[i]]
      eta[[i]] <- eta[[i]] + Z[[i]] %*% bpar[[i]]
      ll_contrib_save[[i]][iter] <- ll_contrib[i]
    }
    ## Relative change of all predictors as convergence measure.
    eps0 <- do.call("cbind", eta)
    eps0 <- mean(abs((eps0 - do.call("cbind", eta0)) / eps0), na.rm = TRUE)
    if(is.na(eps0) | !is.finite(eps0)) eps0 <- eps + 1
    ll <- bf$family$loglik(y, bf$family$map2par(eta))
    logLik[iter] <- ll
    iter <- iter + 1
    if(verbose) {
      cat(if(ia) "\r" else if(iter > 1) "\n" else NULL)
      vtxt <- paste(
        "logLik ", fmt(ll, width = 8, digits = digits),
        " edf ", paste(paste(nx, fmt(edfn, digits = 2, width = 4)), collapse = " "),
        " eps ", fmt(eps0, width = 6, digits = digits + 2),
        " iteration ", formatC(iter - 1L, width = nchar(maxit)), sep = "")
      cat(vtxt)
      if(.Platform$OS.type != "unix" & ia) flush.console()
    }
  }
  elapsed <- c(proc.time() - ptm)[3]
  if(verbose) {
    cat("\n")
    et <- if(elapsed > 60) {
      paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
    } else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
    cat("elapsed time: ", et, "\n", sep = "")
  }
  ## Truncate paths to the iterations actually run and collect scaling info.
  scale <- list()
  for(i in nx) {
    beta[[i]] <- beta[[i]][1L:(iter - 1L), , drop = FALSE]
    ll_contrib_save[[i]] <- cumsum(ll_contrib_save[[i]][1:(iter - 1L)])
    scale[[i]] <- attr(bf$x[[i]]$model.matrix, "scale")
    if(!is.null(dropout)) {
      taken[[i]] <- taken[[i]][1L:(iter - 1L), , drop = FALSE]
      ## Row 1 was never filled (boosting starts at iter = 2).
      taken[[i]][1L, ] <- taken[[i]][2L, ]
    }
  }
  rval <- list(
    "parameters" = beta,
    "fitted.values" = eta,
    "loglik" = data.frame("loglik" = logLik[1L:(iter - 1L)]),
    "family" = bf$family,
    "formula" = bf$formula,
    "nodes" = nodes,
    "elapsed" = elapsed,
    "activation" = activation,
    "scale" = scale,
    "s01" = s01,
    "taken" = taken,
    "ntake" = ntake,
    "dropout" = dropout
  )
  rval$loglik[["contrib"]] <- do.call("cbind", ll_contrib_save)
  rval$call <- match.call()
  class(rval) <- "boost.net"
  return(rval)
}
## Predict method for "boost.net" models: replays the boosting iterations,
## reconstructing each iteration's random-network design matrix from the
## stored weights, and accumulates the predictor contributions.
##
## Arguments:
##   object  - fitted "boost.net" object.
##   newdata - data to predict for (rebuilt via bamlss.frame).
##   model   - subset of distributional parameters (names matched against
##             family names).
##   cores   - number of cores for parallel accumulation over iterations.
##   mstop   - stop the replay after this many iterations.
##   matrix  - currently unused (kept for interface compatibility).
predict.boost.net <- function(object, newdata, model = NULL,
  verbose = FALSE, cores = 1, mstop = NULL, matrix = FALSE, ...)
{
  nx <- object$family$names
  formula <- object$formula
  ## Strip the response so newdata need not contain it.
  for(i in nx) {
    formula[[i]]$formula <- delete.response(formula[[i]]$formula)
    formula[[i]]$fake.formula <- delete.response(formula[[i]]$fake.formula)
  }
  bf <- bamlss.frame(formula, data = newdata, family = object$family)
  ## Re-apply the training-time centering/scaling and [0,1] rescaling.
  Xn <- list()
  for(i in nx) {
    if(!is.null(object$scale[[i]])) {
      for(j in 1:ncol(bf$x[[i]]$model.matrix)) {
        bf$x[[i]]$model.matrix[, j] <- (bf$x[[i]]$model.matrix[, j] - object$scale[[i]]$center[j]) / object$scale[[i]]$scale[j]
      }
    }
    if(!is.null(object$s01[[i]])) {
      Xn[[i]] <- bf$x[[i]]$model.matrix
      for(j in 1:length(object$s01[[i]]$xmin)) {
        Xn[[i]][, j + 1L] <- (Xn[[i]][, j + 1L] - object$s01[[i]]$xmin[j]) / (object$s01[[i]]$xmax[j] - object$s01[[i]]$xmin[j])
      }
    }
  }
  activation <- object$activation
  nodes <- object$nodes
  if(is.null(model))
    model <- nx
  for(j in seq_along(model))
    model[j] <- grep(model[j], nx, fixed = TRUE, value = TRUE)
  fit <- list()
  for(j in model) {
    fit[[j]] <- 0.0
    k <- ncol(bf$x[[j]]$model.matrix)
    ## Grouping factor splitting the stored hidden-layer weights into one
    ## chunk per node. Bug fix: use 'object$ntake[j]' -- the previous
    ## 'ntake[i]' read a stale loop index from the scaling loop above.
    if(!is.null(object$dropout))
      ind <- as.factor(sort(rep(rep(1:nodes[j]), object$ntake[j] + 1L)))
    else
      ind <- as.factor(sort(rep(rep(1:nodes[j]), k)))
    nr <- nrow(object$parameters[[j]])
    if(!is.null(mstop))
      nr <- min(c(nr, mstop))
    if(cores < 2) {
      ## Sequential replay over boosting iterations.
      for(i in 1:nr) {
        if(verbose)
          cat(i, "/", sep = "")
        if(nodes[j] > 0) {
          b <- object$parameters[[j]][i, 1:(k + nodes[j] * length(activation))]
          w <- object$parameters[[j]][i, -c(1:(k + nodes[j] * length(activation)))]
          Z <- NULL
          for(a in activation) {
            wa <- split(w[grep(a, names(w))], ind)
            if(is.null(object$dropout)) {
              Z <- cbind(Z, nnet2Zmat(Xn[[j]], wa, a))
            } else {
              Z <- cbind(Z, nnet2Zmat(Xn[[j]][, c(1, object$taken[[j]][i, ]), drop = FALSE], wa, a))
            }
          }
          Z <- cbind(bf$x[[j]]$model.matrix, Z)
          fit[[j]] <- fit[[j]] + drop(Z %*% b)
        } else {
          ## Intercept-only parameter: accumulate the stored step.
          fit[[j]] <- fit[[j]] + object$parameters[[j]][i, "(Intercept)"]
        }
      }
    } else {
      ## Parallel replay: distribute iterations over cores, then sum.
      jind <- split(1:nr, as.factor(sort(rep(1:cores, length.out = nr))))
      parallel_fun <- function(cid) {
        if(verbose)
          cat(j, ": started core", cid, "\n", sep = "")
        fit2 <- 0
        for(i in jind[[cid]]) {
          if(nodes[j] > 0) {
            b <- object$parameters[[j]][i, 1:(k + nodes[j] * length(activation))]
            w <- object$parameters[[j]][i, -c(1:(k + nodes[j] * length(activation)))]
            Z <- NULL
            for(a in activation) {
              wa <- split(w[grep(a, names(w))], ind)
              if(is.null(object$dropout)) {
                Z <- cbind(Z, nnet2Zmat(Xn[[j]], wa, a))
              } else {
                Z <- cbind(Z, nnet2Zmat(Xn[[j]][, c(1, object$taken[[j]][i, ]), drop = FALSE], wa, a))
              }
            }
            Z <- cbind(bf$x[[j]]$model.matrix, Z)
            fit2 <- fit2 + drop(Z %*% b)
          } else {
            fit2 <- fit2 + object$parameters[[j]][i, "(Intercept)"]
          }
        }
        if(verbose)
          cat(j, ": finished core", cid, "\n", sep = "")
        fit2
      }
      fit[[j]] <- parallel::mclapply(1:cores, parallel_fun, mc.cores = cores)
      fit[[j]] <- do.call("cbind", fit[[j]])
      fit[[j]] <- rowSums(fit[[j]])
    }
  }
  if(verbose)
    cat("\n")
  if(length(fit) < 2) {
    fit <- fit[[1L]]
  } else {
    fit <- as.data.frame(fit)
  }
  return(fit)
}
################################################################################
#### STOCHASTIC GRADIENT DESCENT ####
################################################################################
#### ## sgd fitter
#### sgdfit <- function(x, y, gammaFun = function(i) 1/i, shuffle = TRUE,
#### CFun = function(beta) diag(length(beta)),
#### start = rep(0, ncol(x)), i.state = 0, link = function(x) x) {
####
#### N <- length(y)
####
#### ## shuffle observations
#### shuffle <- if(shuffle) sample(1L:N) else 1L:N
####
#### ## Explicit SVG
#### beta <- start
#### betaXVec <- matrix(0, nrow = N, ncol = length(beta))
####
#### for (i in seq.int(N)) {
#### mu <- drop(link(beta %*% x[shuffle[i],]))
#### grad <- (y[shuffle[i]] - mu)
#### beta <- beta + gammaFun(i + i.state) * grad * drop(x[shuffle[i],] %*% CFun(beta))
#### betaXVec[i,] <- beta
#### }
####
#### rval <- list()
#### rval$shuffle <- shuffle
#### rval$coef <- beta
#### rval$y <- y
#### rval$x <- x
#### rval$i.state <- i
#### rval$diagnostics <- list("betaMat" = betaXVec)
#### class(rval) <- "sgdfit"
####
#### rval
#### }
####
## Implicit SGD
## Implicit stochastic gradient descent optimizer for bamlss models.
## One pass over the (optionally shuffled) observations; for each
## observation and each distributional parameter the implicit update size
## zeta is found as the root of a fixed-point equation (see make_zeta_fun),
## then the coefficients are updated along X' C(beta).
##
## Arguments:
##   gammaFun - learning-rate schedule as a function of the iteration index.
##   shuffle  - randomize the observation order.
##   CFun     - conditioning matrix for the update direction.
##   start    - optional starting coefficient vector.
##   i.state  - iteration offset (for warm restarts of the schedule).
## NOTE(review): 'weights' and 'offset' are accepted but currently unused.
opt_isgd <- function(x, y, family, weights = NULL, offset = NULL,
  gammaFun = function(i) 1/(1+i), shuffle = TRUE,
  CFun = function(beta) diag(length(beta)),
  start = NULL, i.state = 0) {
  ## constants
  nx <- family$names
  if(!all(nx %in% names(x)))
    stop("parameter names mismatch with family names!")
  N <- nrow(y)
  y <- as.matrix(y[[1]])
  ## shuffle observations
  shuffle <- if(shuffle) sample(1L:N) else 1L:N
  ## grep design matrices
  X <- sgd_grep_X(x)
  m <- sapply(X, ncol) ## number of columns in each design matrix
  ## make a list where each elements contains the indices for selecting the
  ## coefficients corresponding to a distributional parameter.
  rng <- list()
  for(j in 1:length(m)) {
    rng[[j]] <- seq(c(1, cumsum(m)[-length(m)] + 1)[j], cumsum(m)[j])
  }
  names(rng) <- names(m)
  ## Implicit SVG
  beta <- if(is.null(start)) rep(0, sum(m)) else start
  names(beta) <- do.call("c", lapply(X, colnames))
  ## Coefficient path: one row per processed observation.
  betaXVec <- matrix(0, nrow = N, ncol = length(beta))
  colnames(betaXVec) <- names(beta)
  ## grad and link functions
  gfun <- family$score
  lfun <- lapply(family$links, make.link)
  ## Per-parameter record of the implicit step sizes.
  zetaVec <- list()
  for(nxi in nx) zetaVec[[nxi]] <- numeric(N)
  ptm <- proc.time()
  for(i in seq.int(N)) {
    cat(sprintf(" * nobs %i\r", i))
    ## evaluate gammaFun for current iteration
    gamma <- gammaFun(i + i.state)
    ## predictor
    eta <- list()
    for(nxi in nx) {
      eta[[nxi]] <- drop(beta[rng[[nxi]]] %*% X[[nxi]][shuffle[i],])
    }
    for(nxi in nx) {
      ## find zeta: see slide 110 (Ioannis Big Data Course)
      ## XCX is the scalar x' C x of the current observation.
      XCX <- c(X[[nxi]][shuffle[i],, drop = FALSE] %*%
        CFun(beta[rng[[nxi]]]) %*%
        t(X[[nxi]][shuffle[i],, drop = FALSE]))
      zeta_fun <- make_zeta_fun(y = y[shuffle[i], , drop = FALSE],
        eta = eta, XCX = XCX, gfun = gfun,
        lfun = lfun, gamma = gamma, parname = nxi)
      upper <- .1
      lower <- -upper
      root <- tryCatch(uniroot(zeta_fun, c(lower, upper))$root,
        error = function(e) e)
      ## if the first try fails, the interval is enlarged 3 times,
      ## if no root is found zeta/root is set to 0.
      ierror <- 0
      while(inherits(root, "error")) {
        ierror <- ierror + 1
        if(ierror > 3) {
          root <- 0
        } else {
          lower <- lower * 10
          upper <- upper * 10
          root <- tryCatch(uniroot(zeta_fun, c(lower, upper))$root,
            error = function(e) e)
        }
      }
      zetaVec[[nxi]][i] <- root
      ## update beta, eta
      beta[rng[[nxi]]] <- beta[rng[[nxi]]] + c(root) * c(X[[nxi]][shuffle[i],] %*%
        CFun(beta[rng[[nxi]]]))
      eta[[nxi]] <- eta[[nxi]] + root * XCX
    }
    ## keep betapath
    betaXVec[i,] <- beta
  }
  elapsed <- c(proc.time() - ptm)[3]
  cat(sprintf("\n * runtime = %.3f\n", elapsed))
  rval <- list()
  rval$parameters <- betaXVec
  ## fitted values
  ## NOTE(review): 'eta' here holds only the predictor of the last processed
  ## observation, not fitted values for all observations -- confirm intent.
  rval$fitted.values <- eta
  ## summary
  sgdsum <- list()
  sgdsum$shuffle <- shuffle
  sgdsum$coef <- beta
  sgdsum$path <- betaXVec
  sgdsum$y <- y
  sgdsum$x <- x
  sgdsum$i.state <- i
  sgdsum$nobs <- N
  sgdsum$runtime <- elapsed
  sgdsum$zeta <- zetaVec
  class(sgdsum) <- "sgd.summary"
  rval$sgd.summary <- sgdsum
  rval
}
## Print method for "sgd.summary" objects: shows the final coefficient
## vector and returns the object invisibly.
print.sgd.summary <- function(x, ...) {
  print(x$coef)
  invisible(x)
}
## Plot method for "sgd.summary" objects: draws the coefficient paths stored
## in x$path (one line per coefficient) and returns the object invisibly.
plot.sgd.summary <- function(x, ...) {
  ## Bug fix: the summary object stores 'coef'/'path', not 'beta', so the
  ## previous length(x$beta) was always 0 and produced an empty palette.
  k <- length(x$coef)
  ## coef paths
  matplot(x$path, type = "l", col = colorspace::rainbow_hcl(k), lty = 1)
  invisible(x)
}
### helper functions
## Build the one-dimensional root function solved for the implicit SGD step
## size zeta of one distributional parameter: after shifting the parameter's
## predictor by zeta * XCX and mapping all predictors through their inverse
## links, the fixed-point residual gamma * score - zeta is returned.
make_zeta_fun <- function(y, eta, XCX, gfun, lfun, gamma, parname) {
  function(zeta) {
    eta[[parname]] <- eta[[parname]] + zeta * XCX
    par <- lapply(names(eta), function(nm) lfun[[nm]]$linkinv(eta[[nm]]))
    names(par) <- names(eta)
    gamma * gfun[[parname]](y, par) - zeta
  }
}
## Collect one combined design matrix per distributional parameter from a
## bamlss design list: the parametric model matrix (columns renamed to
## "<param>.p.<col>") column-bound with every smooth term's design matrix
## (columns renamed to "<param>.s.<term>.<k>").
sgd_grep_X <- function(x) {
  out <- list()
  for(pn in names(x)) {
    mm <- x[[pn]]$model.matrix
    colnames(mm) <- paste(pn, "p", colnames(mm), sep = ".")
    for(sn in names(x[[pn]]$smooth.construct)) {
      sm <- x[[pn]]$smooth.construct[[sn]]$X
      colnames(sm) <- paste(pn, "s", sn, 1L:ncol(sm), sep = ".")
      mm <- cbind(mm, sm)
    }
    out[[pn]] <- mm
  }
  return(out)
}
#opt_sgd.ff <- function(x, y, family, weights = NULL, offset = NULL,
# gammaFun = function(i) 1/(1+i),
# shuffle = TRUE, start = NULL, i.state = 0,
# batch = 1L)
#{
# nx <- family$names
# if(!all(nx %in% names(x)))
# stop("parameter names mismatch with family names!")
# N <- nrow(y)
# y <- y[[1]]
# ## Shuffle observations.
# shuffle_id <- NULL
# for(i in bamlss_chunk(y)) {
# ind <- i[1]:i[2]
# shuffle_id <- ffbase::ffappend(shuffle_id, if(shuffle) sample(ind) else ind)
# }
# if(!is.null(start))
# start <- unlist(start)
# beta <- list()
# for(i in nx) {
# beta[[i]] <- list()
# if(!is.null(x[[i]]$model.matrix)) {
# if(!is.null(start)) {
# beta[[i]][["p"]] <- start[paste0(i, ".p.", colnames(x[[i]]$model.matrix))]
# } else {
# beta[[i]][["p"]] <- rep(0, ncol(x[[i]]$model.matrix))
# }
# names(beta[[i]][["p"]]) <- colnames(x[[i]]$model.matrix)
# }
# if(!is.null(x[[i]]$smooth.construct)) {
# for(j in names(x[[i]]$smooth.construct)) {
# ncX <- ncol(x[[i]]$smooth.construct[[j]]$X)
# if(is.null(start)) {
# beta[[i]][[paste0("s.", j)]] <- rep(0, ncX)
# } else {
# beta[[i]][[paste0("s.", j)]] <- start[paste0(i, ".s.", j, ".b", 1:ncX)]
# }
# names(beta[[i]][[paste0("s.", j)]]) <- paste0("b", 1:ncX)
# }
# }
# }
# ## Init eta.
# k <- batch
# eta <- list()
# for(i in nx) {
# eta[[i]] <- 0
# if(!is.null(x[[i]]$model.matrix))
# eta[[i]] <- eta[[i]] + sum(beta[[i]][["p"]] * x[[i]]$model.matrix[shuffle_id[1:k], ])
# if(!is.null(x[[i]]$smooth.construct)) {
# for(j in names(x[[i]]$smooth.construct)) {
# eta[[i]] <- eta[[i]] + sum(beta[[i]][[paste0("s.", j)]] * x[[i]]$smooth.construct[[j]]$X[shuffle_id[1:k], ])
# }
# }
# }
# iter <- 1L
# ptm <- proc.time()
# while(k <= N) {
# cat(sprintf(" * nobs %i\r", k))
# take <- (k - batch + 1L):k
# ## Evaluate gammaFun for current iteration.
# gamma <- gammaFun(iter + i.state)
# ## Extract response.
# yn <- y[shuffle_id[take]]
# for(i in nx) {
# eta[[i]] <- 0
# if(!is.null(x[[i]]$model.matrix))
# eta[[i]] <- eta[[i]] + sum(beta[[i]][["p"]] * x[[i]]$model.matrix[shuffle_id[take], ])
# if(!is.null(x[[i]]$smooth.construct)) {
# for(j in names(x[[i]]$smooth.construct)) {
# eta[[i]] <- eta[[i]] + sum(beta[[i]][[paste0("s.", j)]] * x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], ])
# }
# }
# ## Linear part.
# if(!is.null(x[[i]]$model.matrix)) {
# Xn <- x[[i]]$model.matrix[shuffle_id[take], , drop = FALSE]
# rn <- gamma * family$score[[i]](yn, family$map2par(eta))
# foo <- function(zeta) {
# eta[[i]] <- eta[[i]] + drop(Xn %*% (t(Xn) %*% zeta))
# rval <- gamma * family$score[[i]](yn, family$map2par(eta)) - zeta
# rval
# }
# zeta <- multiroot(foo, start = rn)
# zeta <- zeta$root
# beta[[i]][["p"]] <- beta[[i]][["p"]] + drop(t(Xn) %*% zeta)
# eta[[i]] <- drop(x[[i]]$model.matrix[shuffle_id[take], , drop = FALSE] %*% beta[[i]][["p"]])
# if(!is.null(x[[i]]$smooth.construct)) {
# for(j in names(x[[i]]$smooth.construct)) {
# eta[[i]] <- eta[[i]] + drop(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE] %*% beta[[i]][[paste0("s.", j)]])
# }
# }
# }
# ## Nonlinear.
# if(!is.null(x[[i]]$smooth.construct)) {
# for(j in names(x[[i]]$smooth.construct)) {
# Xn <- x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE]
# rn <- gamma * family$score[[i]](yn, family$map2par(eta))
# foo <- function(zeta) {
# eta[[i]] <- eta[[i]] + drop(Xn %*% (t(Xn) %*% zeta))
# rval <- gamma * family$score[[i]](yn, family$map2par(eta)) - zeta
# rval
# }
# zeta <- multiroot(foo, start = rn)
# zeta <- zeta$root
# ## Update beta, eta.
# beta[[i]][[paste0("s.", j)]] <- beta[[i]][[paste0("s.", j)]] + drop(t(Xn) %*% zeta)
# eta[[i]] <- 0
# if(!is.null(x[[i]]$model.matrix))
# eta[[i]] <- eta[[i]] + drop(x[[i]]$model.matrix[shuffle_id[take], , drop = FALSE] %*% beta[[i]][["p"]])
# for(jj in names(x[[i]]$smooth.construct)) {
# eta[[i]] <- eta[[i]] + drop(x[[i]]$smooth.construct[[jj]]$X[shuffle_id[take], , drop = FALSE] %*% beta[[i]][[paste0("s.", jj)]])
# }
# }
# }
# }
# k <- k + batch
# iter <- iter + 1L
# }
# elapsed <- c(proc.time() - ptm)[3]
# cat(sprintf("\n * runtime = %.3f\n", elapsed))
# rval <- list()
# rval$parameters <- unlist(beta)
# rval$fitted.values <- eta
# rval$shuffle <- shuffle
# rval$runtime <- elapsed
# rval
#}
## Batchwise backfitting optimizer for bamlss models (opt_bbfit/bbfit).
##
## Fits all model terms of all distribution parameters by iteratively
## weighted least squares updates computed on data batches. Each update
## is evaluated on a second "out-of-sample" batch and only accepted if
## it improves the objective (log-likelihood, AIC, or MSE of the
## working response, depending on the '...' switches).
##
## Arguments:
##   x       - bamlss term list (one entry per distribution parameter,
##             each with $model.matrix and/or $smooth.construct).
##   y       - response (vector, data.frame, or ff object for big data).
##   family  - bamlss family providing $names, $score, $hess, $loglik,
##             $map2par, optionally $initialize and $links.
##   shuffle - shuffle observations before batching?
##   start   - optional named vector/list of starting values.
##   offset  - optional matrix of fixed offsets, one column per parameter.
##   epochs  - number of passes through all batches.
##   nbatch  - number of batches; a fraction in (0, 1) switches to
##             random sub-sampling.
##   ...     - tuning switches, extracted below (aic, loglik, eps_loglik,
##             select, lasso, OL, initialize, K, always, slice, nu,
##             batch_ids).
##
## Returns a list with $parameters, $fitted.values, $parpaths (the
## coefficient paths per iteration), $edf, and bookkeeping fields.
opt_bbfit <- bbfit <- function(x, y, family, shuffle = TRUE, start = NULL, offset = NULL,
epochs = 1, nbatch = 10, verbose = TRUE, ...)
{
## Paper: https://openreview.net/pdf?id=ryQu7f-RZ
## https://www.ijcai.org/proceedings/2018/0753.pdf
## Extract optional switches from '...'; each falls back to its default
## when not supplied. loglik = TRUE overrides aic.
aic <- list(...)$aic
loglik <- list(...)$loglik
if(is.null(loglik))
loglik <- FALSE
if(loglik)
aic <- FALSE
if(is.null(aic))
aic <- FALSE
## Minimum relative log-likelihood improvement for accepting an update.
eps_loglik <- list(...)$eps_loglik
if(is.null(eps_loglik))
eps_loglik <- 0.001
## select = TRUE: boosting-like mode, only the best term per iteration
## is updated.
select <- list(...)$select
if(is.null(select))
select <- FALSE
lasso <- list(...)$lasso
if(is.null(lasso))
lasso <- FALSE
## OL: lasso-only penalty, the terms' own penalty matrices are dropped.
OL <- list(...)$OL
if(is.null(OL))
OL <- FALSE
if(OL)
lasso <- TRUE
initialize <- list(...)$initialize
if(is.null(initialize))
initialize <- TRUE
## K: AIC penalty weight per effective degree of freedom.
K <- list(...)$K
if(is.null(K))
K <- 2
## always = TRUE accepts every update regardless of improvement.
always <- list(...)$always
if(is.null(always))
always <- FALSE
slice <- list(...)$slice
if(is.null(slice))
slice <- FALSE
## nu: step-length (learning rate) of each accepted update.
nu <- if(is.null(list(...)$nu)) 0.05 else list(...)$nu
sslice <- NULL
## A numeric 'slice' gives the iteration after which slice sampling of
## the smoothing variances starts; until then plain optimization is
## used with forced acceptance and full steps.
if(!is.logical(slice)) {
sslice <- slice
eps_loglik <- -Inf
always <- TRUE
nu <- 1
slice <- FALSE
}
if(slice) {
eps_loglik <- -Inf
always <- TRUE
nu <- 1
}
nx <- family$names
if(!all(nx %in% names(x)))
stop("parameter names mismatch with family names!")
N <- nrow(y)
## batch_ids may be supplied directly; a length-2 numeric is read as
## c(batch size, number of batches) and expanded to random index sets.
batch_ids <- list(...)$batch_ids
if(!is.null(batch_ids)) {
if(!is.list(batch_ids)) {
if(length(batch_ids) == 2L) {
yind <- 1:N
nb <- floor(batch_ids[1])
ni <- batch_ids[2]
batch_ids <- vector(mode = "list", length = ni)
for(i in 1:ni)
batch_ids[[i]] <- sample(yind, size = nb, replace = FALSE)
rm(yind)
}
}
}
## Fractional nbatch triggers random sub-sampling instead of fixed
## contiguous batches.
random <- all(nbatch < 1) & all(nbatch > 0)
batch_select <- srandom <- samp_ids <- FALSE
if(is.null(batch_ids)) {
if(!random) {
## Split 1:N into nbatch contiguous index ranges, stored as
## c(first, last) pairs.
batch <- floor(seq.int(1, N, length.out = nbatch + 1L)[-1])
batch[length(batch)] <- N
batch <- as.list(batch)
start0 <- 1L
for(i in 1:length(batch)) {
batch[[i]] <- c(start0, batch[[i]])
start0 <- batch[[i]][-1L] + 1L
}
} else {
if(length(nbatch) < 2L) {
batch <- floor(N * nbatch)
batch <- list(c(1, batch), c(batch + 1L, N))
} else {
## Two fractions: sample fit and eval batches fresh each iteration.
batch <- list(nbatch)
srandom <- TRUE
samp_ids <- 1:N
}
}
} else {
if(is.factor(batch_ids)) {
batch <- split(1:N, batch_ids)
batch <- lapply(batch, range)
} else {
if(is.list(batch_ids)) {
batch <- batch_ids
rm(batch_ids)
}
}
if(!is.list(batch))
stop("argument batch_ids specified wrong!")
nbatch <- length(batch)
batch_select <- TRUE
}
if(!is.null(dim(y))) {
if(ncol(y) < 2)
y <- y[[1]]
}
## noff: TRUE when y fits in memory (not an ff big-data object).
noff <- !inherits(y, "ff") #
if(!is.null(start))
start <- unlist(start)
## State containers, one entry per distribution parameter:
## beta = coefficients, tau2 = smoothing variances, eta/etas = linear
## predictors on fit/eval batch, medf = edf history, parm = coefficient
## paths, LLC = log-likelihood contributions (select mode).
beta <- eta <- etas <- tau2 <- ll_contrib <- medf <- parm <- LLC <- ionly <- list()
for(i in nx) {
beta[[i]] <- list()
tau2[[i]] <- list()
medf[[i]] <- list()
parm[[i]] <- list()
LLC[[i]] <- list()
ll_contrib[[i]] <- list()
eta[[i]] <- etas[[i]] <- 0
if(!is.null(x[[i]]$model.matrix)) {
ll_contrib[[i]][["p"]] <- medf[[i]][["p.edf"]] <- NA
LLC[[i]][["p"]] <- 0
parm[[i]][["p"]] <- matrix(nrow = 0, ncol = ncol(x[[i]]$model.matrix))
colnames(parm[[i]][["p"]]) <- colnames(x[[i]]$model.matrix)
## ionly: parametric part is an intercept only (ridge penalty is then
## effectively switched off for it).
if(ncol(x[[i]]$model.matrix) < 2) {
if(colnames(x[[i]]$model.matrix) == "(Intercept)")
ionly[[i]] <- TRUE
} else {
ionly[[i]] <- FALSE
}
beta[[i]][["p"]] <- rep(0, ncol(x[[i]]$model.matrix))
names(beta[[i]][["p"]]) <- colnames(x[[i]]$model.matrix)
start_ok <- FALSE
if(!is.null(start)) {
start2 <- start[paste0(i, ".p.", colnames(x[[i]]$model.matrix))]
start2 <- start2[!is.na(start2)]
if(length(start2)) {
start_ok <- TRUE
names(start2) <- gsub(paste0(i, ".p."), "", names(start2))
beta[[i]][["p"]][names(start2)] <- start2
}
}
## Without user starting values, initialize the intercept from the
## family's initialize function on the first batch.
if(!start_ok) {
if(!is.null(family$initialize) & is.null(offset) & initialize) {
if(noff) {
shuffle_id <- sample(seq_len(N))
} else {
shuffle_id <- NULL
for(ii in bamlss_chunk(y)) {
shuffle_id <- ffappend(shuffle_id, if(shuffle) sample(ii) else ii)
}
}
if(!srandom) {
take <- if(length(batch[[1L]]) > 2) batch[[1L]] else batch[[1L]][1L]:batch[[1L]][2L]
} else {
take <- sample(samp_ids, floor(batch[[1L]][1L] * N))
}
if(is.null(dim(y))) {
yn <- y[shuffle_id[take]]
} else {
yn <- y[shuffle_id[take], , drop = FALSE]
}
if(i %in% names(family$initialize)) {
if(any(is.na(yn))) {
stop("NA values in response, please check!")
}
yinit <- make.link2(family$links[i])$linkfun(family$initialize[[i]](yn))
beta[[i]][["p"]]["(Intercept)"] <- mean(yinit, na.rm = TRUE)
}
}
}
names(beta[[i]][["p"]]) <- colnames(x[[i]]$model.matrix)
}
if(!is.null(x[[i]]$smooth.construct)) {
for(j in names(x[[i]]$smooth.construct)) {
if(!is.null(x[[i]]$smooth.construct[[j]]$orig.class))
class(x[[i]]$smooth.construct[[j]]) <- x[[i]]$smooth.construct[[j]]$orig.class
ll_contrib[[i]][[paste0("s.", j)]] <- medf[[i]][[paste0("s.", j, ".edf")]] <- -1
LLC[[i]][[paste0("s.", j)]] <- 0
ncX <- ncol(x[[i]]$smooth.construct[[j]]$X)
if(is.null(x[[i]]$smooth.construct[[j]]$xt$center))
x[[i]]$smooth.construct[[j]]$xt$center <- TRUE
## "nnet0.smooth" terms carry their own parameterization; the
## effective number of coefficients differs from ncol(X).
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
tpar <- x[[i]]$smooth.construct[[j]]$state$parameters
tpar <- tpar[!grepl("tau2", names(tpar))]
ncX <- length(tpar)
}
if(OL) {
x[[i]]$smooth.construct[[j]]$S <- list()
}
ncS <- length(x[[i]]$smooth.construct[[j]]$S) + if(lasso) 1L else 0L
parm[[i]][[paste0("s.", j)]] <- matrix(nrow = 0L, ncol = ncX + ncS + 1L)
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
tpar <- x[[i]]$smooth.construct[[j]]$state$parameters
tpar <- tpar[!grepl("tau2", names(tpar))]
colnames(parm[[i]][[paste0("s.", j)]]) <- c(names(tpar), paste0("tau2", 1:ncS), "edf")
} else {
colnames(parm[[i]][[paste0("s.", j)]]) <- c(paste0("b", 1:ncX), paste0("tau2", 1:ncS), "edf")
}
## Lasso adds an adaptive-weight penalty matrix as a function of the
## current coefficients (approximate absolute-value penalty).
if(lasso) {
lS <- length(x[[i]]$smooth.construct[[j]]$S)
x[[i]]$smooth.construct[[j]]$S[[lS + 1]] <- function(parameters, ...) {
b <- get.par(parameters, "b")
A <- 1 / sqrt(b^2 + 1e-05)
A <- if(length(A) < 2) matrix(A, 1, 1) else diag(A)
A
}
attr(x[[i]]$smooth.construct[[j]]$S[[lS + 1]], "npar") <- ncX
}
## Calibrate starting smoothing variances so that each smooth term
## begins with a moderate number of effective degrees of freedom.
if(!inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
if(noff) {
shuffle_id <- sample(1:N)
} else {
shuffle_id <- NULL
for(ii in bamlss_chunk(y)) {
shuffle_id <- ffappend(shuffle_id, if(shuffle) sample(ii) else ii)
}
}
if(!srandom) {
if(length(batch[[1L]]) < 3)
take <- batch[[1L]][1L]:batch[[1L]][2L]
else
take <- batch[[1L]]
} else {
take <- sample(samp_ids, floor(batch[[1L]][1L] * N))
}
Xn <- x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE]
XX <- crossprod(Xn)
## Objective: squared distance of the term's edf from a small target
## (4, or 10 for very large bases).
objfun1 <- function(tau2, retedf = FALSE) {
S <- 0
for(l in seq_along(x[[i]]$smooth.construct[[j]]$S)) {
S <- S + 1 / tau2[l] * if(is.function(x[[i]]$smooth.construct[[j]]$S[[l]])) {
x[[i]]$smooth.construct[[j]]$S[[l]](c("b" = rep(0, ncol(XX))))
} else {
x[[i]]$smooth.construct[[j]]$S[[l]]
}
}
edf <- tryCatch(sum_diag(XX %*% matrix_inv(XX + S)), error = function(e) { 2 * ncX })
if(retedf) {
return(edf)
} else {
tedf <- if(ncX <= 40) {
4
} else {
10
}
return((tedf - edf)^2)
}
}
tau2[[i]][[j]] <- rep(0.1, length(x[[i]]$smooth.construct[[j]]$S))
opt <- try(tau2.optim(objfun1, start = tau2[[i]][[j]],
scale = 10000, optim = TRUE), silent = TRUE)
if(!inherits(opt, "try-error")) {
cat(" .. df", i, "term", x[[i]]$smooth.construct[[j]]$label,
objfun1(opt, retedf = TRUE), "\n")
tau2[[i]][[j]] <- opt
} else {
cat(" .. df", i, "term", x[[i]]$smooth.construct[[j]]$label, "-1\n")
tau2[[i]][[j]] <- rep(1, length(x[[i]]$smooth.construct[[j]]$S))
}
} else {
tau2[[i]][[j]] <- rep(100, length(x[[i]]$smooth.construct[[j]]$S))
}
## Starting values for the term's coefficients (user-supplied or 0).
start_ok <- FALSE
if(!is.null(start)) {
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
start2 <- start[grep(paste0(i, ".s.", j, "."), names(start), fixed = TRUE)]
start2 <- start2[!grepl("tau2", names(start2))]
} else {
start2 <- start[paste0(i, ".s.", j, ".b", 1:ncX)]
}
if(!all(is.na(start2))) {
if(any(is.na(start2)))
stop("dimensions do not match, check starting values!")
start_ok <- TRUE
beta[[i]][[paste0("s.", j)]] <- if(all(is.na(start2))) rep(0, ncX) else start2
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
npar <- x[[i]]$smooth.construct[[j]]$state$parameters
npar <- npar[!grepl("tau2", names(npar))]
names(beta[[i]][[paste0("s.", j)]]) <- names(npar)
}
}
}
if(!start_ok) {
beta[[i]][[paste0("s.", j)]] <- rep(0, ncX)
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
npar <- x[[i]]$smooth.construct[[j]]$state$parameters
npar <- npar[!grepl("tau2", names(npar))]
beta[[i]][[paste0("s.", j)]] <- npar
}
}
if(!inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
names(beta[[i]][[paste0("s.", j)]]) <- paste0("b", 1:ncX)
}
## Inverse-gamma prior on the smoothing variances (used by the slice
## sampler and the penalized log-likelihood objective).
x[[i]]$smooth.construct[[j]]$xt[["prior"]] <- "ig"
x[[i]]$smooth.construct[[j]]$xt[["a"]] <- 0.0001
x[[i]]$smooth.construct[[j]]$xt[["b"]] <- 10
priors <- make.prior(x[[i]]$smooth.construct[[j]])
x[[i]]$smooth.construct[[j]]$prior <- priors$prior
x[[i]]$smooth.construct[[j]]$grad <- priors$grad
x[[i]]$smooth.construct[[j]]$hess <- priors$hess
}
}
}
## tbeta holds candidate coefficients in select mode; tau2f the ridge
## variance of the parametric parts.
tbeta <- if(select) beta else NA
tau2f <- rep(1, length(nx))
names(tau2f) <- nx
iter <- 1L
ptm <- proc.time()
## Main loop: epochs over batches over parameters over terms.
for(ej in 1:epochs) {
if(verbose & (epochs > 1))
cat("starting epoch", ej, "\n")
## Shuffle observations.
if(!batch_select) {
if(noff) {
shuffle_id <- sample(1:N)
} else {
shuffle_id <- NULL
for(ii in bamlss_chunk(y)) {
shuffle_id <- ffappend(shuffle_id, if(shuffle) sample(ii) else ii)
}
}
} else {
shuffle_id <- 1:N
}
edf <- NA
bind <- if(!random & !srandom) {
seq_along(batch)
} else 1L
for(bid in bind) {
## take = indices of the fit batch, take2 = indices of the
## out-of-sample evaluation batch (the neighbouring batch, or a
## fresh random sample in srandom mode).
if(!srandom) {
if(length(batch[[bid]]) > 2) {
take <- batch[[bid]]
take2 <- if(bid < 2) {
batch[[bid + 1L]]
} else {
batch[[bid - 1L]]
}
} else {
take <- batch[[bid]][1L]:batch[[bid]][2L]
take2 <- if(bid < 2) {
batch[[bid + 1L]][1L]:batch[[bid + 1L]][2L]
} else {
batch[[bid - 1L]][1L]:batch[[bid - 1L]][2L]
}
}
} else {
take <- sample(samp_ids, floor(batch[[bid]][1L] * N))
take2 <- sample(samp_ids, floor(batch[[bid]][2L] * N))
}
## Extract responses.
if(is.null(dim(y))) {
yn <- y[shuffle_id[take]]
yt <- y[shuffle_id[take2]]
} else {
yn <- y[shuffle_id[take], , drop = FALSE]
yt <- y[shuffle_id[take2], , drop = FALSE]
}
## Rebuild the linear predictors on both batches from the current
## coefficients (eta = fit batch, etas = eval batch).
for(i in nx) {
eta[[i]] <- etas[[i]] <- 0
if(!is.null(x[[i]]$model.matrix)) {
eta[[i]] <- eta[[i]] + drop(x[[i]]$model.matrix[shuffle_id[take], , drop = FALSE] %*% beta[[i]][["p"]])
etas[[i]] <- etas[[i]] + drop(x[[i]]$model.matrix[shuffle_id[take2], , drop = FALSE] %*% beta[[i]][["p"]])
}
if(!is.null(x[[i]]$smooth.construct)) {
for(j in names(x[[i]]$smooth.construct)) {
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
eta[[i]] <- eta[[i]] + x[[i]]$smooth.construct[[j]]$fit.fun(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE],
beta[[i]][[paste0("s.", j)]])
etas[[i]] <- etas[[i]] + x[[i]]$smooth.construct[[j]]$fit.fun(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take2], , drop = FALSE],
beta[[i]][[paste0("s.", j)]])
} else {
eta[[i]] <- eta[[i]] + xcenter(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE] %*% beta[[i]][[paste0("s.", j)]])
etas[[i]] <- etas[[i]] + xcenter(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take2], , drop = FALSE] %*% beta[[i]][[paste0("s.", j)]])
}
}
}
if(!is.null(offset)) {
if(i %in% colnames(offset)) {
eta[[i]] <- eta[[i]] + offset[shuffle_id[take], i]
etas[[i]] <- etas[[i]] + offset[shuffle_id[take2], i]
}
}
}
## Keep the predictors before the updates for the convergence check.
eta00 <- eta
edf <- 0
for(i in nx) {
## Linear part.
if(!is.null(x[[i]]$model.matrix)) {
Xn <- x[[i]]$model.matrix[shuffle_id[take], , drop = FALSE]
Xt <- x[[i]]$model.matrix[shuffle_id[take2], , drop = FALSE]
peta <- family$map2par(eta)
petas <- family$map2par(etas)
ll0 <- family$loglik(yt, petas)
score <- process.derivs(family$score[[i]](yn, peta, id = i), is.weight = FALSE)
hess <- process.derivs(family$hess[[i]](yn, peta, id = i), is.weight = TRUE)
scores <- process.derivs(family$score[[i]](yt, petas, id = i), is.weight = FALSE)
hesss <- process.derivs(family$hess[[i]](yt, petas, id = i), is.weight = TRUE)
b0 <- beta[[i]][["p"]]
eta_0 <- eta[[i]]
etas_0 <- etas[[i]]
## Working responses (IWLS) on both batches.
z <- eta[[i]] + 1/hess * score
zs <- etas[[i]] + 1/hesss * scores
eta[[i]] <- eta[[i]] - drop(Xn %*% b0)
e <- z - eta[[i]]
XWX <- crossprod(Xn * hess, Xn)
## Ridge penalty; the intercept is (effectively) unpenalized.
I <- diag(1, ncol(XWX))
I[1, 1] <- 1e-10
if(!ionly[[i]]) {
if(ncol(I) > 1)
I[1, 1] <- 0
}
etas[[i]] <- etas[[i]] - drop(Xt %*% b0)
## Out-of-sample objective as a function of the ridge variance.
objfun2 <- function(tau2, retLL = FALSE, step = FALSE) {
P <- matrix_inv(XWX + 1/tau2 * I)
if(step) {
b <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
} else {
b <- drop(P %*% crossprod(Xn * hess, e))
}
etas[[i]] <- etas[[i]] + drop(Xt %*% b)
if(retLL) {
return(family$loglik(yt, family$map2par(etas)))
}
if(aic | loglik) {
if(aic) {
iedf <- sum_diag(XWX %*% P)
ll <- -2 * family$loglik(yt, family$map2par(etas)) + K * iedf
} else {
ll <- -1 * family$loglik(yt, family$map2par(etas))
}
} else {
ll <- mean((zs - etas[[i]])^2, na.rm = TRUE)
}
return(ll)
}
if(ionly[[i]]) {
tau2fe <- 1e+10
} else {
tau2fe <- try(tau2.optim(objfun2, tau2f[i], optim = TRUE), silent = TRUE)
}
ll_contrib[[i]][["p"]] <- NA
## Accept the update only if the out-of-sample log-likelihood
## improves by at least eps_loglik (relative), or always = TRUE.
if(!inherits(tau2fe, "try-error")) {
ll1 <- objfun2(tau2fe, retLL = TRUE, step = TRUE)
epsll <- (ll1 - ll0)/abs(ll0)
if(is.na(epsll)) {
ll1 <- ll0 <- 1
epsll <- -1
}
accept <- epsll >= -0.5
if((((ll1 > ll0) & (epsll > eps_loglik)) | always) & accept) {
tau2f[i] <- tau2fe
P <- matrix_inv(XWX + 1/tau2f[i] * I)
if(select) {
tbeta[[i]][["p"]] <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
} else {
beta[[i]][["p"]] <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
}
tedf <- sum_diag(XWX %*% P)
edf <- edf + tedf
ll_contrib[[i]][["p"]] <- ll1 - ll0
medf[[i]][["p.edf"]] <- c(medf[[i]][["p.edf"]], tedf)
}
}
## In select mode the predictors are restored; the best term is
## added only after all terms have been scored.
if(!select) {
eta[[i]] <- eta[[i]] + drop(Xn %*% beta[[i]][["p"]])
etas[[i]] <- etas[[i]] + drop(Xt %*% beta[[i]][["p"]])
} else {
eta[[i]] <- eta_0
etas[[i]] <- etas_0
}
}
## Nonlinear.
if(!is.null(x[[i]]$smooth.construct)) {
for(j in names(x[[i]]$smooth.construct)) {
Xn <- x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE]
Xt <- x[[i]]$smooth.construct[[j]]$X[shuffle_id[take2], , drop = FALSE]
b0 <- beta[[i]][[paste0("s.", j)]]
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
Xn <- x[[i]]$smooth.construct[[j]]$getZ(Xn, b0)
Xt <- x[[i]]$smooth.construct[[j]]$getZ(Xt, b0)
b0 <- b0[1:ncol(Xn)]
}
eta_0 <- eta[[i]]
etas_0 <- etas[[i]]
peta <- family$map2par(eta)
petas <- family$map2par(etas)
ll0 <- family$loglik(yt, petas)
score <- process.derivs(family$score[[i]](yn, peta, id = i), is.weight = FALSE)
hess <- process.derivs(family$hess[[i]](yn, peta, id = i), is.weight = TRUE)
scores <- process.derivs(family$score[[i]](yt, petas, id = i), is.weight = FALSE)
hesss <- process.derivs(family$hess[[i]](yt, petas, id = i), is.weight = TRUE)
z <- eta[[i]] + 1/hess * score
zs <- etas[[i]] + 1/hesss * scores
## Remove the term's current contribution to obtain partial
## residuals e.
if(x[[i]]$smooth.construct[[j]]$xt$center) {
eta[[i]] <- eta[[i]] - xcenter(Xn %*% b0)
} else {
eta[[i]] <- eta[[i]] - drop(Xn %*% b0)
}
e <- z - eta[[i]]
if(x[[i]]$smooth.construct[[j]]$xt$center) {
etas[[i]] <- etas[[i]] - xcenter(Xt %*% b0)
} else {
etas[[i]] <- etas[[i]] - drop(Xt %*% b0)
}
wts <- NULL
## nnet0 terms re-sample their inner weights and rebuild the basis.
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
wts <- unlist(x[[i]]$smooth.construct[[j]]$sample_weights(
x = x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], -1, drop = FALSE],
y = e, weights = hess, wts = beta[[i]][[paste0("s.", j)]])
)
Xn <- x[[i]]$smooth.construct[[j]]$getZ(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE], wts)
Xt <- x[[i]]$smooth.construct[[j]]$getZ(x[[i]]$smooth.construct[[j]]$X[shuffle_id[take2], , drop = FALSE], wts)
}
XWX <- crossprod(Xn * hess, Xn)
## Out-of-sample objective as a function of the term's smoothing
## variances tau2.
objfun3 <- function(tau2, retLL = FALSE, step = FALSE) {
S <- 0
for(l in 1:length(tau2)) {
S <- S + 1/tau2[l] * if(is.function(x[[i]]$smooth.construct[[j]]$S[[l]])) {
x[[i]]$smooth.construct[[j]]$S[[l]](c(b0, x[[i]]$smooth.construct[[j]]$fixed.hyper))
} else {
x[[i]]$smooth.construct[[j]]$S[[l]]
}
}
P <- matrix_inv(XWX + S)
if(step) {
b <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
} else {
b <- drop(P %*% crossprod(Xn * hess, e))
}
if(x[[i]]$smooth.construct[[j]]$xt$center) {
etas[[i]] <- etas[[i]] + xcenter(Xt %*% b)
} else {
etas[[i]] <- etas[[i]] + drop(Xt %*% b)
}
if(retLL) {
return(family$loglik(yt, family$map2par(etas)))
}
if(aic | loglik) {
if(aic) {
iedf <- sum_diag(XWX %*% P)
ll <- -2 * family$loglik(yt, family$map2par(etas)) + K * iedf
} else {
names(b) <- paste0("b", 1:length(b))
names(tau2) <- paste0("tau2", 1:length(tau2))
ll <- -1 * (family$loglik(yt, family$map2par(etas)) + x[[i]]$smooth.construct[[j]]$prior(c(b, tau2)))
}
} else {
ll <- mean((zs - etas[[i]])^2, na.rm = TRUE)
}
return(ll)
}
## Switch to slice sampling of tau2 after iteration sslice.
if(!is.null(sslice)) {
if(iter > sslice)
slice <- TRUE
}
if(!slice) {
tau2s <- try(tau2.optim(objfun3, tau2[[i]][[j]], optim = TRUE), silent = TRUE)
} else {
theta <- c(b0, "tau2" = tau2[[i]][[j]])
ii <- grep("tau2", names(theta))
logP <- function(g, ...) {
-1 * objfun3(get.par(g, "tau2"))
}
sok <- TRUE
for(jj in ii) {
theta <- try(uni.slice(theta, x[[i]]$smooth.construct[[j]], family, NULL,
NULL, i, jj, logPost = logP, lower = 0, ll = ll0), silent = TRUE)
if(inherits(theta, "try-error")) {
sok <- FALSE
break
}
}
if(sok) {
tau2s <- as.numeric(get.par(theta, "tau2"))
} else {
tau2s <- NA
class(tau2s) <- "try-error"
}
}
ll_contrib[[i]][[paste0("s.", j)]] <- NA
accept <- TRUE
if(!inherits(tau2s, "try-error")) {
ll1 <- objfun3(tau2s, retLL = TRUE, step = TRUE)
epsll <- (ll1 - ll0)/abs(ll0)
if(is.na(epsll)) {
ll1 <- ll0 <- 1
epsll <- -1
}
# if(!slice) {
# accept <- TRUE
# } else {
# epsll < 0.5
# }
# if(!always) {
accept <- epsll >= -0.5
# } else {
# accept <- TRUE
# }
if((((ll1 > ll0) & (epsll > eps_loglik)) | always) & accept) {
tau2[[i]][[j]] <- tau2s
S <- 0
for(l in 1:length(tau2[[i]][[j]])) {
S <- S + 1/tau2[[i]][[j]][l] * if(is.function(x[[i]]$smooth.construct[[j]]$S[[l]])) {
x[[i]]$smooth.construct[[j]]$S[[l]](c(b0, x[[i]]$smooth.construct[[j]]$fixed.hyper))
} else {
x[[i]]$smooth.construct[[j]]$S[[l]]
}
}
P <- matrix_inv(XWX + S)
if(select) {
tbeta[[i]][[paste0("s.", j)]] <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
if(!is.null(wts)) {
names(tbeta[[i]][[paste0("s.", j)]]) <- paste0("bb", 1:length(tbeta[[i]][[paste0("s.", j)]]))
tbeta[[i]][[paste0("s.", j)]] <- c(tbeta[[i]][[paste0("s.", j)]], wts)
}
} else {
beta[[i]][[paste0("s.", j)]] <- b0 + nu * (drop(P %*% crossprod(Xn * hess, e)) - b0)
if(!is.null(wts)) {
names(beta[[i]][[paste0("s.", j)]]) <- paste0("bb", 1:length(beta[[i]][[paste0("s.", j)]]))
beta[[i]][[paste0("s.", j)]] <- c(beta[[i]][[paste0("s.", j)]], wts)
}
}
tedf <- sum_diag(XWX %*% P)
edf <- edf + tedf
ll_contrib[[i]][[paste0("s.", j)]] <- ll1 - ll0
medf[[i]][[paste0("s.", j, ".edf")]] <- c(medf[[i]][[paste0("s.", j, ".edf")]], tedf)
}
} else {
warning(paste0("check distribution of term ", j, "!"))
}
## Re-add the (possibly updated) term contribution, or restore the
## previous predictors when rejected or in select mode.
if(!select & accept) {
if(inherits(x[[i]]$smooth.construct[[j]], "nnet0.smooth")) {
nid <- 1:x[[i]]$smooth.construct[[j]]$nodes
if(x[[i]]$smooth.construct[[j]]$xt$center) {
eta[[i]] <- eta[[i]] + xcenter(Xn %*% beta[[i]][[paste0("s.", j)]][nid])
etas[[i]] <- etas[[i]] + xcenter(Xt %*% beta[[i]][[paste0("s.", j)]][nid])
} else {
eta[[i]] <- eta[[i]] + drop(Xn %*% beta[[i]][[paste0("s.", j)]][nid])
etas[[i]] <- etas[[i]] + drop(Xt %*% beta[[i]][[paste0("s.", j)]][nid])
}
#fit <- Xn %*% beta[[i]][[paste0("s.", j)]][nid]
#Z <- x[[i]]$smooth.construct[[j]]$X[shuffle_id[take], , drop = FALSE]
#plot(Z[, 2], e)
#plot2d(fit ~ Z[,2], add = TRUE)
} else {
if(x[[i]]$smooth.construct[[j]]$xt$center) {
eta[[i]] <- eta[[i]] + xcenter(Xn %*% beta[[i]][[paste0("s.", j)]])
etas[[i]] <- etas[[i]] + xcenter(Xt %*% beta[[i]][[paste0("s.", j)]])
} else {
eta[[i]] <- eta[[i]] + drop(Xn %*% beta[[i]][[paste0("s.", j)]])
etas[[i]] <- etas[[i]] + drop(Xt %*% beta[[i]][[paste0("s.", j)]])
}
#fit <- Xn %*% beta[[i]][[paste0("s.", j)]]
#Z <- d$x2[shuffle_id[take]]
#plot(Z, e)
#plot2d(fit ~ Z, add = TRUE)
}
}
if(!accept | select) {
eta[[i]] <- eta_0
etas[[i]] <- etas_0
}
}
}
}
## select mode: after scoring all terms, update only the one with the
## largest out-of-sample log-likelihood gain (boosting-style).
if(select) {
llc <- unlist(ll_contrib)
if(!all(is.na(llc))) {
llval <- max(llc, na.rm = TRUE)
llc <- names(llc)[which.max(llc)]
llc <- strsplit(llc, ".", fixed = TRUE)[[1]]
llc <- c(llc[1], paste0(llc[-1], collapse = "."))
beta[[llc[1]]][[llc[2]]] <- b0 <- tbeta[[llc[1]]][[llc[2]]]
csm <- TRUE
if(llc[2] != "p") {
llc2 <- gsub("s.", "", llc[2], fixed = TRUE)
Xn <- x[[llc[1]]]$smooth.construct[[llc2]]$X[shuffle_id[take], , drop = FALSE]
Xt <- x[[llc[1]]]$smooth.construct[[llc2]]$X[shuffle_id[take2], , drop = FALSE]
if(inherits(x[[llc[1]]]$smooth.construct[[llc2]], "nnet0.smooth")) {
Xn <- x[[llc[1]]]$smooth.construct[[llc2]]$getZ(Xn, b0)
Xt <- x[[llc[1]]]$smooth.construct[[llc2]]$getZ(Xt, b0)
b0 <- b0[1:ncol(Xn)]
}
csm <- x[[llc[1]]]$smooth.construct[[llc2]]$xt$center
} else {
csm <- FALSE
Xn <- x[[llc[1]]]$model.matrix[shuffle_id[take], , drop = FALSE]
Xt <- x[[llc[1]]]$model.matrix[shuffle_id[take2], , drop = FALSE]
}
## Record the winning term and its iteration for contribplot().
ll_iter <- attr(LLC[[llc[1]]][[llc[2]]], "iteration")
ll_iter <- c(ll_iter, iter)
LLC[[llc[1]]][[llc[2]]] <- c(LLC[[llc[1]]][[llc[2]]], llval)
attr(LLC[[llc[1]]][[llc[2]]], "iteration") <- ll_iter
if(csm) {
eta[[llc[1]]] <- eta[[llc[1]]] + xcenter(Xn %*% b0)
etas[[llc[1]]] <- etas[[llc[1]]] + xcenter(Xt %*% b0)
} else {
eta[[llc[1]]] <- eta[[llc[1]]] + drop(Xn %*% b0)
etas[[llc[1]]] <- etas[[llc[1]]] + drop(Xt %*% b0)
}
}
}
## Append the current coefficients/tau2/edf to the paths.
for(i in nx) {
for(j in names(parm[[i]])) {
jj <- paste0(strsplit(j, ".", fixed = TRUE)[[1]][-1], collapse = ".")
tedf <- medf[[i]][[paste0(j, ".edf")]]
tpar <- if(j != "p") {
c(beta[[i]][[j]], tau2[[i]][[jj]], tedf[length(tedf)])
} else {
beta[[i]][[j]]
}
names(tpar) <- NULL
parm[[i]][[j]] <- rbind(parm[[i]][[j]], tpar)
}
}
## Relative change of the predictors for the progress display.
eta00 <- do.call("cbind", eta00)
eta01 <- do.call("cbind", eta)
if(iter < 2L)
eta00[abs(eta00) < 1e-20] <- 1e-20
eps <- mean(abs((eta01 - eta00) / eta00), na.rm = TRUE)
if(verbose) {
edf <- abs(edf)
btxt <- if(srandom) {
NA
} else {
if(length(batch[[bid]]) > 2) {
length(batch[[bid]]) * iter
} else {
batch[[bid]][2L]
}
}
if(iter < 2) {
cat(sprintf(" * iter %i, nobs %i, edf %f\r", iter, btxt, round(edf, 4)))
} else {
cat(sprintf(" * iter %i, nobs %i, eps %f, edf %f\r", iter, btxt, round(eps, 4), round(edf, 2)))
}
}
iter <- iter + 1L
}
if(verbose)
cat("\n")
}
elapsed <- c(proc.time() - ptm)[3]
if(verbose) {
cat("\n")
et <- if(elapsed > 60) {
paste(formatC(format(round(elapsed / 60, 2), nsmall = 2), width = 5), "min", sep = "")
} else paste(formatC(format(round(elapsed, 2), nsmall = 2), width = 5), "sec", sep = "")
cat("elapsed time: ", et, "\n", sep = "")
}
## Aggregate the per-iteration edf values into medians per term.
for(i in nx) {
for(j in seq_along(medf[[i]])) {
medf[[i]][[j]] <- if(all(is.na(medf[[i]][[j]]))) {
0
} else median(medf[[i]][[j]], na.rm = TRUE)
}
for(j in names(parm[[i]])) {
colnames(parm[[i]][[j]]) <- paste0(i, ".", j, ".", colnames(parm[[i]][[j]]))
}
parm[[i]] <- do.call("cbind", parm[[i]])
}
parm <- do.call("cbind", parm)
rownames(parm) <- NULL
##if(nrow(parm) > 1L)
##  parm <- parm[-1L, , drop = FALSE]
## Assemble the return value.
rval <- list()
rval$parameters <- c(unlist(beta), unlist(medf))
rval$fitted.values <- eta
rval$shuffle <- shuffle
rval$runtime <- elapsed
rval$edf <- edf
rval$nbatch <- nbatch
rval$parpaths <- parm
rval$epochs <- epochs
rval$n.iter <- iter
if(select) {
rval$llcontrib <- LLC
}
rval
}
## Summarize and plot per-term log-likelihood contributions of a model
## fitted with bbfit(..., select = TRUE).
##
## For every term the sparse per-iteration contributions (stored with an
## "iteration" attribute by opt_bbfit) are expanded to a full-length
## vector, accumulated with cumsum(), and plotted via
## print.boost_summary(). Selection frequencies (how often each term was
## the winner) are printed per distribution parameter.
##
## Returns invisibly a list with elements "loglik" (cumulative
## contribution matrix) and "selfreqs" (relative selection frequencies).
contribplot <- function(x, ...) {
if(is.null(ll <- x$model.stats$optimizer$llcontrib))
stop("nothing to plot")
## Number of optimizer iterations (n.iter is one past the last).
iter <- x$model.stats$optimizer$n.iter - 1L
iter2 <- NULL
sf <- list()
for(i in names(ll)) {
sf[[i]] <- list()
for(j in names(ll[[i]])) {
if(!is.null(ll[[i]][[j]])) {
## Iterations at which this term was selected.
ii <- attr(ll[[i]][[j]], "iteration")
sf[[i]][[j]] <- length(ii)
iter2 <- c(iter2, length(ii))
## Scatter the contributions into a full-length vector; the first
## stored element is the initial 0 placeholder, hence [-1].
llv <- rep(0, iter)
llv[ii] <- ll[[i]][[j]][-1]
llv <- cumsum(llv)
ll[[i]][[j]] <- c(0, llv)
} else {
ll[[i]][[j]] <- rep(0, iter)
sf[[i]][[j]] <- 0
}
}
## Selection counts, sorted in decreasing order.
sf[[i]] <- do.call("rbind", sf[[i]])
sf[[i]] <- sf[[i]][order(sf[[i]][, 1], decreasing = TRUE), , drop = FALSE]
colnames(sf[[i]]) <- "Sel. freq."
ll[[i]] <- do.call("cbind", ll[[i]])
colnames(ll[[i]]) <- paste0(i, ".", colnames(ll[[i]]))
}
## Normalize counts by the total number of selections and print.
iter2 <- sum(iter2)
for(i in names(ll)) {
sf[[i]] <- sf[[i]]/iter2
cat(i, "\n", sep = "")
printCoefmat(sf[[i]])
cat("\n")
}
ll <- do.call("cbind", ll)
print.boost_summary(list("loglik" = ll, "mstop" = iter),
summary = FALSE, which = "loglik.contrib", ...)
invisible(list("loglik" = ll, "selfreqs" = sf))
}
## Parallel wrapper around opt_bbfit().
##
## Runs the batchwise backfitting optimizer mc.cores times, each run
## with its own RNG seed, and collects the coefficient paths of the
## runs as MCMC-style samples. Point estimates are the column means of
## the stacked paths.
opt_bbfitp <- bbfitp <- function(x, y, family, mc.cores = 1, ...)
{
  ## One random seed per worker so the runs differ.
  worker_seeds <- ceiling(runif(mc.cores, 1, 1000000))
  run_one <- function(k) {
    set.seed(worker_seeds[k])
    opt_bbfit(x, y, family, ...)
  }
  fits <- parallel::mclapply(1:mc.cores, run_one, mc.cores = mc.cores)
  rval <- list()
  ## Convert each run's coefficient paths to an mcmc object; failed
  ## runs keep their error message for reporting below.
  rval$samples <- lapply(fits, function(f) {
    if(inherits(f, "try-error")) {
      writeLines(f)
      return(f)
    } else {
      return(as.mcmc(f$parpaths))
    }
  })
  is_err <- sapply(rval$samples, is.character)
  if(all(is_err))
    stop("something went wrong in bbfitp()!")
  if(any(is_err))
    warning("one core reports an error.")
  ## Drop failed runs and combine the rest.
  fits <- fits[!is_err]
  rval$samples <- as.mcmc.list(rval$samples[!is_err])
  rval$parameters <- colMeans(do.call("rbind", lapply(fits, function(f) f$parpaths)))
  rval$nbatch <- fits[[1]]$nbatch
  rval$runtime <- mean(sapply(fits, function(f) f$runtime))
  rval$epochs <- fits[[1]]$epochs
  rval
}
## Plot the coefficient paths of a model fitted with bbfit()/opt_bbfit().
##
## Optionally restricts the paths to columns whose names contain the
## (fixed) patterns in `name`. Columns are colored by grouping on the
## column name with its trailing ".<suffix>" removed; the group labels
## are printed to the console. Returns the (possibly subsetted) path
## matrix invisibly.
bbfit_plot <- function(x, name = NULL, ...)
{
  paths <- x$model.stats$optimizer$parpaths
  if(is.null(paths)) {
    warning("there is nothing to plot")
    return(invisible(NULL))
  }
  ## Keep only columns matching every supplied pattern (fixed match).
  if(!is.null(name)) {
    for(pat in name)
      paths <- paths[, grep(pat, colnames(paths), fixed = TRUE), drop = FALSE]
  }
  ## Group columns by the name without its last "."-separated part,
  ## one color per group.
  labels <- colnames(paths)
  groups <- strsplit(labels, ".", fixed = TRUE)
  groups <- lapply(groups, function(parts) {
    paste0(parts[-length(parts)], collapse = ".")
  })
  groups <- as.factor(unlist(groups))
  cat(levels(groups), "\n")
  cols <- rainbow_hcl(nlevels(groups))[groups]
  matplot(paths, type = "l", lty = 1, xlab = "Iteration",
    ylab = "Coefficients", col = cols, ...)
  return(invisible(paths))
}
## Build an updated model formula list from the terms selected by the
## batchwise-backfitting variable selection (contribplot()).
##
## For every distribution parameter, terms whose selection frequency
## exceeds `thres` are kept and combined into a one-sided formula; the
## response of `object` is attached to the first formula.
##
## Fixes:
##  * gsub("s.", "", ..., fixed = TRUE) removed the literal "s."
##    anywhere in a term label (e.g. a covariate named "s.x"), not just
##    the leading tag; use an anchored sub() instead.
##  * The response was attached via eval(parse(text = ...)); build the
##    formula with as.formula(paste(...)) instead of evaluating
##    generated code.
new_formula <- function(object, thres = 0) {
  sel <- contribplot(object, plot = FALSE)
  yname <- response.name(object)
  formula <- list()
  for(i in names(sel$selfreqs)) {
    ## Terms selected more often than the threshold.
    eff <- sel$selfreqs[[i]][sel$selfreqs[[i]] > thres, , drop = FALSE]
    eff <- rownames(eff)
    ## Strip only the leading "s." tag of smooth-term row names.
    eff <- sub("^s\\.", "", eff)
    ## Drop the parametric part marker.
    eff <- eff[eff != "p"]
    if(length(eff)) {
      eff <- paste(sort(eff), collapse = "+")
      formula[[i]] <- as.formula(paste("~", eff))
    }
  }
  ## Attach the response to the first parameter's formula.
  formula[[1L]] <- update(formula[[1L]], as.formula(paste(yname, "~ .")))
  return(formula)
}