blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
127ee0002a0379860b47c324cac0c750264fecd4
|
0c3b366d4a270b7520b3e46bc94399283281c470
|
/server.R
|
fab442c2d1de34bd753978527322b3050ff1e2d5
|
[] |
no_license
|
tcash21/overwatch-optimizer
|
2f7d651e5b2e3650e73a09c0956216b83d4e78df
|
4a06d78b707a40709f0313871d4d813852311cc9
|
refs/heads/master
| 2021-01-23T14:56:30.602733
| 2017-06-13T21:39:37
| 2017-06-13T21:39:37
| 93,263,800
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,678
|
r
|
server.R
|
library(shiny)
# Min-max normalize a numeric vector onto the [0, 1] interval.
# Note: yields NaN elements when all values are equal (zero range).
range01 <- function(x) {
  lo <- min(x)
  span <- max(x) - lo
  (x - lo) / span
}
# Base endpoint of the OWAPI Overwatch stats service.
base_url <- 'https://owapi.net/api/v3/u/'
# Shiny server for the Overwatch team-optimizer app.
# NOTE(review): relies on objects defined elsewhere (presumably global.R):
# `final`, `final_q`, `all_results`, `all_results_q`, `results`, `heroes`,
# `parseHeroStats`, `calculateScores`, `char_lookup`, plus httr::GET,
# jsonlite::fromJSON and lpSolve::lp -- confirm against the rest of the repo.
server <- function(input, output) {
# Reactive containers: known players and their per-mode stat tables.
players <- reactiveValues()
players$list <- unique(final$user)
new_players <- reactiveValues()
new_players$list <- list()
# Competitive ("comp") and quick-play ("qp") stat tables; reset-able below.
the_stats <- reactiveValues(stats = final)
the_stats_qp <- reactiveValues(stats = final_q)
table_results <- reactiveValues(results = all_results)
table_results_q <- reactiveValues(results = all_results_q)
# Restore the original stat table for the currently selected mode.
observeEvent(input$reset2, {
if(input$type == 'qp'){
the_stats_qp$stats <- final_q
} else {
the_stats$stats <- final
}
})
# Fetch a new player's hero stats from OWAPI and rebuild all scores.
observeEvent(input$add_player, {
withProgress(message = 'Fetching Battle.net Stats...', value = 0, {
incProgress(amount = 1 / length(heroes), message = paste0('Getting ', input$newPlayer, ' stats'))
user <- input$newPlayer
# Battle.net tags use '#', the API expects '-'.
user <- gsub("#", "-", user)
players$list[user] <- user
the_url <- paste0(base_url, user, '/heroes')
r <- GET(the_url)
stats <- fromJSON(content(r, 'text'))
# NOTE(review): `results` is read from the enclosing scope; `[[<-` creates
# a local copy, so each observer invocation writes slot length(results)+1
# of the ORIGINAL list -- adding a second player appears to overwrite the
# first one's fetched stats. Confirm whether this is intended.
i <- length(results) + 1
results[[i]] <- stats
hs <- lapply(results, function(x) parseHeroStats(x, mode=input$type))
all_results <- sapply(heroes, function(x) calculateScores(x, hs))
table_results$results <- all_results
needed_fields <- lapply(all_results, function(x) data.frame(hero=x$hero, user=x$username, score=x$score, games_played=x$games_played))
# Rescale each hero's scores to [0, 1] so heroes are comparable.
scaled_scores <- lapply(needed_fields, function(y) range01(y$score))
updated <- mapply(cbind, needed_fields, scaled=scaled_scores)
updated <-do.call('rbind', updated)
updated$type <- char_lookup[match(updated$hero, char_lookup$hero),]$type
the_stats$stats <- updated
the_stats_qp$stats <- updated
})
})
# Render the current player list as an HTML bullet list.
showPlayers <- reactive({
the_users <- players$list
the_text <- 'Showing results for users: <br><ul>'
for(u in the_users){
the_text <- paste0(the_text, paste0('<li>', u, '</li>'))
}
the_text <- paste0(the_text, '</ul>')
HTML(the_text)
})
output$about <- renderText(HTML("<br>Optimal hero score calculations: <a href = 'https://docs.google.com/spreadsheets/d/1IA_lxxftfDh6_eEH70xPqxopIFRosIkOOe8F9IZPHtQ/edit#gid=0', target='_blank'>Here</a>
<br>Note: Scores not available yet for McCree, Widowmaker, Orisa, Torb, Sombra, Hanzo. <p>
Data from: <a href = 'https://github.com/SunDwarf/OWAPI/blob/master/api.md'>OWAPI</a>"))
output$players <- renderText({ showPlayers()})
# Filter the active mode's stats by minimum games played.
# Returns NULL (implicitly) until at least 6 players are known.
parseStats <- reactive({
if(length(players$list) >= 6){
if(input$type == 'qp'){
some_stats <- subset(the_stats_qp$stats, games_played >= as.numeric(input$min_gp))
} else if (input$type == 'comp'){
some_stats <- subset(the_stats$stats, games_played >= as.numeric(input$min_gp))
}
# Drop rows whose scaled score is NaN (heroes with a degenerate range).
if(any(some_stats$scaled == 'NaN')){
some_stats <- some_stats[-which(some_stats$scaled == 'NaN'),]
}
some_stats$id <- 1:nrow(some_stats)
return(some_stats)
}
})
# Solve a binary LP choosing the 6-player/6-hero assignment that maximizes
# total scaled score, subject to per-role counts from the UI sliders.
output$table1 <- DT::renderDataTable({
player_stats <- parseStats()
if(length(unique(player_stats$user)) < 6){
return()
}
player_stats$hero <- as.character(player_stats$hero)
player_stats$user <- as.character(player_stats$user)
# Constraint rows: one per role type, one per hero, one per user,
# plus a final row forcing exactly 6 picks.
con <- rbind(t(model.matrix(~ type + 0, player_stats)), t(model.matrix(~ hero + 0, player_stats)), t(model.matrix(~ user + 0, player_stats)), rep(1, nrow(player_stats)))
if(length(unique(player_stats$type)) == 4){
dir <- c("=", "=", "=", "=", rep("<=", length(unique(player_stats$hero))), rep("<=", length(unique(player_stats$user))), "=")
rhs <- c(input$damage,input$defense,input$support,input$tank, rep(1, length(unique(player_stats$hero))), rep(1, length(unique(player_stats$user))), 6)
} else {
dir <- c("=", "=", "=", rep("<=", length(unique(player_stats$hero))), rep("<=", length(unique(player_stats$user))), "=")
rhs <- c(input$damage,input$support,input$tank, rep(1, length(unique(player_stats$hero))), rep(1, length(unique(player_stats$user))), 6)
}
obj <- player_stats$scaled
opt <- lp("max", obj, con, dir, rhs, all.bin=TRUE)
optcomp <- player_stats[which(opt$solution == 1),]
# Per-row Exclude button wired to input$select_button via Shiny.onInputChange.
# NOTE(review): the quotes around select_button inside this double-quoted
# string look unescaped -- confirm this line parses in the original repo
# (likely \" there, mangled in this copy of the file).
optcomp$exclude <- paste0("<button id='button_", optcomp$id, "' type=\'button\' class=\'btn btn-default action-button\' onclick=\'Shiny.onInputChange("select_button", this.id)\'>Exclude</button>")
optcomp[,c(2,1,4,5,6,8)]
}, rownames= FALSE, selection = 'single', options=list(bPaginate=FALSE, bFilter=FALSE), caption='Optimal Team Comp', escape = FALSE)
# Remove an excluded player's rows from the active mode's stat table.
observeEvent(input$select_button, {
selectedRow <- as.numeric(strsplit(input$select_button, "_")[[1]][2])
player_stats <- parseStats()
user <- player_stats[match(selectedRow, player_stats$id),]$user
if(input$type == 'qp'){
the_stats_qp$stats <- the_stats_qp$stats[-which(the_stats_qp$stats$user %in% user),]
} else {
the_stats$stats <- the_stats$stats[-which(the_stats$stats$user %in% user),]
}
})
# Per-hero detail table: per-minute and objective stats for each user.
output$table2 <- DT::renderDataTable({
if(input$type == 'comp'){
x <- table_results$results[[which(names(table_results$results) == input$hero)]]
x <- subset(x, games_played >= as.numeric(input$min_gp))
} else if (input$type == 'qp'){
x <- table_results_q$results[[which(names(table_results_q$results) == input$hero)]]
x <- subset(x, tp >= as.numeric(input$min_gp))
}
y<-x[, grep("per_min|objective|time_spent", colnames(x))]
# These *_per_min columns are averages/ratios, not rates; swap in the
# corresponding raw columns below.
remove <- c("objective_time_avg_per_min", "win_percentage_per_min", "time_spent_on_fire_avg_per_min", "eliminations_per_life_per_min", "critical_hit_accuracy_per_min", "weapon_accuracy_per_min", "games_played_per_min")
i <- match(remove, colnames(y), 0)
y <- y[,-i]
y <- cbind(x$user, range01(x$score), x[,gsub("_per_min", "", remove[which(i != 0)])], y)
colnames(y)[1:2] <- c("User", "Score")
y <- y[order(y$Score, decreasing=TRUE),]
# De-duplicate columns that appear in both the raw and per-min selections.
y<-y[,match(unique(colnames(y)), colnames(y))]
datatable(y, caption=paste0('User stats for ', input$hero), options = list(scrollX = TRUE, order = list(2, 'desc')))
})
# Stats for the player named in the newPlayer input only.
output$table3 <- DT::renderDataTable({
player_stats <- parseStats()
player_stats$hero <- as.character(player_stats$hero)
player_stats$user <- as.character(player_stats$user)
the_player <- gsub("\\#", "-", input$newPlayer)
player_stats[tolower(player_stats$user) %in% tolower(the_player),]
})
}
|
3c4a1bbc39d76d0e831d91999885a4ae577ed034
|
1e547e1adca5f7fac44617a35270a991c3e52267
|
/man/Anime.Rd
|
bcce8f536575071d720ddb896a726d141e159185
|
[] |
no_license
|
cran/cSEM
|
4e4c37fd58f1b166ca8784c7ea41bc6a8ec22517
|
3818f7efdd3efd6c92a9d288b395edc752654389
|
refs/heads/master
| 2022-11-27T23:23:01.742331
| 2022-11-24T16:50:05
| 2022-11-24T16:50:05
| 236,578,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 650
|
rd
|
Anime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zz_datasets.R
\docType{data}
\name{Anime}
\alias{Anime}
\title{Data: Anime}
\format{
An object of class \code{data.frame} with 183 rows and 13 columns.
}
\source{
Original source: \href{https://github.com/ISS-Analytics/pls-predict/}{github.com/ISS-Analytics/pls-predict/}
}
\usage{
Anime
}
\description{
A data frame with 183 observations and 13 variables.
}
\details{
The data set for the example on \href{https://github.com/ISS-Analytics/pls-predict/}{github.com/ISS-Analytics/pls-predict/}
with irrelevant variables removed.
}
\keyword{datasets}
|
7ce40d2aa91baa5ce4e45838a9c52559d44fe639
|
0d7e453393ccc1682509042099365c97a1856a6b
|
/packages_to_install.R
|
1ffd6c7f291697e2cabfca958ad4aea8891114e5
|
[
"MIT"
] |
permissive
|
thomasevans/seabird_flights
|
5cb66732102a052511484a59bd73eb905df74cda
|
090eafb07eb6d04f6224f512a4b5d58241fcd8a9
|
refs/heads/master
| 2020-05-21T19:20:24.435302
| 2017-03-12T21:50:32
| 2017-03-12T21:50:32
| 53,323,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
packages_to_install.R
|
# Snapshot the full installed-package matrix for the default library paths.
packages_installed <- installed.packages(lib.loc = NULL, priority = NULL,
noCache = FALSE, fields = NULL,
subarch = .Platform$r_arch)
str(packages_installed)
# First column of installed.packages() holds the package names.
package_names <- packages_installed[,1]
# Persist the snapshot so the list survives an R upgrade/reinstall.
save(package_names, packages_installed, file = "packages_20160428.RData")
# ?save
# Detach/unload every installed package.
# NOTE(review): detach() errors for packages that are not currently attached
# and for base packages that cannot be detached -- presumably this was run
# interactively with errors ignored; confirm before scripting it.
lapply(paste("package:", package_names, sep = ""), detach,
character.only = TRUE, unload = TRUE)
# Reinstall everything from the saved name list.
install.packages(package_names)
# Classify packages into base-priority and other packages.
# With no argument, scans the search path for attached packages (keeping
# "base" itself plus anything with an on-disk "path" attribute); otherwise
# uses the supplied names. Returns a list with $basePkgs (character vector)
# and, only when any exist, $otherPkgs.
sess.pkgs <- function (package = NULL)
{
  out <- list()
  if (is.null(package)) {
    # Everything on the search path that is a package environment.
    package <- grep("^package:", search(), value = TRUE)
    keep <- sapply(package, function(p) {
      p == "package:base" || !is.null(attr(as.environment(p), "path"))
    })
    package <- sub("^package:", "", package[keep])
  }
  pkg_desc <- lapply(package, packageDescription)
  if (length(package) == 0) {
    stop("no valid packages were specified")
  }
  is_base <- sapply(pkg_desc, function(d) {
    !is.null(d$Priority) && d$Priority == "base"
  })
  out$basePkgs <- package[is_base]
  if (any(!is_base)) {
    out$otherPkgs <- package[!is_base]
  }
  out
}
# Detach and unload all currently attached non-base packages as reported
# by sess.pkgs() above.
lapply(paste("package:",sess.pkgs()$otherPkgs, sep=""), detach,
character.only = TRUE, unload = TRUE)
|
5f5af9d832fa2f8ac3c067dc11cc6f1101b84c20
|
cca3dcf0a8d90b3178227e8ccf1d195533fdb305
|
/run_analysis.R
|
ce664b7939ff1d1cc5327eef81a63b8db4222845
|
[] |
no_license
|
Gbemileke/DS_Specialization_C03_CP1
|
e20b10a888000cb1b76cf357c49b3fc038a0320f
|
3cb4b04a8fabc102539d0952eddafcd8653992d0
|
refs/heads/master
| 2021-01-10T19:44:42.241983
| 2014-05-25T16:43:29
| 2014-05-25T16:43:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,916
|
r
|
run_analysis.R
|
# Getting and Cleaning Data - Course Project 1 (CP1)
# clean the global environment
# NOTE(review): rm(list = ls()) plus the hard-coded personal setwd() below
# make this script machine-specific; edit the path before running.
rm( list=ls() )
# note about working directory:
# - it has to be changed to your local folder which contains the following
# - it contains run_analysis.R
# - it contains the uncompressed rawdata folder called "UCI HAR Dataset"
setwd('/home/jespestana/Documents/E-Courses/Data_Science_Specialization/03_Getting_Cleaning_Data/exercises/week_3/CP1')
# ---------------------------------------------------------
# Part 1: Creation of data set 1 (CP1 requirements 1-4)
# ---------------------------------------------------------
# Step 1. Reading data from rawfiles and conversion to proper data types
# subject_id (1 line/record)
# files:
# ./UCI HAR Dataset/train/subject_train.txt
# ./UCI HAR Dataset/test/subject_test.txt
raw_subject_id_train <- readLines("./UCI HAR Dataset/train/subject_train.txt")
raw_subject_id_train <- as.integer(raw_subject_id_train) # integer vector
raw_subject_id_test <- readLines("./UCI HAR Dataset/test/subject_test.txt")
raw_subject_id_test <- as.integer(raw_subject_id_test) # integer vector
# activity performed during acquisition record (1 line/record)
# files:
# ./UCI HAR Dataset/activity_labels.txt , correspondance: int_id vs activity
# ./UCI HAR Dataset/train/y_train.txt
# ./UCI HAR Dataset/test/y_test.txt
raw_activity_factornames <- readLines("./UCI HAR Dataset/activity_labels.txt")
# Each line is "<id> <label>"; keep only the label after the first space.
raw_activity_factornames <- sapply(
raw_activity_factornames,
function(x) {
idx <- grepRaw( " ", x)
substr(x, (idx+1), nchar(x)) },
USE.NAMES=FALSE )
# Factor levels kept in file order so level i matches integer code i.
raw_activity_factornames <- factor(
raw_activity_factornames,
levels=raw_activity_factornames )
raw_activity_type_train <- readLines("./UCI HAR Dataset/train/y_train.txt")
raw_activity_type_train <- as.integer(raw_activity_type_train)
raw_activity_type_test <- readLines("./UCI HAR Dataset/test/y_test.txt")
raw_activity_type_test <- as.integer(raw_activity_type_test)
# features, results of processing the rawdata (1 line/record)
# files:
# ./UCI HAR Dataset/features.txt , column/variable names
# ./UCI HAR Dataset/train/X_train.txt
# ./UCI HAR Dataset/test/X_test.txt
raw_feature_names <- readLines("./UCI HAR Dataset/features.txt")
raw_feature_names <- sapply(
raw_feature_names,
function(x) {
idx <- grepRaw( " ", x)
substr(x, (idx+1), nchar(x)) },
USE.NAMES=FALSE )
# sep="" makes read.csv split on any run of whitespace.
raw_feature_df_train <- read.csv(
file="./UCI HAR Dataset/train/X_train.txt",
sep="",
header=FALSE )
colnames(raw_feature_df_train) <- raw_feature_names
#sum(as.character(sapply( raw_feature_vector_train, class)) == "numeric")
raw_feature_df_test <- read.csv(
file="./UCI HAR Dataset/test/X_test.txt",
sep="",
header=FALSE )
colnames(raw_feature_df_test) <- raw_feature_names
rm( list = c("raw_feature_names") )
#sum(as.character(sapply( raw_feature_vector_test, class)) == "numeric")
# Step 2. Merge the train and test data sets
# Train rows first, then test rows; the three objects stay row-aligned.
raw_feature_df <- rbind( raw_feature_df_train, raw_feature_df_test)
raw_activity_type <- c( raw_activity_type_train, raw_activity_type_test)
raw_subject_id <- c( raw_subject_id_train, raw_subject_id_test)
# Translate integer activity codes into descriptive factor labels.
raw_activity_type_str <- character(length(raw_activity_type))
for (i in 1:length(raw_activity_factornames)) {
raw_activity_type_str[raw_activity_type==i] <- levels(raw_activity_factornames)[i]
}
raw_activity_type <- factor(
raw_activity_type_str,
levels=levels(raw_activity_factornames) )
#raw_activity_type_fc <- factor(
# raw_activity_type_str,
# levels=levels(raw_activity_factornames) )
#for (i in 1:length(raw_activity_factornames)) {
# print( sum( raw_activity_type == i ) )
# print( sum( raw_activity_type_fc == levels(raw_activity_factornames)[i] ) )
#}
# Free intermediates ("i" and "raw_activity_type_str" are passed to rm's
# ... argument, outside the list= vector; both forms are accepted by rm()).
rm( list = c("raw_feature_df_train","raw_feature_df_test",
"raw_activity_type_train","raw_activity_type_test",
"raw_subject_id_train","raw_subject_id_test"),
"i", "raw_activity_type_str")
# Step 3. Select correct columns from raw_feature_df
feature_names <- colnames(raw_feature_df)
# NOTE(review): grepl("mean") also matches meanFreq() and angle(...Mean)
# columns -- confirm that is the intended reading of the assignment.
idx <- sapply(feature_names,
function(x) {
grepl("mean",x) | grepl("std",x)
},
USE.NAMES=FALSE )
raw_feature_df <- raw_feature_df[,idx]
rm( list = c("feature_names","idx"))
# Step 4. Create the data frame for DataSet1
dataset1_df <- data.frame(
subject=raw_subject_id,
activity=raw_activity_type,
raw_feature_df)
# Reassign names because data.frame() mangles "()" and "-" in column names.
colnames(dataset1_df) <- c( "subject", "activity", colnames(raw_feature_df))
rm( list = c("raw_subject_id","raw_activity_type","raw_feature_df"))
# Step 5. Save the DataSet1 data frame to a csv file with txt extension
write.csv(
dataset1_df,
file="dataset1.txt",
row.names = FALSE )
# Step 6. Example code to read DataSet1 properly (with proper data types)
dataset1_df_read <- read.csv(
file="dataset1.txt",
header=TRUE,
check.names=FALSE,
stringsAsFactors=FALSE)
# Rebuild the activity factor with its original level order.
dataset1_df_read[["activity"]] <- factor(
dataset1_df_read[["activity"]],
levels=c("WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING") )
rm( list = c("dataset1_df","dataset1_df_read","raw_activity_factornames"))
# ------------------------------------------------------
# Part 2: Creation of data set 2 (CP1 requirement 5)
# ------------------------------------------------------
# Step 1. Properly read DataSet1 from file
dataset1_df <- read.csv(
file="dataset1.txt",
header=TRUE,
check.names=FALSE,
stringsAsFactors=FALSE)
dataset1_df[["activity"]] <- factor(
dataset1_df[["activity"]],
levels=c("WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING") )
# Step 2. Process DataSet1 to fill the DataSet2 data frame
# (perform averages for each subject and activity)
# NOTE(review): only "mean" columns are averaged here; the "std" columns
# kept in DataSet1 are dropped -- confirm against requirement 5.
idx_mean_features <- grepl("mean", colnames(dataset1_df))
unique_subjects <- as.numeric( rownames( table(dataset1_df[["subject"]]) ) )
unique_activities <- levels(dataset1_df[["activity"]])
# Preallocate one row per (subject, activity) pair.
dataset2_df <- data.frame(
subject =integer (length(unique_subjects)*length(unique_activities)),
activity=character(length(unique_subjects)*length(unique_activities)),
matrix(
data=numeric(sum(idx_mean_features) *
length(unique_subjects)*
length(unique_activities)),
ncol=sum(idx_mean_features),
nrow=length(unique_subjects)*length(unique_activities) ),
stringsAsFactors=FALSE
)
colnames(dataset2_df) <- c("subject",
"activity",
colnames(dataset1_df)[idx_mean_features])
# Fill each row with the per-subject, per-activity column means.
count <- 1
for (subject in unique_subjects) {
for (activity in unique_activities) {
idx_subject <- ( dataset1_df[["subject"]] == subject )
idx_activity <- ( dataset1_df[["activity"]] == activity )
subset_df <- dataset1_df[ idx_subject & idx_activity,
idx_mean_features ]
dataset2_df[count,"subject"] <- subject
dataset2_df[count,"activity"] <- activity
dataset2_df[count,3:ncol(dataset2_df)] <- sapply( subset_df, mean)
count <- count + 1
}
}
dataset2_df[["activity"]] <- factor(
dataset2_df[["activity"]],
levels=c("WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING") )
rm( list = c("activity", "count","idx_activity","idx_mean_features",
"idx_subject","subject","subset_df","unique_activities",
"unique_subjects") )
# Step 3. Save the DataSet2 data frame to a csv file with txt extension
write.csv(
dataset2_df,
file="dataset2.txt",
row.names = FALSE )
# Step 4. Example code to read DataSet2 properly (with proper data types)
dataset2_df_read <- read.csv(
file="dataset2.txt",
header=TRUE,
check.names=FALSE,
stringsAsFactors=FALSE)
dataset2_df_read[["activity"]] <- factor(
dataset2_df_read[["activity"]],
levels=c("WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING") )
rm( list = c("dataset1_df","dataset2_df","dataset2_df_read") )
|
59acdd7804f52d8954de4c159a15802148db1329
|
64c20141ed7d581d36f41ee4e0378f515697aefe
|
/iGSEA/3-iGSEA_test_LFQ_B.R
|
919767e17e1cc837aa3c46eb9444da40b2012ddb
|
[] |
no_license
|
chiosislab/Chaperomics_controllability_2020
|
3028691a819a1b551d1d85010a0833cb4bd16873
|
1dda330c4001a07390bccafef82415030f3ad459
|
refs/heads/master
| 2022-04-24T22:41:02.209096
| 2020-04-25T02:59:12
| 2020-04-25T02:59:12
| 258,673,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,049
|
r
|
3-iGSEA_test_LFQ_B.R
|
################################################
#### Loading PPI databases (BioGrid+Intact) ####
################################################
# `ppi` (loaded below) holds protein-protein interactions with participant
# UniProt Entry / Entry.name columns on both sides (used by iGSEA.v2).
load (file="./library/ppi_2018_0508.Rdata")
#write.csv (ppi, "ppi.csv", row.names = F, quote=F)
################################################
#### Loading Chaperome ID ####
################################################
load (file="./library/list.chaperome.brehme.uniprot.Rdata")
################################################
#### Loading Data ####
################################################
load ("./proteomics_input/ms1.uniprot.results.Rdata")
input <- ms1.uniprot.results.all
remove (ms1.uniprot.results.all)
## Human AD up/down
# Split proteins at p <= 0.1 by LFQ.B fold change: > 1 up, < 1 down.
# NOTE(review): proteins with FC exactly 1 fall into neither set -- confirm.
data.up <- input [input$LFQ.B.p <= 0.1 & input$LFQ.B.FC > 1,]
data.down <- input [input$LFQ.B.p <= 0.1 & input$LFQ.B.FC < 1,]
uniprot <- read.csv ("./library/uniprot-reviewed_human.1220.2017_extended.tab", quote="", header=TRUE, sep="\t", stringsAsFactors = FALSE)
################################################
#### Converting UniProtAC to EntrezID
################################################
# The commented block documents how uniprot2entrezID.csv was generated.
#library(UniProt.ws)
#up <- UniProt.ws(taxId=9606)
#uniprot2entrezID <- select(up, uniprot$Entry, c("ENTRY-NAME","ENTREZ_GENE"),"UNIPROTKB")
#write.csv (uniprot2entrezID, "~/Google Drive/R/UniProt/uniprot2entrezID_2.csv")
#uniprot2entrezID <- read.csv("~/Google Drive/R/UniProt/uniprot2entrezID.csv")
#uniprot2entrezID <- read.csv ("~/Google Drive/R/UniProt/2018_05_08/uniprot-reviewed_human.0508.2018.slim.tab", quote = "", header=TRUE, sep="\t", stringsAsFactors = F)
#uniprot2entrezID <- subset (uniprot2entrezID, select=c(Entry, Cross.reference..GeneID.))
#names (uniprot2entrezID) <- c("UNIPROTKB","ENTREZ_GENE")
#library (splitstackshape)
#uniprot2entrezID <- concat.split.multiple(uniprot2entrezID, split.cols = "ENTREZ_GENE",seps=";",direction="long")
#write.csv (uniprot2entrezID, "~/Google Drive/R/UniProt/uniprot2entrezID.csv",row.names = F,quote=F)
uniprot2entrezID <- read.csv("./library/uniprot2entrezID.csv")
# Left-join Entrez gene IDs onto x via its UniProtKB accession column
# (named by uniprotkb_col), then drop rows that found no Entrez match.
# Depends on the global lookup table `uniprot2entrezID`.
attach.entregene <- function (x,uniprotkb_col) {
  merged <- merge(x, uniprot2entrezID, by.x = uniprotkb_col,
                  by.y = "UNIPROTKB", all.x = TRUE, sort = FALSE)
  merged[!is.na(merged$ENTREZ_GENE), ]
}
# Map UniProt entry names -> accessions (via global `uniprot`) -> Entrez
# gene IDs (via global `uniprot2entrezID`); unmatched names yield NA.
uniprot.entry.name2entreID <- function (x) {
  ids <- data.frame(ID = x, stringsAsFactors = FALSE)
  ids <- merge(ids, uniprot[, c("Entry.name", "Entry")],
               by.x = "ID", by.y = "Entry.name", all.x = TRUE, sort = FALSE)
  ids <- merge(ids, uniprot2entrezID,
               by.x = "Entry", by.y = "UNIPROTKB", all.x = TRUE, sort = FALSE)
  ids$ENTREZ_GENE
}
################################################
#### Preparing input file
################################################
# Entry names of measured proteins that belong to the Brehme chaperome list.
list.chaperome.input <- input$Entry.name [input$Entry %in% list.chaperome.brehme.uniprot$UNIPROTKB]
################################################
#### Preparing iGSEA (GO Enrichment module)
################################################
#source("https://bioconductor.org/biocLite.R")
#biocLite("clusterProfiler")
#biocLite("org.Hs.eg.db")
library ("clusterProfiler")
library ("org.Hs.eg.db")
# Interactor-based GSEA: for each chaperome member, collect its PPI partners
# that are present in `input`, then run GO-BP enrichment on those partners.
#
# input: character vector of UniProt accessions (measured proteome subset).
# list.chaperome.input: character vector of UniProt entry names (chaperome).
# ppi: data frame of interactions with PARTICIPANT_A/B Entry and Entry.name
#   columns on both sides.
# pAdjustMethod, ont, pvalueCutoff: NOTE(review) - currently IGNORED; the
#   enrichGO() call hard-codes "BH", "BP" and 0.1. Kept for interface
#   compatibility; wiring them through would change results -- confirm first.
# outputfile: tab-separated file the combined results are written to.
#
# Returns a data frame with one block of enrichment rows per chaperome
# member that had at least one enriched term, or NULL if none.
iGSEA.v2 <- function (input, list.chaperome.input, ppi, pAdjustMethod, ont, pvalueCutoff, outputfile) {
  df.total <- NULL
  for (i in seq_along(list.chaperome.input)) {
    inp <- list.chaperome.input[i]
    # Partners on either side of an interaction involving `inp`,
    # switching from UniProt Entry name to Entry (accession).
    ppi.selected.1 <- ppi[ppi$PARTICIPANT_A_Entry.name %in% inp,]
    ppi.selected.2 <- ppi[ppi$PARTICIPANT_B_Entry.name %in% inp,]
    interactors <- c(ppi.selected.1$PARTICIPANT_B_Entry, ppi.selected.2$PARTICIPANT_A_Entry)
    interactors <- interactors [interactors %in% input]
    print (paste0("Chaperome_ID:",inp," ",i,"/",length (list.chaperome.input)))
    print (paste0("Interactors_ID:",interactors))
    if (length(interactors)>0) {
      go.enrichment <- enrichGO (interactors, 'org.Hs.eg.db',keyType = "UNIPROT", pAdjustMethod = "BH", ont="BP",readable = T,pvalueCutoff = 0.1)
      ## Note: Depending on the version of ClusterProfiler, the argument keytype could be either "keyType" or "keytype". "KeyType" was used on iMac-29''
      df <- as.data.frame(go.enrichment)
      if (nrow (df)>0) {df$Chaperome_ID <- inp}
      if (is.null(df.total)) {df.total <- df}
      else {df.total <- rbind (df.total, df)}
      # Print inside the branch so we never echo a stale df from a
      # previous iteration (the original printed unconditionally).
      print (df)
    }
  }
  # BUG FIX: in the original this write.table sat AFTER return() and was
  # dead code, so `outputfile` was never written.
  write.table (df.total, outputfile, row.names = F, quote=F,sep="\t")
  return (df.total)
}
# NOTE(review): setwd() into a subdirectory; all outputs below land in ./LFQ.B.
setwd("./LFQ.B")
# Up-regulated proteins: run iGSEA and persist results.
input.up <- as.character(data.up$Entry)
iGSEA.up <- iGSEA.v2 (input.up, list.chaperome.input, ppi, "BH", "BP", 1, "df.total.up.txt")
save (iGSEA.up,file="iGSEA.up.results.Rdata")
# Down-regulated proteins.
input.down <- as.character(data.down$Entry)
iGSEA.down <- iGSEA.v2 (input.down, list.chaperome.input, ppi, "BH", "BP", 1, "df.total.down.txt")
save (iGSEA.down,file="iGSEA.down.results.Rdata")
load (file="iGSEA.down.results.Rdata")
save (iGSEA.up, iGSEA.down,file="iGSEA.results.Rdata")
# Combine up/down results and filter by adjusted p-value.
iGSEA.up$Status <- "Up"
iGSEA.down$Status <- "Down"
iGSEA.total <- rbind (iGSEA.up, iGSEA.down)
p.cutoff <- 0.001
# Right-assignment: adds a log10(p.adjust) column.
log10 (iGSEA.total$p.adjust) -> iGSEA.total$log10.p.adjust
iGSEA.selected <- iGSEA.total [iGSEA.total$p.adjust <= p.cutoff, ]
# Two-column table: each selected chaperome ID repeated as name/Description.
df.sidetable <- data.frame(name=unique(iGSEA.selected$Chaperome_ID), Description=unique (iGSEA.selected$Chaperome_ID))
write.table (iGSEA.selected, "iGSEA.selected.txt",sep="\t",row.names = F, quote=F)
write.table (df.sidetable, "sidetable.txt",sep="\t",row.names = F, quote=F)
|
ac99e5cb8358b18ee3950265824ba71f8200a945
|
5355ce6341489f05dc5894a70cf5cff1f951a194
|
/man/RN.Rd
|
98ed2dbf841c6c72d94d51ba48a6fedef3091cb8
|
[] |
no_license
|
AndreMikulec/econModel
|
5032565f1722275425f75b55811493d45bf87f8c
|
22b8507838116d3e33b6e40cf891988ad104ac7b
|
refs/heads/master
| 2023-06-03T19:23:00.544927
| 2021-06-26T07:38:20
| 2021-06-26T07:38:20
| 303,683,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,032
|
rd
|
RN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{RN}
\alias{RN}
\title{Time Near Events or Event Run Identification}
\usage{
RN(x, z = "a", e = T, n = F, ...)
}
\arguments{
\item{x}{xts object}
\item{z}{"after"(default). Before or after the event. "before" is the other option.}
\item{e}{T(default). Detect the observation that holds the value of T(TRUE). This can be any value that can be tested on the right-hand side of the equals (==) sign. NA is also a valid event value.}
\item{n}{F(default). Whether to negate ("!") the event test. T is useful against NA to detect !is.na(NA). The special case of NA is handled internally via !is.na(NA).}
\item{...}{dots passed}
}
\value{
xts object
}
\description{
\preformatted{
Counts the number of observations to/from an event.
This is called the time-distance to/from an event.
Alternately, gives the range of an event.
Range of the event - Run - (RN)
}
}
\examples{
\dontrun{
# RN(Time Near Events or Event Run Identification) examples
}
}
|
8a525016080f9a184d834b1eb84e44b1d105af08
|
324b10d3f13c49c0b1cf6b1a5be2eb5fc3526da0
|
/parallel.R
|
10b2ff3f59cfb44d7efa060b260b0967411e9173
|
[] |
no_license
|
IwoA/sfc.parameters
|
ff974449fc7d83a5a9c0ab77af6459235952a530
|
d0fa49f9c8123d35818865d1d9581235168dac2f
|
refs/heads/master
| 2021-05-29T02:14:30.268359
| 2015-06-22T21:08:02
| 2015-06-22T21:08:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,896
|
r
|
parallel.R
|
#sfc.param<-function(modelfile, paramfile){
# Parameter sweep for an SFC model (PKSFC package): builds every combination
# of parameter values from art1.txt and simulates the model for each one in
# parallel. Depends on PKSFC's sfc.model()/simulate().
#start time
strt<-Sys.time()
model<-sfc.model("art.txt")
#modeldata<-simulate(model)
where<-c(model$variables[,"name"]) #creates vector of variables' names
#par<-read.table("art1.txt", header=FALSE,sep=",")
# art1.txt columns: parameter name, lower bound, upper bound, step.
par<-read.table("art1.txt")
#search for number of row (ind) for parameter. Necessary for sfc.editVar function
ind<-match(par[,1],where)
#initialise all parameters with appropriate values of lowerBound
# NOTE(review): assumes the swept parameters occupy CONSECUTIVE rows starting
# at ind[1] in model$variables -- confirm this holds for the model file.
model$variables[,"initial value"][ind[1]:(ind[1]+length(ind)-1)]<-par[,2]
#initialise vector of parameters
p<-list(seq(par[1,2],par[1,3],par[1,4]))
for (i in 1:nrow(par)){
p[i]<-list(c(seq(par[i,2],par[i,3],par[i,4]))) #creates one list of values of consecutive parameters
}
combinations<-expand.grid(p) #creates matrix of combinations
colnames(combinations)<-par[,1] #parameters are in columns, combinations in rows
#creates matrix of results
results<-matrix(0,nrow=nrow(par)+2,ncol=nrow(combinations))
row.names(results)<-c(as.character(par[,1]),"Difference","Stable from")
#first loop changes rows in combinations
for (i in 1:nrow(combinations)){
#second loop picks values in row in combination and creates a new model with new combination of parameters
model$variables[,"initial value"][ind[1]:(ind[1]+length(ind)-1)]<-as.numeric(combinations[i,])
results[1:nrow(par),i]<-model$variables[,"initial value"][ind[1]:(ind[1]+length(ind)-1)]
}
# Worker: install one combination of parameter values and simulate.
do <- function (...){
model$variables[,"initial value"][ind[1]:(ind[1]+length(ind)-1)]<-as.numeric(...)
modeldata<-simulate(model) #simulates model with new set of variables
}
# rezultaty<-apply (results,2,do)
library(parallel)
cl<-makeCluster(detectCores()-1)
clusterEvalQ(cl,library(PKSFC))
# clusterApply(cl, model<-sfc.model("art.txt"))
# clusterEvalQ(cl, ind)
# Workers need the model and the parameter row indices.
clusterExport(cl,"model")
clusterExport(cl,"ind")
# NOTE(review): parLapply over a matrix iterates over individual ELEMENTS,
# not columns; the commented apply(results, 2, do) above suggests columns
# were intended -- parApply(cl, results, 2, do) would match that. Confirm.
rezultaty<-parLapply (cl,results,do)
stopCluster(cl)
|
e5b2bb02afcfdd4b62c7a17d681fd8b7f7ca52d0
|
340579e1004d968eb2fba44f3b7da8bcc2e1a67b
|
/R/BHSBVAR.R
|
8eefdf232f5c8a5ed0e0263a0cec0108650bdfd4
|
[] |
no_license
|
lnsongxf/BHSBVAR
|
51cf033fc591109166a9744635fe39ab969a5a54
|
e86eeea78a084e32dc849b47f4abdbe20bcbbcc9
|
refs/heads/master
| 2020-05-01T19:31:50.749319
| 2019-01-23T08:40:03
| 2019-01-23T08:40:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60,926
|
r
|
BHSBVAR.R
|
# Create matrices containing dependent and independent variables.
#' @keywords internal
getXY <- function(data1, nlags) {
  # Build the lagged-regressor matrix X and dependent matrix Y for a VAR.
  #
  # data1: T x n numeric matrix of endogenous variables (with colnames).
  # nlags: number of lags to include (positive integer).
  # Returns list(X, Y): X is (T - nlags) x (n*nlags + 1) with columns named
  # "<var>.L<lag>" plus a constant column "cons"; Y holds rows
  # (nlags + 1):T. Both are demeaned column-wise.
  #
  # BUG FIX: the original demeaned via `ones %*% diag(colMeans(data1))`,
  # which misbehaves for single-column input because diag(scalar) builds an
  # identity matrix, and it also dropped colnames (requiring a reassign).
  # sweep() handles any width and preserves dimnames.
  data1 <- sweep(data1, 2, colMeans(data1), FUN = "-")
  nvar <- ncol(data1)
  nobs <- nrow(data1)
  X <- matrix(data = NA_real_, nrow = (nobs - nlags), ncol = (nvar * nlags))
  for (k in seq_len(nlags)) {
    # Lag k occupies columns ((k-1)*nvar + 1):(k*nvar).
    X[, (nvar * (k - 1) + 1):(nvar * k)] <- data1[(nlags - k + 1):(nobs - k), ]
  }
  # Append the constant (intercept) column.
  X <- cbind(X, 1)
  lag_index <- sort(rep(seq_len(nlags), times = nvar), decreasing = FALSE)
  colnames(X) <- c(paste0(rep(colnames(data1), nlags), ".L", lag_index), "cons")
  Y <- data1[(nlags + 1):nobs, ]
  return(list(X, Y))
}
# Check arguments from the BH_SBVAR function that should be integers.
#' @keywords internal
check_integers <- function(list1) {
  # Validate scalar whole-number arguments (nlags, itr, burn, thin, h1_irf).
  # list1: named list of candidate values, names matching the argument names.
  # Returns "pass" if everything checks out, otherwise a message naming the
  # first offending argument.
  for (i in seq_along(list1)) {
    value <- list1[[i]]
    arg <- names(list1[i])
    # is.numeric() covers both double and integer storage modes
    # (replaces the fragile class(x) != "numeric" comparison).
    if (!is.numeric(value) || !is.finite(value)) {
      return(paste0(arg, ": Must be finite 'numeric' or 'integer'."))
    }
    if ((value %% 1) != 0) {
      return(paste0(arg, ": Must be a whole number."))
    }
    if ((arg == "nlags") && (value <= 0)) {
      return(paste0(arg, ": Must be greater than 0."))
    }
    if ((arg == "itr") && (value < 100)) {
      # BUG FIX: message previously said "greater than 100" although the
      # condition accepts exactly 100.
      return(paste0(arg, ": Must be greater than or equal to 100."))
    }
    if ((arg == "burn") && (value < 0)) {
      return(paste0(arg, ": Must be greater than or equal to 0."))
    }
    if ((arg == "thin") && (value <= 0)) {
      return(paste0(arg, ": Must be greater than 0."))
    }
    if ((arg == "h1_irf") && (value < 4)) {
      # BUG FIX: message previously said ">= 3" although the condition
      # rejects 3.
      return(paste0(arg, ": Must be greater than or equal to 4."))
    }
  }
  return("pass")
}
# Check arguments from the BH_SBVAR function that should be doubles.
#' @keywords internal
check_doubles <- function(list1) {
  # Validate scalar real-valued arguments (currently only "ci").
  # list1: named list of candidate values, names matching the argument names.
  # Returns "pass" on success, otherwise a message naming the first
  # offending argument.
  for (i in seq_along(list1)) {
    value <- list1[[i]]
    arg <- names(list1[i])
    # is.numeric() covers both double and integer storage modes
    # (replaces the fragile class(x) != "numeric" comparison).
    if (!is.numeric(value) || !is.finite(value)) {
      return(paste0(arg, ": Must be finite 'numeric' or 'integer'."))
    }
    # Scalar condition: use short-circuit || rather than elementwise |.
    if ((arg == "ci") && ((value < 0.7) || (value > 1))) {
      return(paste0(arg, ": Must be greater than or equal to 0.7 and less than or equal to 1."))
    }
  }
  return("pass")
}
# Check arguments from the BH_SBVAR function that should be matrices.
#' @keywords internal
check_matrices <- function(list1, nlags) {
  # Validate the matrix arguments supplied to BH_SBVAR().
  #
  # list1: named list of matrices (y, pP, pP_sig, kappa1); names drive the
  #        shape and value checks. nlags: lag order, used to compute the
  #        expected row count k = (nlags * n) + 1 for the Phi priors.
  # Returns the string "pass" if every check succeeds, otherwise a
  # character string describing the first violation found.
  #testing inputs that should be matrices
  for (i in 1:length(list1)) {
    if (!is.matrix(list1[[i]])) {
      return(paste(names(list1[i]), ": Must be a matrix.", sep = ""))
    }
    if (any(!is.finite(list1[[i]]))) {
      return(paste(names(list1[i]), ": Elements must be finite numeric values", sep = ""))
    }
    # y: endogenous data needs more observations than variables, at least
    # 2 variables, and enough rows to fit (n * nlags + 1) coefficients.
    if ((names(list1[i]) == "y") && (nrow(list1[[i]]) <= ncol(list1[[i]]))) {
      return("y: The number of rows must be greater than the number of columns.")
    }
    if ((names(list1[i]) == "y") && (ncol(list1[[i]]) < 2)) {
      return(paste("y: The number of columns or endogenous variables must be greater than 1.", sep = ""))
    }
    if ((names(list1[i]) == "y") && (((ncol(list1[[i]]) * nlags) + 1) >= (nrow(list1[[i]])))) {
      return(paste("y: The number observations must be greater than ", ((ncol(list1[[i]]) * nlags) + 1),". Reduce the number of lags or increase the number of observations.", sep = ""))
    }
    # pP: prior positions for Phi must be (k x n), where k = nlags * n + 1.
    if ((names(list1[i]) == "pP") && (nrow(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) {
      return(paste("pP: The number of rows must equal ", ((nlags * ncol(list1$y)) + 1), ".", sep = ""))
    }
    if ((names(list1[i]) == "pP") && (ncol(list1[[i]]) != ncol(list1$y))) {
      return(paste("pP: The number of columns must equal ", (ncol(list1$y)), ".", sep = ""))
    }
    # pP_sig: prior confidence for Phi must be a (k x k) symmetric matrix
    # with non-negative entries.
    if ((names(list1[i]) == "pP_sig") && (nrow(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) {
      return(paste("pP_sig: The number of rows must equal ", ((nlags * ncol(list1$y)) + 1), ".", sep = ""))
    }
    if ((names(list1[i]) == "pP_sig") && (ncol(list1[[i]]) != ((nlags * ncol(list1$y)) + 1))) {
      return(paste("pP_sig: The number of columns must equal ",((nlags * ncol(list1$y)) + 1), ".", sep = ""))
    }
    if ((names(list1[i]) == "pP_sig") && (any(list1[[i]] < 0))) {
      return(paste("pP_sig: Elements must be greater than or equal to 0.", sep = ""))
    }
    if ((names(list1[i]) == "pP_sig") && (!isSymmetric(list1[[i]]))) {
      return(paste("pP_sig: Must be symmetric.", sep = ""))
    }
    # kappa1: confidence in the structural-variance priors must be a
    # (1 x n) matrix of non-negative values.
    if ((names(list1[i]) == "kappa1") && (nrow(list1[[i]]) != 1)) {
      return(paste("kappa1: The number of rows must equal 1.", sep = ""))
    }
    if ((names(list1[i]) == "kappa1") && (ncol(list1[[i]]) != ncol(list1$y))) {
      return(paste("kappa1: The number of columns must equal ", ncol(list1$y), ".", sep = ""))
    }
    if ((names(list1[i]) == "kappa1") && (any(list1[[i]] < 0))) {
      return(paste("kappa1: Elements must be greater than or equal to 0.", sep = ""))
    }
  }
  return("pass")
}
# Check arguments from the BH_SBVAR function that should be arrays.
#' @keywords internal
check_arrays <- function(list1, y) {
  # Validate the prior-distribution arrays (pA, pdetA, pH) supplied to
  # BH_SBVAR(). Slices along the third dimension hold, in order:
  # distribution type (NA/0/1), sign restriction, position, scale, degrees
  # of freedom, skew, and — for pA only — long-run restriction indicator
  # and random-walk proposal scaling parameter.
  #
  # list1: named list of arrays; y: endogenous data matrix, used only to
  #        determine the expected dimensions (n = ncol(y)).
  # Returns the string "pass" if all checks succeed, otherwise a character
  # string describing the first violation found.
  for (i in 1:length(list1)) {
    if (!is.array(list1[[i]])) {
      return(paste(names(list1[i]), ": Must be an array.", sep = ""))
    }
    if (!is.numeric(list1[[i]])) {
      return(paste(names(list1[i]), ": Should contain 'numeric' elements for arrays specifying prior distributions. Use 'NA_real_' for elements in arrays that contain all NAs.", sep = ""))
    }
    # pA must request estimation of at least one structural parameter.
    if ((names(list1[i]) == "pA") && (all(is.na(list1[[i]][, , 1])))) {
      return(paste(names(list1[i]), "[, , 1]: Should indicate at least one parameter to be estimated.", sep = ""))
    }
    # Dimension checks: pA is (n x n x 8), pdetA is (1 x 1 x 6), pH is (n x n x 6).
    if ((names(list1[i]) == "pA") && ((dim(list1[[i]])[1] != ncol(y)) | (dim(list1[[i]])[2] != ncol(y)) | (dim(list1[[i]])[3] != 8))) {
      return(paste(names(list1[i]), ": Should be an (", ncol(y), ", ", ncol(y), ", 8) array.", sep = ""))
    }
    if ((names(list1[i]) == "pdetA") && ((dim(list1[[i]])[1] != 1) | (dim(list1[[i]])[2] != 1) | (dim(list1[[i]])[3] != 6))) {
      return(paste(names(list1[i]), ": Should be an (1, 1, 6) array.", sep = ""))
    }
    if ((names(list1[i]) == "pH") && ((dim(list1[[i]])[1] != ncol(y)) | (dim(list1[[i]])[2] != ncol(y)) | (dim(list1[[i]])[3] != 6))) {
      return(paste(names(list1[i]), ": Should be an (", ncol(y), ", ", ncol(y), ", 6) array.", sep = ""))
    }
    # Element-wise consistency: for each (j, k) cell, the remaining slices
    # must agree with the distribution type declared in slice 1.
    for (j in 1:(dim(list1[[i]])[1])) {
      for (k in 1:(dim(list1[[i]])[2])) {
        if (is.na(list1[[i]][j, k, 1])) { #if distribution is not specified, no other parameters should be specified
          if ((names(list1[i]) == "pA") && (any(!is.na(list1[[i]][j, k, c(1:2, 4:8)])))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so sign, scale, degrees of freedom, skew, long-run restriction, and proposal scaling parameter (", names(list1[i]),"[", j, ", ", k, ", c(2,4:7)]) should all be NA.", sep = ""))
          }
          # Even with no prior, the position slice must hold a constant
          # value: it is used as the fixed value of the parameter.
          if ((names(list1[i]) == "pA") && (!is.finite(list1[[i]][j, k, 3]))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so position (", names(list1[i]), "[", j, ", ", k, ", 3]) should be some constant value.", sep = ""))
          }
          if ((names(list1[i]) != "pA") && (any(!is.na(list1[[i]][j, k, 1:6])))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Indicates no prior distribution so sign, position, scale, degrees of freedom, skew (", names(list1[i]), "[", j, ", ", k, ", 1:6]) should all be NA.", sep = ""))
          }
        } else if (list1[[i]][j,k,1] == 0) { #if distribution is 0 (symmetric t-distribution), parameters in slices 2:5 must be specified
          # Sign must be NA, 1, or -1, and any sign restriction must agree
          # with the sign of the (non-zero) position parameter.
          if ((!is.na(list1[[i]][j, k, 2])) && ((list1[[i]][j, k, 2] != 1) & (list1[[i]][j, k, 2] != (-1)))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 2]: Sign should be indicated with a NA, 1, or -1.", sep = ""))
          }
          if (!is.finite(list1[[i]][j, k, 3])) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be indicated with a finite number.", sep = ""))
          }
          if ((!is.na(list1[[i]][j, k, 2])) && ((list1[[i]][j, k, 3]) != 0) && ((list1[[i]][j, k, 2]) != ((list1[[i]][j, k, 3]) / abs(list1[[i]][j, k, 3])))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should have the same sign as sign (", names(list1[i]), "[", j, ", ", k, ", 2]).", sep = ""))
          }
          # Scale must be positive; degrees of freedom must exceed 2 so the
          # t-distribution has a finite variance.
          if ((!is.finite(list1[[i]][j, k, 4])) || (list1[[i]][j, k, 4] <= 0)) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 4]: Scale should be indicated with a finite number greater than 0.", sep = ""))
          }
          if ((!is.finite(list1[[i]][j, k, 5])) || (list1[[i]][j, k, 5] <= 2)) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 5]: Degrees of freedom should be indicated with a finite number greater than 2.", sep = ""))
          }
          # Symmetric t-distribution has no skew parameter.
          if (any(!is.na(list1[[i]][j, k, 6]))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should be NA.", sep = ""))
          }
          if ((names(list1[i]) == "pA") && ((!is.na(list1[[i]][j, k, 7])) && ((!is.finite(list1[[i]][j, k, 7])) || (list1[[i]][j, k, 7] != 1)))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 7]: Long-run restriction should be indicated with an NA (no long-run restriction) or a 1 (long-run restriction).", sep = ""))
          }
          if ((names(list1[i]) == "pA") && ((is.na(list1[[i]][j, k, 8])) || (!is.finite(list1[[i]][j, k, 8])) || (list1[[i]][j, k, 8] < 0.1))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 8]: Proposal scaling parameter should be greater than or equal to 0.1.", sep = ""))
          }
        } else if (list1[[i]][j, k, 1] == 1) { #if distribution is 1 (non-central t-distribution), parameters in slices 2:6 must be specified
          # Non-central t: sign restrictions are not supported; the skew
          # parameter must be present and consistent with the position.
          if (!is.na(list1[[i]][j, k, 2])) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 2]: Sign should be NA.", sep = ""))
          }
          if (!is.finite(list1[[i]][j, k, 3])) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 3]: Position should be indicated with a finite number.", sep = ""))
          }
          if ((!is.finite(list1[[i]][j, k, 4])) || (list1[[i]][j, k, 4] <= 0)) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 4]: Scale should be indicated with a finite number greater than 0.", sep = ""))
          }
          if ((!is.finite(list1[[i]][j, k, 5])) || (list1[[i]][j, k, 5] <= 2)) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 5]: Degrees of freedom should be indicated with a finite number greater than 2.", sep = ""))
          }
          if (!is.finite(list1[[i]][j, k, 6])) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should be indicated with a finite number.", sep = ""))
          }
          # Skew and position must be zero together and share the same sign
          # when non-zero.
          if (((list1[[i]][j, k, 6] == 0) & (list1[[i]][j, k, 3] != 0)) | ((list1[[i]][j, k, 6] != 0) & (list1[[i]][j, k, 3] == 0))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should be zero if position (", names(list1[i]), "[", j, ", ", k, ", 3]) is zero.", sep = ""))
          }
          if ((list1[[i]][j, k, 6] != 0) && (list1[[i]][j, k, 3] != 0) && (((list1[[i]][j, k, 6]) / abs(list1[[i]][j, k, 6])) != ((list1[[i]][j, k, 3]) / abs(list1[[i]][j, k, 3])))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 6]: Skew should have the same sign as position (", names(list1[i]), "[", j, ", ", k, ", 3]).", sep = ""))
          }
          if ((names(list1[i]) == "pA") && ((!is.na(list1[[i]][j, k, 7])) && ((!is.finite(list1[[i]][j, k, 7])) || (list1[[i]][j, k, 7] != 1)))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 7]: Long-run restriction should be indicated with an NA (no long-run restriction) or a 1 (long-run restriction).", sep = ""))
          }
          if ((names(list1[i]) == "pA") && ((is.na(list1[[i]][j, k, 8])) || (!is.finite(list1[[i]][j, k, 8])) || (list1[[i]][j, k, 8] < 0.1))) {
            return(paste(names(list1[i]), "[", j, ", ", k, ", 8]: Proposal scaling parameter should be greater than or equal to 0.1.", sep = ""))
          }
        } else {
          return(paste(names(list1[i]), "[", j, ", ", k, ", 1]: Distribution should be indicated with a NA (no prior), 0 (symetric t-distribution), or 1 (non-central t-distribution).", sep = ""))
        }
      }
    }
  }
  return("pass")
}
# Check arguments from the BH_SBVAR function
#' @keywords internal
arguments_check <- function(y, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, itr, burn, thin, acc_irf, h1_irf, ci) {
  # Validate the full argument set of BH_SBVAR() by delegating to the
  # type-specific checkers and then verifying the long-run restriction
  # confidence array pR_sig directly.
  # Returns the string "pass" on success, otherwise a character string
  # describing the first violation found.
  test <- check_integers(list(nlags = nlags, itr = itr, burn = burn, thin = thin, h1_irf = h1_irf))
  if (test != "pass") {
    return(test)
  }
  test <- check_doubles(list(ci = ci))
  if (test != "pass") {
    return(test)
  }
  if ((!is.logical(acc_irf)) || (is.na(acc_irf))) {
    return(paste("acc_irf: Must be logical 'TRUE' or 'FALSE'.", sep = ""))
  }
  # Number of retained posterior draws after burn-in and thinning.
  if (floor((itr - burn) / thin) < 5000) {
    return(paste("'floor((itr-burn)/thin)' must be greater than or equal to 5000.", sep = ""))
  }
  test <- check_matrices(list(y = y, pP = pP, pP_sig = pP_sig, kappa1 = kappa1), nlags)
  if (test != "pass") {
    return(test)
  }
  test <- check_arrays(list(pA = pA, pdetA = pdetA, pH = pH), y)
  if (test != "pass") {
    return(test)
  }
  # check pR_sig
  if (!is.array(pR_sig)) {
    return("pR_sig: Must be an array.")
  }
  # Typo fix: "Mulst" -> "Must".
  if (any(!is.finite(pR_sig)) || (any(pR_sig < 0))) {
    return("pR_sig: Must contain finite values greater than or equal to 0.")
  }
  if ((dim(pR_sig)[1] != ((nlags * ncol(y)) + 1)) | (dim(pR_sig)[2] != ((nlags * ncol(y)) + 1)) | (dim(pR_sig)[3] != ncol(y))) {
    return(paste("pR_sig: Dimensions should be (", ((nlags * ncol(y)) + 1), ", ", ((nlags * ncol(y)) + 1), ", ", (ncol(y)), ").", sep = ""))
  }
  # pR_sig must be consistent, equation by equation, with the long-run
  # restrictions declared in pA[, , 7].
  for (i in 1:ncol(y)) {
    if (any(is.finite(pA[, i, 7]))) {
      # Restricted parameters need a strictly positive confidence value
      # on the corresponding diagonal element.
      n <- which(is.finite(pA[, i, 7]))
      for (j in n) {
        if (pR_sig[j, j, i] <= 0) {
          # Message formatting fixed: previously rendered as "pA[j, i,  7]" with a stray double space.
          return(paste("pR_sig: The value at pR_sig[", j, ", ", j, ", ", i, "] should be a finite value greater than 0 since pA[", j, ", ", i, ", 7] indicates a long-run restriction.", sep = ""))
        }
      }
      if (any(!is.finite(pA[, i, 7]))) {
        # Unrestricted parameters must have zero confidence entries in all
        # of their lagged positions.
        n <- which(!is.finite(pA[, i, 7]))
        for (j in n) {
          for (k in seq(from = j, to = (dim(pR_sig)[1] - 1), by = ncol(y))) {
            for (l in seq(from = j, to = (dim(pR_sig)[2] - 1), by = ncol(y))) {
              if (pR_sig[k, l, i] != 0) {
                # Typo fix: "vlue" -> "value".
                return(paste("pR_sig: The value at pR_sig[", k, ", ", l, ", ", i, "] should be 0.", sep = ""))
              }
            }
          }
        }
      }
      # The constant-term row and column carry no long-run restriction.
      if ((any(pR_sig[(dim(pR_sig)[1]), , i] != 0)) | (any(pR_sig[, (dim(pR_sig)[2]), i] != 0))) {
        return(paste("pR_sig: The values at pR_sig[", (dim(pR_sig)[1]), ", , ", i, "] and pR_sig[, ", (dim(pR_sig)[2]), ", ", i, "] should be 0.", sep = ""))
      }
    } else {
      # No long-run restrictions for this equation: the whole slice is 0.
      if (any(pR_sig[, , i] != 0)) {
        return(paste("pR_sig: Each element in pR_sig[, , ", i, "] should be 0 since there were no long-run restrictions indicated for equation ", i, ".", sep = ""))
      }
    }
    if (!isSymmetric(pR_sig[, , i])) {
      return(paste("pR_sig[, , ", i, "]: Must be symmetric.", sep = ""))
    }
  }
  return("pass")
}
# Line Plots
#' @keywords internal
line_plot <- function(data1, prior_name, i, j) {
  # Trace plot of posterior draws for a single model parameter.
  #
  # data1: numeric vector of posterior draws.
  # prior_name: one of "pA", "pH", "pdetA"; selects the plot title.
  # i, j: row/column position of the parameter, used in the title.
  #
  # Draws from A are plotted with the sign flipped so the title reads -A(i,j).
  sign_flip <- if (prior_name == "pA") -1 else 1
  graphics::plot(x = (sign_flip * data1), type = "l", col = "black", yaxs = "r", xaxs = "i", xlab = "Iteration", ylab = "Estimate")
  main_title <- switch(prior_name,
                       pA = paste("-A(", i, ",", j, ")", sep = ""),
                       pH = paste("H(", i, ",", j, ")", sep = ""),
                       pdetA = "Determinant of A")
  if (!is.null(main_title)) {
    graphics::title(main = main_title, col.main = "black")
  }
  # Brief pause so successive plots remain visible on interactive devices.
  Sys.sleep(0.25)
}
# Autocorrelation Plots
#' @keywords internal
acf_plot <- function(data1, prior_name, i, j) {
  # Autocorrelation plot of posterior draws for a single model parameter.
  #
  # data1: numeric vector of posterior draws.
  # prior_name: one of "pA", "pH", "pdetA"; selects the title/warning text.
  # i, j: row/column position of the parameter.
  #
  # A constant chain cannot be autocorrelated, so it triggers a warning
  # instead of a plot.
  has_variation <- any(data1 != data1[1])
  if (has_variation) {
    stats::acf(x = stats::ts(data1), lag.max = NULL, plot = TRUE, type = c("correlation"), demean = TRUE, main = "", xlab = "Lag Length", ylab = "Correlation", ci = 0)
    main_title <- switch(prior_name,
                         pA = paste("-A(", i, ",", j, ")", sep = ""),
                         pH = paste("H(", i, ",", j, ")", sep = ""),
                         pdetA = "Determinant of A")
    if (!is.null(main_title)) {
      graphics::title(main = main_title, col.main = "black")
    }
    # Brief pause so successive plots remain visible on interactive devices.
    Sys.sleep(0.25)
  } else {
    warn_text <- switch(prior_name,
                        pA = paste("No variation in -A(", i, ",", j, ")", sep = ""),
                        pH = paste("No variation in H(", i, ",", j, ")", sep = ""),
                        pdetA = "No variation in det(A)")
    if (!is.null(warn_text)) {
      warning(warn_text, immediate. = TRUE)
    }
  }
}
#' Structural Bayesian Vector Autoregression
#'
#' Runs a Structural Bayesian Vector Autoregression model with the method developed by Baumeister and Hamilton (2015, 2017, and 2018).
#' @author Paul Richardson
#' @export
#' @import Rcpp
#' @name BH_SBVAR
#' @param y \emph{(T x n)} matrix containing the endogenous variables. \emph{T} is the number of observations and \emph{n} is the number of endogenous variables.
#' @param nlags Integer specifying the lag order.
#' @param pA \emph{(n x n x 8)} array where \emph{n} is the number of endogenous variables and each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution positions, distribution scales, distribution degrees of freedom, distribution skew, long-run restriction scale parameters, and random-walk proposal scale parameters for the coefficient matrix \emph{A}, respectively.
#' @param pdetA \emph{(1 x 1 x 6)} array where each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution positions, distribution scales, distribution degrees of freedom, and distribution skew parameters for the determinant of \emph{A}, respectively (default = NULL). NULL indicates no priors for the determinant of \emph{A}.
#' @param pH \emph{(n x n x 6)} array where \emph{n} is the number of endogenous variables and each slice of the third dimension contains the prior distributions (NA - no prior, 0 - symmetric t-distribution, 1 - non-central t-distribution), sign restrictions (NA - no restriction, 1 - positive restriction, -1 - negative restriction), distribution positions, distribution scales, distribution degrees of freedom, distribution skew parameters for \emph{H}, the inverse of \emph{A}, respectively (default = NULL). NULL indicates no priors for the inverse of \emph{A}.
#' @param pP \emph{(k x n)} matrix containing the prior position parameters for the reduced form lagged coefficient matrix \emph{\eqn{\Phi}} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no priors for \emph{\eqn{\Phi}}.
#' @param pP_sig \emph{(k x k)} matrix containing values indicating confidence in the priors for \emph{\eqn{\Phi}} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no priors for \emph{\eqn{\Phi}}.
#' @param pR_sig \emph{(k x k x n)} array containing values indicating confidence in long-run restrictions on the reduced form lagged coefficient matrix \emph{\eqn{\Phi}} (default = NULL). \emph{\eqn{k = n L + 1}}, \emph{n} is the number of endogenous variables, and \emph{L} is the lag length. NULL indicates no long-run restrictions.
#' @param kappa1 \emph{(1 x n)} matrix containing values indicating confidence in priors for the structural variances (default = NULL). \emph{n} is the number of endogenous variables. NULL indicates no priors for structural variances.
#' @param itr Integer specifying the total number of iterations for the algorithm (default = 5000).
#' @param burn Integer specifying the number of draws to throw out at the beginning of the algorithm (default = 0).
#' @param thin Integer specifying the thinning parameter (default = 1). All draws beyond burn are kept when thin = 1. Draw 1, draw 3, etc. beyond burn are kept when thin = 2.
#' @param acc_irf Boolean indicating whether accumulated impulse responses are to be returned (default = TRUE).
#' @param h1_irf Integer specifying the time horizon for computing impulse responses (default = 12).
#' @param ci Numeric value indicating credibility intervals for the estimates to be returned (default = 0.975).
#' @details Runs a Structural Bayesian Vector Autoregression model with the method developed in Baumeister and Hamilton (2015, 2017, and 2018). The function returns a list containing the results.
#' @return A list containing the following:
#' @return accept_rate: Acceptance rate of the algorithm.
#' @return y and x: Matrices containing the endogenous variables and their lags.
#' @return pA, pdetA, pH, pP, pP_sig, pR, pR_sig, tau1, and kappa1: Matrices and arrays containing prior information.
#' @return A_start: Matrix containing estimates of the parameters in \emph{A} from the optimization routine.
#' @return A, detA, H, B, and Phi: Arrays containing estimates of the model parameters. The first, second, and third slices of the third dimension are lower, median, and upper bounds of the estimates.
#' @return HD and IRF: Arrays containing historical decomposition of structural shocks and impulse response functions. The first, second, and third slices of the third dimension are lower, median, and upper bounds of the estimates.
#' @return A_den, detA_den, and H_den: Lists containing the horizontal and vertical axis coordinates of posterior densities of \emph{A}, \emph{det(A)}, and \emph{H}.
#' @return Line and ACF plots of the estimates for \emph{A}, \emph{det(A)}, and \emph{H}.
#' @references Baumeister, C., and Hamilton, J.D. (2015). Sign restrictions, structural vector autoregressions, and useful prior information. \emph{Econometrica}, 83(5), 1963-1999.
#' @references Baumeister, C., and Hamilton, J.D. (2017). Structural interpretation of vector autoregressions with incomplete identification: Revisiting the role of oil supply and demand shocks (No. w24167). National Bureau of Economic Research.
#' @references Baumeister, C., and Hamilton, J.D. (2018). Inference in structural vector autoregressions when the identifying assumptions are not fully believed: Re-evaluating the role of monetary policy in economic fluctuations. \emph{Journal of Monetary Economics},
#' @examples
#' # Import data
#' library(BHSBVAR)
#' set.seed(123)
#' data(USLMData)
#' y <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2)
#' colnames(y) <- c("Wage", "Employment")
#'
#' # Set function arguments
#' nlags <- 4
#' itr <- 5000
#' burn <- 0
#' thin <- 1
#' acc_irf <- TRUE
#' h1_irf <- 20
#' ci <- 0.975
#'
#' # Priors for A
#' pA <- array(data = NA, dim = c(2, 2, 8))
#' pA[, , 1] <- c(0, NA, 0, NA)
#' pA[, , 2] <- c(1, NA, -1, NA)
#' pA[, , 3] <- c(0.6, 1, -0.6, 1)
#' pA[, , 4] <- c(0.6, NA, 0.6, NA)
#' pA[, , 5] <- c(3, NA, 3, NA)
#' pA[, , 6] <- c(NA, NA, NA, NA)
#' pA[, , 7] <- c(NA, NA, 1, NA)
#' pA[, , 8] <- c(2.4, NA, 2.4, NA)
#'
#' # Position priors for Phi
#' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA))
#' pP[1:nrow(pA), 1:ncol(pA)] <-
#' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA))
#'
#' # Confidence in the priors for Phi
#' x1 <-
#' matrix(data = NA, nrow = (nrow(y) - nlags),
#' ncol = (ncol(y) * nlags))
#' for (k in 1:nlags) {
#' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <-
#' y[(nlags - k + 1):(nrow(y) - k),]
#' }
#' x1 <- cbind(x1, 1)
#' colnames(x1) <-
#' c(paste(rep(colnames(y), nlags),
#' ".L",
#' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)),
#' decreasing = FALSE),
#' sep = ""),
#' "cons")
#' y1 <- y[(nlags + 1):nrow(y),]
#' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1))
#' for (i in 1:ncol(y1)) {
#' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1)
#' yy <- matrix(data = y1[, i], ncol = 1)
#' phi <- solve(t(xx) %*% xx, t(xx) %*% yy)
#' ee[, i] <- yy - (xx %*% phi)
#' }
#' somega <- (t(ee) %*% ee) / nrow(ee)
#' lambda0 <- 0.2
#' lambda1 <- 1
#' lambda3 <- 100
#' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1)
#' v1 <- v1^((-2) * lambda1)
#' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1)
#' v3 <- kronecker(v1, v2)
#' v3 <- (lambda0^2) * rbind(v3, (lambda3^2))
#' v3 <- 1 / v3
#' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3))
#' diag(pP_sig) <- v3
#'
#' # Confidence in long-run restriction priors
#' pR_sig <-
#' array(data = 0,
#' dim = c(((nlags * ncol(y)) + 1),
#' ((nlags * ncol(y)) + 1),
#' ncol(y)))
#' Ri <-
#' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags),
#' matrix(data = c(1, 0), nrow = 1)),
#' 0)
#' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1
#'
#' # Confidence in priors for D
#' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y))
#'
#' # Set graphical parameters
#' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif",
#' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1)
#'
#' # Run the model and estimate the model parameters
#' results1 <-
#' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig,
#' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn,
#' thin = thin, acc_irf = acc_irf,
#' h1_irf = h1_irf, ci = ci)
BH_SBVAR <- function(y, nlags, pA, pdetA = NULL, pH = NULL, pP = NULL, pP_sig = NULL, pR_sig = NULL, kappa1 = NULL, itr = 5000, burn = 0, thin = 1, acc_irf = TRUE, h1_irf = 12, ci = 0.975) {
  # Estimate a structural Bayesian VAR with the Baumeister-Hamilton
  # methodology: validate inputs, find posterior-mode starting values for A
  # via L-BFGS-B, then run the Metropolis-Hastings sampler (MAIN).
  #construct objects from NULL inputs: NULL priors become "no prior" arrays
  #and zero-confidence matrices so downstream code has uniform shapes.
  if (is.null(pdetA)) {
    pdetA <- array(data = NA_real_, dim = c(1, 1, 6))
  }
  if (is.null(pH)) {
    pH <- array(data = NA_real_, dim = c(ncol(y), ncol(y), 6))
  }
  # pP and pP_sig must be supplied together; if either is missing, both
  # default to zero (no prior information on Phi).
  if (is.null(pP) || is.null(pP_sig)) {
    pP <- matrix(data = 0, nrow = ((nlags * ncol(y)) + 1), ncol = ncol(y))
    pP_sig <- matrix(data = 0, nrow = ((nlags * ncol(y)) + 1), ncol = ((nlags * ncol(y)) + 1))
  }
  if (is.null(pR_sig)) {
    pR_sig <- array(data = 0, dim = c(((nlags * ncol(y)) + 1), ((nlags * ncol(y)) + 1), ncol(y)))
  }
  if (is.null(kappa1)) {
    kappa1 <- matrix(data = 0, nrow = 1, ncol = ncol(y))
  }
  #check BH_SBVAR function arguments
  test <- arguments_check(y, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, itr, burn, thin, acc_irf, h1_irf, ci)
  if (test != "pass") {
    stop(test)
  }
  #create proposal scale matrix from the scaling parameters (slice 8) of
  #the elements of A that are to be estimated.
  scale_ar <- diag(x = c(pA[, , 8])[which(!is.na(pA[, , 1]))], nrow = length(which(!is.na(pA[, , 1]))), ncol = length(which(!is.na(pA[, , 1]))))
  #trim pA: slice 8 is no longer needed after building the scale matrix.
  pA <- pA[, , 1:7]
  #check for variable names
  if (is.null(colnames(y))) {
    colnames(y) <- paste("y", 1:ncol(y), sep = "")
  } else {
    colnames(y) <- make.names(colnames(y), unique = TRUE)
  }
  rownames(y) <- NULL
  #get variable names
  varnames <- colnames(y)
  #get x and y data matrices (lagged regressors and trimmed endogenous data)
  list1 <- getXY(y, nlags)
  x1 <- list1[[1]]
  y1 <- list1[[2]]
  #omega: reduced-form residual covariance from the full system regression
  omega <- ((t(y1) %*% y1) - (t(y1) %*% x1) %*% solve(t(x1) %*% x1) %*% t(t(y1) %*% x1)) / nrow(y1)
  #somega: residual covariance from univariate own-lag regressions,
  #used for Minnesota-style prior scaling.
  ee <- matrix(data = NA_real_, nrow = nrow(y1), ncol = ncol(y1), dimnames = list(rownames(y1), colnames(y1)))
  for (i in 1:ncol(y1)) {
    xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1)
    yy <- matrix(data = y1[, i], ncol = 1)
    phi <- solve((t(xx) %*% xx), (t(xx) %*% yy))
    ee[, i] <- yy - (xx %*% phi)
  }
  somega <- (t(ee) %*% ee) / nrow(ee)
  #optimization: start from the prior positions; sign restrictions become
  #one-sided box constraints for L-BFGS-B.
  startvalues <- matrix(data = c(pA[, , 3])[c(which(!is.na(pA[, , 1])))], ncol = 1)
  lower <- matrix(data = c(pA[, , 2])[c(which(!is.na(pA[, , 1])))], ncol = 1)
  upper <- matrix(data = c(pA[, , 2])[c(which(!is.na(pA[, , 1])))], ncol = 1)
  for (i in 1:nrow(lower)) {
    if (is.na(lower[i, 1])) {
      lower[i, 1] <- -Inf
      upper[i, 1] <- Inf
    } else if (lower[i, 1] == 1) {
      lower[i, 1] <- 0.0001
      upper[i, 1] <- Inf
    } else if (lower[i, 1] == -1) {
      lower[i, 1] <- -Inf
      upper[i, 1] <- -0.0001
    }
  }
  A_optim <- stats::optim(par = c(startvalues), fn = post_A_optim, pA = pA, pdetA = pdetA, pH = pH, pP = pP, pP_sig = pP_sig, pR_sig = pR_sig, kappa1 = kappa1, y1 = y1, x1 = x1, omega = omega, somega = somega, nlags = nlags, method = "L-BFGS-B", lower = c(lower), upper = c(upper), hessian = TRUE, control = list(maxit = 2500))
  #test convergence
  if (A_optim$convergence != 0) {
    stop("Optimization routine convergence was not successful.")
  }
  #optimum values in A: estimated entries come from the optimizer, fixed
  #entries come from the prior position slice.
  A_start <- matrix(data = NA_real_, nrow = (nrow(pA) * ncol(pA)), ncol = 1)
  A_start[c(which(!is.na(pA[, , 1]))), 1] <- A_optim[[1]]
  A_start[c(which(is.na(pA[, , 1]))), 1] <- c(pA[, , 3])[c(which(is.na(pA[, , 1])))]
  A_start <- matrix(data = A_start, nrow = nrow(pA), ncol = ncol(pA), dimnames = list(colnames(y1), colnames(y1)))
  #test that optimized starting values are consistent with sign restrictions
  H_max <- solve(A_start)
  for (i in 1:nrow(pA)) {
    for (j in 1:ncol(pA)) {
      # Typo fix in both messages below: "cosistent" -> "consistent".
      if ((!is.na(pA[i, j, 1])) && (pA[i, j, 1] == 0) && (!is.na(pA[i, j, 2])) && (pA[i, j, 2] != (A_start[i, j] / abs(A_start[i, j])))) {
        stop("Optimization routine produces values for the elements in A that are not consistent with sign restrictions.")
      }
      if ((!is.na(pH[i, j, 1])) && (pH[i, j, 1] == 0) && (!is.na(pH[i, j, 2])) && (pH[i, j, 2] != (H_max[i, j] / abs(H_max[i, j])))) {
        warning("Optimization routine produces values for the elements in H that are not consistent with sign restrictions.", immediate. = TRUE)
      }
    }
  }
  if ((!is.na(pdetA[1, 1, 1])) && (pdetA[1, 1, 1] == 0) && (!is.na(pdetA[1, 1, 2])) && (pdetA[1, 1, 2] != (det(A_start) / abs(det(A_start))))) {
    warning("Optimization routine produces values for the determinant of A that are not consistent with sign restrictions.", immediate. = TRUE)
  }
  #scale: use the Cholesky factor of the inverse Hessian at the mode when
  #it is positive definite, otherwise fall back to the identity.
  H0 <- A_optim$hessian
  if (min(eigen(solve(H0))$values) > 0) {
    PH <- t(chol(solve(H0)))
  } else {
    PH <- diag(x = 1, nrow = nrow(H0))
  }
  scale1 <- PH * scale_ar
  #Metropolis-Hastings Algorithm
  results <- MAIN(y1, x1, omega, somega, nlags, pA, pdetA, pH, pP, pP_sig, pR_sig, kappa1, A_start, itr, burn, thin, scale1, h1_irf, acc_irf, ci, varnames, line_plot, acf_plot)
  return(results)
}
# Check arguments from the IRF_Plots, HD_Plots, Dist_Plots functions.
#' @keywords internal
check_results <- function(results, xlab, ylab) {
  # Validate the results list (as produced by BH_SBVAR()) and the optional
  # axis labels supplied to the plotting functions (IRF_Plots, HD_Plots,
  # Dist_Plots).
  # Returns the string "pass" on success, otherwise a character string
  # describing the first problem found.
  if ((!is.list(results)) || (length(results) == 0)) {
    return(paste("results: Must be a list of arrays obtained from running BH_SBVAR() function.", sep = ""))
  }
  if ((is.null(results$y)) || (!is.matrix(results$y)) || (any(!is.finite(results$y)))) {
    return(paste("results: y from BH_SBVAR() is not present", sep = ""))
  }
  # A must be a square (n x n x >=3) array of finite values matching ncol(y).
  if ((is.null(results$A)) || (!is.array(results$A)) || (any(!is.finite(results$A))) || (dim(results$A)[1] != dim(results$A)[2]) || (dim(results$A)[3] < 3) || (dim(results$A)[2] != ncol(results$y)) || (dim(results$A)[2] < 2)) {
    return(paste("results: A from BH_SBVAR() is not present", sep = ""))
  }
  # IRF must have >=4 horizons (rows), n^2 response columns, >=3 bands.
  if ((is.null(results$IRF)) || (!is.array(results$IRF)) || (any(!is.finite(results$IRF))) || (dim(results$IRF)[1] < 4) || (dim(results$IRF)[3] < 3) || (dim(results$IRF)[2] != ((dim(results$A)[2])^2))) {
    return(paste("results: IRF from BH_SBVAR() is not present", sep = ""))
  }
  # NOTE(review): 'dim(results$HD)[2] < 4' differs from the parallel IRF
  # check, which bounds dim [1] — confirm whether dim(results$HD)[1] was
  # intended here. Preserved as-is to avoid changing accepted inputs.
  if ((is.null(results$HD)) || (!is.array(results$HD)) || (dim(results$HD)[2] < 4) || (dim(results$HD)[3] < 3) || (dim(results$HD)[2] != ((dim(results$A)[2])^2))) {
    return(paste("results: HD from BH_SBVAR() is not present", sep = ""))
  }
  # is.character() replaces the 'class(x) != "character"' comparison, which
  # errors on multi-class objects in modern R.
  if ((!is.null(xlab)) && ((!is.character(xlab)) || (length(xlab) != 1))) {
    return(paste("xlab: Must be a character vector containing the label for the horizontal axis", sep = ""))
  }
  if ((!is.null(ylab)) && ((!is.character(ylab)) || (length(ylab) != 1))) {
    return(paste("ylab: Must be a character vector containing the label for the vertical axis", sep = ""))
  }
  return("pass")
}
#' Plot Impulse Responses
#'
#' Plot Impulse Responses.
#' @author Paul Richardson
#' @export
#' @name IRF_Plots
#' @param results List containing the results from running BH_SBVAR().
#' @param varnames Character vector containing the names of the endogenous variables.
#' @param shocknames Character vector containing the names of the shocks.
#' @param xlab Character label for the horizontal axis of impulse response plots (default = NULL). Default produces plots without a label for the horizontal axis.
#' @param ylab Character label for the vertical axis of impulse response plots (default = NULL). Default produces plots without a label for the vertical axis.
#' @details Plots impulse responses and returns a list containing the actual processed data used to create the plots.
#' @return A list containing impulse responses:
#' @examples
#' # Import data
#' library(BHSBVAR)
#' set.seed(123)
#' data(USLMData)
#' y <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2)
#' colnames(y) <- c("Wage", "Employment")
#'
#' # Set function arguments
#' nlags <- 4
#' itr <- 5000
#' burn <- 0
#' thin <- 1
#' acc_irf <- TRUE
#' h1_irf <- 20
#' ci <- 0.975
#'
#' # Priors for A
#' pA <- array(data = NA, dim = c(2, 2, 8))
#' pA[, , 1] <- c(0, NA, 0, NA)
#' pA[, , 2] <- c(1, NA, -1, NA)
#' pA[, , 3] <- c(0.6, 1, -0.6, 1)
#' pA[, , 4] <- c(0.6, NA, 0.6, NA)
#' pA[, , 5] <- c(3, NA, 3, NA)
#' pA[, , 6] <- c(NA, NA, NA, NA)
#' pA[, , 7] <- c(NA, NA, 1, NA)
#' pA[, , 8] <- c(2.4, NA, 2.4, NA)
#'
#' # Position priors for Phi
#' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA))
#' pP[1:nrow(pA), 1:ncol(pA)] <-
#' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA))
#'
#' # Confidence in the priors for Phi
#' x1 <-
#' matrix(data = NA, nrow = (nrow(y) - nlags),
#' ncol = (ncol(y) * nlags))
#' for (k in 1:nlags) {
#' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <-
#' y[(nlags - k + 1):(nrow(y) - k),]
#' }
#' x1 <- cbind(x1, 1)
#' colnames(x1) <-
#' c(paste(rep(colnames(y), nlags),
#' ".L",
#' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)),
#' decreasing = FALSE),
#' sep = ""),
#' "cons")
#' y1 <- y[(nlags + 1):nrow(y),]
#' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1))
#' for (i in 1:ncol(y1)) {
#' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1)
#' yy <- matrix(data = y1[, i], ncol = 1)
#' phi <- solve(t(xx) %*% xx, t(xx) %*% yy)
#' ee[, i] <- yy - (xx %*% phi)
#' }
#' somega <- (t(ee) %*% ee) / nrow(ee)
#' lambda0 <- 0.2
#' lambda1 <- 1
#' lambda3 <- 100
#' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1)
#' v1 <- v1^((-2) * lambda1)
#' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1)
#' v3 <- kronecker(v1, v2)
#' v3 <- (lambda0^2) * rbind(v3, (lambda3^2))
#' v3 <- 1 / v3
#' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3))
#' diag(pP_sig) <- v3
#'
#' # Confidence in long-run restriction priors
#' pR_sig <-
#' array(data = 0,
#' dim = c(((nlags * ncol(y)) + 1),
#' ((nlags * ncol(y)) + 1),
#' ncol(y)))
#' Ri <-
#' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags),
#' matrix(data = c(1, 0), nrow = 1)),
#' 0)
#' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1
#'
#' # Confidence in priors for D
#' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y))
#'
#' # Set graphical parameters
#' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif",
#' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1)
#'
#' # Run the model and estimate the model parameters
#' results1 <-
#' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig,
#' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn,
#' thin = thin, acc_irf = acc_irf,
#' h1_irf = h1_irf, ci = ci)
#'
#' # Plot impulse responses
#' varnames <- colnames(USLMData)[2:3]
#' shocknames <- c("Labor Demand","Labor Supply")
#' irf_results <-
#' IRF_Plots(results = results1, varnames = varnames,
#' shocknames = shocknames)
IRF_Plots <- function(results, varnames, shocknames = NULL, xlab = NULL, ylab = NULL) {
  # Plot impulse responses from a BH_SBVAR() results list and return the
  # processed impulse-response data as a named list (one matrix of
  # lower/median/upper bands per shock-response pair).
  #test arguments
  test <- check_results(results, xlab, ylab)
  if (test != "pass") {
    stop(test)
  }
  # Bug fix: an invalid 'varnames' previously hit return(paste(...)),
  # silently handing the error message back as the function's value.
  # stop() matches the shocknames check below.
  if ((!is.character(varnames)) || (length(varnames) != dim(results$A)[2])) {
    stop(paste("varnames: Must be a character vector containing the names of the endogenous variables", sep = ""))
  }
  if (is.null(shocknames)) {
    shocknames <- varnames
  }
  if ((!is.character(shocknames)) || (length(shocknames) != dim(results$A)[2])) {
    stop(paste("shocknames: Must be a character vector containing the names of the shocks", sep = ""))
  }
  if (is.null(xlab)) {
    xlab <- ""
  }
  if (is.null(ylab)) {
    ylab <- ""
  }
  IRF <- results$IRF
  nvar <- dim(results$A)[1]
  # Horizontal tick spacing: quarter of the impulse-response horizon.
  xticks <- floor(dim(IRF)[1] / 4)
  #store results from impulse responses
  irf_results <- list()
  for (j in 1:nvar) {
    for (i in 1:nvar) {
      #impulse responses: columns of IRF are ordered shock-major, so the
      #response of variable i to shock j sits at column (nvar*(j-1) + i).
      irf_name <- paste("Impulse_", c(varnames[j]), "_Response_", c(varnames[i]), sep = "")
      irf_results[[(length(irf_results) + 1)]] <- IRF[, ((nvar * (j - 1)) + i), ]
      names(irf_results)[length(irf_results)] <- irf_name[1]
      #impulse response plots: a zero reference line plus the three bands.
      mat_ts <- stats::ts(cbind(0, irf_results[[length(irf_results)]]))
      colnames(mat_ts) <- c("Series1", "Series2", "Series3", "Series4")
      stats::ts.plot(mat_ts, col = c("black", "red", "black", "red"), gpars = list(xlab = xlab, ylab = ylab, xaxs = "i", yaxs = "r", xaxt = "n", lty = c(1, 2, 1, 2)))
      graphics::title(main = paste("Response of ", varnames[i], " to ", shocknames[j], sep = ""), col.main = "black")
      # Relabel the axis so the first horizon is shown as period 0.
      graphics::axis(side = 1, at = seq(from = 1, to = nrow(mat_ts), by = xticks), labels = seq(from = 0, to = (nrow(mat_ts) - 1),by = xticks))
    }
  }
  return(irf_results)
}
#' Plot Historical Decompositions
#'
#' Plot Historical Decompositions.
#' @author Paul Richardson
#' @export
#' @name HD_Plots
#' @param results List containing the results from running BH_SBVAR().
#' @param varnames Character vector containing the names of the endogenous variables.
#' @param shocknames Character vector containing the names of the shocks.
#' @param xlab Character label for the horizontal axis of historical decomposition plots (default = NULL). Default produces plots without a label for the horizontal axis.
#' @param ylab Character label for the vertical axis of historical decomposition plots (default = NULL). Default produces plots without a label for the vertical axis.
#' @param freq Numeric value indicating the frequency of the data.
#' @param start_date Numeric vector indicating the date of the first observation of the endogenous variables included in the model.
#' @details Plots historical decompositions and returns a list containing the actual processed data used to create the plots.
#' @return A list containing the historical decomposition series displayed in the plots.
#' @examples
#' # Import data
#' library(BHSBVAR)
#' set.seed(123)
#' data(USLMData)
#' y <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2)
#' colnames(y) <- c("Wage", "Employment")
#'
#' # Set function arguments
#' nlags <- 4
#' itr <- 5000
#' burn <- 0
#' thin <- 1
#' acc_irf <- TRUE
#' h1_irf <- 20
#' ci <- 0.975
#'
#' # Priors for A
#' pA <- array(data = NA, dim = c(2, 2, 8))
#' pA[, , 1] <- c(0, NA, 0, NA)
#' pA[, , 2] <- c(1, NA, -1, NA)
#' pA[, , 3] <- c(0.6, 1, -0.6, 1)
#' pA[, , 4] <- c(0.6, NA, 0.6, NA)
#' pA[, , 5] <- c(3, NA, 3, NA)
#' pA[, , 6] <- c(NA, NA, NA, NA)
#' pA[, , 7] <- c(NA, NA, 1, NA)
#' pA[, , 8] <- c(2.4, NA, 2.4, NA)
#'
#' # Position priors for Phi
#' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA))
#' pP[1:nrow(pA), 1:ncol(pA)] <-
#' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA))
#'
#' # Confidence in the priors for Phi
#' x1 <-
#' matrix(data = NA, nrow = (nrow(y) - nlags),
#' ncol = (ncol(y) * nlags))
#' for (k in 1:nlags) {
#' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <-
#' y[(nlags - k + 1):(nrow(y) - k),]
#' }
#' x1 <- cbind(x1, 1)
#' colnames(x1) <-
#' c(paste(rep(colnames(y), nlags),
#' ".L",
#' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)),
#' decreasing = FALSE),
#' sep = ""),
#' "cons")
#' y1 <- y[(nlags + 1):nrow(y),]
#' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1))
#' for (i in 1:ncol(y1)) {
#' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1)
#' yy <- matrix(data = y1[, i], ncol = 1)
#' phi <- solve(t(xx) %*% xx, t(xx) %*% yy)
#' ee[, i] <- yy - (xx %*% phi)
#' }
#' somega <- (t(ee) %*% ee) / nrow(ee)
#' lambda0 <- 0.2
#' lambda1 <- 1
#' lambda3 <- 100
#' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1)
#' v1 <- v1^((-2) * lambda1)
#' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1)
#' v3 <- kronecker(v1, v2)
#' v3 <- (lambda0^2) * rbind(v3, (lambda3^2))
#' v3 <- 1 / v3
#' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3))
#' diag(pP_sig) <- v3
#'
#' # Confidence in long-run restriction priors
#' pR_sig <-
#' array(data = 0,
#' dim = c(((nlags * ncol(y)) + 1),
#' ((nlags * ncol(y)) + 1),
#' ncol(y)))
#' Ri <-
#' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags),
#' matrix(data = c(1, 0), nrow = 1)),
#' 0)
#' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1
#'
#' # Confidence in priors for D
#' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y))
#'
#' # Set graphical parameters
#' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif",
#' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1)
#'
#' # Run the model and estimate the model parameters
#' results1 <-
#' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig,
#' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn,
#' thin = thin, acc_irf = acc_irf,
#' h1_irf = h1_irf, ci = ci)
#'
#' # Plot historical decompositions
#' varnames <- colnames(USLMData)[2:3]
#' shocknames <- c("Labor Demand","Labor Supply")
#' freq <- 4
#' start_date <-
#' c(floor(USLMData[(nlags + 1), 1]),
#' round(((USLMData[(nlags + 1), 1] %% 1) * freq), digits = 0))
#' hd_results <-
#' HD_Plots(results = results1, varnames = varnames,
#' shocknames = shocknames,
#' freq = freq, start_date = start_date)
HD_Plots <- function(results, varnames, shocknames = NULL, xlab = NULL, ylab = NULL, freq, start_date) {
  # Validate the results object and axis labels via the shared checker.
  test <- check_results(results, xlab, ylab)
  if (test != "pass") {
    stop(test)
  }
  # is.character() is robust where class(x) != "character" is not: class()
  # can return a vector of length > 1, which errors inside || on R >= 4.3.
  if (!is.character(varnames) || (length(varnames) != dim(results$A)[2])) {
    # Bug fix: this previously used return(), which silently handed the error
    # message back to the caller as a character string instead of failing.
    stop("varnames: Must be a character vector containing the names of the endogenous variables")
  }
  if (is.null(shocknames)) {
    # Default the shock labels to the variable names.
    shocknames <- varnames
  }
  if (!is.character(shocknames) || (length(shocknames) != dim(results$A)[2])) {
    stop("shocknames: Must be a character vector containing the names of the shocks")
  }
  # is.numeric() also accepts integer input, which is a valid frequency;
  # the previous class() == "numeric" check rejected e.g. freq = 4L.
  # (Typo "grater" in the message is also fixed.)
  if (!is.numeric(freq) || (!is.finite(freq)) || (length(freq) != 1) || ((freq %% 1) != 0) || (freq < 1)) {
    stop("freq: Must be a finite whole number greater than 0.")
  }
  if (!is.numeric(start_date) || (any(!is.finite(start_date))) || (length(start_date) != 2) || (any((start_date %% 1) != 0)) || (any(start_date < 0))) {
    stop("start_date: Must be a numeric vector containing finite whole numbers greater than or equal to 0.")
  }
  # Empty axis labels by default.
  if (is.null(xlab)) {
    xlab <- ""
  }
  if (is.null(ylab)) {
    ylab <- ""
  }
  y <- results$y
  HD <- results$HD
  nvar <- dim(results$A)[1]
  # Collect the plotted historical decomposition data so it can be returned.
  hd_results <- list()
  for (j in seq_len(nvar)) {
    for (i in seq_len(nvar)) {
      # Contribution of shocks from equation j to variable i.
      hd_name <- paste("Contribution_", c(varnames[j]), "_On_", c(varnames[i]), sep = "")
      hd_results[[(length(hd_results) + 1)]] <- HD[, ((nvar * (j - 1)) + i), ]
      names(hd_results)[length(hd_results)] <- hd_name[1]
      # Plot a zero reference line, the actual series (black) and the stored
      # decomposition series (red), dated using freq and start_date.
      mat_ts <- stats::ts(cbind(0, y[, i], hd_results[[length(hd_results)]]), frequency = freq, start = start_date)
      colnames(mat_ts) <- c("Series1", "Series2", "Series3", "Series4", "Series5")
      stats::ts.plot(mat_ts, col = c("black", "black", "red", "red", "red"), gpars = list(xlab = xlab, ylab = ylab, xaxs = "i", yaxs = "r", lty = c(1, 1, 2, 1, 2)))
      graphics::title(main = paste("Contribution of ", shocknames[j], " Shocks on ", varnames[i], sep = ""), col.main = "black")
    }
  }
  return(hd_results)
}
# Internal helper: draw one posterior density panel (curve, filled polygon,
# title and a custom vertical axis) for a parameter of pA, pH or pdetA.
#' @keywords internal
den_plot <- function(list2, den1, elast, lb, ub, nticks0, A_titles, H_titles, xlab, ylab, k, j, i) {
  # Vertical tick spacing: density range split into nticks0 intervals,
  # rounded to two significant digits.
  tick_step <- signif(((max(den1[, 2]) - min(den1[, 2])) / nticks0), 2)
  horizontal <- elast * den1[, 1]
  vertical <- den1[, 2]
  graphics::plot(x = horizontal, y = vertical, type = "l", col = "black", yaxs = "i", xaxs = "r", yaxt = "n", xlab = xlab, ylab = ylab, xlim = c(lb, ub), ylim = c(0, (tick_step * (nticks0 + 1))))
  # The panel title depends on which parameter block is being drawn.
  panel_title <- switch(names(list2[k]),
    pA = A_titles[i, j],
    pH = H_titles[i, j],
    pdetA = "Determinant of A"
  )
  if (!is.null(panel_title)) {
    graphics::title(main = panel_title, col.main = "black")
  }
  axis_at <- seq(from = -tick_step, to = (nticks0 * tick_step), by = tick_step)
  graphics::axis(side = 2, at = axis_at, labels = axis_at)
  graphics::polygon(x = horizontal, y = vertical, col = "blue")
}
#' Plot Posterior Distributions Against Priors
#'
#' Plot Posterior Distributions Against Priors.
#' @author Paul Richardson
#' @export
#' @import Rcpp
#' @name Dist_Plots
#' @param results List containing the results from running BH_SBVAR().
#' @param A_titles \emph{(n x n)} matrix containing the titles for the plots of the estimated parameters in the coefficient matrix \emph{A}. \emph{n} is the number of endogenous variables.
#' @param H_titles \emph{(n x n)} matrix containing the titles for the plots of the estimated parameters in the coefficient matrix \emph{H} (default = NULL). \emph{n} is the number of endogenous variables.
#' @param xlab Character label for the horizontal axis of historical decomposition plots (default = NULL). Default produces plots without a label for the horizontal axis.
#' @param ylab Character label for the vertical axis of historical decomposition plots (default = NULL). Default produces plots without a label for the vertical axis.
#' @details Plots posterior distributions against prior distributions.
#' @examples
#' # Import data
#' library(BHSBVAR)
#' set.seed(123)
#' data(USLMData)
#' y <- matrix(data = c(USLMData$Wage, USLMData$Employment), ncol = 2)
#' colnames(y) <- c("Wage", "Employment")
#'
#' # Set function arguments
#' nlags <- 4
#' itr <- 5000
#' burn <- 0
#' thin <- 1
#' acc_irf <- TRUE
#' h1_irf <- 20
#' ci <- 0.975
#'
#' # Priors for A
#' pA <- array(data = NA, dim = c(2, 2, 8))
#' pA[, , 1] <- c(0, NA, 0, NA)
#' pA[, , 2] <- c(1, NA, -1, NA)
#' pA[, , 3] <- c(0.6, 1, -0.6, 1)
#' pA[, , 4] <- c(0.6, NA, 0.6, NA)
#' pA[, , 5] <- c(3, NA, 3, NA)
#' pA[, , 6] <- c(NA, NA, NA, NA)
#' pA[, , 7] <- c(NA, NA, 1, NA)
#' pA[, , 8] <- c(2.4, NA, 2.4, NA)
#'
#' # Position priors for Phi
#' pP <- matrix(data = 0, nrow = ((nlags * ncol(pA)) + 1), ncol = ncol(pA))
#' pP[1:nrow(pA), 1:ncol(pA)] <-
#' diag(x = 1, nrow = nrow(pA), ncol = ncol(pA))
#'
#' # Confidence in the priors for Phi
#' x1 <-
#' matrix(data = NA, nrow = (nrow(y) - nlags),
#' ncol = (ncol(y) * nlags))
#' for (k in 1:nlags) {
#' x1[, (ncol(y) * (k - 1) + 1):(ncol(y) * k)] <-
#' y[(nlags - k + 1):(nrow(y) - k),]
#' }
#' x1 <- cbind(x1, 1)
#' colnames(x1) <-
#' c(paste(rep(colnames(y), nlags),
#' ".L",
#' sort(rep(seq(from = 1, to = nlags, by = 1), times = ncol(y)),
#' decreasing = FALSE),
#' sep = ""),
#' "cons")
#' y1 <- y[(nlags + 1):nrow(y),]
#' ee <- matrix(data = NA, nrow = nrow(y1), ncol = ncol(y1))
#' for (i in 1:ncol(y1)) {
#' xx <- cbind(x1[, seq(from = i, to = (ncol(x1) - 1), by = ncol(y1))], 1)
#' yy <- matrix(data = y1[, i], ncol = 1)
#' phi <- solve(t(xx) %*% xx, t(xx) %*% yy)
#' ee[, i] <- yy - (xx %*% phi)
#' }
#' somega <- (t(ee) %*% ee) / nrow(ee)
#' lambda0 <- 0.2
#' lambda1 <- 1
#' lambda3 <- 100
#' v1 <- matrix(data = (1:nlags), nrow = nlags, ncol = 1)
#' v1 <- v1^((-2) * lambda1)
#' v2 <- matrix(data = diag(solve(diag(diag(somega)))), ncol = 1)
#' v3 <- kronecker(v1, v2)
#' v3 <- (lambda0^2) * rbind(v3, (lambda3^2))
#' v3 <- 1 / v3
#' pP_sig <- diag(x = 1, nrow = nrow(v3), ncol = nrow(v3))
#' diag(pP_sig) <- v3
#'
#' # Confidence in long-run restriction priors
#' pR_sig <-
#' array(data = 0,
#' dim = c(((nlags * ncol(y)) + 1),
#' ((nlags * ncol(y)) + 1),
#' ncol(y)))
#' Ri <-
#' cbind(kronecker(matrix(data = 1, nrow = 1, ncol = nlags),
#' matrix(data = c(1, 0), nrow = 1)),
#' 0)
#' pR_sig[, , 2] <- (t(Ri) %*% Ri) / 0.1
#'
#' # Confidence in priors for D
#' kappa1 <- matrix(data = 2, nrow = 1, ncol = ncol(y))
#'
#' # Set graphical parameters
#' par(cex.axis = 0.8, cex.main = 1, font.main = 1, family = "serif",
#' mfrow = c(2, 2), mar = c(2, 2.2, 2, 1), las = 1)
#'
#' # Run the model and estimate the model parameters
#' results1 <-
#' BH_SBVAR(y = y, nlags = nlags, pA = pA, pP = pP, pP_sig = pP_sig,
#' pR_sig = pR_sig, kappa1 = kappa1, itr = itr, burn = burn,
#' thin = thin, acc_irf = acc_irf,
#' h1_irf = h1_irf, ci = ci)
#'
#' # Plot Posterior and Prior Densities
#' A_titles <-
#' matrix(data = NA_character_, nrow = dim(pA)[1], ncol = dim(pA)[2])
#' A_titles[1, 1] <- "Wage Elasticity of Labor Demand"
#' A_titles[1, 2] <- "Wage Elasticity of Labor Supply"
#' par(mfcol = c(1, 2))
#' dist_results <-
#' Dist_Plots(results = results1, A_titles = A_titles)
Dist_Plots <- function(results, A_titles, H_titles = NULL, xlab = NULL, ylab = NULL) {
  # Plot the posterior density of every estimated parameter in A, H and
  # det(A) (via den_plot) and overlay the corresponding prior density in red.
  # Called mainly for its plotting side effects.
  #test arguments
  test <- check_results(results, xlab, ylab)
  if (test != "pass") {
    stop(test)
  }
  # Empty axis labels by default.
  if (is.null(xlab)) {
    xlab <- ""
  }
  if (is.null(ylab)) {
    ylab <- ""
  }
  # Priors (p*) and posterior density estimates (*_den) taken from the
  # BH_SBVAR() results list.
  pA <- results$pA
  pdetA <- results$pdetA
  pH <- results$pH
  A_den <- results$A_den
  detA_den <- results$detA_den
  H_den <- results$H_den
  if (!is.matrix(A_titles) || ((nrow(A_titles) != dim(pA)[1]) | (ncol(A_titles) != dim(pA)[2]))) {
    stop(paste("A_titles: Must be a matrix with row and column length each equal to the number of endogenous variables.", sep = ""))
  }
  if (is.null(H_titles)) {
    # Default: no H panels are titled.
    H_titles <- matrix(data = NA_character_, nrow = dim(pA)[1], ncol = dim(pA)[2])
  }
  if (!is.matrix(H_titles) || ((nrow(H_titles) != dim(pH)[1]) | (ncol(H_titles) != dim(pH)[2]))) {
    stop(paste("H_titles: Must be a matrix with row and column length each equal to the number of endogenous variables.", sep = ""))
  }
  # Titles must be supplied for exactly those parameters that have a prior
  # (first slice of pA / pH not NA).
  for (i in 1:dim(pA)[1]) {
    for (j in 1:dim(pA)[2]) {
      if ((is.na(pA[i,j,1])) && (!is.na(A_titles[i,j]))) {
        stop(paste("A_titles: A_titles[", i, ", ", j, "] should be empty since pA[", i, ", ", j, ", ", 1, "] is empty.", sep = ""))
      }
      if ((!is.na(pA[i,j,1])) && (is.na(A_titles[i,j]))) {
        stop(paste("A_titles: A_titles[", i, ", ", j, "] is missing.", sep = ""))
      }
      if ((is.na(pH[i,j,1])) && (!is.na(H_titles[i,j]))) {
        stop(paste("H_titles: H_titles[", i, ", ", j, "] should be empty since pH[", i, ", ", j, ", ", 1, "] is empty.", sep = ""))
      }
      if ((!is.na(pH[i,j,1])) && (is.na(H_titles[i,j]))) {
        stop(paste("H_titles: H_titles[", i, ", ", j, "] is missing.", sep = ""))
      }
    }
  }
  # Number of vertical tick intervals handed to den_plot().
  nticks0 <- 3
  # Process A and H in one loop: list1 holds posterior densities, list2 the
  # matching priors (names are used below to distinguish the two cases).
  list1 <- list(A_den = A_den, H_den = H_den)
  list2 <- list(pA = pA, pH = pH)
  for (k in 1:length(list1)) {
    # Horizontal values of pA densities are sign-flipped (elast = -1) before
    # plotting; presumably to display elasticities with the conventional
    # sign -- TODO confirm against the package's documentation.
    if (names(list2[k]) == "pA") {
      elast <- -1
    } else {
      elast <- 1
    }
    # Widest horizontal span across all densities of this parameter matrix,
    # so every panel is drawn with a comparable x-axis width.
    max_distance <- 0
    distance <- 0
    for (j in 1:(dim(list2[[k]])[2])) { #equations are by column
      for (i in 1:(dim(list2[[k]])[1])) {
        if (any(!is.na(list1[[k]]$hori[i, j,]))) {
          distance <- ceiling(max(list1[[k]]$hori[i, j,], na.rm = TRUE) - min(list1[[k]]$hori[i, j,], na.rm = TRUE))
        }
        if (distance > max_distance) {
          max_distance <- distance
        }
      }
    }
    for (j in 1:(dim(list2[[k]])[2])) { #equations are by column
      for (i in 1:(dim(list2[[k]])[1])) {
        # Only parameters with a prior (first slice not NA) get a panel.
        if (!is.na(list2[[k]][i, j, 1])) {
          # Plot bounds: centred on the posterior median when slice 2 is NA,
          # otherwise anchored at zero on one side (direction flipped for pA
          # because of the sign flip above).
          if (is.na(list2[[k]][i,j,2])) {
            ub <- (elast * stats::median(list1[[k]]$hori[i,j,])) + (max_distance * 0.5)
            lb <- (elast * stats::median(list1[[k]]$hori[i,j,])) - (max_distance * 0.5)
          } else if (list2[[k]][i,j,2] == 1) {
            if (names(list2[k]) == "pA") {
              ub <- 0
              lb <- (-1) * max_distance
            } else {
              ub <- max_distance
              lb <- 0
            }
          } else if (list2[[k]][i,j,2] == (-1)) {
            if (names(list2[k]) == "pA") {
              ub <- max_distance
              lb <- 0
            } else {
              ub <- 0
              lb <- (-1) * max_distance
            }
          }
          # Posterior density: column 1 horizontal, column 2 vertical values.
          den1 <- cbind(list1[[k]]$hori[i,j,],list1[[k]]$vert[i,j,])
          den_plot(list2, den1, elast, lb, ub, nticks0, A_titles, H_titles, xlab, ylab, k, j, i)
          # Overlay the prior density in red, evaluated on a 501-point grid.
          # Slice 1 selects the prior function (0 -> prior_t / prior_t_p /
          # prior_t_n depending on slice 2; 1 -> prior_nonc_t); slices 3-6
          # hold the prior's parameters.
          if (list2[[k]][i, j, 1] == 0) {
            if (is.na(list2[[k]][i, j, 2])) {
              prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
              for (h in 1:nrow(prior_den)) {
                prior_den[h, 2] <- prior_t(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5])
              }
              graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
            } else if (list2[[k]][i, j, 2] == 1) {
              prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
              for (h in 1:nrow(prior_den)) {
                prior_den[h, 2] <- prior_t_p(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5])
              }
              graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
            } else if (list2[[k]][i, j, 2] == (-1)) {
              prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
              for (h in 1:nrow(prior_den)) {
                prior_den[h, 2] <- prior_t_n(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5])
              }
              graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
            }
          } else if (list2[[k]][i, j, 1] == 1) {
            prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
            for (h in 1:nrow(prior_den)) {
              prior_den[h, 2] <- prior_nonc_t(prior_den[h, 1], list2[[k]][i, j, 3], list2[[k]][i, j, 4], list2[[k]][i, j, 5], list2[[k]][i, j, 6])
            }
            graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
          }
        }
      }
    }
  }
  # Same procedure for the determinant of A (a single 1 x 1 "parameter").
  list2 <- list(pdetA = pdetA)
  elast <- 1
  if (!is.na(pdetA[1, 1, 1])) {
    max_distance <- ceiling(max(detA_den$hori[1, 1,], na.rm = TRUE) - min(detA_den$hori[1, 1,], na.rm = TRUE))
    # NOTE(review): unlike the pA/pH branch above, pdetA[1, 1, 2] is compared
    # with == before any is.na() check; if it is NA this `if` errors.
    # Confirm slice 2 is always set whenever pdetA[1, 1, 1] is not NA.
    if (list2[[1]][1,1,2] == 1) {
      ub <- max_distance
      lb <- 0
    } else if (list2[[1]][1,1,2] == (-1)) {
      ub <- 0
      lb <- (-1) * max_distance
    } else {
      ub <- (elast * stats::median(detA_den$hori[1,1,])) + (max_distance * 0.5)
      lb <- (elast * stats::median(detA_den$hori[1,1,])) - (max_distance * 0.5)
    }
    den1 <- cbind(detA_den$hori[1,1,],detA_den$vert[1,1,])
    den_plot(list2, den1, elast, lb, ub, nticks0, A_titles, H_titles, xlab, ylab, 1, 1, 1)
    # Overlay the prior for det(A), mirroring the slice-1/slice-2 dispatch
    # used for pA and pH above.
    if (pdetA[1, 1, 1] == 0) {
      if (is.na(pdetA[1, 1, 2])) {
        prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
        for (h in 1:nrow(prior_den)) {
          prior_den[h, 2] <- prior_t(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5])
        }
        graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
      } else if (pdetA[1, 1, 2] == 1) {
        prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
        for (h in 1:nrow(prior_den)) {
          prior_den[h, 2] <- prior_t_p(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5])
        }
        graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
      } else if (pdetA[1, 1, 2] == (-1)) {
        prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
        for (h in 1:nrow(prior_den)) {
          prior_den[h, 2] <- prior_t_n(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5])
        }
        graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
      }
    } else if (pdetA[1, 1, 1] == 1) {
      prior_den <- matrix(data = seq(from = (elast * lb), to = (elast * ub), by = (elast * (ub - lb) / 500)), nrow = 501, ncol = 2)
      for (h in 1:nrow(prior_den)) {
        prior_den[h, 2] <- prior_nonc_t(prior_den[h, 1], list2[[1]][1, 1, 3], list2[[1]][1, 1, 4], list2[[1]][1, 1, 5], list2[[1]][1, 1, 6])
      }
      graphics::lines(x = (elast * prior_den[, 1]), y = prior_den[, 2], type = "l", col = "red")
    }
  }
}
|
cb9bfcab0a90a09e3cf408268c558ccca6b7138e
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/QCAGUI/R/makeChart.R
|
c4b9788392d989a96c1d29cd2364b5f9eff63cbb
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,800
|
r
|
makeChart.R
|
`makeChart` <-
function(primes = "", configs = "", snames = "") {
    # Build a prime implicant chart: a logical matrix whose rows are prime
    # implicants and whose columns are initial configurations; entry [i, j]
    # is TRUE when implicant i covers configuration j. Both arguments may be
    # numeric matrices at the implicants level, or character expressions
    # (interpreted against the set names in snames via translate()).
    if (!isNamespaceLoaded("QCA")) {
        requireNamespace("QCA", quietly = TRUE)
    }
    prmat <- is.matrix(primes)
    comat <- is.matrix(configs)
    if (prmat + comat == 2) {
        # --- both arguments given as matrices at the implicants level ---
        if (!(is.numeric(primes) & is.numeric(configs))) {
            cat("\n")
            stop(simpleError("Matrices have to be numeric.\n\n"))
        }
        if (any(primes < 0) | any(configs < 0)) {
            cat("\n")
            stop(simpleError("Matrix values have to be non-negative.\n\n"))
        }
        # rowSums() is clearer and faster than apply(x, 1, sum).
        if (any(rowSums(primes) == 0) | any(rowSums(configs) == 0)) {
            cat("\n")
            stop(simpleError("Matrices have to be specified at implicants level.\n\n"))
        }
        # TRUE where the implicant actually specifies a condition (> 0);
        # equivalent to the former two-step matrix construction.
        primes2 <- primes > 0
        # An implicant covers a configuration when both agree on every
        # condition the implicant specifies. vapply() pins the result type
        # (sapply() could silently change shape on degenerate input).
        mtrx <- vapply(seq_len(nrow(primes)), function(x) {
            apply(configs, 1, function(y) {
                all(primes[x, primes2[x, ]] == y[primes2[x, ]])
            })
        }, logical(nrow(configs)))
        if (nrow(configs) == 1) {
            mtrx <- matrix(mtrx)
        }
        else {
            mtrx <- t(mtrx)
        }
        # Single-letter condition names need no "*" separator in the labels.
        collapse <- ifelse(all(nchar(colnames(primes)) == 1) & all(nchar(colnames(configs)) == 1), "", "*")
        rownames(mtrx) <- QCA::writePrimeimp(primes, collapse = collapse, uplow = all(primes < 3) | all(configs < 3))
        colnames(mtrx) <- QCA::writePrimeimp(configs, collapse = collapse, uplow = all(primes < 3) | all(configs < 3))
        return(mtrx)
    }
    else if (prmat + comat == 0) {
        # --- both arguments given as character expressions ---
        tconfigs <- translate(configs, snames)
        if (identical(snames, "")) {
            # Take the set names from the translated configurations.
            snames <- colnames(tconfigs)
        }
        tprimes <- translate(primes, snames)
        mtrx <- matrix(FALSE, nrow = nrow(tprimes), ncol = nrow(tconfigs))
        for (i in seq_len(nrow(mtrx))) {
            for (j in seq_len(ncol(mtrx))) {
                tp <- tprimes[i, ]
                tc <- tconfigs[j, ]
                if (is.element("mv", names(attributes(tprimes)))) {
                    # Multi-value data: the specified levels must match too.
                    tpmv <- attr(tprimes, "mv")[i, ]
                    tcmv <- attr(tconfigs, "mv")[j, ]
                    mtrx[i, j] <- all(tp[tp >= 0] == tc[tp >= 0]) & all(tpmv[tp >= 0] == tcmv[tp >= 0])
                }
                else {
                    mtrx[i, j] <- all(tp[tp >= 0] == tc[tp >= 0])
                }
            }
        }
        colnames(mtrx) <- rownames(tconfigs)
        rownames(mtrx) <- rownames(tprimes)
        return(mtrx)
    }
    else {
        cat("\n")
        stop(simpleError("Both arguments have to be matrices.\n\n"))
    }
}
|
ebb17fa219ddd64ad99290fbedc5a008d83ad8d6
|
6738306f6c56ed5273ec8bc940e9aa71bea5230b
|
/RFiles/husmann.R
|
ce64e6db84596c7b5d9683a562913788e5493ae9
|
[] |
no_license
|
kaihusmann/optimization_essay
|
194d79d738677fe7fe94eca442055670ca4fbbd8
|
d76884cd1fb73f2d8cca4688312f9710dafe77ba
|
refs/heads/master
| 2021-01-22T03:44:07.933557
| 2017-06-12T17:05:12
| 2017-06-12T17:05:12
| 81,453,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,079
|
r
|
husmann.R
|
#----------------------------------------------------#
#### Example 1: Himmelblau, cont. parameter space ####
#----------------------------------------------------#
## Clear workspace ##
rm(list = ls())
## Load packages ##
library(GenSA)
library(optimization)
library(xtable)
library(microbenchmark)
## setwd ##
#setwd('/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/')
## Himmelblau's function ##
# 4 minima at
# f(3, 2) = 0
# f(-2.805, 3.131) = 0
# f(-3.779, -3.283) = 0
# f(3.584, -1.848) = 0
# Himmelblau's function: the standard 2-D optimisation test function with
# four global minima, each with a function value of zero.
hi <- function(x){
  term_a <- x[1]^2 + x[2] - 11
  term_b <- x[1] + x[2]^2 - 7
  term_a^2 + term_b^2
}
# Number of Monte Carlo repetitions for every experiment below.
nloop <- 10000 # Caution: Time intensive
#---------------------------#
## Frequency of covariates ##
#---------------------------#
# Each solver is restarted nloop times from a fixed start value; the
# returned objective value and coordinates are collected per run so the
# frequency of the located minima can be tabulated.
## optim_sa ##
trace.1 <- data.frame(fun = rep(NA, nloop), x1 = rep(NA, nloop), x2 = rep(NA, nloop), meth = "optim_sa")
for(i in c(1 : nloop)) {
  trace.1[i, c(1 : 3)] <- unlist(optim_sa(fun = hi,
                                          start = (c(10, 10)),
                                          trace = TRUE,
                                          lower = c(-40, -40),
                                          upper=c(40, 40),
                                          control = list(t0 = 500,
                                                         nlimit = 50,
                                                         r = 0.85,
                                                         rf = 3,
                                                         ac_acc = 0.1,
                                                         dyn_rf = TRUE
                                          )
  )[c("function_value", "par")])
}
# Sanity check: mean objective value should be (close to) zero.
round(mean(trace.1$fun, na.rm = TRUE), digits = 3) <= 0.001
# Round coordinates to one decimal and tabulate how often each minimum occurs.
trace.1.rnd <- cbind(round(trace.1[, c(1 : 3)], digits = 1), trace.1[, 4])
table((apply(trace.1.rnd[c(2, 3)], 1, paste, collapse = "/")))
## Call optim() ##
trace.2 <- data.frame(fun = rep(NA, nloop), x1 = rep(NA, nloop), x2 = rep(NA, nloop), meth = "optim_sann")
for(i in c(1 : nloop)) {
  trace.2[i, c(1 : 3)] <- unlist(optim(fn = hi, par = c(10, 10), method = "SANN", control = list(tmax = 500, reltol = 0.1, temp = 50, trace = TRUE, maxit = 7000))[c("value", "par")])
}
round(mean(trace.2$fun, na.rm = TRUE), digits = 3) <= 0.001
trace.2.rnd <- cbind(round(trace.2[, c(1 : 3)], digits = 1), trace.2[, 4])
table((apply(trace.2.rnd[c(2, 3)], 1, paste, collapse = "/")))
## Call GenSA ##
# The example GenSA is canceled
trace.3 <- data.frame(fun = rep(NA, nloop), x1 = rep(NA, nloop), x2 = rep(NA, nloop), meth = "GenSA")
for(i in c(1 : nloop)) {
  trace.3[i, c(1 : 3)] <- unlist(GenSA(fn = hi, par = c(10, 10), lower = c(-40, -40), upper = c(40, 40), control = list(temperature = 50, nb.stop.improvement = 30, maxit = 500))[c("value", "par")])
}
round(mean(trace.3$fun, na.rm = TRUE), digits = 3) <= 0.001
trace.3.rnd <- cbind(round(trace.3[, c(1 : 3)], digits = 1), trace.3[, 4])
table((apply(trace.3.rnd[c(2, 3)], 1, paste, collapse = "/")))
## Call NM direct search method ##
trace.4 <- data.frame(fun = rep(NA, nloop), x1 = rep(NA, nloop), x2 = rep(NA, nloop), meth = "optim_nm")
for(i in c(1 : nloop)) {
  trace.4[i, c(1 : 3)] <- unlist(optim(fn = hi, par = c(-10, -10), method = "Nelder-Mead")[c("value", "par")])
}
round(mean(trace.4$fun, na.rm = TRUE), digits = 3) <= 0.001
trace.4.rnd <- cbind(round(trace.4[, c(1 : 3)], digits = 1), trace.4[, 4])
table((apply(trace.4.rnd[c(2, 3)], 1, paste, collapse = "/")))
# -> Frequency of covariate combination is +/- equal for optim_sa & optim(SANN)
# GenSA & optim(Nelder-Mead) always only finds -3.8, -3.3
## Create df with results
# Bind dfs (the GenSA run trace.3 is deliberately excluded, see above)
trace <- rbind(trace.1, trace.2, trace.4)
table(trace$meth)
# Make groups
trace$x.factor <- apply(round(trace[c(2, 3)], digits = 1), 1, paste, collapse = "/")
trace$x.factor[trace$x.factor %in% "3.6/-1.9"] <- "3.6/-1.8" # Combine -1.9 and -1.8 to one factor as the real solution is in between(approx. -1.85)
trace$x.factor <- factor(trace$x.factor)
table(trace$x.factor)
# Cross table of method x located minimum, scaled to percent for LaTeX export.
cross.table.x <- xtabs(~meth + x.factor, data = trace)
xtable(cross.table.x / 100) # LaTex Table.
# To reproduce the exact results, the workspace is stored in the following file:
# save.image(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_frequency.RData')
# load(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_frequency.RData')
#---------------#
## Performance ##
#---------------#
## Calculation ##
# Benchmark each solver nloop times with the same settings as above
# (tracing disabled so only the optimisation itself is timed).
mb.1 <- microbenchmark(
  optim_sa(fun = hi,
           start = (c(10, 10)),
           trace = FALSE,
           lower = c(-40, -40),
           upper=c(40, 40),
           control = list(t0 = 500,
                          nlimit = 50,
                          r = 0.85,
                          rf = 3,
                          ac_acc = 0.1,
                          dyn_rf = TRUE
           )
  ), times = nloop
)
mb.2 <- microbenchmark(
  optim(par = c(10, 10), fn = hi, method = "SANN", control = list(tmax = 500, reltol = 0.1, temp = 50, trace = FALSE)), times = nloop
)
mb.3 <- microbenchmark(
  GenSA(par = c(10, 10), fn = hi, lower = c(-40, -40), upper = c(40, 40), control = list(temperature = 50, nb.stop.improvement = 30, maxit = 500)), times = nloop
)
mb.4 <- microbenchmark(
  optim(par = c(-10, -10), fn = hi, method = "Nelder-Mead"), times = nloop
)
## Visualization & saving ##
# To reproduce the exact results, the workspace is stored in the following file:
# save.image(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_speed.RData')
# load(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_speed.RData')
# Quick interactive look at the timing distributions.
boxplot(cbind(mb.4$time, mb.1$time, mb.2$time))
# Counting outliers (runs above the 4e7 plotting limit used below)
length(mb.1$time[mb.1$time > 4e7])
length(mb.2$time[mb.2$time > 4e7])
# length(mb.3$time[mb.3$time > 4e7])
length(mb.4$time[mb.4$time > 4e7])
# Export the timing boxplot as a TikZ figure for the manuscript.
cex.plot.tex <- 1.6
tikzDevice::tikz('Fig/fig1_ex1-time.tex', h = 6, w = 6)
par(mar = c(6, 6, 2, 2) + 0.1)
boxplot(cbind(mb.4$time, mb.1$time, mb.2$time), ylim = c(0, 4e7), axes = FALSE)
axis(1, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 4, "Calculation time [millisecond]", cex = cex.plot.tex)
mtext(side = 1, line = c(1.5, 3, 1.5, 3), at = c(1 : 4), c("optim (NM)", "optim\\_sa", "optim (SA)", "GenSA"), cex = cex.plot.tex)
axis(2, las = 2, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 1.5, c(0 : 4), cex = cex.plot.tex, at = seq(0, 4e7, 1e7), las = 2)
box()
dev.off()
#-------------------------------#
#### Frequency of iterations ####
#-------------------------------#
# Count how many objective-function evaluations each solver needs per run.
## optim_sa ##
freq.1 <- data.frame(n_iter = rep(NA, nloop), meth = "optim_sa")
for(i in c(1 : nloop)) {
  # Sum of inner-loop iterations taken from the optim_sa trace.
  freq.1[i, c(1)] <- sum(as.data.frame(
    optim_sa(fun = hi,
             start = (c(10, 10)),
             trace = TRUE,
             lower = c(-40, -40),
             upper=c(40, 40),
             control = list(t0 = 500,
                            nlimit = 50,
                            r = 0.85,
                            rf = 3,
                            ac_acc = 0.1,
                            dyn_rf = TRUE
             )
    )[c("trace")])$trace.n_inner)
}
## optim() ##
freq.2 <- data.frame(n_iter = rep(NA, nloop), meth = "optim_sann")
for(i in c(1 : nloop)) {
  freq.2$n_iter[i] <- optim(fn = hi, par = c(10, 10), method = "SANN", control = list(tmax = 500, reltol = 0.1, temp = 50, trace = TRUE))$counts[1]
  # Always 9999
}
## Call GenSA ##
freq.3 <- data.frame(n_iter = rep(9999, nloop), meth = "GenSA")
for(i in c(1 : nloop)) {
  freq.3$n_iter[i] <- GenSA(fn = hi, par = c(10, 10), lower = c(-40, -40), upper = c(40, 40), control = list(temperature = 50, nb.stop.improvement = 30, maxit = 500))$counts
}
freq.4 <- data.frame(n_iter = rep(9999, nloop), meth = "optim_nm")
for(i in c(1 : nloop)) {
  freq.4$n_iter[i] <- optim(fn = hi, par = c(-10, -10), method = "Nelder-Mead")$counts[1]
}
# Combine the runs that are reported in the figure (GenSA excluded).
freq <- rbind(freq.4, freq.1, freq.2)
## Visualization & saving ##
# To reproduce the exact results, the workspace is stored in the following file:
# save.image(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_count.RData')
# load(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex1_count.RData')
# Export the iteration-count boxplot as a TikZ figure for the manuscript.
cex.plot.tex <- 1.6
tikzDevice::tikz('Fig/fig1_ex1-counts.tex', h = 6, w = 6)
par(mar = c(6, 6, 2, 2) + 0.1)
boxplot(freq$n_iter ~ freq$meth, ylim = c(0, 1e4), axes = FALSE)
axis(1, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 5, "Frequency of iterations", cex = cex.plot.tex)
mtext(side = 1, line = c(1.5, 3, 1.5, 3), at = c(1 : 4), c("optim (NM)", "optim\\_sa", "optim (SA)", "GenSA"), cex = cex.plot.tex)
axis(2, las = 2, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 1.5, seq(0, 10000, 2000), cex = cex.plot.tex, at = seq(0, 10000, 2000), las = 2)
box()
dev.off()
#### Plot 2-way graphic ####
# Side-by-side TikZ figure: calculation times (left) and iteration counts
# (right) for the three reported solvers.
own.cex <- 1
cex.plot.tex <- 2
tikzDevice::tikz('Fig/fig1_ex1.tex', w = 14 * own.cex, h = 7 * own.cex)
par(mfcol = c(1,2))
# Left panel: timing distributions.
par(mar = c(6, 5, 2, 4) + 0.1)
boxplot(cbind(mb.4$time, mb.1$time, mb.2$time), ylim = c(0, 4e7), axes = FALSE)
axis(1, labels = FALSE, lwd = 0, lwd.ticks = 1, at = c(1 : 3))
mtext(side = 2, line = 3.5, "Calculation time [millisecond]", cex = cex.plot.tex)
mtext(side = 1, line = 3, at = c(1 : 3), c("optim (NM)", "optim\\_sa", "optim (SA)"), cex = cex.plot.tex)
axis(2, las = 2, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 1.5, c(0 : 4), cex = cex.plot.tex, at = seq(0, 4e7, 1e7), las = 2)
box()
# Right panel: iteration counts per method.
par(mar = c(6, 5, 2, 3) + 0.1)
boxplot(freq$n_iter ~ freq$meth, ylim = c(0, 1e4), axes = FALSE)
axis(1, labels = FALSE, lwd = 0, lwd.ticks = 1, at = c(1 : 3))
mtext(side = 2, line = 5.5, "Frequency of iterations", cex = cex.plot.tex)
mtext(side = 1, line = 3, at = c(1 : 3), c("optim (NM)", "optim\\_sa", "optim (SA)"), cex = cex.plot.tex)
axis(2, las = 2, labels = FALSE, lwd = 0, lwd.ticks = 1)
mtext(side = 2, line = 1.5, seq(0, 10000, 2000), cex = cex.plot.tex, at = seq(0, 10000, 2000), las = 2)
box()
dev.off()
#------------------------------------------------------#
#### Example 1: Himmelblau, integer parameter space ####
#------------------------------------------------------#
## Clear workspace ##
rm(list = ls())
## Load packages ##
library(optimization)
library(xtable)
library(microbenchmark)
## setwd ##
#setwd('/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/')
## Himmelblau's function ##
# 4 minima at
# f(3, 2) = 0
# f(-2.804, -3.131) = 0
# f(-3.779, -3.283) = 0
# f( 3.584, -1.848) = 0
#' Himmelblau's test function.
#'
#' Two-dimensional optimization benchmark; per the comment block above, it
#' has four minima with value 0, e.g. f(3, 2) = 0.
#'
#' @param x Numeric vector of length 2.
#' @return A single non-negative numeric value.
hi <- function(x){
  a <- x[1]
  b <- x[2]
  term1 <- a^2 + b - 11
  term2 <- a + b^2 - 7
  term1^2 + term2^2
}
#' Integer variation function for optim_sa.
#'
#' Perturbs each parameter by a uniformly drawn integer step in 1..rf with a
#' random sign, keeping the search on an integer grid.
#'
#' @param para_0 Current parameter vector.
#' @param fun_length Number of parameters to perturb.
#' @param rf Maximum absolute step size (positive integer).
#' @param temp Current temperature; unused here, kept for the optim_sa
#'   variation-function interface.
#' @return Perturbed parameter vector of length `fun_length`.
var_func_int <- function (para_0, fun_length, rf, temp = NA){
  step_size <- sample.int(rf, fun_length, replace = TRUE)
  # rbinom() draws 0/1; 1 - 2*b maps that to a +1/-1 sign
  # (algebraically identical to the original (b * -2) + 1).
  step_sign <- 1 - 2 * rbinom(fun_length, 1, 0.5)
  para_0 + step_size * step_sign
}
#' Random candidate generator used as `gr` for optim(method = "SANN").
#'
#' Perturbs both coordinates of `x` by a random integer amount.
#' NOTE(review): `rbinom(n, 15, 0.5)` draws from 0..15, so the multiplier
#' `(b * -2) + 1` is not a pure -1/+1 sign as in `var_func_int` — presumably
#' intentional as a wider SANN proposal distribution, but worth confirming.
#'
#' @param x Numeric vector of length 2 (current candidate).
#' @return Numeric vector of length 2 (new candidate).
var_func_int_gr <- function (x) {
  jump <- function(coord) {
    coord + sample.int(10, 1, replace = TRUE) * ((rbinom(length(coord), 15, 0.5) * -2) + 1)
  }
  first <- x[1]
  second <- x[2]
  c(jump(first), jump(second))
}
#-----------------------#
## Integer programming ##
#-----------------------#
## optim_sa ##
# Minimize Himmelblau's function on the integer grid [-40, 40]^2 using
# simulated annealing with the custom integer variation function above.
int_programming <- optim_sa(fun = hi,
                            start = (c(10, 10)),
                            trace = TRUE,
                            lower = c(-40, -40),
                            upper=c(40, 40),
                            control = list(t0 = 500,       # initial temperature
                                           nlimit = 50,    # iterations per temperature
                                           r = 0.85,       # cooling rate
                                           rf = 3,         # max step size for vf
                                           ac_acc = 0.1,
                                           dyn_rf = TRUE,  # dynamic step-size adaptation
                                           vf = var_func_int
                            )
)
# Two-panel EPS figure: trace plot plus a contour plot of the search path,
# annotated with arrows pointing at two visited minima.
own.cex <- 0.6
postscript('Fig/fig2-ex2-plot.eps', w = 14 * own.cex, h = 7 * own.cex, paper = "special", horizontal = FALSE)
par(mfcol = c(1,2))
plot(int_programming)
plot(int_programming, type = "contour", lower = c(-5, -5), upper = c(5, 5))
par(mar=c(3.5, 3.5, 1, 1) +0.1, new = TRUE)
# White-under-black arrows/labels so the annotations stay readable on top of
# the filled contour plot.
arrows(x0 = 1.5, y0 = -0.5, x1 = 3, y1 = 0, col = 'white', lwd = 4)
arrows(x0 = 1.5, y0 = -0.5, x1 = 3, y1 = 0, col = 'black', lwd = 2)
# text(x = 1.1, y = -0.5, "10", cex = 1.2, col = 'white')
text(x = 1.1, y = -0.5, "10", cex = 1, col = 'black')
arrows(x0 = 0.5, y0 = -2.5, x1 = 2, y1 = -3, col = 'white', lwd = 4)
arrows(x0 = 0.5, y0 = -2.5, x1 = 2, y1 = -3, col = 'black', lwd = 2)
# text(x = 0.1, y = -2.5, "19", cex = 1.2, col = 'white', adj = 1)
text(x = 0.1, y = -2.5, "19", cex = 1, col = 'black')
dev.off()
# To reproduce the exact results, the workspace is stored in the following file:
# save.image(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex2_Integer.RData')
# load(file = '/home/khusman1/Documents/Veroeffentlichungen/optimization_essay/RFiles/Ex2_Integer.RData')
# Repeat the stochastic optimization N times and record the attained function
# values, to estimate how often the global minimum (value 0) is found.
# try() keeps the loop alive if a single run errors; that run stays NA.
N <- 10000
boot_int_programming <- rep(NA, N)
for (i in c(1 : N)) {
  try(boot_int_programming[i] <- optim_sa(fun = hi,
                                          start = (c(10, 10)),
                                          trace = TRUE,
                                          lower = c(-40, -40),
                                          upper=c(40, 40),
                                          control = list(t0 = 500,
                                                         nlimit = 50,
                                                         r = 0.85,
                                                         rf = 3,
                                                         ac_acc = 0.1,
                                                         dyn_rf = TRUE,
                                                         vf = var_func_int
                                          )
  )$function_value)
}
hist(boot_int_programming)
summary(factor(boot_int_programming))
# 67.28 %
## optim (SA) ##
# Baseline comparison: stats::optim simulated annealing, where `gr` acts as
# the candidate-generation function.
optim(par = c(10, 10), fn = hi, gr = var_func_int_gr, method = "SANN", control = list(trace = TRUE))
|
d6b6a641af90b221f873d9d09750cf29f7bf1b9a
|
9fb072a48f70cc3e592a07fa6960363f1ba1562e
|
/books.r
|
24c5de9dd0c337b25f685873931718440ce6c600
|
[] |
no_license
|
cyadusha/bookspy
|
0092a2275a708bf41f72110ecdb03b3b654f3471
|
32a97e14b9f07a578b99ff2694e2390e1947524f
|
refs/heads/master
| 2021-01-19T00:18:26.359745
| 2017-04-04T05:40:12
| 2017-04-04T05:40:12
| 87,151,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 738
|
r
|
books.r
|
# Databricks notebook source
# Train an ALS (alternating least squares) recommender on the Book-Crossing
# ratings CSV using SparkR.
library(SparkR)  # moved above first use; was loaded after spark.als was called
sc <- sparkR.session()
books <- as.data.frame(read.df(source = 'csv', schema = NULL, path = '/FileStore/tables/5c89o5011491261805559/BX_Book_Ratings-05a61.csv', header = "true"))
# CSV columns arrive as character; coerce the numeric ones.
books$Rating <- as.integer(books$Rating)
books$User <- as.integer(books$User)  # (a duplicated identical coercion was removed)
#sdf <- createDataFrame(sc, books)
# COMMAND ----------
model <- SparkR::spark.als(books, maxIter = 5, regParam = 0.01, userCol = "User",
                   itemCol = "ISBN", ratingCol = "Rating")
# Model summary
summary(model)
# Prediction
# NOTE(review): `test` is never defined in this notebook — presumably a
# held-out split of `books`; define it before running this cell.
predictions <- predict(model, test)
showDF(predictions)
# COMMAND ----------
# NOTE(review): `sdf` is only created in the commented-out createDataFrame
# line above, so this cell errors as written.
sdf
# COMMAND ----------
# COMMAND ----------
11e00d606bbeba202d8f4496a05f8acf0c541fbd
|
90f6ffe83e3565f9bfc82de8662c907508b64f61
|
/functions/support_functions.R
|
dfa72af11c0672fed6150f0881308fbed65b36b0
|
[] |
no_license
|
henrique-anatole/shinyapp_template
|
2f8b4196daf864a50b29c8c034af5ab45540707d
|
fefa002d3336cd3410475dcbb2d36223a7114251
|
refs/heads/master
| 2023-06-09T13:47:32.445072
| 2021-07-03T03:18:36
| 2021-07-03T03:18:36
| 382,497,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45
|
r
|
support_functions.R
|
# Put all basic support functions here
|
e6ba807ab4dc0a3083d1db255c59d349c218d88c
|
ddd70f6b0a5adb038dd67c3de22f3b1f90a69bf4
|
/Week_1.R
|
5152c2445c84bc1afd13468939e75ae6719ca263
|
[] |
no_license
|
ccannon3/R_DAVIS_in_class_project
|
5fd85db514e9311089985e38ef96d547af203ea8
|
19da6aec708688023859c71a678b753bfa07eba0
|
refs/heads/master
| 2020-12-04T03:54:23.842032
| 2020-02-18T22:00:23
| 2020-02-18T22:00:23
| 231,599,539
| 0
| 0
| null | 2020-01-03T13:58:47
| 2020-01-03T13:58:44
| null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
Week_1.R
|
# Welcome to R-Davis 2020 Week 1
# In-class teaching script: arithmetic, functions, assignment, and argument
# matching. Intended to be run line by line at the console.
1 + 100
#r uses the order of operations
3 + 5 *2
#use parentheses to force order of operations
(3 + 5)*2
#scientific notation
2 / 10000
1e9*8
#call functions in R
sin(3.14)
log(3)
exp(.5)
#nesting functions, interpreted from the inside out
sqrt(exp(4))
#comparisons in R
#R can do logical comparisons
# == is, ! not
1 == 1
1 == 3
1 != 2
#objects and assignments in R
# <- assignment operator
x <- 1/4
log(x)
x <- 99
x <- x + 1
this_is_my_object <- 90
#rules for naming objects, cannot have a space, cannot start with a number
#tab completion: enter different arguments into a function
log(this_is_my_object)
#for help can go to console and type in ?function and learn more from help file
log(3, 10) #there are two arguments to log for instance, base and x
log(x=3, base = 10) #the same as above
log(10, 3) #different from above, because R assumes base is second
log(base = 10, x = 3) #specifying the argument so like the first example even though not what R expects
log(x = 10, base = 3)
# R communications: error,
# The next line errors on purpose (`aword` is undefined) to demonstrate
# reading error messages.
log(aword) #error message, good to google
|
96252d8e98ea8df40641b8e4339c254851b6791d
|
66080fc86a2579dbdf221aa061e73e569d6f1471
|
/man/roll.Rd
|
eb2374de035d5e46ffc78e58eb66c2205de071f2
|
[] |
no_license
|
oganm/diceSyntax
|
bfa230b756383a623dbeb8a44afac0917cff433b
|
0e8186dc09b52e25d50571b1e2dfbc22d2d8dbb6
|
refs/heads/master
| 2021-08-22T21:29:37.261554
| 2021-07-19T19:42:27
| 2021-07-19T19:42:27
| 83,385,569
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 498
|
rd
|
roll.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dice.R
\name{roll}
\alias{roll}
\title{Roll a dice}
\usage{
roll(dice, critMark = TRUE, vocal = TRUE, returnRolls = FALSE)
}
\arguments{
\item{dice}{character, a valid dice syntax}

\item{critMark}{Logical. Should critical rolls be marked? (This argument appears in the usage but was previously undocumented.)}

\item{vocal}{Logical. Should individual rolls be printed?}

\item{returnRolls}{Logical. If \code{TRUE}, a list will be returned that includes rolled and dropped dice as well as the sum of the accepted dice.}
}
\description{
Rolls the dice described as a string
}
|
5c4d2d900cf0e46eb726742f6ca8fd5103aacf5c
|
222fe5a863813f5c418c5891de999bdf90fa343f
|
/man/gen_id_to_haps.Rd
|
55addb2c9a59d85ebc219285f4ce4fa464919ad1
|
[] |
no_license
|
xiahui625649/digitaltwins
|
c12045110e3e35d095ef1d8370eb1f36b506f695
|
aa84b33bedc378bc8491afc98609326519a944e0
|
refs/heads/master
| 2023-03-30T12:58:40.540064
| 2020-02-24T18:47:54
| 2020-02-24T18:47:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 377
|
rd
|
gen_id_to_haps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_crt.r
\name{gen_id_to_haps}
\alias{gen_id_to_haps}
\title{Convert genotype IDs to haplotype IDs}
\usage{
gen_id_to_haps(ids)
}
\arguments{
\item{ids}{vector of ids (integers)}
}
\value{
Vector of ids, typically of size 2 * length(ids).
}
\description{
Convert genotype IDs to haplotype IDs
}
|
4272fb38b78ca50991b012ba0b884cee75a39a90
|
973cf626ce20a6facdff3e08bc7f5b7f7553faa8
|
/BioStats2/q4.R
|
933862232add5ffb2d424ec6ce4acd6e8a428658
|
[] |
no_license
|
RobertCPhillips/CourseraJohnHopkinsDataScience
|
c9b3ca59f208e9cc0c24e40674eeffd01b329824
|
e0fc46b404d76e1aa689d85b72bd7c13dea95c90
|
refs/heads/master
| 2021-01-23T12:17:16.871163
| 2016-01-02T15:33:38
| 2016-01-02T15:33:38
| 22,586,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
q4.R
|
# Biostatistics quiz 4 scratch work — each numbered section answers one item.
#1
# 2x2x2 contingency array (two stacked 2x2 tables); Mantel-Haenszel test of
# conditional independence across the two strata, no continuity correction.
dat <- array(c(8, 52, 5, 164, 25, 29, 21, 128), c(2, 2, 2))
dat
mantelhaen.test(dat, correct = FALSE)
#2
# Exact two-sided Wilcoxon signed-rank test on three paired differences.
diff <- c(-3, +2, -1)
wilcox.test(diff, exact = T, alternative = c("two.sided"))
#3
#prop.test(15, 25, p = .5)
# McNemar's test on a paired 2x2 table (columns c(55,12) and c(41,20)),
# without continuity correction.
m <- matrix(c(55, 12, 41, 20), 2)
mcnemar.test(m, correct = FALSE)
#4
# Hand calculation from the same table's margins (formula as given;
# presumably the quiz's ratio statistic — verify against the question).
a4 <- (55+41)*(41+20)/((55+12)*(12+20))
#5
# exp(log(z)) == z, so this simply evaluates 54/189.
exp(log(54/189))
#7
.8^5
|
984ecef86e55c75994e9bf18fabe6f90035e0b7e
|
b29b3cfeeb124d77f1c103d184207837f9322944
|
/man/pennyPerShare.Rd
|
c4b8a17360b40d8b46d6f554ab6b125a8fd5b663
|
[] |
no_license
|
codecliff/blotter
|
9384a4833d12e022da7b6a243a69677e8c74cd8f
|
c54ee700a3f0925d1b966ddf2ba0e1f1ab2fd849
|
refs/heads/master
| 2021-01-13T10:17:07.395515
| 2016-08-21T21:38:06
| 2016-08-21T23:01:08
| 69,478,420
| 1
| 0
| null | 2016-09-28T15:42:03
| 2016-09-28T15:42:02
| null |
UTF-8
|
R
| false
| true
| 559
|
rd
|
pennyPerShare.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addTxn.R
\name{pennyPerShare}
\alias{pennyPerShare}
\title{Example TxnFee cost function}
\usage{
pennyPerShare(TxnQty, ...)
}
\arguments{
\item{TxnQty}{total units (such as shares or contracts) transacted. Positive values indicate a 'buy'; negative values indicate a 'sell'
This is an example intended to demonstrate how a cost function could be used in place of a flat numeric fee.}
\item{\dots}{any other passthrough parameters}
}
\description{
Example TxnFee cost function
}
|
8e3a5518db81db84232f4c38dd6e14829e2d4433
|
a270ac7fe1a89e31cfff6ba39af96d88bebe469a
|
/safer_streets_priority_finder/R/mod_visualize_model_results.R
|
b0f7e63db7dfb1ebf0ae62bf6db5cbd1c2bae92f
|
[
"MIT"
] |
permissive
|
dflynn-volpe/Safer-Streets-Priority-Finder
|
7e4fb4e808bc2ae03051e6eb0bc9f70fdac978c9
|
10411177c039a5b96147390e23b4cbe631b27323
|
refs/heads/main
| 2023-07-22T14:44:36.576337
| 2021-09-01T17:32:24
| 2021-09-01T17:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,961
|
r
|
mod_visualize_model_results.R
|
#' visualize_model_results UI Function
#'
#' @description A shiny Module. This UI half is an intentionally empty
#' placeholder: the matching server function draws everything on an existing
#' leaflet map via a proxy rather than rendering outputs of its own.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_visualize_model_results_ui <- function(id){
  ns <- NS(id)

  tagList(
    # Intentionally empty — see module server.
  )
}
#' visualize_model_results Server Function
#'
#' Draws the model results on the shared leaflet map via `leaflet_proxy`:
#' the study-area boundary plus, when available, pedestrian and bicycle
#' crash-cost layers with per-segment popups and legends. Segments are only
#' visualized when their estimated average annual crash cost per mile reaches
#' a fixed threshold; otherwise the user is warned via an alert.
#'
#' Fix: the bicycle popup previously read `pd_data$stan_bike_cost`
#' (pedestrian rows) instead of `bk_data$stan_bike_cost`, showing values from
#' the wrong table and mis-recycling when row counts differed.
#'
#' @noRd
mod_visualize_model_results_server <- function(input, output, session, connection, user_id, run_id, data, leaflet_proxy, n){
  ns <- session$ns

  # Minimum estimated average annual crash cost per mile (USD) for a segment
  # to be drawn. Keep in sync with the "$120,563" quoted in the alert texts.
  cost_threshold <- 120563

  # (Removed an unused local `list` of severity levels / palette options that
  # shadowed base::list() and was never referenced.)

  # Study-area polygon for this user/run, reprojected to WGS84 for leaflet.
  study_area_boundary <- fetch_spatial_table(connection = connection,
                                             columns = 'ST_AsEWKT((ST_Dump(geom)).geom) as geom',
                                             schema = 'local_user_data',
                                             table = return_table_name('study_area', user_id, run_id),
                                             geom_type = 'POLYGON',
                                             is_wkt = TRUE)
  study_area_boundary <- transform_with_epsg(study_area_boundary, 4326)
  data$model_results <- transform_with_epsg(data$model_results, 4326)

  # Segments above the threshold, sorted ascending so the costliest (darkest)
  # lines are drawn last and end up on top.
  bk_data <- data$model_results[which(data$model_results$rt_bike_cost_1y >= cost_threshold), ] %>% arrange(rt_bike_cost_1y)
  pd_data <- data$model_results[which(data$model_results$rt_ped_cost_1y >= cost_threshold), ] %>% arrange(rt_ped_cost_1y)

  # NOTE(review): the original condition tested `nrow(data$model_results) > 0`
  # twice (apparent copy/paste slip); simplified to one test — logically
  # identical.
  if (nrow(bk_data) < 1 && nrow(pd_data) < 1 && nrow(data$model_results) > 0) {
    shiny_warming_alert(title='Low Values', text="Your results can't be visualized because no segments have an estimated annual average cost per mile greater than $120,563. Your results are still available for download.", showConfirmButton=TRUE, showCancelButton=FALSE, size="s", type="warning")
  } else {
    bbox <- as.vector(sf::st_bbox(data$model_results))
    # Zero-width spaces (U+200B, originally embedded literally) appended to
    # group names keep them unique per module instance while rendering
    # identically in the layer control.
    blanks <- rep("\u200b", times = n)
    blanks <- paste(blanks, collapse = "")
    groups <- c()
    groups <- append(groups, paste0('Study Area', blanks))

    leaflet_proxy %>%
      leaflet::clearShapes() %>%
      leaflet::clearControls() %>%
      leaflet::addPolygons(data = study_area_boundary,
                           weight = 2,
                           fillColor = '#666566',
                           color = '#666566',
                           stroke = TRUE,
                           fillOpacity = 0,
                           opacity = .8,
                           group = paste0('Study Area', blanks)
      )

    if (nrow(pd_data) > 0) {
      ped_cost <- colorNumeric(
        palette = "YlGnBu",
        domain = as.numeric(pd_data$rt_ped_cost_1y)
      )
      groups <- append(groups, paste0('Estimated Pedestrian Crash Cost', blanks))
      leaflet_proxy %>%
        leaflet::addPolylines(data = pd_data,
                              color = ped_cost(as.numeric(pd_data$rt_ped_cost_1y)),
                              opacity = .75,
                              weight = 2,
                              popup = ~paste0('<strong>Road Name: </strong>', pd_data$road_name, '</br>',
                                              '<strong>Functional Classification: </strong>', pd_data$road_fclass,'</br>',
                                              '<strong>Estimated Average Annual Pedestrian Crash Cost Per Mile: </strong>', '$', prettyNum(round(as.numeric(pd_data$rt_ped_cost_1y), 2), big.mark=",",scientific=FALSE),'</br>',
                                              '<strong>Estimated Total 5-year Pedestrian Crash Cost: </strong>', '$', prettyNum(round(as.numeric(pd_data$stan_ped_cost), 2), big.mark=",",scientific=FALSE),'</br>',
                                              '<strong>Estimated 5-year Pedestrian Fatalities (K): </strong>', round(as.numeric(pd_data$e_cr_ped_k), 2),'</br>',
                                              '<strong>Estimated 5-year Pedestrian Incapacitating Injuries (A): </strong>', round(as.numeric(pd_data$e_cr_ped_a), 2),'</br>',
                                              '<strong>Estimated 5-year Pedestrian Non-Incapacitating Injuries (B): </strong>', round(as.numeric(pd_data$e_cr_ped_b), 2),'</br>',
                                              '<strong>Estimated 5-year Pedestrian Possible Injuries (C): </strong>', round(as.numeric(pd_data$e_cr_ped_c), 2),'</br>',
                                              '<strong>Estimated 5-year Pedestrian Property Damage Only (O): </strong>', round(as.numeric(pd_data$e_cr_ped_o), 2),'</br>',
                                              '<strong>Total Historical Pedestrian Crashes: </strong>', pd_data$tot_ped_all
                              ),
                              group = paste0('Estimated Pedestrian Crash Cost', blanks),
                              popupOptions = popupOptions( maxWidth = 600)
        ) %>%
        leaflet::addLegend(position = "bottomleft",
                           pal = ped_cost,
                           values = pd_data$rt_ped_cost_1y,
                           title = "Estimated Average Annual <br>Pedestrian Crash Costs Per Mile",
                           labFormat = labelFormat(prefix='$'),
                           group = paste0('Estimated Pedestrian Crash Cost', blanks)
        )
    }

    if (nrow(bk_data) > 0) {
      bike_cost <- colorNumeric(
        palette = "YlOrRd",
        domain = as.numeric(bk_data$rt_bike_cost_1y)
      )
      groups <- append(groups, paste0('Estimated Bicycle Crash Cost', blanks))
      leaflet_proxy %>%
        leaflet::addPolylines(data = bk_data,
                              color = bike_cost(as.numeric(bk_data$rt_bike_cost_1y)),
                              opacity = .75,
                              weight = 2,
                              popup = ~paste0('<strong>Road Name: </strong>', bk_data$road_name, '</br>',
                                              '<strong>Functional Classification: </strong>', bk_data$road_fclass,'</br>',
                                              '<strong>Estimated Average Annual Bicycle Crash Cost Per Mile: </strong>', '$', prettyNum(round(as.numeric(bk_data$rt_bike_cost_1y), 2), big.mark=",",scientific=FALSE),'</br>',
                                              # BUG FIX: was pd_data$stan_bike_cost (wrong table).
                                              '<strong>Estimated Total 5-year Bicycle Crash Cost: </strong>', '$', prettyNum(round(as.numeric(bk_data$stan_bike_cost), 2), big.mark=",",scientific=FALSE),'</br>',
                                              '<strong>Estimated 5-year Bicycle Fatalities (K): </strong>', round(as.numeric(bk_data$e_cr_bike_k), 2),'</br>',
                                              '<strong>Estimated 5-year Bicycle Incapacitating Injuries (A): </strong>', round(as.numeric(bk_data$e_cr_bike_a), 2),'</br>',
                                              '<strong>Estimated 5-year Bicycle Non-Incapacitating Injuries (B): </strong>', round(as.numeric(bk_data$e_cr_bike_b), 2),'</br>',
                                              '<strong>Estimated 5-year Bicycle Possible Injuries (C): </strong>', round(as.numeric(bk_data$e_cr_bike_c), 2),'</br>',
                                              '<strong>Estimated 5-year Bicycle Property Damage Only (O): </strong>', round(as.numeric(bk_data$e_cr_bike_o), 2),'</br>',
                                              '<strong>Total Historical Bicycle Crashes: </strong>', bk_data$tot_bike_all
                              ),
                              group = paste0('Estimated Bicycle Crash Cost', blanks),
                              popupOptions = popupOptions( maxWidth = 600)
        ) %>%
        leaflet::addLegend(position = "bottomleft",
                           pal = bike_cost,
                           values = bk_data$rt_bike_cost_1y,
                           title = "Estimated Average Annual <br>Bicycle Crash Costs Per Mile",
                           labFormat = labelFormat(prefix='$'),
                           group = paste0('Estimated Bicycle Crash Cost', blanks)
        )
    }

    leaflet_proxy %>%
      leaflet::fitBounds(bbox[1], bbox[2], bbox[3], bbox[4]) %>%
      leaflet::addLayersControl(baseGroups = c("Grey", "Negative", "OpenStreetMap"),
                                overlayGroups = c(groups),
                                options = layersControlOptions(collapsed = FALSE),
                                position = "topright")

    # When both layers are present, start with the bicycle layer hidden so the
    # initial view isn't cluttered.
    if (nrow(bk_data) > 0 && nrow(pd_data) > 0) {
      leaflet_proxy %>%
        leaflet::hideGroup(paste0('Estimated Bicycle Crash Cost', blanks))
    }
    # One-sided warnings: one mode plotted, the other below threshold.
    if (nrow(bk_data) > 0 && nrow(pd_data) < 1) {
      shiny_warming_alert(title='Low Values', text="Your pedestrian model results can't be visualized because no segments have an estimated annual average cost per mile of greater than $120,563. Your results are still available for download.", showConfirmButton=TRUE, showCancelButton=FALSE, size="s", type="warning")
    }
    if (nrow(bk_data) < 1 && nrow(pd_data) > 0) {
      shiny_warming_alert(title='Low Values', text="Your bicycle model results can't be visualized because no segments have an estimated annual average cost per mile of greater than $120,563. Your results are still available for download.", showConfirmButton=TRUE, showCancelButton=FALSE, size="s", type="warning")
    }
  }
}
## To be copied in the UI
# mod_visualize_model_results_ui("visualize_model_results_ui_1")
## To be copied in the server
# mod_visualize_model_results_server("visualize_model_results_ui_1")
|
008be2f607511c65f957cf200e13997001f20481
|
a736ac537b5e2255d5f1fb7e075815353972c888
|
/getStockInfo.R
|
b42313480b316c2374c856f8c4c9af3485280398
|
[] |
no_license
|
mahoutm/learning-R
|
37fd6fdb8d1325f9f428f1a36783dcd2a10ccc9f
|
fd59c45d00b63f4276f143651a5874b022cfed36
|
refs/heads/master
| 2021-01-15T17:42:11.438350
| 2014-10-14T14:13:54
| 2014-10-14T14:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 126
|
r
|
getStockInfo.R
|
# get stock information to yahoo site.
# NOTE(review): installing a package from inside a script on every run is an
# anti-pattern — run install.packages("fImport") once interactively instead.
install.packages("fImport")
library(fImport)
# Download the Yahoo Finance time series for ticker "005935.KS" and plot it.
s_e <- yahooSeries("005935.KS")
plot(s_e)
|
0013f21f1b4799e74fc367968cb29b83c87e1b05
|
3c4277919e1e18231c9c5a35615cbce0eb85ec9b
|
/180607_텍스트마이닝/Ngram.R
|
12bc9a7dee62ae940f847dcda55381087a065234
|
[] |
no_license
|
chankoo/BOAZ-Sessions
|
988932b943c0287049d991a3849554b8701120e3
|
87c15440ff7027f09216a85c932ddcbea75aa95b
|
refs/heads/master
| 2020-03-28T09:13:41.760854
| 2018-11-24T14:42:43
| 2018-11-24T14:42:43
| 148,022,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,978
|
r
|
Ngram.R
|
############################
##########n-gram############
##############################
# Text-mining walkthrough: n-gram tokenization, tf-idf, negation-aware
# sentiment analysis, and a bigram network, using the Jane Austen corpus.
# (Comments translated from Korean.)
# NOTE(review): rm(list = ls()) in a shared script is an anti-pattern.
rm(list=ls())
# install.packages("dplyr")
# install.packages("tidytext")
# install.packages("janeaustenr")
library(dplyr)
library(tidytext)
library(janeaustenr)
# unnest_tokens() was also used for tf-idf; there it tokenized text into
# words or sentences to count occurrences, whereas n-grams keep word order.
# Adding token = "ngrams" and n to unnest_tokens() does the splitting:
# n = 1 -> unigram, n = 2 -> bigram, n = 3 -> trigram.
austen_bigrams <- austen_books() %>%
  unnest_tokens(bigram, text, token = "ngrams", n = 2)
austen_bigrams
##counting and filtering n-grams
austen_bigrams %>%
  count(bigram, sort = TRUE) # sort bigrams by descending count
library(tidyr)
bigrams_separated <- austen_bigrams %>%
  separate(bigram, c("word1", "word2"), sep = " ")
bigrams_separated
bigrams_filtered <- bigrams_separated %>%
  filter(!word1 %in% stop_words$word) %>% # drop stop words from the separated bigrams
  filter(!word2 %in% stop_words$word)
bigrams_filtered
# new bigram counts:
bigram_counts <- bigrams_filtered %>%
  count(word1, word2, sort = TRUE) # count the stop-word-free bigrams
bigram_counts
bigrams_united <- bigrams_filtered %>%
  unite(bigram, word1, word2, sep = " ") # re-join the separated words into bigrams
bigrams_united
austen_books() %>%
  unnest_tokens(trigram, text, token = "ngrams", n = 3) %>% # split text three words at a time (trigrams)
  separate(trigram, c("word1", "word2", "word3"), sep = " ") %>% # as above: separate, drop stop words, count
  filter(!word1 %in% stop_words$word,
         !word2 %in% stop_words$word,
         !word3 %in% stop_words$word) %>%
  count(word1, word2, word3, sort = TRUE)
##Analyzing bigrams
## Use bigrams for some analyses:
# Which bigrams mention "street" most often?
bigrams_filtered %>%
  filter(word2 == "street") %>%
  count(book, word1, sort = TRUE) # count bigrams whose second word is "street"
# Which bigrams have the highest tf-idf?
bigram_tf_idf <- bigrams_united %>% # sort bigrams by tf-idf, descending
  count(book, bigram) %>%
  bind_tf_idf(bigram, book, n) %>%
  arrange(desc(tf_idf))
bigram_tf_idf
##Using bigrams to provide context in sentiment analysis
## Caveat: a word used negatively in a sentence may still count as positive
## on its own. E.g. "I am not happy and I do not like it" is negative, yet a
## unigram count treats "happy" and "like" as positive.
# How often do words follow a negation such as "not"?
bigrams_separated %>%
  filter(word1 == "not") %>%
  count(word1, word2, sort = TRUE)
AFINN <- get_sentiments("afinn") #AFINN : lexicon which gives a numeric sentiment score for each word
AFINN
not_words <- bigrams_separated %>% # most common sentiment-bearing words following "not"
  filter(word1 == "not") %>%
  inner_join(AFINN, by = c(word2 = "word")) %>%
  count(word2, score, sort = TRUE) %>%
  ungroup()
not_words
# Which words contributed most to "wrong-direction" sentiment, and by how
# much? x-axis: n (occurrences) * score; y-axis: the words following "not".
not_words %>%
  mutate(contribution = n * score) %>%
  arrange(desc(abs(contribution))) %>%
  head(20) %>%
  mutate(word2 = reorder(word2, contribution)) %>%
  ggplot(aes(word2, n * score, fill = n * score > 0)) +
  # NOTE(review): windows() opens a graphics device (Windows-only) inside a
  # ggplot chain; presumably this works only because ggplot2 ignores its NULL
  # return — consider calling it before ggplot() instead.
  windows() +
  geom_col(show.legend = FALSE) +
  xlab("Words preceded by \"not\"") +
  ylab("Sentiment score * number of occurrences") +
  coord_flip()
# Interpretation of the result:
# "not like" and "not help" are the largest sources of misreading — they make
# the text look more positive than it is; "not afraid" and "not fail" make it
# look more negative than it really is.
negation_words <- c("not", "no", "never", "without") # analyze other negation words besides "not"
negated_words <- bigrams_separated %>%
  filter(word1 %in% negation_words) %>%
  inner_join(AFINN, by = c(word2 = "word")) %>%
  count(word1, word2, score, sort = TRUE) %>%
  ungroup()
# Visualization left as an exercise.
# ##Visualizing a network of bigrams with ggraph
#
library(igraph)
# original counts
bigram_counts
# filter for only relatively common combinations
bigram_graph <- bigram_counts %>%
  filter(n > 20) %>%
  graph_from_data_frame()
bigram_graph
library(ggraph)
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
  geom_edge_link() +
  geom_node_point() +
  geom_node_text(aes(label = name), vjust = 1, hjust = 1)
|
99381b40a1cb8d92dde1ba009e51a7a2d2529bf4
|
ddf0c1ddf1e2df05f2fb752ea2c6c3702972edeb
|
/man/isplit2.Rd
|
00aa9cad8ddb3b9e8b67dfe40483bea21e298d05
|
[] |
no_license
|
talgalili/plyr
|
f61d5ca395f69345b04d7db98558518f28028ddf
|
dc98253e4ec68951c3c53426f13c31a93e47d13c
|
refs/heads/master
| 2020-04-07T21:29:41.218492
| 2012-10-29T15:04:38
| 2012-10-29T15:04:38
| 8,526,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
rd
|
isplit2.Rd
|
\name{isplit2}
\alias{isplit2}
\title{Split iterator that returns values, not indices.}
\usage{
isplit2(x, f, drop = FALSE, ...)
}
\description{
Split iterator that returns values, not indices.
}
\keyword{internal}
|
9bde57cd3e815cafad0d416eec54dcc54e6a0d0c
|
93948587ecb19bd226dd7c6b499f90a5ae3c472e
|
/R/REGE_for.R
|
7218165eec41a95389610410142282f2acce7aba
|
[] |
no_license
|
cran/blockmodeling
|
2aee1677bc94cd7daf7b48f5a7b7a662100a648e
|
f1766c756496c05040f8a8015f31036f13224659
|
refs/heads/master
| 2022-12-01T06:35:33.586891
| 2022-11-22T11:30:02
| 2022-11-22T11:30:02
| 17,694,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,583
|
r
|
REGE_for.R
|
#' @rdname REGE
#'
#' @export
REGE.for<-function(
  M, # network in the form of a matrix or array (array for several relations)
  iter = 3, # number of REGE iterations
  E = 1 # initial similarity between vertices (default 1 among all vertices)
){
  # Validate M and derive the number of vertices (N) and relations (NR).
  if(is.array(M)){
    dM<-dim(M)
    dnM<-dimnames(M)
    N<-dM[1]
    if (length(dM)==3) {
      NR<-dM[3]
    } else {
      if(length(dM)==2) {
        NR<-1
      } else stop("An array has wrong dimensions")
    }
  } else stop("M must be an array")
  # .Fortran passes arguments by storage mode, so both M and E must be double.
  M<-structure(as.double(M),dim=dM)
  dimnames(M)<-dnM
  # Fix: coerce E with as.double() (matching REGD.for). Previously an integer
  # E (e.g. E = 1L) would reach the Fortran routine with the wrong storage
  # mode. NOTE(review): several sibling REGE.* variants in this file share
  # the same missing coercion.
  E<-matrix(as.double(E),ncol=N, nrow=N)
  diag(E)<-1.0
  # Run the compiled REGE iteration; res$E holds the final similarities.
  res<-.Fortran("rege",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
  # Return both the initial and final similarity matrices, plus inputs.
  Eall<-array(NA,dim=c(dim(E),2))
  Eall[,,1]<-E
  Eall[,,2]<-res$E
  dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
  return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGD.for<-function(
M, #netowrk in form of a matrix or array (in case of several relations)
iter = 3,
E = 0 #initial dissimiliarity between vertices (default 0 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
E<-matrix(as.double(E),ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regd",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGE.ow.for<-function(
M, #netowrk in form of a matrix or array (in case of several relations)
iter = 3,
E = 1 #initial similiarity between vertices (default 1 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
E<-matrix(E,ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regeow",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGD.ow.for<-function(
M, #netowrk in form of a matrix or array (in case of several relations)
iter = 3,
E = 0 #initial dissimiliarity between vertices (default 0 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
E<-matrix(as.double(E),ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regdow",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dnM[[1]],dnM[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGE.ownm.for<-function(
M, #netowrk in form of a matrix or array (in case of two relations)
iter = 3,
E = 1 #initial similiarity between vertices (default 1 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
if(NR==1){
M2<-array(NA,dim=c(N,N,2))
M2[,,1]<-diag(1/apply(M,1,sum))%*%M
M2[,,2]<-M%*%diag(1/apply(M,2,sum))
M2[is.nan(M2)]<-0
NR<-2
if(length(dimnames(M))==2) dimN<-dimnames(M) else dimN<-c(list(NULL),list(NULL))
dimnames(M2)<-c(dimN,list(c("out","in")))
M<-M2
} else{
if(NR==2){
cat("The first matrix will be used to evalueate outgoing arcs and the second to evaluate in ingoing arcs.\n")
} else stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
}
E<-matrix(E,ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regeownm",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGE.ownm.diag.for<-function(
M, #netowrk in form of a matrix or array (in case of two relations)
iter = 3,
E = 1 #initial similiarity between vertices (default 1 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
if(NR==1){
M2<-array(NA,dim=c(N,N,2))
M2[,,1]<-diag(1/apply(M,1,sum))%*%M
M2[,,2]<-M%*%diag(1/apply(M,2,sum))
M2[is.nan(M2)]<-0
NR<-2
if(length(dimnames(M))==2) dimN<-dimnames(M) else dimN<-c(list(NULL),list(NULL))
dimnames(M2)<-c(dimN,list(c("out","in")))
M<-M2
} else{
if(NR==2){
cat("The first matrix will be used to evalueate outgoing arcs and the second to evaluate in ingoing arcs.\n")
} else stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
}
E<-matrix(E,ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regeownmdiag",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGE.nm.for<-function(
M, #netowrk in form of a matrix or array (in case of two relations)
iter = 3,
E = 1 #initial similiarity between vertices (default 1 among all vertices).
){
if(is.array(M)){
dM<-dim(M)
dnM<-dimnames(M)
N<-dM[1]
if (length(dM)==3) {
NR<-dM[3]
} else {
if(length(dM)==2) {
NR<-1
} else stop("An array has wrong dimensions")
}
} else stop("M must be an array")
M<-structure(as.double(M),dim=dM)
dimnames(M)<-dnM
if(NR==1){
M2<-array(NA,dim=c(N,N,2))
M2[,,1]<-diag(1/apply(M,1,sum))%*%M
M2[,,2]<-M%*%diag(1/apply(M,2,sum))
M2[is.nan(M2)]<-0
NR<-2
if(length(dimnames(M))==2) dimN<-dimnames(M) else dimN<-c(list(NULL),list(NULL))
dimnames(M2)<-c(dimN,list(c("out","in")))
M<-M2
} else{
if(NR==2){
cat("The first matrix will be used to evalueate outgoing arcs and the second to evaluate in ingoing arcs.\n")
} else stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
}
E<-matrix(E,ncol=N, nrow=N)
diag(E)<-1.0
res<-.Fortran("regenm",M = M, E = E, N = as.integer(N), NR = as.integer(NR), iter = as.integer(iter))
Eall<-array(NA,dim=c(dim(E),2))
Eall[,,1]<-E
Eall[,,2]<-res$E
dimnames(Eall)<-list(dimnames(M)[[1]],dimnames(M)[[2]],c("initial","final"))
return(list(E=Eall[,,"final"],Eall=Eall,M=M,iter=iter))
}
#' @rdname REGE
#'
#' @export
REGE.nm.diag.for <- function(
  M,        # network in form of a matrix (one relation) or an array with two relations
  iter = 3, # number of iterations passed to the Fortran routine
  E = 1     # initial similarity between vertices (default 1 among all pairs)
) {
  # REGE regular-equivalence similarities on a row/column-normalized network,
  # computed by the compiled Fortran routine "regenmdiag".
  # A single-relation network is expanded into two relations:
  #   [,,1] rows scaled to sum to 1  (used for outgoing arcs),
  #   [,,2] columns scaled to sum to 1 (used for ingoing arcs).
  # Returns a list with the final similarity matrix (E), an array with both
  # the initial and final matrices (Eall), the normalized network (M) and iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  if (NR == 1) {
    # Build the two normalized relations from the single input matrix.
    M2 <- array(NA, dim = c(N, N, 2))
    M2[, , 1] <- diag(1 / rowSums(M)) %*% M  # out-normalized (rows sum to 1)
    M2[, , 2] <- M %*% diag(1 / colSums(M))  # in-normalized (columns sum to 1)
    M2[is.nan(M2)] <- 0  # zero-sum rows/columns yield NaN; treat as 0
    NR <- 2
    dimN <- if (length(dimnames(M)) == 2) dimnames(M) else c(list(NULL), list(NULL))
    dimnames(M2) <- c(dimN, list(c("out", "in")))
    M <- M2
  } else if (NR == 2) {
    cat("The first matrix will be used to evaluate outgoing arcs and the second to evaluate ingoing arcs.\n")
  } else {
    stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
  }
  E <- matrix(as.double(E), ncol = N, nrow = N)
  diag(E) <- 1.0  # a vertex is always fully similar to itself
  res <- .Fortran("regenmdiag", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  dn <- dimnames(M)
  if (is.null(dn)) dn <- list(NULL, NULL)
  dimnames(Eall) <- list(dn[[1]], dn[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
#'
#' @export
REGE.ne.for <- function(
  M,        # network in form of a matrix or a 3-d array (several relations)
  iter = 3, # number of iterations passed to the Fortran routine
  E = 1     # initial similarity between vertices (default 1 among all pairs)
) {
  # REGE regular-equivalence similarities computed by the compiled Fortran
  # routine "regene" (the "ne" variant; the algorithmic detail lives in the
  # Fortran code).
  # Returns a list with the final similarity matrix (E), an array with both
  # the initial and final matrices (Eall), the (double) network M and iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  # as.double added for consistency with the REGD.* variants.
  E <- matrix(as.double(E), ncol = N, nrow = N)
  diag(E) <- 1.0  # a vertex is always fully similar to itself
  res <- .Fortran("regene", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  if (is.null(dnM)) dnM <- list(NULL, NULL)
  dimnames(Eall) <- list(dnM[[1]], dnM[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
#'
#' @export
REGE.ow.ne.for <- function(
  M,        # network in form of a matrix or a 3-d array (several relations)
  iter = 3, # number of iterations passed to the Fortran routine
  E = 1     # initial similarity between vertices (default 1 among all pairs)
) {
  # One-way ("ow") REGE variant computed by the compiled Fortran routine
  # "regeowne"; the exact algorithmic difference from REGE.ne.for lives in
  # the Fortran code.
  # Returns a list with the final similarity matrix (E), an array with both
  # the initial and final matrices (Eall), the (double) network M and iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  # as.double added for consistency with the REGD.* variants.
  E <- matrix(as.double(E), ncol = N, nrow = N)
  diag(E) <- 1.0  # a vertex is always fully similar to itself
  res <- .Fortran("regeowne", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  if (is.null(dnM)) dnM <- list(NULL, NULL)
  dimnames(Eall) <- list(dnM[[1]], dnM[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
#'
#' @export
REGE.ownm.ne.for <- function(
  M,        # network in form of a matrix (one relation) or an array with two relations
  iter = 3, # number of iterations passed to the Fortran routine
  E = 1     # initial similarity between vertices (default 1 among all pairs)
) {
  # One-way REGE variant on a row/column-normalized network, computed by the
  # compiled Fortran routine "regeownmne".
  # A single-relation network is expanded into two relations:
  #   [,,1] rows scaled to sum to 1  (used for outgoing arcs),
  #   [,,2] columns scaled to sum to 1 (used for ingoing arcs).
  # Returns a list with the final similarity matrix (E), an array with both
  # the initial and final matrices (Eall), the normalized network (M) and iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  if (NR == 1) {
    # Build the two normalized relations from the single input matrix.
    M2 <- array(NA, dim = c(N, N, 2))
    M2[, , 1] <- diag(1 / rowSums(M)) %*% M  # out-normalized (rows sum to 1)
    M2[, , 2] <- M %*% diag(1 / colSums(M))  # in-normalized (columns sum to 1)
    M2[is.nan(M2)] <- 0  # zero-sum rows/columns yield NaN; treat as 0
    NR <- 2
    dimN <- if (length(dimnames(M)) == 2) dimnames(M) else c(list(NULL), list(NULL))
    dimnames(M2) <- c(dimN, list(c("out", "in")))
    M <- M2
  } else if (NR == 2) {
    cat("The first matrix will be used to evaluate outgoing arcs and the second to evaluate ingoing arcs.\n")
  } else {
    stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
  }
  E <- matrix(as.double(E), ncol = N, nrow = N)
  diag(E) <- 1.0  # a vertex is always fully similar to itself
  res <- .Fortran("regeownmne", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  dn <- dimnames(M)
  if (is.null(dn)) dn <- list(NULL, NULL)
  dimnames(Eall) <- list(dn[[1]], dn[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
#'
#' @export
REGE.nm.ne.for <- function(
  M,        # network in form of a matrix (one relation) or an array with two relations
  iter = 3, # number of iterations passed to the Fortran routine
  E = 1     # initial similarity between vertices (default 1 among all pairs)
) {
  # REGE variant ("nm.ne") on a row/column-normalized network, computed by
  # the compiled Fortran routine "regenmne".
  # A single-relation network is expanded into two relations:
  #   [,,1] rows scaled to sum to 1  (used for outgoing arcs),
  #   [,,2] columns scaled to sum to 1 (used for ingoing arcs).
  # Returns a list with the final similarity matrix (E), an array with both
  # the initial and final matrices (Eall), the normalized network (M) and iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  if (NR == 1) {
    # Build the two normalized relations from the single input matrix.
    M2 <- array(NA, dim = c(N, N, 2))
    M2[, , 1] <- diag(1 / rowSums(M)) %*% M  # out-normalized (rows sum to 1)
    M2[, , 2] <- M %*% diag(1 / colSums(M))  # in-normalized (columns sum to 1)
    M2[is.nan(M2)] <- 0  # zero-sum rows/columns yield NaN; treat as 0
    NR <- 2
    dimN <- if (length(dimnames(M)) == 2) dimnames(M) else c(list(NULL), list(NULL))
    dimnames(M2) <- c(dimN, list(c("out", "in")))
    M <- M2
  } else if (NR == 2) {
    cat("The first matrix will be used to evaluate outgoing arcs and the second to evaluate ingoing arcs.\n")
  } else {
    stop("This function is only suitable for evaluating two relations obtained as a row and column normalization of a single relation network. You have supplied more than two relations.\n")
  }
  E <- matrix(as.double(E), ncol = N, nrow = N)
  diag(E) <- 1.0  # a vertex is always fully similar to itself
  res <- .Fortran("regenmne", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  dn <- dimnames(M)
  if (is.null(dn)) dn <- list(NULL, NULL)
  dimnames(Eall) <- list(dn[[1]], dn[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
#'
#' @export
REGD.ne.for <- function(
  M,        # network in form of a matrix or a 3-d array (several relations)
  iter = 3, # number of iterations passed to the Fortran routine
  E = 0     # initial dissimilarity between vertices (default 0 among all pairs)
) {
  # REGD regular-equivalence *dissimilarities* computed by the compiled
  # Fortran routine "regdne".
  # Returns a list with the final dissimilarity matrix (E), an array with
  # both the initial and final matrices (Eall), the (double) network M, iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  E <- matrix(as.double(E), ncol = N, nrow = N)
  # NOTE(review): kept from the original code, but a self-*dissimilarity* of
  # 1 looks suspicious (0 would seem natural) -- confirm against the Fortran
  # routine before changing.
  diag(E) <- 1.0
  res <- .Fortran("regdne", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dimnames(M)[[1]] fails with "subscript out of bounds" when the
  # network carries no dimnames (NULL[[1]] is an error). Guard against that.
  if (is.null(dnM)) dnM <- list(NULL, NULL)
  dimnames(Eall) <- list(dnM[[1]], dnM[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
#' @rdname REGE
REGD.ow.ne.for <- function(
  M,        # network in form of a matrix or a 3-d array (several relations)
  iter = 3, # number of iterations passed to the Fortran routine
  E = 0     # initial dissimilarity between vertices (default 0 among all pairs)
) {
  # One-way ("ow") REGD dissimilarity variant computed by the compiled
  # Fortran routine "regdowne"; the algorithmic difference from REGD.ne.for
  # lives in the Fortran code.
  # Returns a list with the final dissimilarity matrix (E), an array with
  # both the initial and final matrices (Eall), the (double) network M, iter.
  if (!is.array(M)) stop("M must be an array")
  dM <- dim(M)
  dnM <- dimnames(M)
  N <- dM[1]
  if (length(dM) == 3) {
    NR <- dM[3]
  } else if (length(dM) == 2) {
    NR <- 1
  } else {
    stop("An array has wrong dimensions")
  }
  # The Fortran routine requires double-precision storage mode.
  M <- structure(as.double(M), dim = dM)
  dimnames(M) <- dnM
  E <- matrix(as.double(E), ncol = N, nrow = N)
  # NOTE(review): kept from the original code, but a self-*dissimilarity* of
  # 1 looks suspicious (0 would seem natural) -- confirm against the Fortran
  # routine before changing.
  diag(E) <- 1.0
  res <- .Fortran("regdowne", M = M, E = E, N = as.integer(N),
                  NR = as.integer(NR), iter = as.integer(iter))
  Eall <- array(NA, dim = c(dim(E), 2))
  Eall[, , 1] <- E
  Eall[, , 2] <- res$E
  # Fix: dnM[[1]] fails with "subscript out of bounds" when the network
  # carries no dimnames (NULL[[1]] is an error). Guard against that.
  if (is.null(dnM)) dnM <- list(NULL, NULL)
  dimnames(Eall) <- list(dnM[[1]], dnM[[2]], c("initial", "final"))
  list(E = Eall[, , "final"], Eall = Eall, M = M, iter = iter)
}
|
eb72e645b7273cfb15a577199a1bbfdeca5932f7
|
653008da920ccc5fc80958ab479d4af39e78b39d
|
/man/CreateWalkForwardFuns.Rd
|
aa76af1bd198d0f71dac1904bc0519623f140c83
|
[
"MIT"
] |
permissive
|
TheRealSvc/forecast4you
|
7c0659942eed0806be713eb3d2beb8743cd66246
|
1c31a57741e8e4af0b0e6074a07c32a5e4c05896
|
refs/heads/main
| 2023-08-04T10:52:06.222516
| 2021-09-11T20:23:24
| 2021-09-11T20:23:24
| 405,196,500
| 0
| 0
| null | 2021-09-11T20:23:24
| 2021-09-10T19:48:56
|
R
|
UTF-8
|
R
| false
| true
| 651
|
rd
|
CreateWalkForwardFuns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_funs_framework.R
\name{CreateWalkForwardFuns}
\alias{CreateWalkForwardFuns}
\title{split the time series and creates a list tree as the main object for further operations}
\usage{
CreateWalkForwardFuns(lTrain, lValid = 1)
}
\arguments{
\item{lTrain}{length of the training interval in a walk-forward validation}
\item{lValid}{length of validation data for backtesting}
}
\value{
all splits in the form of a list: "out[[name]][[train]]" and "out[[name]][[validation]]"
}
\description{
split the time series and creates a list tree as the main object for further operations
}
|
c644588ffd3b857f377f13063aa719a6ecaa0f12
|
eb98338c4a30a5ad3608a10a4fc8f3f2bb63d6ec
|
/tests/testthat/test-view.R
|
d6244b3b5d27ac505afce87fad20e69e76f47632
|
[
"MIT"
] |
permissive
|
tidyverse/stringr
|
650be49f02b55276161bbc1134630f24dd0d0da8
|
08ff36fa0ca365000197341b9540ff7126711497
|
refs/heads/main
| 2023-08-26T10:38:24.999750
| 2023-08-05T20:42:27
| 2023-08-05T20:42:27
| 365,649
| 445
| 184
|
NOASSERTION
| 2023-09-03T11:49:44
| 2009-11-08T22:20:08
|
R
|
UTF-8
|
R
| false
| false
| 1,899
|
r
|
test-view.R
|
# Unit tests (testthat 3e) for stringr's str_view() / str_view_all().
# Most tests use expect_snapshot(), so the exact printed output is pinned in
# snapshot files; the expressions must stay as-is or snapshots would break.
test_that("results are truncated", {
  expect_snapshot(str_view(words))
  # and can control with option
  local_options(stringr.view_n = 5)
  expect_snapshot(str_view(words))
})
# Matched-element indices should refer to positions in the input vector,
# not to positions among the matches.
test_that("indices come from original vector", {
  expect_snapshot(str_view(letters, "a|z", match = TRUE))
})
test_that("view highlights all matches", {
  x <- c("abc", "def", "fgh")
  expect_snapshot({
    str_view(x, "[aeiou]")
    str_view(x, "d|e")
  })
})
# Special whitespace (NBSP, tab) is highlighted; a plain space and "\n" are
# shown as-is unless use_escapes = TRUE renders them as escape sequences.
test_that("view highlights whitespace (except a space/nl)", {
  x <- c(" ", "\u00A0", "\n", "\t")
  expect_snapshot({
    str_view(x)
    "or can instead use escapes"
    str_view(x, use_escapes = TRUE)
  })
})
test_that("view displays nothing for empty vectors",{
  expect_snapshot(str_view(character()))
})
# match = NA keeps every element, TRUE keeps only matching elements,
# FALSE keeps only non-matching elements (NA counts as non-matching here).
test_that("match argument controls what is shown", {
  x <- c("abc", "def", "fgh", NA)
  out <- str_view(x, "d|e", match = NA)
  expect_length(out, 4)
  out <- str_view(x, "d|e", match = TRUE)
  expect_length(out, 1)
  out <- str_view(x, "d|e", match = FALSE)
  expect_length(out, 3)
})
test_that("can match across lines", {
  local_reproducible_output(crayon = TRUE)
  expect_snapshot(str_view("a\nb\nbbb\nc", "(b|\n)+"))
})
test_that("vectorised over pattern", {
  x <- str_view("a", c("a", "b"), match = NA)
  expect_equal(length(x), 2)
})
# Subsetting a view object must keep its S3 class so printing stays custom.
test_that("[ preserves class", {
  x <- str_view(letters)
  expect_s3_class(x[], "stringr_view")
})
test_that("str_view_all() is deprecated", {
  expect_snapshot(str_view_all("abc", "a|b"))
})
# HTML widget output path (requires the optional html packages).
test_that("html mode continues to work", {
  skip_if_not_installed("htmltools")
  skip_if_not_installed("htmlwidgets")
  x <- c("abc", "def", "fgh")
  expect_snapshot({
    str_view(x, "[aeiou]", html = TRUE)$x$html
    str_view(x, "d|e", html = TRUE)$x$html
  })
  # can use escapes
  x <- c(" ", "\u00A0", "\n")
  expect_snapshot({
    str_view(x, html = TRUE, use_escapes = TRUE)$x$html
  })
})
|
8e635366b0d3d13e151d2cdb8d6b4ac2e13a3d23
|
76abe33b0dac505b1f7d771c799e18b57a8f4417
|
/shiny/colourpicker.R
|
e306150f3de0a8bce08157485098c06fc66f2162
|
[] |
no_license
|
jyeazell/DataCamp_practice
|
4ddaf889b07a2ef3fcd0965bee7d71372e3eb2f3
|
de4443e01d5414913aa555a5771d5eadc9f83700
|
refs/heads/master
| 2022-12-19T23:27:19.410533
| 2020-10-09T20:31:07
| 2020-10-09T20:31:07
| 183,300,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,081
|
r
|
colourpicker.R
|
##' Add colours to your plot: colour input
##'
##' The colourpicker package provides a colour input, available through the
##' colourInput() function. Even though colour inputs are not part of the shiny
##' package, they behave in the same way as any other input.
##'
##' A colour input can have many different arguments you can explore, but we
##' only use the basic ones: inputId, label, and value. The value argument
##' accepts a colour to use as the initial value. Colours can be specified in
##' several different formats, but the easiest one is to simply use English
##' colour names such as "red" or "yellow".
##'
##' NOTE(review): this script assumes shiny, ggplot2 and the `gapminder`
##' data set are already loaded in the session -- confirm before running.
# Load the colourpicker package
library(colourpicker)
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      textInput("title", "Title", "GDP vs life exp"),
      numericInput("size", "Point size", 1, 1),
      checkboxInput("fit", "Add line of best fit", FALSE),
      # Replace the radio buttons with a color input
      colourInput("color", "Point color", "blue"),
      selectInput("continents", "Continents",
                  choices = levels(gapminder$continent),
                  multiple = TRUE,
                  selected = "Europe"),
      sliderInput("years", "Years",
                  min(gapminder$year), max(gapminder$year),
                  value = c(1977, 2002))
    ),
    mainPanel(
      plotOutput("plot")
    )
  )
)
# Define the server logic: filter gapminder by the selected continents and
# year range, then draw a log-x scatter plot styled by the inputs.
server <- function(input, output) {
  output$plot <- renderPlot({
    data <- subset(gapminder,
                   continent %in% input$continents &
                     year >= input$years[1] & year <= input$years[2])
    p <- ggplot(data, aes(gdpPercap, lifeExp)) +
      geom_point(size = input$size, col = input$color) +
      scale_x_log10() +
      ggtitle(input$title)
    # Optionally overlay a linear regression line.
    if (input$fit) {
      p <- p + geom_smooth(method = "lm")
    }
    p
  })
}
shinyApp(ui = ui, server = server)
|
384653be537b7fa3c1e616cd7e809b6eccd86947
|
4bbb1f6be0a86429f685635056edf46f5bfa26c6
|
/MONAMI_useful.R
|
729645b22831e76039e1e522b8ff6335b8f1f097
|
[
"MIT"
] |
permissive
|
s-seo/useful-function
|
635c3c940d257dc4d5bb26512646afce8c54e6af
|
378919631e9f554484d982e94ba88fb67aa47837
|
refs/heads/master
| 2020-04-29T02:14:29.063805
| 2019-03-15T13:07:08
| 2019-03-15T13:07:08
| 175,759,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
MONAMI_useful.R
|
# Load the packages used below.
# NOTE(review): lapply(..., require, ...) silently returns FALSE for missing
# packages and uses the reassignable T instead of TRUE -- consider
# library() calls instead.
lapply(c('dplyr',
'ggplot2',
'stringr',
'data.table',
'reshape2',
'XLConnect',
'reshape',
'gridExtra',
'biglm'
), require, character.only =T)
# Read one worksheet of an Excel file via XLConnect, optionally converting
# character columns to factors (mimics the classic read.xls interface).
#   filename        path to the .xls/.xlsx file
#   sheetnumber     1-based sheet index, used when sheetname is NULL
#   sheetname       sheet name; overrides sheetnumber when given
#   forceConversion passed through to readWorksheet()
#   startCol        first column to read (0 = auto)
#   stringsAsFactors convert character columns to factors after reading
read.xls <- function(filename, sheetnumber=1, sheetname=NULL, forceConversion=TRUE, startCol=0, stringsAsFactors=TRUE) {
  wb <- loadWorkbook(filename)
  if (is.null(sheetname)) sheetname = getSheets(wb)[sheetnumber]
  df <- readWorksheet(wb, sheet=sheetname, forceConversion=forceConversion, startCol=startCol)
  if (stringsAsFactors) {
    ischar <- sapply(df, class) == "character"
    for (i in 1:length(df)) {
      if (ischar[i]) df[,i] <- factor(df[,i])
    }
  }
  df
}
# Function to inspect values that appear more than once, in relation to
# another variable (translated from the original Korean comment).
#   data   a data frame
#   index  column indices to keep (deduplicated)
#   index2 column (by index into `data`) to check for duplicates
# Returns the rows of the deduplicated selection whose `index2` value occurs
# more than once.
dupl <- function(data, index, index2){
  data2 <- data %>% select(colnames(data)[index]) %>% distinct()
  a <- which(colnames(data2) == colnames(data)[index2])
  dd <- data2 %>% select(a) %>% duplicated()
  dd2 <- data2 %>% filter(dd) %>% select(a) %>% unlist() %>% as.character()
  res <- data2 %>% filter(unlist(select(data2,a)) %in% dd2)
  return(res)
}
# Collect the names of csv files from 2019-01-03 in the project folder.
# NOTE(review): hard-coded absolute Windows path -- machine-specific.
d <- list.files('C:\\Users\\baoro\\Desktop\\공모전\\상경대 빅데이터 경진대회\\모나미') %>%
  as.data.frame() %>%
  filter(grepl('20190103',.) & grepl('csv',.)) %>%
  unlist() %>%
  as.character()
|
5c0aacb43c55db6df721774b05caaa7dedada7bc
|
d749c7a9182c1e4f9d136ee7db61bbe6a95bfcd3
|
/man/temperature_4yr.Rd
|
b557f8cccca114db2d60615247a218b1ab45bfb7
|
[
"MIT"
] |
permissive
|
themacfreezie/wxsumR
|
becdcdf805cdc6ca99fcc359cea0608ddb7a58e2
|
4fad8e9ad6610c955824f87cf85d13689b5a4836
|
refs/heads/main
| 2023-08-07T16:24:18.563452
| 2021-09-10T22:17:58
| 2021-09-10T22:17:58
| 410,063,543
| 0
| 0
|
MIT
| 2021-09-24T18:25:21
| 2021-09-24T18:25:21
| null |
UTF-8
|
R
| false
| true
| 671
|
rd
|
temperature_4yr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{temperature_4yr}
\alias{temperature_4yr}
\title{A small (four-year) example data set of daily temperature measurements}
\format{
An object of class \code{data.frame} with 100 rows and 1462 columns.
}
\usage{
temperature_4yr
}
\description{
The data set includes one column with the unique identifier for the site
(\code{hhid}) and remaining 1,461 columns with daily temperature measurements
in degrees Celsius. Columns are named with the "tmp_YYYYMMDD" format. For
example, the column \code{tmp_19830101} has temperature values for
1983-01-01.
}
\keyword{datasets}
|
bc8f4930a2b3bce41b61d4034942ef9b20562213
|
787d30612e43925008c9cbc72a3328740b7c4974
|
/BA/Sec16. Logistic Regression in R.R
|
e95589e18e78eeccd91f5ff88c7414f400f4e030
|
[] |
no_license
|
MMM-UOC/rAnalytics
|
3a16d7f00287348487c00c0748517448a15de8a4
|
5c93d788cd909b0697633a86296de49422a38198
|
refs/heads/master
| 2022-11-08T20:43:40.894935
| 2020-06-26T09:50:53
| 2020-06-26T09:50:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,631
|
r
|
Sec16. Logistic Regression in R.R
|
# Logistic Regression ----
# Data: graduate-school admissions (admit ~ gre + gpa + rank).
# Note that R requires forward slashes (/) not back slashes (\) when
# specifying a file location, even if the file is on your hard drive.
mydata <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
head(mydata)     # view the first six rows of the data
str(mydata)
summary(mydata)  # bug fix: original line had a stray trailing "2" (syntax error)
sapply(mydata, sd)
# rank and admit are categorical but stored as integers; convert to factors.
mydata$rank <- as.factor(mydata$rank)
mydata$admit <- as.factor(mydata$admit)
str(mydata)
dim(mydata)
# Two-way contingency table of categorical outcome and predictor; we want
# to make sure there are no 0 cells.
# Which rank of institute is more successful in getting admitted?
xtabs(~admit + rank, data = mydata)
# Model 1: fit on the full data ----
mylogit <- glm(admit ~ gre + gpa + rank, data = mydata, family = "binomial")
summary(mylogit)
# gre, gpa and rank are statistically significant (stars in the coefficient rows).
# Interpretation (log-odds scale, other predictors held constant):
# * each one-unit change in gre increases the log odds of admission by ~0.002;
# * each one-unit increase in gpa increases the log odds by ~0.804;
# * attending a rank-2 institution (vs rank 1) changes the log odds by ~-0.675.
# AIC for the model is 470.52.
# Split the data set into 70% training / 30% test.
set.seed(1)  # added for reproducibility; the original split was unseeded
n <- nrow(mydata)
idx <- sample(seq_len(n), size = round(0.7 * n), replace = FALSE)
train <- mydata[idx, ]
test <- mydata[-idx, ]
# Model 2: fit on the training set ----
logR1 <- glm(admit ~ gre + gpa + rank, train, family = binomial)
logR1
summary(logR1)
# Model check: look at the coefficient p-values and the AIC (lower is better).
# After splitting, gre may become insignificant while gpa and rank remain
# significant (exact results depend on the random split).
# Predict on the test set using a 0.5 probability cut-off.
predicted <- predict(logR1, newdata = test, type = "response")
predictV <- factor(ifelse(predicted < 0.5, 0, 1))
test <- cbind(test, predictV)
head(test)
str(test)
# Check the accuracy of the model with a confusion matrix.
library(caret)
confusionMatrix(test$admit, test$predictV)
# Model 3: drop gre ----
set.seed(2)
idx <- sample(seq_len(n), size = round(0.7 * n), replace = FALSE)
train <- mydata[idx, ]
test2 <- mydata[-idx, ]
logR2 <- glm(admit ~ gpa + rank, train, family = binomial)  # gre removed
logR2
summary(logR2)
pred <- predict(logR2, newdata = test2, type = "response")
pred_class <- factor(ifelse(pred < 0.5, 0, 1))
test2 <- cbind(test2, predict = pred_class)
head(test2)
# Checking accuracy using the caret package: confusion matrix.
caret::confusionMatrix(test2$admit, test2$predict)
# Confusion matrix using the table command.
table(test2$admit, pred > 0.5)
# Accuracy as the share of correct predictions.
mean(test2$predict == test2$admit)
# Prediction for new data ----
range(mydata$gpa)
df2 <- data.frame(gpa = 3.7, rank = factor(2))
df2
# Bug fix: the original omitted type = "response" here, so the 0.5 cut-off
# was applied to the log-odds (link scale), inconsistent with the models above.
p <- predict(logR2, newdata = df2, type = "response")
p
p1 <- factor(ifelse(p < 0.5, 0, 1))
p1
test2 <- cbind(df2, p1)
head(test2)
|
00dc8b6b187a8138f987ac5c8369698ac2100998
|
c40bb0f3a4b5eb625686c0742f3ec0d09158a2d4
|
/cachematrix.R
|
4b57aa4214e4dd0b5d8eb83bd0f1912bf3ad6f51
|
[] |
no_license
|
accbel/ProgrammingAssignment2
|
08ec133df2e690d04c36fb19b898f1238bfcd26b
|
ba5b90c5bbf79209db938f8709359210236e4b24
|
refs/heads/master
| 2021-01-14T14:27:54.778131
| 2015-04-21T21:57:02
| 2015-04-21T21:57:02
| 34,351,346
| 0
| 0
| null | 2015-04-21T20:46:38
| 2015-04-21T20:46:37
| null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
cachematrix.R
|
## These functions cache the inverse of a matrix so that the (potentially
## costly) call to solve() is performed at most once per matrix.
## makeCacheMatrix: build a cacheable-matrix object.
## This function will create a special object
## (a list with named elements as functions) to deal with matrix inverse
## and repeateable computations (using a cache region)
makeCacheMatrix <- function(base = matrix()) {
  # Wrap a matrix in a list of accessor functions that can also cache its
  # inverse. Returned list: set/get for the matrix itself, setInverse/
  # getInverse for the cached inverse (NULL until computed).
  cache <- NULL
  list(
    set = function(newMatrix) {
      # Replace the stored matrix and drop any stale cached inverse.
      base <<- newMatrix
      cache <<- NULL
    },
    get = function() base,
    setInverse = function(inverse) cache <<- inverse,
    getInverse = function() cache
  )
}
## Write a short comment describing this function
# This function takes a cacheable matrix and calculates its inverse, storing the
# result in a cache region to avoid repeatable computations and waste of time
cacheSolve <- function(cacheMatrix, ...) {
  # Return the inverse of the cacheable matrix 'cacheMatrix' (as built by
  # makeCacheMatrix). A previously cached inverse is reused; otherwise the
  # inverse is computed with solve(...) and stored for next time.
  inv <- cacheMatrix$getInverse()
  if (is.null(inv)) {
    # Cache miss: compute, store, and return the inverse.
    inv <- solve(cacheMatrix$get(), ...)
    cacheMatrix$setInverse(inv)
  } else {
    message("retrieving from cache...")
  }
  inv
}
|
f807831bb259eab2df69ee6d89ceced2db7329dc
|
653b8ba356ed50f74a442455e409f62976b4464d
|
/modelAnalyzeR/man/previous_western_electric_rules.Rd
|
0aa4383ea48b2b45990c56eb4bd75fba11200973
|
[
"MIT"
] |
permissive
|
kiran1984/SCOPE-Anomaly-Detection-Case-Study
|
e5bcfaf981b78695f7ebebdfb8b40ed7871244c5
|
21a0bb9e16a200ba1fcf29354c544524cec9a154
|
refs/heads/master
| 2020-06-22T11:09:53.603581
| 2018-06-30T21:53:38
| 2018-06-30T21:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 754
|
rd
|
previous_western_electric_rules.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/western_electric_rules.R
\name{previous_western_electric_rules}
\alias{previous_western_electric_rules}
\title{Previous Western Electric Rules}
\usage{
previous_western_electric_rules(rule, rules_order, include = TRUE)
}
\arguments{
\item{rule}{Name of the rule to start with}
\item{rules_order}{Vector of rules from first checked to last}
\item{include}{Boolean indicating whether or not to include the rule provided in the list. Defaults to TRUE (include it)}
}
\value{
A vector of rules or "N/A" if there are no rules and the provided one isn't included
}
\description{
Return the current rule and the previous rules that would precede it.
}
\author{
Stefanie Molin
}
|
423ea82251e84e7c67e02e05e869f2d11786f2a5
|
abe6484733486374f615840d7bddd249b354b48b
|
/man/AIC.Rd
|
573f8975171f2191d7e60150adea4ee9e635183f
|
[] |
no_license
|
cran/spaMM
|
5c64ff59d95ade15763d552445525729307f4f00
|
f96d37b51601ceaa49c61c0d86f0526fc2aa8c21
|
refs/heads/master
| 2023-08-07T17:25:27.993410
| 2023-07-06T10:30:02
| 2023-07-06T10:30:02
| 17,699,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,505
|
rd
|
AIC.Rd
|
\name{AIC}
\alias{get_any_IC}
\alias{AIC}
\alias{AIC.HLfit}
\alias{extractAIC}
\alias{extractAIC.HLfit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Extractors for information criteria such as AIC
}
\description{
\code{get_any_IC} computes model selection/information criteria such as AIC. See Details for more information about these criteria. The other extractors \code{AIC} and \code{extractAIC} are methods for \code{HLfit} objects of generic functions defined in other packages: \code{AIC} is equivalent to \code{get_any_IC} (for a single fitted-model object), and \code{extractAIC} returns the marginal AIC and the number of degrees of freedom for the fixed effects.
}
\usage{
get_any_IC(object, nsim=0L, ..., verbose=interactive(),
also_cAIC=TRUE, short.names=NULL)
\method{AIC}{HLfit}(object, ..., nsim=0L, k, verbose=interactive(),
also_cAIC=TRUE, short.names=NULL)
\method{extractAIC}{HLfit}(fit, scale, k, ..., verbose=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object, fit}{A object of class \code{HLfit}, as returned by the fitting functions in \code{spaMM}.}
\item{scale, k}{Currently ignored, but are required in the definitions for consistency with the generic.}
\item{verbose}{ Whether to print the model selection criteria or not. }
\item{also_cAIC}{Whether to include the plug-in estimate of conditional AIC in the result (its computation may be slow).}
\item{nsim}{Controls whether to include the bootstrap estimate of conditional AIC (see Details) in the result. If positive, \code{nsim} gives the number of bootstrap replicates.}
\item{short.names}{NULL, or boolean; controls whether the return value uses short names (\code{mAIC}, etc., as shown by screen output if \code{verbose} is TRUE), or the descriptive names (\code{" marginal AIC:"}, etc.) also shown in the screen output. Short names are more appropriate for programming but descriptive names may be needed for back-compatibility. The default (NULL) ensures back-compatibility by using descriptive names unless the bootstrap estimate of conditional AIC is reported.}
\item{\dots}{For \code{AIC.HLfit}: may include more fitted-model objects, consistently with the generic. For this and the other functions: other arguments that may be needed by some method. For example, if \code{nsim} is positive, a \code{seed} argument may be passed to \code{simulate}, and the other \dQuote{\dots} may be used to control the optional parallel execution of the bootstrap computations (by providing arguments to \code{\link{dopar}}).
}
}
\details{
The AIC is a measure (by Kullback-Leibler directed distance, up to an additive constant) of quality of prediction of new data by a fitted model.
Comparing information criteria may be viewed as a fast alternative to a comparison of the predictive accuracy of different models by cross-validation. Further procedures for model choice may also be useful (e.g. Williams, 1970; Lewis et al. 2010).
The \bold{conditional AIC} (Vaida and Blanchard 2005) applies the AIC concept to new realizations of a mixed model, conditional on the realized values of the random effects. Lee et al. (2006) and Ha et al (2007) defined a corrected AIC [i.e., AIC(D*) in their eq. 7] which is here interpreted as the conditional AIC.
Such Kullback-Leibler relative distances cannot generally be evaluated exactly and various estimates have been discussed.
\code{get_any_IC} computes, optionally prints, and returns invisibly one or more of the following quantities:\cr
* Akaike's classical AIC (\bold{marginal AIC}, \code{mAIC}, i.e., minus twice the marginal log-likelihood plus twice the number of fitted parameters);\cr
* a plug-in estimate (\code{cAIC}) and/or a bootstrap estimate (\code{b_cAIC}) of the conditional AIC;\cr
* a focussed AIC for dispersion parameters (\bold{dispersion AIC}, \code{dAIC}).
For the \bold{conditional AIC}, Vaida and Blanchard's plug-in estimator involves the conditional likelihood, and degrees of freedom for (i) estimated residual error parameters and (ii) the overall linear predictor characterized by the \bold{Effective degrees of freedom} already discussed by previous authors including Lee and Nelder (1996), which gave a plug-in estimator (\eqn{p_D}) for it in HGLMs.
By default, the plug-in estimate of both the conditional AIC and of \eqn{n-p_D} (\code{GoFdf}, where \eqn{n} is the length of the response vector) are returned by \code{get_any_IC}. But these are biased estimates of conditional AIC and effective df, and an alternative procedure is available for GLM response families if a non-default positive \code{nsim} value is used. In that case, the conditional AIC is estimated by a bootstrap version of Saefken et al. (2014)'s equation 2.5; this involves refitting the model to each bootstrap samples, so it may take time, and a full cross-validation procedure might as well be considered for model selection.
The dispersion AIC has been defined from restricted likelihood by Ha et al (2007; eq.10). The present implementation will use restricted likelihood only if made available by an REML fit, otherwise marginal likelihood is used.
}
\value{
\code{get_any_IC}, a numeric vector whose possible elements are described in the Details, and whose names are controlled by the \code{short.names} argument. Note that the bootstrap computation actually makes sense and works also for fixed-effect models (although it is not clear how useful it is in that case). The return value will still refer to its results as conditional AIC.
For \code{AIC}, If just one fit object is provided, the same return value as for \code{get_any_IC}. If multiple objects are provided, a data.frame built from such vectors, with rows corresponding to the objects.
For \code{extractAIC}, a numeric vector of length 2, with first and second elements giving
\item{* edf}{the degree of freedom of the fixed-effect terms of the model
for the fitted model \code{fit}.}
\item{* AIC}{the (marginal) Akaike Information Criterion for \code{fit}.}
Likelihood is broadly defined up to a constant, which opens the way for inconsistency between different likelihood and AIC computations. In \pkg{spaMM}, likelihood is nothing else than the probability or probability density of the data as function of model parameters. No constant is ever added, in contrast to \code{stats::extractAIC} output, so there are discrepancies with the latter function (see Examples).
}
\references{
%Cox, D. R. and Donnelly C. A. (2011) Principles of Applied Statistics. Cambridge Univ. Press.
Ha, I. D., Lee, Y. and MacKenzie, G. (2007) Model selection for multi-component frailty models. Statistics in Medicine 26: 4790-4807.
Lee Y. and Nelder. J. A. 1996. Hierarchical generalized linear models (with discussion). J. R. Statist. Soc. B, 58: 619-678.
Lewis, F., Butler, A. and Gilbert, L. (2011), A unified approach to model selection using the likelihood ratio test. Methods in Ecology and Evolution, 2: 155-162. \doi{10.1111/j.2041-210X.2010.00063.x}
%Overholser R., and Xu R. (2104) Effective degrees of freedom and its application to conditional AIC for linear mixed-effects models with correlated error structures. J. Multivariate Anal. 132: 160-170.
Saefken B., Kneib T., van Waveren C.-S., Greven S. (2014) A unifying approach to the estimation of the conditional Akaike information in generalized linear mixed models. Electron. J. Statist. 8, 201-225.
Vaida, F., and Blanchard, S. (2005) Conditional Akaike information for mixed-effects models. Biometrika 92, 351-370.
Williams D.A. (1970) Discrimination between regression models to determine the pattern of enzyme synthesis in synchronous cell cultures. Biometrics 26: 23-32.
}
\examples{
data("wafers")
m1 <- fitme(y ~ X1+X2+X3+X1*X3+X2*X3+I(X2^2)+(1|batch), data=wafers,
family=Gamma(log))
get_any_IC(m1)
# => The plug-in estimate is stored in the 'm1' object
# as a result of the previous computation, and is now returned even by:
get_any_IC(m1, also_cAIC=FALSE)
if (spaMM.getOption("example_maxtime")>4) {
get_any_IC(m1, nsim=100L, seed=123) # provides bootstrap estimate of cAIC.
# (parallelisation options could be used, e.g. nb_cores=detectCores()-1L)
}
extractAIC(m1)
\dontrun{
# Checking (in)consistency with glm example from help("stats::extractAIC"):
utils::example(glm) # => provides 'glm.D93' fit object
logLik(glm.D93) # logL= -23.38066 (df=5)
dataf <- data.frame(counts=counts,outcome=outcome, treatment=treatment)
extractAIC(fitme(counts ~ outcome + treatment, family = poisson(), data=dataf))
# => 56.76132 = -2 logL + 2* df
extractAIC(glm.D93) # 56.76132 too
#
# But for LM:
lm.D93 <- lm(counts ~ outcome + treatment, data=dataf)
logLik(lm.D93) # logL=-22.78576 (df=6)
extractAIC(fitme(counts ~ outcome + treatment, data=dataf)) # 57.5715 = -2 logL + 2* df
extractAIC(lm.D93) # 30.03062
### Inconsistency also apparent in drop1 output for :
# Toy data from McCullagh & Nelder (1989, pp. 300-2), as in 'glm' doc:
clotting <- data.frame(
u = c(5,10,15,20,30,40,60,80,100),
lot1 = c(118,58,42,35,27,25,21,19,18),
lot2 = c(69,35,26,21,18,16,13,12,12))
#
drop1( fitme(lot1 ~ log(u), data = clotting), test = "F") # again reports marginal AIC
# => this may differ strongly from those returned by drop1( < glm() fit > ),
# but the latter are not even consistent with those from drop1( < lm() fit > )
# for linear models. Compare
drop1( lm(lot1 ~ log(u), data = clotting), test = "F") # consistent with drop1.HLfit()
drop1( glm(lot1 ~ log(u), data = clotting), test = "F") # inconsistent
## Discrepancies in drop1 output with Gamma() family:
gglm <- glm(lot1 ~ 1, data = clotting, family=Gamma())
logLik(gglm) # -40.34633 (df=2)
spgglm <- fitme(lot1 ~ 1, data = clotting, family=Gamma())
logLik(spgglm) # -40.33777 (slight difference:
# see help("method") for difference in estimation method between glm() and fitme()).
# Yet this does not explain the following:
drop1( fitme(lot1 ~ log(u), data = clotting, family=Gamma()), test = "F")
# => second AIC is 84.676 as expected from above logLik(spgglm).
drop1( glm(lot1 ~ log(u), data = clotting, family=Gamma()), test = "F")
# => second AIC is 1465.27, quite different from -2*logLik(gglm) + 2*df
}
}
\keyword{models}
|
aef141d717b28571af8454c5711b01e0e7ee12f1
|
4755f3de81c8fead02dc028a52fb22183381bf6e
|
/lecture6.R
|
02e4a7694930f1c4f4938d97fff41beda9d42d13
|
[] |
no_license
|
LucIFer427/mhl_programming_econ
|
9bd63cc58cacfa1bc10ce5ca76f98e1664a369b6
|
bc9f3f59a63d8779269b9657a7dc82867ac874e6
|
refs/heads/master
| 2022-04-16T11:40:09.172882
| 2020-04-13T10:36:38
| 2020-04-13T10:36:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
r
|
lecture6.R
|
# Sample Web of Science author-address string (semicolon-separated entries of
# the form "[Author] Affiliation"); presumably kept as raw material for a
# string-parsing exercise -- TODO confirm intended use.
c1 <- '[Uhlmann, Eric Luis] HEC Paris, Management & Human Resources Dept, F-78351 Jouy En Josas, France; [Heaphy, Emily] Boston Univ, Sch Management, Boston, MA 02215 USA; [Ashford, Susan J.] Univ Michigan, Sch Business, Ann Arbor, MI 48109 USA; [Lei, Luke [Zhu] Univ British Columbia, Sauder Sch Business, Vancouver, BC V5Z 1M9, Canada; [Sanchez-Burks, Jeffrey] Univ Michigan, Stephen M Ross Sch Business, Ann Arbor, MI 48109 USA'
|
44401017625a2ea8ba5d6c73f322e2a03e272cb5
|
3dd96bb147da182d23824ac7172031c309f55298
|
/bash_DADA2_script.R
|
4b74bd865309f1a84812d088280a3a0915b12da8
|
[] |
no_license
|
qinglong89/Taxa4Meta-ParameterBenchmarking
|
4d07e95f83abae79efb6feba2eeb510da67ae0f4
|
8f2155981c52437e74072f9644fe0b2c65d7a17a
|
refs/heads/main
| 2023-04-24T13:05:48.901832
| 2021-05-17T16:25:05
| 2021-05-17T16:25:05
| 368,240,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,052
|
r
|
bash_DADA2_script.R
|
#!/usr/bin/env Rscript
# Denoise a directory of single-end FASTQ files with DADA2 (workflow based on
# the DADA2 v1.8 tutorial) and write the resulting ASV feature table and ASV
# sequences as tab-separated files in the input directory.
#
# usage: Rscript --vanilla bash_DADA2_script.R /path-to-fastq-files/
# alternative usage: R CMD BATCH bash_DADA2_script.R (then hard-code DirPath below)

# Only the first command-line argument is used: the FASTQ directory.
args <- commandArgs(trailingOnly=TRUE)
DirPath <- args[1]
# alternative usage (need to deactivate above two commands)
# DirPath <- "/mnt/home1/qinglong/Test_SimulatedAmplicon_NCBI16SRefSeq_ReadLength_taxonomy/Clustering_Denoising_accuracy/NCBI_16S-rRNA-RefSeq_V6V9_AbundanceSimulated_reverse_amplicon"

library(dada2)
setwd(DirPath)
path <- DirPath
list.files(path)

# Collect input FASTQ files. FIX: `pattern` is a regular expression, so the
# original ".fastq" (unescaped dot, unanchored) also matched names such as
# "Xfastq.txt" or "sample.fastq.gz"; anchoring to the literal ".fastq" suffix
# makes this consistent with the gsub() below, which strips that same suffix.
fnFs <- sort(list.files(path, pattern="\\.fastq$", full.names = TRUE))
filenames <- gsub(pattern = "\\.fastq$", "", basename(fnFs))

# Filtered reads are written to a new subdirectory "dada2Filtered".
filtFs <- file.path(path, "dada2Filtered", paste0(filenames, "_filtered.fastq"))
names(filtFs) <- filenames

# Filter and trim (minLen must exceed DADA2's default k-mer size of 5).
# Note that dada2 does not allow Ns; output is DADA2's binary fastq format.
out <- filterAndTrim(fnFs, filtFs, truncQ=2, maxN=0, maxEE=2, rm.phix=TRUE, multithread=TRUE)

# Learn the error model from the filtered reads and plot the fit.
errF <- learnErrors(filtFs, multithread=TRUE)
plotErrors(errF, nominalQ=TRUE)
dev.off()

# Dereplicate identical reads per sample.
derepFs <- derepFastq(filtFs, verbose=TRUE)
names(derepFs) <- filenames

# Apply the core sample-inference algorithm to the dereplicated data.
dadaFs <- dada(derepFs, err=errF, multithread=TRUE)

# Construct the amplicon sequence variant (ASV) table and write outputs.
seqtab <- makeSequenceTable(dadaFs)
ASVseq <- getSequences(seqtab)
write.table(seqtab, file = "DADA2_ASV-feature-table.txt", sep="\t")
write.table(ASVseq, file = "DADA2_ASV-seq.txt", sep="\t")

quit(save="yes") # saves the workspace; this does not change the work directory
|
8366c93a08d772888382862b4beff6f41204b40a
|
f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6
|
/source/macOS/R-Portable-Mac/library/plotly/demo/animation-tour-basic.R
|
b56a31313211d141a2e79b6205ee061004e48c11
|
[
"MIT",
"CC-BY-3.0",
"GPL-2.0-only"
] |
permissive
|
romanhaa/Cerebro
|
5b2d9371403c52f60341894f84cd0f6a006cc930
|
946ed178c986027d60af6013e63d1fc51ae8b371
|
refs/heads/master
| 2022-12-02T15:49:57.705873
| 2021-11-20T11:47:12
| 2021-11-21T17:09:37
| 164,686,297
| 87
| 23
|
MIT
| 2022-11-10T18:21:44
| 2019-01-08T16:09:59
|
HTML
|
UTF-8
|
R
| false
| false
| 1,350
|
r
|
animation-tour-basic.R
|
# adapted from https://github.com/rstudio/ggvis/blob/master/demo/tourr.r
# Animated grand tour of the flea data: project the six numeric measurements
# onto 2-D planes with tourr and animate the sequence of planes with plotly.
library(tourr)
library(plotly)

# Rescale each measurement column to a common range; new_tour() returns a
# closure that yields successive projection bases as it is called.
mat <- rescale(as.matrix(flea[1:6]))
tour <- new_tour(mat, grand_tour(), NULL)
# Advance the global `tour` by `step_size` and return the projected data
# points (x, y) for the current basis, labeled with each flea's species.
tour_dat <- function(step_size) {
  basis <- tour(step_size)$proj
  pts <- center(mat %*% basis)
  data.frame(x = pts[, 1], y = pts[, 2], species = flea$species)
}
# Advance the global `tour` by `step_size` and return the projection-axis
# endpoints (x, y) for each measured variable in the current basis.
proj_dat <- function(step_size) {
  basis <- tour(step_size)$proj
  data.frame(x = basis[, 1], y = basis[, 2], measure = colnames(mat))
}
# Animation schedule: 50 frames of 1/15 step after an initial zero step;
# `stepz` is each frame's cumulative position along the tour path.
steps <- c(0, rep(1/15, 50))
stepz <- cumsum(steps)
# tidy version of tour data
# NOTE: the name `tour_dat` is rebound below from the helper function to the
# combined data frame (the function has already been applied by that point).
tour_dats <- lapply(steps, tour_dat)
tour_datz <- Map(function(x, y) cbind(x, step = y), tour_dats, stepz)
tour_dat <- dplyr::bind_rows(tour_datz)
# tidy version of tour projection data (same rebinding applies to `proj_dat`)
proj_dats <- lapply(steps, proj_dat)
proj_datz <- Map(function(x, y) cbind(x, step = y), proj_dats, stepz)
proj_dat <- dplyr::bind_rows(proj_datz)
# Shared axis settings: no title, fixed [-1, 1] range, no zero line.
ax <- list(
  title = "",
  range = c(-1, 1),
  zeroline = FALSE
)
# for nicely formatted slider labels
options(digits = 2)
# One animation frame per `step`: variable axes drawn as segments plus text
# labels, data points colored by species; 33 ms per frame, no full redraw.
proj_dat %>%
  plot_ly(x = ~x, y = ~y, frame = ~step, color = I("gray80")) %>%
  add_segments(xend = 0, yend = 0) %>%
  add_text(text = ~measure) %>%
  add_markers(color = ~species, data = tour_dat) %>%
  hide_legend() %>%
  layout(xaxis = ax, yaxis = ax) %>%
  animation_opts(33, redraw = FALSE)
|
1b1ff7637d285458d18607c611abdbf5b6b7564f
|
3cd53640e1e89f3c8ea21bc02f6bf66160d08f7d
|
/scripts/parallel_process.R
|
7a44bc593d4b60d2bc16017cac9d57c6ba24a9ff
|
[
"MIT"
] |
permissive
|
benranco/SNPpipeline
|
c1823ae0ba3c9d5af04d4282c45f6419d027f6a7
|
c73c5621cfe3b88baed72ae46d538149ad02a25e
|
refs/heads/master
| 2022-09-17T12:14:23.564823
| 2022-09-14T09:41:11
| 2022-09-14T09:41:11
| 66,683,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,319
|
r
|
parallel_process.R
|
# CLI: Rscript parallel_process.R <run-path> <report .Rds filename> <1|2>
# where the third argument selects haploid (1) or diploid (2) genotype calls.
args <- commandArgs(trailingOnly = TRUE)
path <- args[1]
file <- args[2]
haploidOrDiploid <- args[3]  # stays a character string; "1" == 1 is TRUE via coercion
#path <- "/home/gosuzombie/Desktop/region_38/run"
#file <- "report_p432.Rds"
options(stringsAsFactors = FALSE, warn = 1)
# Globals shared with the functions below; processTViewOut() writes into
# `report` via <<-, and `s` holds the index of the first sample column.
report <- NULL
s <- NULL
#################################################
# Emit a diagnostic block on stderr describing a system command that failed,
# then yield NA so the caller can record this row/sample lookup as skipped.
# `err` may be a condition object; it is handed to message() unchanged.
printErr <- function(command, err) {
  message(
    "**************** parallel_process.R: An error occured trying to execute the following system command: "
  )
  message(command)
  message("And below is the error message from R:")
  message(err)
  message(" ")
  message(
    "We will skip trying to retrieve data from the BAM file for this particular row+sample."
  )
  NA
}
#################################################
# Parse one `samtools tview` text dump for a single report row (rnum) and
# sample column (cnum). tview is a 1-column matrix of text lines: line 1 is
# the header, line 2 the reference, line 3 the consensus. If the reference
# line agrees with `ref` (after inserting tview's '*' gap markers into it),
# a genotype string is built from the consensus line and written into the
# GLOBAL `report[rnum, cnum]` via <<- : "ALT/ALT" for diploid calls,
# "ALT/" for haploid. Also reads the global `haploidOrDiploid` ("1" or "2").
# On any unexpected consensus character — or any W/S/M/K/R/Y inside an
# indel — the cell is left untouched (stays NA).
processTViewOut <- function(tview, rnum, cnum, ref) {
  isIndel <- (nchar(ref) > 1)
  # get indexes of all * characters in the ref line
  starsInRefLine <- gregexpr(fixed=TRUE, pattern ='*',tview[2,1])
  starsInRefLine <- starsInRefLine[[1]][1:length(starsInRefLine[[1]])] # now it's a simple vector
  # if there are actually stars in the ref line, update the ref to include those stars:
  if(starsInRefLine[1] != -1) {
    # for each * that is within the ref scope, insert it into the actual ref, so the ref will be the proper
    # length for comparing against the line below it.
    # (this loop assumes the gregexpr function call above returned the indexes of star characters in
    # increasing order)
    for (n in 1:length(starsInRefLine)) {
      starIndex <- starsInRefLine[n]
      if (starIndex <= nchar(ref)) {
        ref <- paste0(substr(ref, 1, starIndex-1), "*", substr(ref, starIndex, nchar(ref)))
      } else {
        break
      }
    }
  }
  if(substring(tview[2,1], 1 ,nchar(ref)) == ref) {
    if(nrow(tview) >= 3) {
      # The following logic is based on information from these two links:
      # https://en.wikipedia.org/wiki/Pileup_format
      # https://en.wikipedia.org/wiki/Nucleic_acid_sequence
      # It assumes the very first line returned from tview is either some sort of header or blank spaces,
      # the second line begins with the REF, and the third line is a summary/conclusion of all the lines
      # below it.
      #
      # pseudocode:
      # only proceed if REF matches the first part of line 2 (first data line)
      # now, for comparing line 3 to line 2:
      # for each position:
      #   if . or , then use REF/REF (diploid) or REF/ (haploid)
      #   if (A|G|C|T|a|g|c|t) then use uppercase(x/x or x/)
      #   if diploid:
      #     if W then A/T or T/A (always REF/ALT, or if neither are REF then the order doesn't matter).
      #       (haploid shouldn't have these) (if it is part of an indel then put REF before the / and ALT
      #       after the /) (ACTUALLY, if indel, we don't treat the indels that have WSMKRY, too complex)
      #     if S then C/G or G/C
      #     if M then A/C or C/A
      #     if K then G/T or T/G
      #     if R then A/G or G/A
      #     if Y then C/T or T/C
      #   if empty (as is usually the case when the REF position above it is *), or *, skip this position.
      #     (in the case where it is a character but the REF position above it is a *, we just treat it
      #     with the same rules as anything else, so the result could end up being longer than the REF (but
      #     we've already adjusted the REF that we're comparing it against to be the same length)).
      #     eg. (with REF = TAAAC):
      #     T*AAAC*AAAAGGGAGATTTTGGA*T*A**G*G*G*G*G**T*A**GTAA***CC*AACAA*T*G*C*CC*C*C*GCACG
      #     . .... ..........C.Y.... Y . . . . . . Y . .... .. .R... Y K . .. . . S.R..
      #     This case would be: TAAAC/TAAAC
      #
      #     T*AAAC*AAAAGGGAGATTTTGGA*T*A**G*G*G*G*G**T*A**GTAA***CC*AACAA*T*G*C*CC*C*C*GCACG
      #     .A.... ..........C.Y.... Y . . . . . . Y . .... .. .R... Y K . .. . . S.R..
      #     This case would be: TAAAAC/TAAAAC
      firstHalf <- ""
      secondHalf <- ""
      abort <- FALSE
      for (n in 1:nchar(ref)) {
        x <- substring(tview[3,1], n ,n) # the character we're looking at in this iteration
        # NOTE: there is no code to deal with the case when a character in line 3 is " " (or "*") because
        # we just skip it if so.
        # 1 == haploid, 2 == diploid
        if (haploidOrDiploid == 1) { # haploid
          if(x == "." || x == ",") {
            firstHalf <- paste0(firstHalf, substring(ref, n ,n) )
          } else if(x == "A" || x == "G" || x == "C" || x == "T") {
            firstHalf <- paste0(firstHalf, x)
          } else if(x == "a" || x == "g" || x == "c" || x == "t") {
            firstHalf <- paste0(firstHalf, toupper(x))
          } else if(x == "W" || x == "S" || x == "M" || x == "K" || x == "R" || x == "Y") {
            # ambiguity codes imply two alleles; invalid for a haploid call
            abort <- TRUE
            break;
          } else if(x != " " && x != "*") {
            abort <- TRUE
            break;
          }
        } else { # diploid
          if(x == "." || x == ",") {
            r <- substring(ref, n ,n)
            firstHalf <- paste0(firstHalf, r)
            secondHalf <- paste0(secondHalf, r)
          } else if(x == "A" || x == "G" || x == "C" || x == "T") {
            firstHalf <- paste0(firstHalf, x)
            secondHalf <- paste0(secondHalf, x)
          } else if(x == "a" || x == "g" || x == "c" || x == "t") {
            firstHalf <- paste0(firstHalf, toupper(x))
            secondHalf <- paste0(secondHalf, toupper(x))
          } else if(isIndel && (x == "W" || x == "S" || x == "M" || x == "K" || x == "R" || x == "Y") ) {
            abort <- TRUE # we are leaving indels containing W,S,M,K,R,Y as NA.
            break;
          } else if(x == "W") {
            if (substring(ref, n ,n) == "T") {
              firstHalf <- paste0(firstHalf, "T")
              secondHalf <- paste0(secondHalf, "A")
            } else {
              firstHalf <- paste0(firstHalf, "A")
              secondHalf <- paste0(secondHalf, "T")
            }
          } else if(x == "S") {
            if (substring(ref, n ,n) == "G") {
              firstHalf <- paste0(firstHalf, "G")
              secondHalf <- paste0(secondHalf, "C")
            } else {
              firstHalf <- paste0(firstHalf, "C")
              secondHalf <- paste0(secondHalf, "G")
            }
          } else if(x == "M") {
            if (substring(ref, n ,n) == "C") {
              firstHalf <- paste0(firstHalf, "C")
              secondHalf <- paste0(secondHalf, "A")
            } else {
              firstHalf <- paste0(firstHalf, "A")
              secondHalf <- paste0(secondHalf, "C")
            }
          } else if(x == "K") {
            if (substring(ref, n ,n) == "T") {
              firstHalf <- paste0(firstHalf, "T")
              secondHalf <- paste0(secondHalf, "G")
            } else {
              firstHalf <- paste0(firstHalf, "G")
              secondHalf <- paste0(secondHalf, "T")
            }
          } else if(x == "R") {
            if (substring(ref, n ,n) == "G") {
              firstHalf <- paste0(firstHalf, "G")
              secondHalf <- paste0(secondHalf, "A")
            } else {
              firstHalf <- paste0(firstHalf, "A")
              secondHalf <- paste0(secondHalf, "G")
            }
          } else if(x == "Y") {
            if (substring(ref, n ,n) == "T") {
              firstHalf <- paste0(firstHalf, "T")
              secondHalf <- paste0(secondHalf, "C")
            } else {
              firstHalf <- paste0(firstHalf, "C")
              secondHalf <- paste0(secondHalf, "T")
            }
          } else if(x != " " && x != "*") {
            abort <- TRUE
            break;
          }
        }
      } # end for-loop
      if (!abort && firstHalf != "") {
        # 1 == haploid, 2 == diploid. If it's haploid, we follow the format in the .tab file of "A/",
        # whereas if it's diploid we follow the format in the .tab file of "A/A".
        # Update 2022-04-04: Fixed a bug by using <<-, previously was using <- which doesn't assign to a global variable.
        if (haploidOrDiploid == 1) {
          report[rnum,cnum] <<- paste0(firstHalf, "/")
        } else {
          report[rnum,cnum] <<- paste0(firstHalf, "/", secondHalf)
        }
      }
    }
  }
} # end function
#################################################
# Manual unit test for processTViewOut(). Intended to be run interactively:
#   source('parallel_process.R')   (ignore the error raised by the main script)
#   testProcessTViewOut()
# It temporarily swaps the globals `report`, `haploidOrDiploid`, and `s` for
# fixture values, prints "This should be TRUE: ..." lines for a series of
# diploid and then haploid calling scenarios, and restores the real globals
# before returning.
testProcessTViewOut <- function() {
  # Create this test report:
  #       CHROM POS    REF  sample1 sample2
  # r1    10    AATG   NA    NA
  # r2    20    A      NA    NA
  # r3    30    G      NA    NA
  # r4    40    AATATC NA    NA
  myReport <- data.frame(c("r1","r2","r3","r4"), c(10,20,30,40), c("AATG","A","G","AATATC"), character(4), character(4))
  names(myReport) <- c("CHROM","POS","REF","sample1","sample2")
  myReport[,4:5] <- NA
  # save state of global variables before appropriating them:
  realReport <- report
  report <<- myReport
  realHaploidOrDiploid <- haploidOrDiploid
  haploidOrDiploid <<- 2
  realS <- s
  s <<- 4
  # Test indel with Y, this should be NA:
  data1 <- as.matrix(c("151       161       171       181       191       201       211       221 ",
                       "AATGAATTTCCACATGCCTTTGAATCTACTTCTATGCTCACTTATGGCATTGGGAGTTTGGACGGGTGTTGGGAAGGAGA",
                       "G.Y..............A....C....R......YK............G........................R......",
                       "G.....*..........A....C....G......C.............G..............................."))
  processTViewOut(data1, 1, 4, "AATG")
  write(paste0("This should be TRUE: ", is.na(report[1,4])), stdout())
  # If REF is just A, this should be A/G, if REF is just G, this should be G/A:
  data2 <- as.matrix(c("151       161       171       181       191       201       211       221 ",
                       "AATGAATTTCCACATGCCTTTGAATCTACTTCTATGCTCACTTATGGCATTGGGAGTTTGGACGGGTGTTGGGAAGGAGA",
                       "R.Y..............A....C....R......YK............G........................R......",
                       "G.....*..........A....C....G......C.............G..............................."))
  processTViewOut(data2, 2, 4, "A")
  write(paste0("This should be TRUE: ", "A/G" == report[2,4]), stdout())
  processTViewOut(data2, 3, 4, "G")
  # false because it doesn't proceed if the REF doesn't match the ref in the data line
  write(paste0("This should be TRUE: ", is.na(report[3,4])), stdout())
  # If REF is just A, this should be A/G, if REF is just G, this should be G/A:
  data3 <- as.matrix(c("151       161       171       181       191       201       211       221 ",
                       "GATGAATTTCCACATGCCTTTGAATCTACTTCTATGCTCACTTATGGCATTGGGAGTTTGGACGGGTGTTGGGAAGGAGA",
                       "R.Y..............A....C....R......YK............G........................R......",
                       "G.....*..........A....C....G......C.............G..............................."))
  processTViewOut(data3, 2, 5, "G")
  write(paste0("This should be TRUE: ", "G/A" == report[2,5]), stdout())
  # Test complex indel, If REF is AATATC, this ALT should be GACAATC/GACAATC:
  data4 <- as.matrix(c("151       161       171       181       191       201       211       221 ",
                       "AAT*A***TCCACATGCCTTTGAATCTACTTCTATGCTCACTTATGGCATTGGGAGTTTGGACGGGTGTTGGGAAGGAGA",
                       "G.C .A .........A....C....R......YK............G........................R......",
                       "G.....*..........A....C....G......C.............G..............................."))
  processTViewOut(data4, 4, 4, "AATATC")
  write(paste0("This should be TRUE: ", "GACAATC/GACAATC" == report[4,4]), stdout())
  # Testing for an unsupported character "U", this should give NA:
  data5 <- as.matrix(c("151       161       171       181       191       201       211       221 ",
                       "AATGAATTTCCACATGCCTTTGAATCTACTTCTATGCTCACTTATGGCATTGGGAGTTTGGACGGGTGTTGGGAAGGAGA",
                       "U.Y..............A....C....R......YK............G........................R......",
                       "G.....*..........A....C....G......C.............G..............................."))
  processTViewOut(data5, 3, 5, "A")
  write(paste0("This should be TRUE: ", is.na(report[3,5])), stdout())
  #report
  message("-----------------------")
  # Now doing the same tests but for haploid:
  report[,4:5] <<- NA
  haploidOrDiploid <<- 1
  processTViewOut(data1, 1, 4, "AATG")
  write(paste0("This should be TRUE: ", is.na(report[1,4])), stdout())
  # extra test:
  processTViewOut(data1, 1, 5, "AA")
  write(paste0("This should be TRUE: ", "GA/" == report[1,5]), stdout())
  processTViewOut(data2, 2, 4, "A")
  write(paste0("This should be TRUE: ", is.na(report[2,4])), stdout())
  processTViewOut(data2, 3, 4, "G")
  # false because it doesn't proceed if the REF doesn't match the ref in the data line
  write(paste0("This should be TRUE: ", is.na(report[3,4])), stdout())
  processTViewOut(data3, 2, 5, "G")
  write(paste0("This should be TRUE: ", is.na(report[2,5])), stdout())
  processTViewOut(data4, 4, 4, "AATATC")
  write(paste0("This should be TRUE: ", "GACAATC/" == report[4,4]), stdout())
  # extra test:
  processTViewOut(data4, 4, 5, "A")
  write(paste0("This should be TRUE: ", "G/" == report[4,5]), stdout())
  processTViewOut(data5, 3, 5, "A")
  write(paste0("This should be TRUE: ", is.na(report[3,5])), stdout())
  #report
  # set global variables back to whatever real data may have been in it:
  report <<- realReport
  haploidOrDiploid <<- realHaploidOrDiploid
  s <<- realS
}
#################################################
# Execution begins here (above are functions):
# To test the processTViewOut function, just run source('parallel_process.R') from within an
# R session, it'll give you an error from the below code, but ignore it, then run testProcessTViewOut().
message(paste0("parallel_process: looking up NA data on ", file))
report <- readRDS(paste0(path, "/reporttemp/", file))
# First sample column is 4, or 5 when a COMBINED column is present.
if(!("COMBINED" %in% colnames(report))) {
  s <- 4
} else {
  s <- 5
}
# For every NA cell in the sample columns, shell out to samtools tview at
# that row's CHROM:POS and try to reconstruct the genotype from the BAM.
for(a in 1:nrow(report)) {
  reference <- report[a, "REF"]
  if(s <= ncol(report)) {
    for(b in s:ncol(report)) {
      if(is.na(report[a,b])) {
        # BAM filename = sample column name minus its 4-character extension,
        # plus the "_sorted_markDup.bam" suffix.
        fn <- paste(substr(colnames(report)[b], 0 , nchar(colnames(report)[b]) - 4),
                    "_sorted_markDup.bam", sep = "")
        cmd <- paste0(path, "/tools/samtools-1.3.1/samtools tview ", path ,"/dataTemp/single/", fn ,
                      " ", path, "/reference/formatted_output.fasta -d T",
                      ' -p \"', paste(report[a, "CHROM"], report[a, "POS"], sep = ":"), '"')
        tryCatch(
          {
            # Update 2022-04-04: added header=FALSE to read.delim to deal with the R error "duplicate 'row.names'
            # are not allowed" in the cases where there is a merely whitespace first line instead of a proper
            # header".
            # This means that the first line will always be the header (even if the header was just whitespace), and
            # the second line will always be the one containing the REF, and the third line will always be the
            # summary line of the lines below it.
            tviewOut <- as.matrix(read.delim(pipe(cmd), header=FALSE, sep = "\n"))
            processTViewOut(tviewOut, a, b, reference)
            #return(NA) # only necessary when doing tryCatch in a function
          },
          error=function(error_message) {
            printErr(cmd, error_message)
            #return(NA) # only necessary when doing tryCatch in a function
          }
        ) # end tryCatch
      }
    } # end inner for-loop
  }
} # end outer for-loop
# Write the (possibly) filled-in report beside the input, suffixed "_filled".
saveRDS(report, file = paste0(path, "/reporttemp/", substr(file, 0, nchar(file) - 4), "_filled.Rds"))
|
1f54959ce128f03f6dc5bd3aaca0afa24b5af872
|
a1bb4782472b046285e29132ea8faee76cecc6f8
|
/R/initConst.R
|
d440b7e5a4d7a6c8334ca0a053d6d558f4d9c963
|
[] |
no_license
|
cran/orth
|
bb155ab9fc0e26274f3c5925b71e0c4090b378df
|
cc72f36de41107427c5445a9700608bb40cf5e6c
|
refs/heads/master
| 2021-01-17T05:56:39.888184
| 2011-01-25T00:00:00
| 2011-01-25T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 284
|
r
|
initConst.R
|
# Compute the pair of weighting constants (c1, c2) used by the orth
# estimating equations for a cluster of size `ni` with correlation `lambda`.
# For pairs (ni == 2) lambda contributes nothing, so c1 = 1 and c2 = 0;
# otherwise the constants depend on lambda and on ch2(ni) (defined elsewhere
# in the package).
`initConst` <- function(ni, lambda) {
  if (ni == 2) {
    return(list(c1 = 1, c2 = 0))
  }
  denom <- (1 - lambda) * (1 + (ch2(ni) - 1) * lambda)
  list(c1 = 1 / (1 - lambda), c2 = -lambda / denom)
}
|
4c48d19ac722d44ba81ecc2279ff07646f9bdcfc
|
216432e1e9cab0486bc4ea487580ffc0ae149511
|
/data_transform_completed.R
|
b2b5d1b7503a823f75cd29f2e2ecce6912122f07
|
[] |
no_license
|
chubenn/Consultation-Angie
|
1c8c573bb81e56f945ffd6be06eb2e972a36bea2
|
409735adf278e1beaea4875dc4026e5c6a4f38de
|
refs/heads/master
| 2021-09-16T23:10:09.412312
| 2018-06-25T22:39:34
| 2018-06-25T22:39:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,551
|
r
|
data_transform_completed.R
|
library(DescTools)
# Create data set with needed DVs and IVs
# Build the analysis data set from the raw survey export: mean scores for
# each instrument (MPCA 23 items, MPCAG 15, EIPSES 16, PSI-SF 36, AIRS 10)
# at three time points (suffixes none, _2, _3), plus recoded factor IVs.
# NOTE(review): dv_airs_3 sums airs1_3..airs9_3 plus the time-1 item `airs9`
# and divides by 9, unlike the other AIRS scores (ten *_k items / 10) —
# confirm whether airs10_3 and /10 were intended.
angie_mutated <- angie_complete %>%
  tbl_df %>%
  janitor::clean_names() %>%
  mutate(dv_mpca = (mpca1 + mpca2 + mpca3 + mpca4 + mpca5 + mpca6 + mpca7 + mpca8 +
                      mpca9 + mpca10 + mpca11 + mpca12 + mpca13 + mpca14 + mpca15 + mpca16 +
                      mpca17 + mpca18 + mpca19 + mpca20 + mpca21 + mpca22 + mpca23)/23,
         dv_mpcag = (mpcag1 + mpcag2 + mpcag3 + mpcag4 + mpcag5 + mpcag6 + mpcag7 +
                       mpcag8 + mpcag9 + mpcag10 + mpcag11 + mpcag12 + mpcag13 + mpcag14 + mpcag15)/15,
         dv_eipses = (eipses1 + eipses2 + eipses3 + eipses4 + eipses5 + eipses6 + eipses7 + eipses8 +
                        eipses9 + eipses10 + eipses11 + eipses12 + eipses13 + eipses14 + eipses15 + eipses16)/16,
         dv_psisf = (psisf1 + psisf2 + psisf3 + psisf4 + psisf5 + psisf6 + psisf7 + psisf8 + psisf9 +
                       psisf10 + psisf11 + psisf12 + psisf13 + psisf14 + psisf15 + psisf16 + psisf17 +psisf18 +
                       psisf19 + psisf20 + psisf21 + psisf22 + psisf23 + psisf24 + psisf25 + psisf26 + psisf27 +
                       psisf28 + psisf29 + psisf30 + psisf31 + psisf32 + psisf33 + psisf34 + psisf35 + psisf36)/36,
         dv_airs = (airs1 + airs2 + airs3 + airs4 + airs5 +
                      airs6 + airs7 + airs8 +airs9 + airs10)/10,
         dv_mpca_2 = (mpca1_2 + mpca2_2 + mpca3_2 + mpca4_2 + mpca5_2 + mpca6_2 + mpca7_2 + mpca8_2 +
                        mpca9_2 + mpca10_2 + mpca11_2 + mpca12_2 + mpca13_2 + mpca14_2 + mpca15_2 + mpca16_2 +
                        mpca17_2 + mpca18_2 + mpca19_2 + mpca20_2 + mpca21_2 + mpca22_2 + mpca23_2)/23,
         dv_mpcag_2 = (mpcag1_2 + mpcag2_2 + mpcag3_2 + mpcag4_2 + mpcag5_2 + mpcag6_2 + mpcag7_2 +
                         mpcag8_2 + mpcag9_2 + mpcag10_2 + mpcag11_2 + mpcag12_2 + mpcag13_2 + mpcag14_2 + mpcag15_2)/15,
         dv_eipses_2 = (eipses1_2 + eipses2_2 + eipses3_2 + eipses4_2 + eipses5_2 + eipses6_2 + eipses7_2 + eipses8_2 +
                          eipses9_2 + eipses10_2 + eipses11_2 + eipses12_2 + eipses13_2 + eipses14_2 + eipses15_2 + eipses16_2)/16,
         dv_psisf_2 = (psisf1_2 + psisf2_2 + psisf3_2 + psisf4_2 + psisf5_2 + psisf6_2 + psisf7_2 + psisf8_2 + psisf9_2 +
                         psisf10_2 + psisf11_2 + psisf12_2 + psisf13_2 + psisf14_2 + psisf15_2 + psisf16_2 + psisf17_2 + psisf18_2 +
                         psisf19_2 + psisf20_2 + psisf21_2 + psisf22_2 + psisf23_2 + psisf24_2 + psisf25_2 + psisf26_2 + psisf27_2 +
                         psisf28_2 + psisf29_2 + psisf30_2 + psisf31_2 + psisf32_2 + psisf33_2 + psisf34_2 + psisf35_2 + psisf36_2)/36,
         dv_airs_2 = (airs1_2 + airs2_2 + airs3_2 + airs4_2 + airs5_2 +
                        airs6_2 + airs7_2 + airs8_2 + airs9_2 + airs10_2)/10,
         dv_mpca_3 = (mpca1_3 + mpca2_3 + mpca3_3 + mpca4_3 + mpca5_3 + mpca6_3 + mpca7_3 + mpca8_3 +
                        mpca9_3 + mpca10_3 + mpca11_3 + mpca12_3 + mpca13_3 + mpca14_3 + mpca15_3 + mpca16_3 +
                        mpca17_3 + mpca18_3 + mpca19_3 + mpca20_3 + mpca21_3 + mpca22_3 + mpca23_3)/23,
         dv_mpcag_3 = (mpcag1_3 + mpcag2_3 + mpcag3_3 + mpcag4_3 + mpcag5_3 + mpcag6_3 + mpcag7_3 +
                         mpcag8_3 + mpcag9_3 + mpcag10_3 + mpcag11_3 + mpcag12_3 + mpcag13_3 + mpcag14_3 + mpcag15_3)/15,
         dv_eipses_3 = (eipses1_3 + eipses2_3 + eipses3_3 + eipses4_3 + eipses5_3 + eipses6_3 + eipses7_3 + eipses8_3 +
                          eipses9_3 + eipses10_3 + eipses11_3 + eipses12_3 + eipses13_3 + eipses14_3 + eipses15_3 + eipses16_3)/16,
         dv_psisf_3 = (psisf1_3 + psisf2_3 + psisf3_3 + psisf4_3 + psisf5_3 + psisf6_3 + psisf7_3 + psisf8_3 + psisf9_3 +
                         psisf10_3 + psisf11_3 + psisf12_3 + psisf13_3 + psisf14_3 + psisf15_3 + psisf16_3 + psisf17_3 +psisf18_3 +
                         psisf19_3 + psisf20_3 + psisf21_3 + psisf22_3 + psisf23_3 + psisf24_3 + psisf25_3 + psisf26_3 + psisf27_3 +
                         psisf28_3 + psisf29_3 + psisf30_3 + psisf31_3 + psisf32_3 + psisf33_3 + psisf34_3 + psisf35_3 + psisf36_3)/36,
         # NOTE(review): mixes `airs9` (time 1) with *_3 items; see header.
         dv_airs_3 = (airs1_3 + airs2_3 + airs3_3 + airs4_3 + airs5_3 +
                        airs6_3 + airs7_3 + airs8_3 + airs9_3 + airs9)/9,
         # Recode numeric codes into labeled factors / strings.
         iv_time = ifelse(time == 1, "pre",
                          ifelse(time == 2, "post","follow up")),
         iv_groups = ifelse(group == 1, "treatment",
                            ifelse(group == 2, "completed",
                                   ifelse(group == 3, "treatment as usual","graduate"))),
         ##FIX IV_SBC
         iv_sbcgroup = ifelse(group == 1, "sbp",
                              ifelse(group == 2, "sbp",
                                     ifelse(group == 3, "no_sbp","post_sbp"))),
         iv_teacher = (teacher),
         iv_id = (id),
         iv_language = ifelse(lang == 1, "english",
                              ifelse(lang == 2, "spanish","other")),
         iv_impserv = ifelse(impserv == 1, "no_improvement",
                             ifelse(impserv == 2, "mild_improvement", "substancial_improvement")),
         # Service history: ever received / currently receiving each service
         # (speech, behavioral, occupational, physical therapy; school-based).
         iv_ever_st = ifelse(childservst == 1, "yes","no"),
         iv_ever_bt = ifelse(childservbt == 1, "yes","no"),
         iv_ever_ot = ifelse(childservot == 1, "yes","no"),
         iv_ever_pt = ifelse(childservpt == 1, "yes","no"),
         iv_ever_sb = ifelse(childservsb == 1, "yes","no"),
         iv_curr_st = ifelse(childrecst == 1, "yes","no"),
         iv_curr_bt = ifelse(childrecbt == 1, "yes","no"),
         iv_curr_ot = ifelse(childrecot == 1, "yes","no"),
         iv_curr_pt = ifelse(childrecpt == 1, "yes","no"),
         iv_curr_sb = ifelse(childrecsb == 1, "yes","no"))
# select variables of interest
# Keep only the derived DVs (all three time points), the recoded IVs, the
# participant id, and the service-history indicators.
angie_thesis <- angie_mutated %>%
  select(dv_mpca, dv_mpcag, dv_eipses, dv_psisf, dv_airs,
         dv_mpca_2, dv_mpcag_2, dv_eipses_2, dv_psisf_2, dv_airs_2,
         dv_mpca_3, dv_mpcag_3, dv_eipses_3, dv_psisf_3, dv_airs_3,
         iv_time, iv_groups, iv_sbcgroup, iv_teacher, iv_language, iv_impserv,
         id, iv_ever_st, iv_ever_bt, iv_ever_ot, iv_ever_pt, iv_ever_sb,
         iv_curr_st, iv_curr_bt, iv_curr_ot, iv_curr_pt, iv_curr_sb)
# check for skewness and kurtosis issues
# DescTools::Skew/Kurt (method = 2) with 99% confidence intervals, applied
# to the 15 DV columns (first 15 columns of angie_thesis).
lapply(angie_thesis[1:15],Skew,method = 2, conf.level =.99)
lapply(angie_thesis[1:15],Kurt, method = 2, conf.level = .99)
# Transformations
# Print the three candidate normalizing transformations of `x` so the analyst
# can pick one by eye: square root, log10, and inverse, each on x + 1.
# With reflected = TRUE the same three are shown on the reflected values
# max(x) - x + 1 (for negatively skewed variables). Invisibly returns the
# last transformation printed. Note: this masks base::transform().
transform <- function (x, reflected = FALSE) {
  if (reflected) {
    mirrored <- max(x) - x + 1
    print("REFLECTED SQUAREROOT")
    print(sqrt(mirrored))
    print("REFLECTED LOG")
    print(log10(mirrored))
    print("REFLECTED INVERSE")
    print(1 / mirrored)
  } else {
    shifted <- x + 1
    print("SQUAREROOT")
    print(sqrt(shifted))
    print("LOG")
    print(log10(shifted))
    print("INVERSE")
    print(1 / shifted)
  }
}
# Inspect candidate transformations for dv_mpcag_3, then apply a reflected
# log10 transform to all three MPCAG scores.
# NOTE(review): the inspection call uses reflected = FALSE while the applied
# transform is the reflected log — presumably both directions were inspected
# interactively; confirm the reflected choice was deliberate.
transform(angie_thesis$dv_mpcag_3, reflected = FALSE)
angie_thesis$dv_mpcag_3 <- log10(max(angie_thesis$dv_mpcag_3) - angie_thesis$dv_mpcag_3 +1)
angie_thesis$dv_mpcag_2 <- log10(max(angie_thesis$dv_mpcag_2) - angie_thesis$dv_mpcag_2 +1)
angie_thesis$dv_mpcag <- log10(max(angie_thesis$dv_mpcag) - angie_thesis$dv_mpcag +1)
|
bedc80851090d7c0a7eeb9c3f12a19aea722124a
|
2fbd23b496681bfa89b7a61f05f05d3b5e65772d
|
/tests/testthat/test_setequal.R
|
075aa99679f6d7e0337d9cd56e9ae6ac3f9493f8
|
[
"MIT"
] |
permissive
|
numeract/Nmisc
|
95b7b8f89f2eb278572117c04825d2add2f508a3
|
17347253922b657336e6c7ba044f7361daf67506
|
refs/heads/master
| 2021-07-16T22:09:34.461956
| 2021-04-28T13:28:39
| 2021-04-28T13:28:39
| 118,459,419
| 0
| 0
|
NOASSERTION
| 2021-04-28T04:29:10
| 2018-01-22T13:16:23
|
R
|
UTF-8
|
R
| false
| false
| 977
|
r
|
test_setequal.R
|
context("Testing set-misc.R/setequal_na()")

# Contract under test: with na.rm = TRUE, NAs are discarded before the set
# comparison; with na.rm = FALSE, NA participates as a (single) set member.
test_that("set_equal_na() works when na.rm = TRUE", {
  lhs <- c(1, 2)
  rhs <- c(NA, lhs)
  expect_equal(setequal_na(lhs, rhs, na.rm = TRUE), TRUE)
})

test_that("set_equal_na() works when na.rm = FALSE", {
  lhs <- c(1, 2)
  rhs <- c(NA, lhs)
  expect_equal(setequal_na(lhs, rhs, na.rm = FALSE), FALSE)
})

test_that("set_equal_na() works when na.rm = TRUE, multiple NAs", {
  lhs <- c(1, 2, NA, NA)
  rhs <- c(NA, 1, 2)
  expect_equal(setequal_na(lhs, rhs, na.rm = TRUE), TRUE)
})

test_that("set_equal_na() works when na.rm = TRUE, only NAs", {
  lhs <- rep(NA, 2)
  rhs <- rep(NA, 3)
  expect_equal(setequal_na(lhs, rhs, na.rm = TRUE), TRUE)
})

test_that("set_equal_na() works when na.rm = FALSE, only NAs", {
  lhs <- rep(NA, 4)
  rhs <- rep(NA, 3)
  expect_equal(setequal_na(lhs, rhs, na.rm = FALSE), TRUE)
})
|
afc7d7a59ddad6c6b3f313c3eb83a7bbac0bdb32
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.security.identity/man/securityhub_get_members.Rd
|
0b76f122f21d73a19a0a45efdccc3ab5f17812dd
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,377
|
rd
|
securityhub_get_members.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_get_members}
\alias{securityhub_get_members}
\title{Returns the details for the Security Hub member accounts for the
specified account IDs}
\usage{
securityhub_get_members(AccountIds)
}
\arguments{
\item{AccountIds}{[required] The list of account IDs for the Security Hub member accounts to return
the details for.}
}
\value{
A list with the following syntax:\preformatted{list(
Members = list(
list(
AccountId = "string",
Email = "string",
MasterId = "string",
MemberStatus = "string",
InvitedAt = as.POSIXct(
"2015-01-01"
),
UpdatedAt = as.POSIXct(
"2015-01-01"
)
)
),
UnprocessedAccounts = list(
list(
AccountId = "string",
ProcessingResult = "string"
)
)
)
}
}
\description{
Returns the details for the Security Hub member accounts for the
specified account IDs.
A master account can be either a delegated Security Hub administrator
account for an organization or a master account that enabled Security
Hub manually.
The results include both member accounts that are in an organization and
accounts that were invited manually.
}
\section{Request syntax}{
\preformatted{svc$get_members(
AccountIds = list(
"string"
)
)
}
}
\keyword{internal}
|
56872bde3485373d3ebdf95e7b9fcc6985859ef1
|
481675ed5406254a953ca816a3c9778ed5457d8f
|
/tests/testthat/tests_calc_durationLow.R
|
5e8191a137c276089a30520980b5c4f38136ea77
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
dblodgett-usgs/EflowStats
|
40918982cc372a3f4618d8029601fe1f9658b35b
|
ec0e7976d2725fc99e9cbcc4e9ebaeed3dd31759
|
refs/heads/main
| 2023-03-17T12:36:59.121633
| 2023-02-04T02:06:46
| 2023-02-04T02:06:46
| 546,371,708
| 0
| 0
|
NOASSERTION
| 2022-10-06T01:23:24
| 2022-10-06T01:23:23
| null |
UTF-8
|
R
| false
| false
| 1,345
|
r
|
tests_calc_durationLow.R
|
context("duration low")

# Regression tests for calc_durationLow(): results are pinned against saved
# .rds fixtures for the packaged sampleData and a stored NWIS sample, under
# both pref = "mean" and pref = "median".
# NOTE(review): for sampleData both pref values are compared to the same
# fixture (tests_calc_durationLow.rds) — confirm mean and median agree there.
test_that("duration low pref mean", {
  x<-sampleData[c("date","discharge")]
  calc_durationLowTest <- calc_durationLow(x=x,yearType="water",pref = "mean")
  calc_durationLowTestCheck <- readRDS("data/tests_calc_durationLow.rds")
  expect_equal(calc_durationLowTest,calc_durationLowTestCheck)

  x <- readRDS("data/sample_nwis_data.rds")
  calc_durationLowTest <- calc_durationLow(x=x,yearType="water",pref = "mean")
  calc_durationLowTestCheck <- readRDS("data/tests_calc_durationLow_nwis_mean.rds")
  expect_equal(calc_durationLowTest,calc_durationLowTestCheck)
})

test_that("duration low pref median", {
  x<-sampleData[c("date","discharge")]
  calc_durationLowTest <- calc_durationLow(x=x,yearType="water", pref = "median")
  calc_durationLowTestCheck <- readRDS("data/tests_calc_durationLow.rds")
  expect_equal(calc_durationLowTest,calc_durationLowTestCheck)

  x <- readRDS("data/sample_nwis_data.rds")
  calc_durationLowTest <- calc_durationLow(x=x,yearType="water", pref = "median")
  calc_durationLowTestCheck <- readRDS("data/tests_calc_durationLow_nwis_median.rds")
  expect_equal(calc_durationLowTest,calc_durationLowTestCheck)
})
|
11cbd7f6b0195be67ccf70070a5dd65f5e064421
|
ed261861b636b60b63bc6f86a8914511bf35d1f1
|
/fstatistic/ui.R
|
aae684998fc057001d715c37d92869aaa1a012e5
|
[] |
no_license
|
jmarshallnz/shinystats
|
169614ce2d39dcdef614cdd10b553a058156b1e4
|
df3d836bf649b9b32abc645269205af6c28928d2
|
refs/heads/master
| 2021-08-08T13:01:54.755636
| 2021-07-18T07:33:34
| 2021-07-18T07:33:34
| 140,226,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 589
|
r
|
ui.R
|
library(shiny)

# F-statistic demo UI: the generated data scatterplot ("data") and the F
# distribution ("fdist") side by side, with sliders controlling sample size,
# strength of the relationship, and residual variation, plus a button that
# triggers simulation of a fresh data set (all handled in server.R).
shinyUI(fluidPage(
  # Show a plot of the generated distribution
  fluidRow(
    column(width=6, plotOutput("data")),
    column(width=6, plotOutput("fdist"))
  ),
  fluidRow(
    column(width=3, sliderInput("n", "Sample size", min=10, max=100, value=30, step=1)),
    column(width=3, sliderInput("strength", "Strength of relationship", min=-1, max=1, value=0, step=0.05)),
    column(width=3, sliderInput("resid", "Residual variation", min=0.01, max=1, value=0.5, step=0.01)),
    column(width=3, br(),
           actionButton("new_data", "New data set"))
  )
))
|
44c5bfa43bb96fcc3cddd7524cf74bbb45b66a7e
|
293fd8c290c52e106b62f029d1b36b56726a7448
|
/R/old/models_av.R
|
bf371f559491555c222462dfd98658ffef45f354
|
[] |
no_license
|
kmeier1633/PredictiveProjectADDK
|
e9222c415e9f85154477b635c03588d75be7af24
|
da41cb6a84a2da8f20ceac5e4d0d5f892ab8c7d9
|
refs/heads/master
| 2021-01-12T04:50:37.552550
| 2016-12-08T06:06:05
| 2016-12-08T06:06:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,563
|
r
|
models_av.R
|
#install.packages('ROCR')
library(ROCR)
library(pROC)
#The logistic regression models are the same as models_km. My regression code is further below
who = "adit"
# Set appropriate file address
if(who=="kristin"){
address <- '/Users/kmeier92/Documents/Northwestern/fall2016/Predictive_Analytics/PredictiveProjectADDK/'
}
if(who=="adit"){
address <- '/Users/arvenkat/Documents/MSIA401/PredictiveProjectADDK/'
}
if(who=="dylan"){
address <- 'C:/Users/Dylan/Documents/Northwestern/Predictive Analytics I/PROJECT/'
}
if(who=="dustin"){
address <- 'C:/Users/Dustin/Documents/Northwestern/Predictive Analytics 1/PredictiveProjectADDK/'
}
setwd(address)
# source R code that cleans the data as needed
source(paste(address,'R/cleandata.R',sep=""))
# functions auc & ccr
source(paste(address,'R/helper.R',sep=""))
# call function to create the 2 datasets
thedata <- cleandata(dropNA=T)
donTRAINING_orig <- thedata$train
donTEST_orig <- thedata$test
##############################################
# LOGISTIC REGRESSION MODEL
##############################################
# fit basic logistic regression model
# https://www.r-bloggers.com/evaluating-logistic-regression-models/
# Split into TRAINING and TEST sets
# Remove some columns
# "CNDOL1" "CNTRLIF" "CONLARG" "CONTRFST" "CNCOD1" "CNCOD2" "CNCOD3" "CNDAT1"
# "CNDAT2" "CNDAT3" "CNDOL2" "CNDOL3" "CNTMLIF" "SLCOD1" "SLCOD2" "SLCOD3"
# "TARGDOL" "STATCODE" "SEX" "CNMON1" "CNMON2" "CNMON3" "CNMONF" "CNMONL"
# "ID" "ContType1" "ContType2" "ContType3" "SolType1" "SolType2" "SolType3" "Region"
# "avg" "avgTime" "don2" "don3" "donated"
# QUAD TERMS
# "sq_CNDOL1" "sq_CNTRLIF" "sq_CONLARG" "sq_CONTRFST" "sq_CNCOD1" "sq_CNCOD2" "sq_CNCOD3" "sq_CNDAT1"
# "sq_CNDAT2" "sq_CNDAT3" "sq_CNDOL2" "sq_CNDOL3" "sq_CNTMLIF" "sq_SLCOD1" "sq_SLCOD2" "sq_SLCOD3"
# "sq_CNMON1" "sq_CNMON2" "sq_CNMON3" "sq_CNMONF" "sq_CNMONL" "sq_avg" "sq_avgTime" "sq_don2"
# "sq_don3"
# Columns to exclude before modelling.
# BUG FIX: the original vector contained stray spaces and embedded newlines
# inside several names (e.g. " SLCOD1", a string spanning two lines), so those
# entries could never match names() of the data frame; cleaned here.
dropcols <- c("STATCODE", "TARGDOL", "ID", "CNDAT2", "CNDAT3", "CNCOD1",
              "CNCOD2", "CNCOD3", "SLCOD1", "SLCOD2", "SLCOD3", "CNMON2",
              "CNMON3", "SolType2", "SolType3", "ContType2", "ContType3",
              "Region")
# what to do regression on
# Candidate predictors for the logistic model.
# BUG FIX: the original strings contained embedded newlines/spaces
# (e.g. "\nCNDOL3"), which silently failed to match column names; cleaned here.
keepcols <- c("donated", "CNDOL1", "CNTRLIF", "CONLARG", "CONTRFST", "CNDAT1",
              "CNDOL2", "CNDOL3", "CNTMLIF", "SLCOD1", "SLCOD2", "SEX",
              "CNMON1", "CNMON2", "CNMON3", "CNMONF", "CNMONL", "ContType1",
              "SolType1", "SolType2", "SolType3", "avg", "avgTime", "don2",
              "don3")
# The original immediately redefined keepcols with the same set plus two
# interaction terms; expressed here as an extension of the base set.
keepcols <- c(keepcols, "CNDOL3_don3", "CNDOL2_don2")
## you can either have: DATES OF CONTRIBUTION or MONTHS SINCE LATEST CONTRIBUTION
## since all data is using 1 date as current date. these are redundant
donTRAINING <- donTRAINING_orig[,(names(donTRAINING_orig) %in% keepcols)]
donTEST <- donTEST_orig[,(names(donTEST_orig) %in% keepcols)]
### BASICALLY WE WANT TO KEEP TRACK OF THE VARIABLES WE USED
### WHAT THE AUC IS
### CORRECT RATE
### # SIG PREDICTORS
### # TOTAL PREDICTORS
###
### ASSUME WE ARE USING THIS AS A MEASURE OF 'BEST MODEL'
logModel <- glm(donated ~ . , data = donTRAINING, family=binomial)
sum.mod <- summary(logModel)
num.pred <- nrow(sum.mod$coefficients)
auc(model=logModel) #0.7767594
ccr(model=logModel) #0.732779
##########################
# BACKWARDS STEPWISE
##########################
logModel <- glm(donated ~ . , data = donTRAINING, family=binomial)
backwards = step(logModel)
summary(backwards)
auc(model=backwards) #0.7767395
ccr(model=backwards) #0.7320231
# start
'
donated ~ CNDOL1 + CNTRLIF + CONLARG + CONTRFST + CNDAT1 + CNDOL2 +
CNDOL3 + CNTMLIF + SLCOD1 + SLCOD2 + SEX + CNMON1 + CNMON2 +
CNMON3 + CNMONF + CNMONL + ContType1 + SolType1 + SolType2 +
SolType3 + avg + avgTime + don2 + don3
'
# final
'
donated ~ CNDOL1 + CNTRLIF + CONLARG + CONTRFST + CNDAT1 + CNDOL2 +
CNTMLIF + SLCOD2 + SEX + CNMON2 + CNMON3 + CNMONF + CNMONL +
ContType1 + SolType1 + SolType3 + avg + avgTime
'
##########################
# FORWARD STEPWISE
##########################
nothing <- glm(donated ~ 1, data = donTRAINING, family=binomial)
forwards = step(nothing,
scope=list(lower=formula(nothing),upper=formula(logModel)),
direction="forward")
summary(forwards)
# final
'
donated ~ CNMON2 + CNMONL + CNDAT1 + CNMON3 + ContType1 + CONTRFST +
SolType3 + CNTMLIF + CNMONF + CNDOL1 + CONLARG + SLCOD2 +
CNTRLIF + CNDOL2 + avg + avgTime + SolType1 + SEX
'
auc(model=forwards) #0.7767682
ccr(model=forwards) #0.7310781
######################################################
# DO FORWARD AND BACKWARD W/ ALL QUADRATIC AND INTERACTION TERMS AS OPTIONS...
# MAY TAKE A WHILE BUT WHY NOT?
#########################################################################
# FULL MODEL CURRENTLY
'donated ~ CNDOL1 + CNTRLIF + CONLARG + CONTRFST + CNDAT1 + CNDOL2 +
CNDOL3 + CNTMLIF + SLCOD1 + SLCOD2 + SEX + CNMON1 + CNMON2 +
CNMON3 + CNMONF + CNMONL + ContType1 + SolType1 + SolType2 +
SolType3 + avg + avgTime + don2 + don3
'
logModel2 <- glm(donated ~ . , data = donTRAINING_orig, family=binomial)
####################################################
# STOP HERE FOR NOW...ADDED QUADRATIC AND INTERACTION TERMS TO THE CLEANDATA.R
####################################################
#add quadratic terms and then use information gain (dylan)
donTRAINING2 <- addSecondDegree(donTRAINING)
donTEST <- addSecondDegree(donTEST)
logModel3 <- glm(donated ~ . , data = donTRAINING2, family=binomial)
auc(model=logModel3) #0.7952067
ccr(model=logModel3) #0.7419446
#pick best features using information gain
weights <- information.gain(donated~., donTRAINING2)
subset <- cutoff.k(weights, 200)
f <- as.simple.formula(subset, "donated")
logModel4 <- glm(f, data = donTRAINING2, family=binomial)
auc(model=logModel4) #0.7899999
ccr(model=logModel4) #0.7361807
##############################################
# Second Order Model with some interactions included, attempting to maximize AUC
##############################################
logModel2 <- glm(donated ~ . + (CNMONL+CNTMLIF+CNMONF+CNMON1+CNDOL1+ContType1+CONLARG+CNTRLIF+SolType1)^2, data = donTRAINING, family=binomial)
p2 <- predict(logModel2, newdata=donTEST, type="response")
pr2 <- prediction(p2, donTEST$donated)
prf2 <- performance(pr2, measure = "auc")
prf2@y.values[[1]]
# AUC 0.7876269
##############################################
#fit multiple regression model for predicting donation amount
##############################################
#split into training and test set again
donSET2 <- subset(donData, select=-c(STATCODE,ID,CNDAT1,CNDAT2,CNDAT3,CNCOD1,CNCOD2,CNCOD3, SLCOD1,SLCOD2,SLCOD3, CNMON2, CNMON3, SolType2, SolType3,ContType2,ContType3))
donTRAINING2 <- donSET2[-TESTindices,]
donTEST2 <- donSET2[TESTindices,]
#filter data to only people that donated
donTRAINING2 <- donTRAINING2[donTRAINING2$TARGDOL > 0,]
#fit multiple regression model
mrModel <- lm(TARGDOL ~ ., data=donTRAINING2)
summary(mrModel)
##############################################
#Calculate expected donation for each person
##############################################
#get probability that each person donates from logistic model
donData$prob <- predict.glm(logModel, newdata=donData, type="response")
#get guess of each person's donation from multiple regression model
donData$donGuess <- predict.lm(mrModel, newdata=donData, na.action = na.pass)
donData$donGuess <- ifelse(donData$donGuess < 0, 0, donData$donGuess)
#get expected value of each person's donation
donData$expDon <- donData$prob * donData$donGuess
summary(donData$expDon)
#### CALCULATE RMSE ####
# BUG FIX: the original line was syntactically invalid:
#   rmse.model <- sqrt(mean(()^2)))
# Assuming the intent was the RMSE of the donation-amount model (mrModel)
# on the held-out set donTEST2 -- TODO confirm the intended actual/predicted
# pair before relying on this number.
rmse.model <- sqrt(mean((donTEST2$TARGDOL - predict(mrModel, newdata = donTEST2))^2,
                        na.rm = TRUE))
##############################################
#SCRATCH WORK
##############################################
#add second degree terms to dataframe
z <- 0
numVars <- length(names(donTRAINING))-1
for (i in 1:numVars) {
for (j in 1:i) {
z <- z + 1
assign(paste("col", z, sep = '_'), donTRAINING[,i]*donTRAINING[,j])
donTRAINING <- cbind(donTRAINING, eval(as.name(paste('col_',z,sep=''))))
assign(paste("col2", z, sep = '_'), donTEST[,i]*donTEST[,j])
donTEST <- cbind(donTEST, eval(as.name(paste('col2_',z,sep=''))))
}
}
names(donTRAINING)[24:276] <- seq(24,276,by=1)
names(donTEST)[24:276] <- seq(24,276,by=1)
#remove columns with 1 level
for (x in names(donTRAINING)) {
if (length(unique(donTRAINING[,x]))==1) {
donTRAINING[,x] <- NULL
donTEST[,x] <- NULL
}
}
logModel2 <- glm(donated ~ . , data = donTRAINING)
library(ROCR)
# make predictions on TEST set then give AUC
p <- predict(logModel2, newdata=donTEST, type="response")
pr <- prediction(p, donTEST$donated)
prf <- performance(pr, measure = "auc")
prf@y.values[[1]]
#look at importance of some features
logModel2 <- glm(donated ~ . + (CNMONL+CNTMLIF+CNMONF+CNMON1+CNDOL1+ContType1+CONLARG+CNTRLIF+SolType1)^2, data = donTRAINING, family=binomial)
#install.packages('caret')
library(caret)
# idk exactly what this calculates, but most the variables deemed most important seem to make sense
x<-varImp(logModel, scale = FALSE)
x$variableName <- rownames(x)
x[with(x,order(-Overall)),]
|
90a2d32ddc3687aff8918495d0a8558b5f2c9411
|
837a3177789464eabb12b7abfb12e8621feb71fb
|
/(8)Fishing/setUpScenario_ts_files_versionB1.R
|
d9d301c9cb33fbd09600df57d8bfead836241f75
|
[] |
no_license
|
mcgregorv/AtlantisRscripts
|
21140225d43ba583a1bebc70557c8cb5f61b9b5c
|
1009f0d1961fc95bc4a98d25eea7dc1d7cccee77
|
refs/heads/master
| 2020-07-11T14:20:34.142538
| 2019-08-26T22:22:13
| 2019-08-26T22:22:13
| 204,566,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,172
|
r
|
setUpScenario_ts_files_versionB1.R
|
#read in last 5 year average (total per year) catches - these were created in PlotHistoricForcedCatches_part2
#scale the value from the last year (up or down depending on scenario) and proportion it spatially based on last 5 year average proportions
this_path<-paste(DIR$'Base',"ATLANTISmodels\\",sep="")
catchPath<-paste(this_path,"inputs\\catch_history\\catchts\\",sep="")
groupsDF<-read.csv(paste(this_path,"CRAM_groups.csv",sep=""))
catchGroupsDF<-groupsDF[groupsDF$IsFished==1,]; ncg<-dim(catchGroupsDF)[1]
# version<-"FS_A1"
version<-"FS_B1" #just like A1, but starts 20 years in
X_CN<-5.7
mg_2_tonne<-2e-8
newStartYear<-1865; prevStartYear<-1900 #prevstartyear is the year in the historic catch ts files
allBoxes<-0:29; dynBoxes<-1:24
last5YearAve<-read.csv(paste(DIR$'Tables',"CatchHist_last5YearAverage.csv",sep=""))[,1]
aveBoxProp<-read.csv(paste(DIR$'Tables',"CatchHist_last5YearPropByBox.csv",sep=""))/(1e+3) ## these are in kg, convert to tonnes
# thisScenario<-"Base"; thisScenarioName<-"Base"; thisScalar<-1; thisGroups<-catchGroupsDF$Code
scenarioScalars<-c(0,1,0.5,0.6,0.7,0.8,0.5,0.4,0.3,0.2,1.2,1.3,1.4,1.5,1.2,1.3,1.4,1.5); nScenarios<-length(scenarioScalars)
scenarioGroups<-c(rep("All", 6),rep("Hoki",8),rep("All",4))
scenarioCodes<-mapply(FUN=function(x,y){paste(x,y*100, "catch",sep="")},x=scenarioGroups,y=scenarioScalars)
scenarioNames<-mapply(FUN=function(x,y){paste(x," ",y*100, "% catch",sep="")},x=scenarioGroups,y=scenarioScalars)
scenarioDF<-data.frame(cbind(scenarioCodes,scenarioNames,scenarioScalars,scenarioGroups))
nScenarios<-dim(scenarioDF)[1]
scenario_nyears<-50
scenario_years<-seq(newStartYear,newStartYear+scenario_nyears)
scenario_months<-seq(0,scenario_nyears*12)
#set up timestep values in seconds from start day.
numSecsPerMonth<-60*60*24*30.5
scenario_seconds<-scenario_months*numSecsPerMonth
for(s in 1:nScenarios){
thisScenario<-scenarioDF$scenarioCodes[s]
cat(thisScenario," -- ")
thisScalar<-as.double(as.character(scenarioDF$scenarioScalars[s]))
thisGroups<-catchGroupsDF$Code
if(scenarioDF$scenarioGroups[s] != "All"){thisGroups<-"HOK"}
#first set up base case with 5 year average catch and 5 year average proportions
newCatchArray<-0*aveBoxProp
for(g in 1:ncg){
thisCode<-catchGroupsDF$Code[g]
newCatchArray[,g]<-aveBoxProp[,g]*last5YearAve[g]
if(thisCode %in% thisGroups){
newCatchArray[,g]<-aveBoxProp[,g]*last5YearAve[g]*thisScalar
}
}
baseFolder<-paste(catchPath,"..\\catchts_",version,thisScenario,"\\",sep="")
dir.create(baseFolder)
#read in one of the historic catch ts files to edit for the new ones
thisTSfile<-paste(catchPath,"catch1.ts",sep="")
thisTempLines<-readLines(thisTSfile)
thisTSlines<-thisTempLines[grep("#", thisTempLines, invert = TRUE)]
newTSlines<-thisTempLines
#replace start year
x<-grep("seconds since",newTSlines)
newTSlines[x]<-gsub(prevStartYear,newStartYear,newTSlines[x])
#only want to keep the bits that start with # as replacing the other lines
newTSlines<-newTSlines[grep("^#",newTSlines)]
for(b in allBoxes){
this_tsFileName<-paste(baseFolder,"catch",b,".ts",sep="")
if(b==0){
this_tsFileName<-paste(baseFolder,"boundary.ts",sep="")
}
if(b %in% dynBoxes){
thisData<-newCatchArray[b,]
} else{
thisData<-0*newCatchArray[1,]
}
#turn thisData into mg N caught per second
x<-as.double(thisData)/12
convertedData<-(x/(mg_2_tonne * X_CN))/numSecsPerMonth
writeLines(newTSlines,this_tsFileName)
for(t in 1:length(scenario_seconds)){
cat(scenario_seconds[t],"\t",file=this_tsFileName,append=TRUE)
cat(as.double(convertedData),file=this_tsFileName,append=TRUE)
cat("\n",file=this_tsFileName,append=TRUE)
}
}
}
#create the run file
runFile<-paste(this_path,"base\\RunFishScenarios_",version,"",sep="")
runText<-paste("#Run multiple fish scenarios ",version,sep="")
cat(runText,file=runFile,append=FALSE)
existingForceFile<-"inputs/CRAM_force.prm"
thisInitialConditionsFile<-"CRAM_input_short_from_PreSENS2_800yr.nc" #can change this to one creates from outputs of another run
for(s in 1:nScenarios){
thisCode<-scenarioDF$scenarioCodes[s]; thisScenario<-scenarioDF$scenarioCodes[s]
#create new forcing file, which points to the appropriate catch ts files
thisForceFile<-paste("inputs/CRAM_force_",version,thisCode,".prm",sep="")
file.copy(paste(this_path,existingForceFile,sep=""), paste(this_path,thisForceFile,sep=""), overwrite = TRUE)
thisForceLines<-readLines(paste(this_path,thisForceFile,sep=""))
thisCatchFolder<-paste("catchts_",version,thisScenario,sep="")
newForceLines<-gsub("catchts", thisCatchFolder,thisForceLines)
writeLines(newForceLines,paste(this_path,thisForceFile,sep=""))
thisOutFolder<-paste(version,thisCode,sep="")
# Build the MOAB batch-submission text for this scenario and append it to the
# run file.
# BUG FIX: the echo line interpolated an undefined variable `p`
# (paste(...,version,p,...)), which would error at runtime; the scenario code
# is used instead -- TODO confirm the intended label.
runText<-paste("\nWD=\"$(pwd)\"\nRUN=\"../../bin/bin/atlantisMerged -i ", thisInitialConditionsFile," 0 -o output.nc -r CRAM_baseFish_run_short.prm -f ", thisForceFile, " -p inputs/CRAM_physics.prm -b CRAM_BH_hybrid_biol.prm -h CRAM_harvest_short.prm -s CRAM_Groups.csv -q CRAM_Fisheries.csv -d base/output",thisOutFolder,"\"\necho $RUN > RUN\nCMD=\"msub -l nodes=1 -l walltime=50:00:00 -l partition=slurm -l qos=standby -p -1000 -q large -o CRAM.log.%j -e CRAMfishscen",version,".err.%j -S /bin/bash RUN\"\necho \"Running Atlantis ",version,thisCode," for CRAM on MOAB in directory:\" $WD\necho -n \"Job started at: \" ; date\necho $RUN\nCOMMAND=\"cd $WD ; $CMD\"\nssh turbine $COMMAND\nsleep 0.5",sep="")
cat(runText,file=runFile,append=TRUE)
}
## this should turn all files in a folder to unix files
# find . -type f -print0 | xargs -0 dos2unix
#create run file to turn all files created here to unix
thisFile<-paste(this_path,"Turn2dos_",version,sep="")
cat("",file=thisFile,append=FALSE)
for(s in 1:nScenarios){
thisCode<-scenarioDF$scenarioCodes[s];
thisForceFile<-paste("CRAM_force_",version,thisCode,".prm",sep="")
cat("dos2unix",thisForceFile,"\n",file=thisFile, append = TRUE)
}
|
44da21d52f8a2a62ba562574a06c6695558cd684
|
722281e3bddbef275d0e165f58c15077f91741aa
|
/R/read.R
|
6cf6779055b15fa50f18bb2d10c81164ff94f7e4
|
[] |
no_license
|
heibl/ips
|
ba824e8d2c185e22ad81c2d85d7e6368b9017f5d
|
647ba4c9ae1104b649cacc901c527de0207b7775
|
refs/heads/master
| 2022-02-08T16:08:03.520858
| 2022-02-06T14:54:58
| 2022-02-06T14:54:58
| 50,670,472
| 9
| 3
| null | 2018-01-18T09:19:30
| 2016-01-29T15:18:38
|
R
|
UTF-8
|
R
| false
| false
| 1,346
|
r
|
read.R
|
#' @title Reading Sequence Files
#' @description Read DNA and amino acid sequences from FASTA, PHILIP, and NEXUS
#' formatted files.
#' @param x A character string, giving the file name.
#' @param text A character string in FASTA format.
#' @return An matrix (aligned sequences) or list (unaligned sequences) of class
#' \code{DNAbin} or \code{AAbin}.
#' @references Maddison, D.R., D.L. Swofford, and W.P. Maddison. 1997. NEXUS: an
#' extensible file format for systematic information. \emph{Syst. Biol.}
#' \bold{46}: 590-621.
#' @seealso \code{\link{mafft}} and \code{\link{prank}} for sequence alignment,
#' \code{\link{gblocks}} and \code{\link{aliscore}} for quality check and
#' cleaning of sequence alignments, \code{\link{cbind.DNAbin}} for
#' concatenation of sequence alignments.
#' @examples
#' ## bark beetle COX1 sequences
#' data(ips.cox1)
#' ## create temporary file names
#' format <- c(".fas", ".phy", ".nex")
#' fn <- sapply(format, tempfile,
#' pattern = "ips", tmpdir = tempdir())
#' ## write sequences files
#' write.fas(ips.cox1, fn[".fas"])
#' write.phy(ips.cox1, fn[".phy"])
#' write.nex(ips.cox1, fn[".nex"])
#' ## read sequence files
#' fas <- read.fas(fn[".fas"])
#' phy <- read.phy(fn[".phy"])
#' nex <- read.nex(fn[".nex"])
#' ## remove sequence files
#'unlink(fn)
#' @name read
NULL
|
1b089be31d4012da55b6c8edcd45bd24bd944f08
|
3632532c83f0058a489d3299e6b6670165088e67
|
/server.R
|
0726f488f9d75b50b72031b45307edbd452de690
|
[] |
no_license
|
kggen/next-word-app
|
9f89c6bb2e090194fb389ae646f8e521e506ffec
|
5ff4902589aa2d82c4e513c657dc99f0f2eef950
|
refs/heads/master
| 2020-04-21T16:27:34.358454
| 2019-02-08T08:09:52
| 2019-02-08T08:09:52
| 169,702,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
server.R
|
library(shiny)
# Prediction.R must define nextWord(); it is sourced into this session.
source("Prediction.R")
# Server for a next-word-prediction app: echoes the query and shows the
# top three predicted continuations.
shinyServer(
function(input, output){
# Reactive: recomputes predictions whenever the text input changes.
# Assumes nextWord() returns a character vector of at least 3
# candidates -- TODO confirm against Prediction.R.
result <- reactive({
nextWord(input$query)
})
output$query <- renderPrint({input$query})
# Top three candidates, one output slot each.
output$recommendation_1 <- renderText({result()[1]})
output$recommendation_2 <- renderText({result()[2]})
output$recommendation_3 <- renderText({result()[3]})
}
)
|
10d0b0a71e01b249c66098d99e862e1b0e3bfafd
|
fd2bf6d71e00c84e16814fa8fc41c35d52e0752b
|
/fancy-scientific .R
|
35330bfb24e2eaf76028da960e1f72d8c6c30125
|
[] |
no_license
|
BTDangelo/Function-Archive
|
b9a6a5538e2dd56043b60d0ad7fe58286fb5ec9a
|
20cf00f19a5999ac4c6fab01f4cf68288fccd1b4
|
refs/heads/master
| 2020-12-02T18:04:59.106707
| 2017-08-09T20:05:06
| 2017-08-09T20:05:06
| 96,469,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
fancy-scientific .R
|
## Change the Format of Numbers on Axis with ggplot
fancy_scientific <- function(l) {
  ## Axis-label formatter for ggplot scales: renders numbers as powers of
  ## ten (e.g. 1e+05 becomes the plotmath expression 10^+05).
  ## Adapted from:
  ## http://stackoverflow.com/questions/11610377/how-do-i-change-the-formatting-of-numbers-on-an-axis-with-ggplot/24241954
  sci <- format(l, scientific = TRUE)  # scientific-notation strings
  pow <- gsub("^.*e", "10^", sci)      # drop mantissa+'e', prefix '10^'
  parse(text = pow)                    # return as plotmath expressions
}
|
f75e03627ee351fe1576fb0a6964ba705da95ba8
|
2c2217ea029932600f417f54f9f3defc63459376
|
/Gam smoothing.R
|
588d38124a0fb8203020ae9fb86d074eb84ca499
|
[] |
no_license
|
yangxhcaf/EWSR-Nature-Communications-2019
|
06c5f02f1e43688799171c5b37dc33c6a8f6fc45
|
fc72d2414aec794fdf1df9c79d5596a0969385e3
|
refs/heads/master
| 2021-10-25T16:22:40.130225
| 2019-04-05T11:19:23
| 2019-04-05T11:19:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,518
|
r
|
Gam smoothing.R
|
###########################################################################################
library(mgcv)
eps <- 1e-7 ## finite difference interval
##functino to calculate the slopes, derivatives and ci's of the data, based on the Gam method from Burthe et al.:
# Locate `x` relative to a confidence band [L.CI, U.CI] (vectorised):
#   0  -> x lies inside the band
#   2  -> the band sits entirely above x
#  -2  -> the band sits entirely below x
is.there <- function(x = 0, L.CI, U.CI) {
  above <- ifelse(x < U.CI, 1, -1)
  below <- ifelse(x > L.CI, -1, 1)
  above + below
}
##round(length(timeseries)/4)
# Fit a GAM smoother to a time series and return the fitted values together
# with finite-difference first derivatives and pointwise ~95% CIs
# (method of Burthe et al.; requires mgcv).
#
# Args:
#   years:      numeric vector of time points
#   timeseries: response values; a log link is used when all values are > 0
#   knots:      basis dimension `k` for the cubic shrinkage spline
#   eps:        finite-difference step for the derivative. Previously read
#               from a global `eps` (set to 1e-7 above); now a parameter with
#               the same default, so existing calls behave identically.
#
# Returns a data.frame with columns: years, deriv (dfit/dt), U.CI, L.CI,
# sign (+/-1 where the derivative CI excludes zero, 0 otherwise), and fit.
gam_smoothing <- function(years, timeseries, knots, eps = 1e-7) {
  # Log link only when the response is strictly positive.
  if (length(which(timeseries <= 0)) == 0) {
    gam1 <- gam(timeseries ~ s(as.vector(years), bs = "cs", k = knots),
                family = gaussian(link = "log"))
  } else {
    gam1 <- gam(timeseries ~ s(as.vector(years), bs = "cs", k = knots),
                family = gaussian)
  }
  time.series.fit <- predict(gam1, newdata = data.frame(years = years), type = "response")
  # Finite-difference derivative of the smooth via the linear-predictor
  # matrix evaluated at t and t + eps.
  X0 <- predict(gam1, newdata = data.frame(years = years), type = "lpmatrix")
  X1 <- predict(gam1, newdata = data.frame(years = years + eps), type = "lpmatrix")
  Xi <- (X1 - X0) / eps
  df <- Xi %*% coef(gam1)                   # ith smooth derivative
  df.sd <- rowSums(Xi %*% gam1$Vp * Xi)^.5  # cheap diag(Xi %*% Vp %*% t(Xi))^.5
  splines <- data.frame(years = years, deriv = df,
                        U.CI = df + 2 * df.sd, L.CI = df - 2 * df.sd)
  # +/-1 where the derivative CI excludes zero (see is.there(), defined above).
  splines$sign <- is.there(0, splines$L.CI, splines$U.CI) / 2
  splines$fit <- time.series.fit
  return(splines)
}
###########################################################################################
|
0943895293e190c175a3d97801482896ae27a10b
|
f4d86d015400a9972f9b9c38b02913ba4162a50b
|
/R/pca_drivers.R
|
4a1e2b30c9977c1ad493acfc3b4b639d3d92034d
|
[] |
no_license
|
aidanmacnamara/epiChoose
|
2ba4e6c6348bf763a40edd7e1285098e1bc7a14a
|
11cbb78cf063afa767943c8d6e0779f451317550
|
refs/heads/master
| 2021-12-22T13:12:04.352433
| 2021-12-10T07:33:51
| 2021-12-10T07:33:51
| 83,414,546
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,033
|
r
|
pca_drivers.R
|
#' @title Identify the columns driving a principal component
#' @description Runs a PCA on `dat` (rows = samples, columns = regions) after
#'   removing all-`NA` rows, zero-variance columns, and all-`NA` columns, then
#'   ranks columns by the absolute loading on the requested component.
#' @details Column indices refer to the filtered matrix, which may differ from
#'   `dat` when columns were dropped. NOTE(review): columns containing some
#'   (but not all) `NA` values make `sd(x) == 0` return `NA` and may propagate
#'   `NA` into the column filter, as in the original code -- verify inputs.
#' @param dat PCA matrix
#' @param which_pc which principal component's loadings to rank (default 1)
#' @param thresh number of top-loading columns to return (default 5000)
#' @return Integer vector of the `thresh` column indices with the largest
#'   absolute loadings, in decreasing order.
pca_drivers <- function(dat, which_pc=1, thresh=5000) {
  remove_rows <- which(apply(dat, 1, function(x) all(is.na(x)))) # remove samples with no data
  if(length(remove_rows)) {
    dat_na_rm <- dat[-remove_rows,]
  } else {
    dat_na_rm <- dat
  }
  print(paste("Data has", length(remove_rows), "rows removed."))
  # Remove regions with no variance, then regions with no data at all.
  dat_na_rm <- dat_na_rm[,!apply(dat_na_rm, 2, function(x) sd(x)==0)]
  dat_na_rm <- dat_na_rm[,!apply(is.na(dat_na_rm), 2, all)]
  pca_res <- prcomp(dat_na_rm, scale=TRUE, center=TRUE)
  # BUG FIX: loadings were previously hard-coded to PC1 (rotation[,1]),
  # silently ignoring the `which_pc` argument; use the requested component.
  res_l <- abs(pca_res$rotation[,which_pc])
  mask_ix <- head(order(res_l, decreasing=TRUE), thresh)
  return(mask_ix)
}
|
0d46b35da8f48d6c6329a872e65c130b11636a5d
|
32be028fb64990e51f7bc0b16b7efc0c52728a66
|
/data/create_learning2014.R
|
2f6af12c07a0284b67182a9763b6df2b5efd2948
|
[] |
no_license
|
isoakar/IODS-project
|
52320e9be1c9e26e3cb7f03a0946037d64e28fba
|
b0869eefd76003f10c5d6ff725d85d42a7504ffc
|
refs/heads/master
| 2021-08-14T13:21:11.470180
| 2017-11-15T19:47:43
| 2017-11-15T19:47:43
| 109,665,740
| 0
| 0
| null | 2017-11-06T08:10:18
| 2017-11-06T08:10:18
| null |
UTF-8
|
R
| false
| false
| 2,246
|
r
|
create_learning2014.R
|
# Karoliina Isoaho, Nov 9th 2017
# R code for the data wrangling exercise.

# BUG FIX: dplyr's select()/filter()/one_of() were called below without the
# package ever being loaded; make the dependency explicit.
library(dplyr)

# Read the full learning data (tab-separated, with a header row).
lrn14 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", sep="\t", header=TRUE)

# Explore structure and dimensions: 183 rows and 60 columns.
str(lrn14)
dim(lrn14)

# Questions belonging to the deep, surface and strategic learning scales.
deep_questions <- c("D03", "D11", "D19", "D27", "D07", "D14", "D22", "D30","D06", "D15", "D23", "D31")
surface_questions <- c("SU02","SU10","SU18","SU26", "SU05","SU13","SU21","SU29","SU08","SU16","SU24","SU32")
strategic_questions <- c("ST01","ST09","ST17","ST25","ST04","ST12","ST20","ST28")

# Average each question group into one scale score per student.
deep_columns <- select(lrn14, one_of(deep_questions))
lrn14$deep <- rowMeans(deep_columns)
surface_columns <- select(lrn14, one_of(surface_questions))
lrn14$surf <- rowMeans(surface_columns)
strategic_columns <- select(lrn14, one_of(strategic_questions))
lrn14$stra <- rowMeans(strategic_columns)

# Keep only the analysis variables.
keep_columns <- c("gender","Age","Attitude", "deep", "stra", "surf", "Points")
learning2014 <- select(lrn14, one_of(keep_columns))

# Keep students with a positive exam score.
# BUG FIX: the column is "Points" (capitalised); the original filtered on a
# non-existent `points` column, which errors.
learning2014 <- filter(learning2014, Points > 0)

# Check: should be 166 observations of 7 variables.
dim(learning2014)

# Set working directory to the IODS project data folder.
# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("/Users/testitesti/Documents/GitHub/IODS-project/data/")
getwd()

# Save the analysis dataset and demonstrate reading it back.
write.csv(learning2014, file = "learning2014.csv")
read.csv(file = "learning2014.csv")
head(learning2014)
str(learning2014)
|
33df3bc3aadad002c71d1a73d037fc949ead57d1
|
73e646c6f16bb7f44a63a7b64d07613e784fe4a8
|
/tests/testthat/test-calc_AF_vectors.R
|
c103ceacc0b8b3e8c80c4f6f64b40aa2cff4a1c6
|
[
"MIT"
] |
permissive
|
JimWhiting91/afvaper
|
191031eedd805f6da9f6cde8c197d2cbe81e2818
|
90914732ecf1cfa0ccfe09d2d6fba183be6b5941
|
refs/heads/master
| 2023-04-06T20:18:06.876886
| 2022-08-23T11:14:10
| 2022-08-23T11:14:10
| 357,587,040
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,416
|
r
|
test-calc_AF_vectors.R
|
# Tests for calc_AF_vectors()
# Shared fixtures: example VCF and popmap bundled with the afvaper package.
vcf_in <- vcfR::read.vcfR(system.file("full_parallel.vcf.gz",package="afvaper"))
popmap_in <- read.table(system.file("full_parallel.popmap",package="afvaper"))
# Happy path: allele-frequency vectors for pop1 vs each of pop2..pop5.
# The floor(nrow/200) expectation below implies the default window holds
# 200 SNPs -- TODO confirm against calc_AF_vectors() defaults.
test_that("calc_AF_vectors() calculates AF vectors from VCF", {
vectors <- lapply(2:5,function(x) return(c("pop1",paste0("pop",x))))
names(vectors) <- paste0("pop",2:5)
test_vectors <- calc_AF_vectors(vcf = vcf_in,
popmap = popmap_in,
vectors = vectors,
n_cores = 1)
# Are vectors normalised?
expect_equal(sum(test_vectors[[1]][1,]^2)^0.5, 1)
# Do we have all vectors?
expect_equal(nrow(test_vectors[[1]]),length(vectors))
expect_equal(length(test_vectors),floor(nrow(vcf_in@fix)/200))
})
# With normalise = FALSE the rows are raw AF changes: the row norm is
# generally != 1 but every entry should still be bounded in [-1, 1].
test_that("calc_AF_vectors() can calculate without normalising", {
# Truncate for speed
vcf_in2 <- vcf_in[1:601,]
vectors <- lapply(2:5,function(x) return(c("pop1",paste0("pop",x))))
names(vectors) <- paste0("pop",2:5)
test_vectors <- calc_AF_vectors(vcf = vcf_in2,
popmap = popmap_in,
vectors = vectors,
normalise = F,
n_cores = 1)
# Are vectors normalised?
expect_false(isTRUE(all.equal(sum(test_vectors[[1]][1,]^2)^0.5, 1)))
# Make sure that there are no values bigger or smaller than 1/-1
all_vals <- as.numeric(unlist(test_vectors))
expect_false(any(abs(all_vals)>1))
})
# Test null perms
# Null permutations shuffle window start positions, so the window names should
# no longer correspond to the sequential starts 1/201/401 of a 600-SNP input.
test_that("calc_AF_vectors() can perform null perms as expected...", {
# Truncate for speed
vcf_in2 <- vcf_in[1:600,]
popmap_in <- read.table(system.file("full_parallel.popmap",package="afvaper"))
vectors <- lapply(2:5,function(x) return(c("pop1",paste0("pop",x))))
names(vectors) <- paste0("pop",2:5)
test_vectors <- calc_AF_vectors(vcf = vcf_in2,
popmap = popmap_in,
vectors = vectors,
normalise = T,
null_perms = 4,
n_cores = 1)
# Fetch start_pos
# Window names appear to look like "chr:start-end"; extract the starts.
pos <- names(test_vectors)
pos <- sapply(strsplit(pos,":"),'[[',2)
start_pos <- as.integer(sapply(strsplit(pos,"-"),'[[',1))
# Are positions shuffled...
expect_false(all(start_pos %in% c(1,201,401)))
})
|
eb1b001f3c77a2e9c459f5dcdcc501e6256044ae
|
3b8e05a03df019883a3bfad23e8b04c25860bf6e
|
/plot5.R
|
861bda0e5a07f080e51a162b192b6a1a5a5913d9
|
[] |
no_license
|
elcid73/Exploratory-Data-Analysis--Course-Project-2-v2
|
fd5abbd2c04f8db4e3e500ed807810fda5fb5be3
|
594ffce84a6187b1622aab13ca10d2e84093c661
|
refs/heads/master
| 2016-09-12T09:35:32.808849
| 2016-05-24T21:58:15
| 2016-05-24T21:58:15
| 59,602,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 506
|
r
|
plot5.R
|
# plot5.png: total emissions from motor vehicle sources (ON-ROAD type)
# in Baltimore City (fips 24510), 1999-2008.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(ggplot2)
# BUG FIX: subset() was called without its data argument
# (subset(NEI$fips=='24510'& type == 'ON-ROAD')), which errors at runtime.
baltimore <- subset(NEI, fips == '24510' & type == 'ON-ROAD')
# Sum emissions per year for the plot.
baltimore_motor <- aggregate(Emissions ~ year, baltimore, sum)
png('plot5.png')
ggplot(baltimore_motor, aes(factor(year), Emissions)) + geom_bar(stat="identity") + xlab("year") + ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle('Total Emissions from motor vehicle in Baltimore, Maryland from 1999 to 2008')
dev.off()
|
53fdb065c549e24e61f2529a1ef7a8d9c8338c57
|
87ff01119e0660b86b93fe4a2f8b07bf77f5e304
|
/man/shrooms.Rd
|
121e2722fdc35510148e7cf12a16716977bf54ee
|
[] |
no_license
|
homerhanumat/tigerData
|
7dd072fd84d41ad4be40962ae1e66771e9723d0d
|
03774d5e00d03dcad60cd22b5ca27b82cf56bb4d
|
refs/heads/master
| 2022-12-12T00:31:47.448157
| 2022-11-30T13:02:18
| 2022-11-30T13:02:18
| 28,987,989
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,703
|
rd
|
shrooms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shrooms.R
\docType{data}
\name{shrooms}
\alias{shrooms}
\title{Can You Eat This Mushroom?}
\format{
A data frame with 5891 observations on the following 23 variables.
\describe{
\item{class}{Whether the mushroom is edible or poisonous.}
\item{cap.shape}{}
\item{cap.surface}{}
\item{cap.color}{}
\item{bruises}{Whether or not the mushroom is bruised.}
\item{odor}{}
\item{gill.attachment}{}
\item{gill.spacing}{}
\item{gill.size}{}
\item{gill.color}{}
\item{stalk.shape}{}
\item{stalk.root}{}
\item{stalk.surface.above.ring}{}
\item{stalk.surface.below.ring}{}
\item{stalk.color.above.ring}{}
\item{stalk.color.below.ring}{}
\item{veil.type}{}
\item{veil.color}{}
\item{ring.number}{}
\item{ring.type}{}
\item{spore.print.color}{}
\item{population}{}
\item{habitat}{}
}
}
\source{
A sample from of mushroom records drawn from The Audubon Society
Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.),
New York: Alfred A. Knopf, Original data contributed by Jeffrey
Schlimmer to the UCI Machine Learning Repository
(\url{http://archive.ics.uci.edu/ml}, Irvine, CA: University of
California, School of Information and Computer Science. See
\url{http://archive.ics.uci.edu/ml/datasets/Mushroom}.
}
\description{
Subset of data from a study on edibility of mushrroms.
The individual mushrooms come from 23 species of gilled mushrooms
in the Agaricus and Lepiota Family. The aim is to come up with a rule
for predicting, on the basis of an individual mushroom's characteristics,
whether or not the mushroom is edible. Remaining data is held back
for evaluation of proposed rules.
}
\keyword{datasets}
|
77ec4bb693c2f0d8b307bba69c6caa1b0daa5360
|
9a41ff952b3a2ae3adab0092761f367979aacd71
|
/R/select.R
|
fb8d8366190beaa7943f1ad5c8069b33762aab97
|
[] |
no_license
|
BenMcCleave/ctmm
|
67c8c94420deb4cf7deca3074c3d709d61d1ff93
|
16c3356121781f03aa3e9f2471dbe815ec20c339
|
refs/heads/master
| 2020-06-14T05:04:41.827603
| 2019-06-18T22:18:03
| 2019-06-18T22:18:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,718
|
r
|
select.R
|
# small sample size adjustment for ctmm.select to be more agressive
# Small-sample adjustment making ctmm.select more aggressive: the rejection
# quantile is inflated by the AICc-AIC penalty of the fitted model and mapped
# back to a (smaller) effective alpha.
alpha.ctmm <- function(CTMM, alpha) {
  q <- stats::qnorm(alpha)
  q.adj <- sqrt(q^2 + (CTMM$AICc - CTMM$AIC))
  1 - stats::pnorm(q.adj)
}
#########
# Look up a mean-square prediction error entry from a fitted model object's
# MSPE vector; an NA criterion returns Inf so the model never wins selection.
get.MSPE <- function(CTMM, MSPE = "position") {
  if (is.na(MSPE)) {
    return(Inf)
  }
  CTMM$MSPE[MSPE]
}
########
# Extract the requested information criterion (e.g. "AICc") from a fitted
# model object; an NA criterion returns Inf so the model never wins selection.
get.IC <- function(CTMM, IC = "AICc") {
  if (is.na(IC)) {
    return(Inf)
  }
  CTMM[[IC]]
}
##################
# function to simplify complexity of models
# Reduce the complexity of a movement model M by removing the parameters named
# in `par`, returning the simplified model. Relies on project helpers covm(),
# scale.covm() and %nin% (defined elsewhere in the package).
simplify.ctmm <- function(M,par)
{
# Drop the minor covariance axis: force an isotropic covariance (which also
# removes the orientation angle).
if("minor" %in% par)
{
M$isotropic <- TRUE
M$sigma <- covm(M$sigma,isotropic=TRUE,axes=M$axes)
par <- c(par,'angle')
}
# Drop the major axis: zero covariance, which removes all autocorrelation
# parameters as well.
if("major" %in% par)
{
M$isotropic <- TRUE
M$sigma <- covm(0,isotropic=TRUE,axes=M$axes)
M$tau <- NULL
par <- c(par,c('minor','angle','circle','tau position','tau velocity','tau','omega'))
}
# Drop circulation.
if("circle" %in% par)
{ M$circle <- FALSE }
# Drop range residency: infinite position timescale (diffusive limit).
# NOTE(review): this branch overwrites `par` rather than appending to it,
# discarding any earlier entries before the feature filter below -- verify
# that this is intentional.
if("range" %in% par)
{
M$sigma <- scale.covm(M$sigma,1/M$tau) # convert to diffusion matrix
M$tau[1] <- Inf
M$range <- FALSE
par <- c('tau','tau position')
}
# autocorrelation timescales can't be distinguished
if("diff.tau" %in% par)
{
M$tau <- c(1,1)*mean(M$tau)
M$omega <- FALSE
par <- c('tau position','tau velocity')
M$features <- c(M$features,'tau')
}
# Remove the dropped parameters from the model's feature list.
M$features <- M$features[M$features %nin% par]
return(M)
}
###############
# keep removing uncertain parameters until AIC stops improving
# Automated model selection for continuous-time movement models.
# Starting from the guess/fit in CTMM, phase 1 grows the autocorrelation
# structure up from the simplest "compatible" model (adding eccentricity and
# circulation), and phase 2 prunes non-significant features and refines the
# mean/trend model, fitting each candidate with ctmm.fit() and ranking by
# the chosen information criterion and/or MSPE.
#
# data:    telemetry data the models are fit to
# CTMM:    initial model guess/fit providing the target feature set
# verbose: if TRUE, return the sorted list of all fitted models
# level:   confidence level used when deciding which simplifications to try
# IC/MSPE: selection criteria (NA disables the corresponding comparison)
# trace:   progress messages; cores: parallel fits; ...: passed to ctmm.fit
#
# FIX: removed the duplicated match.arg(IC)/match.arg(MSPE) lines that
# re-validated both arguments a second time with no effect.
ctmm.select <- function(data,CTMM,verbose=FALSE,level=1,IC="AICc",MSPE="position",trace=FALSE,cores=1,...)
{
IC <- match.arg(IC,c("AICc","AIC","BIC",NA))
MSPE <- match.arg(MSPE,c("position","velocity",NA))
alpha <- 1-level
trace2 <- if(trace) { trace-1 } else { 0 }
UERE <- get.error(data,CTMM,flag=TRUE) # error flag only
drift <- get(CTMM$mean)
if(CTMM$mean=="periodic")
{
Nyquist <- CTMM$period/stats::median(diff(data$t))/2
message("Nyquist frequency estimated at harmonic ",paste(Nyquist,collapse=" ")," of the period.")
}
# initial guess in case of pREML (better for optimization)
get.mle <- function(FIT=CTMM)
{
MLE <- FIT
if(!get("EMPTY",pos=MLE.env)) # will have been set from ctmm.fit first run
{
MLE <- get("MLE",pos=MLE.env)
# check that structure is consistent
if(is.null(MLE) || name.ctmm(MLE)!=name.ctmm(FIT)) { MLE <- FIT }
}
return(MLE)
}
# consider a bunch of new models and update best model without duplication
# (mutates MODELS/OLD/CTMM in the enclosing scope via <<-)
iterate <- function(DROP,REFINE=list())
{
# name the proposed models
names(DROP) <- sapply(DROP,name.ctmm)
names(REFINE) <- sapply(REFINE,name.ctmm)
# remove models already fit
DROP <- DROP[!(names(DROP) %in% names(MODELS))]
REFINE <- REFINE[!(names(REFINE) %in% names(MODELS))]
N <- length(DROP)
M <- length(REFINE)
GUESS <- c(DROP,REFINE)
# fit every model
if(trace && length(GUESS)) { message("* Fitting models ",paste(names(GUESS),collapse=", "),".") }
#? should I run select here instead of fit ?
GUESS <- plapply(GUESS,function(g){ctmm.fit(data,g,trace=trace2,...)},cores=cores)
MODELS <<- c(MODELS,GUESS)
# what is the new best model?
OLD <<- CTMM
CTMM <<- min.ctmm(c(GUESS,list(CTMM)),IC=IC,MSPE=MSPE)
}
########################
# PHASE 1: work our way up to complicated autocorrelation models
# all of the features we need to fit numerically
FEATURES <- id.parameters(CTMM,UERE=UERE)$NAMES
# consider only features unnecessary "compatibility"
FEATURES <- FEATURES[!(FEATURES=="major")]
FEATURES <- FEATURES[!(FEATURES=="error")]
FEATURES <- FEATURES[!grepl("tau",FEATURES)]
FEATURES <- FEATURES[!(FEATURES=="omega")]
# start with the most basic "compatible" model
GUESS <- simplify.ctmm(CTMM,FEATURES)
if(trace) { message("* Fitting model ",name.ctmm(GUESS),".") }
TARGET <- CTMM
CTMM <- ctmm.fit(data,GUESS,trace=trace2,...)
MODELS <- list(CTMM)
names(MODELS) <- sapply(MODELS,name.ctmm)
OLD <- ctmm()
# keep proposing richer covariance structure until the best model stabilizes
while(!identical(CTMM,OLD))
{
GUESS <- list()
MLE <- get.mle()
# consider non-zero eccentricity
if(("minor" %in% FEATURES) && MLE$isotropic)
{
GUESS <- c(GUESS,list(MLE))
n <- length(GUESS)
GUESS[[n]]$isotropic <- FALSE
# copy over target angle, but leave eccentricity zero to start (featureless)
sigma <- attr(GUESS[[n]]$sigma,"par")
sigma["angle"] <- attr(TARGET$sigma,"par")['angle']
sigma <- covm(sigma,isotropic=FALSE,axes=TARGET$axes)
GUESS[[n]]$sigma <- sigma
}
# consider circulation
if(("circle" %in% FEATURES) && !MLE$circle)
{
GUESS <- c(GUESS,list(MLE))
GUESS[[length(GUESS)]]$circle <- 2 * .Machine$double.eps * sign(TARGET$circle)
}
# consider a bunch of new models and update best model without duplication
iterate(GUESS)
}
#############################
# PHASE 2: work our way down to simpler autocorrelation models & work our way up to more complex trend models
OLD <- ctmm()
while(!identical(CTMM,OLD))
{
GUESS <- list()
MLE <- get.mle()
beta <- alpha.ctmm(CTMM,alpha)
# consider if some timescales are actually zero
CI <- confint.ctmm(CTMM,alpha=beta)
if(length(CTMM$tau)==2 && !is.na(IC)) # OUX -> OU
{
if(!CTMM$omega && CTMM$tau[1]!=CTMM$tau[2]) # OUF -> OU
{
Q <- CI["tau velocity",1]
if(is.nan(Q) || (Q<=0))
{
GUESS <- c(GUESS,list(MLE))
GUESS[[length(GUESS)]]$tau <- MLE$tau[-length(MLE$tau)]
}
}
else if(!CTMM$omega) # OUf -> OU
{
Q <- CI["tau",1]
if(is.nan(Q) || (Q<=0))
{
GUESS <- c(GUESS,list(MLE))
GUESS[[length(GUESS)]]$tau <- MLE$tau[-length(MLE$tau)]
}
}
else # OUO -> OU
{
Q <- 1/CI["tau period",3]
if(is.nan(Q) || (Q<=0))
{
GUESS <- c(GUESS,list(MLE))
GUESS[[length(GUESS)]]$omega <- FALSE
GUESS[[length(GUESS)]]$tau <- MLE$tau[-length(MLE$tau)]
}
}
}
else if(length(CTMM$tau)==1 && !is.na(IC)) # OU -> IID
{
Q <- CI["tau position",1]
if(is.nan(Q) || (Q<=0))
{
GUESS <- c(GUESS,list(MLE))
GUESS[[length(GUESS)]]$tau <- NULL
}
}
# can autocorrelation timescales be distinguished?
if(length(CTMM$tau)==2 && CTMM$tau[1]<Inf && (CTMM$tau[1]!=CTMM$tau[2] || CTMM$omega))
{
TEMP <- get.taus(CTMM,zeroes=TRUE)
nu <- TEMP$f.nu[2] # frequency/difference
J <- TEMP$J.nu.tau[2,] # Jacobian of nu WRT canonical parameters
Q <- TEMP$tau.names
Q <- c(J %*% CTMM$COV[Q,Q] %*% J) # variance of nu
Q <- ci.tau(nu,Q,alpha=beta)[1]
if(Q<=0 || level==1 || is.na(IC))
{ GUESS <- c(GUESS,list(simplify.ctmm(MLE,"diff.tau"))) }
}
else if(length(CTMM$tau)==2) # try other side if boundary if choosen model is critically damped
{
# try overdamped
TEMP <- MLE
TEMP$omega <- 0
TEMP$tau <- TEMP$tau * exp(c(1,-1)*sqrt(.Machine$double.eps))
GUESS <- c(GUESS,list(TEMP))
# try underdamped
TEMP <- MLE
TEMP$tau <- c(1,1)/mean(1/TEMP$tau)
TEMP$omega <- sqrt(.Machine$double.eps)
GUESS <- c(GUESS,list(TEMP))
}
else if(length(CTMM$tau)==1 && level==1) # OU -> OUf (bimodal likelihood)
{ GUESS <- c(GUESS,list(simplify.ctmm(MLE,"diff.tau"))) }
# consider if there is no circulation
if(CTMM$circle)
{
Q <- CI["circle",3]
if(is.nan(Q) || (Q==Inf) || is.na(IC)) { GUESS <- c(GUESS,list(simplify.ctmm(MLE,"circle"))) }
}
# consider if eccentricity is zero
if(!CTMM$isotropic)
{
Q <- c("major","minor")
GRAD <- c(1/CTMM$sigma@par[1],-1/CTMM$sigma@par[2])
SD <- ifelse(all(Q %in% CTMM$features),sqrt(c(GRAD %*% CTMM$COV[Q,Q] %*% GRAD)),Inf) # variance could collapse early
Q <- stats::qnorm(beta/2,mean=log(CTMM$sigma@par[1]/CTMM$sigma@par[2]),sd=SD)
if(Q<=0 || is.na(IC)) { GUESS <- c(GUESS,list(simplify.ctmm(MLE,"minor"))) }
}
# is the animal even moving?
if(!CTMM$sigma@par['major'] && CTMM$error)
{ GUESS <- c(GUESS,list(simplify.ctmm(MLE,"major"))) }
# consider if we can relax range residence (non-likelihood comparison only)
if(CTMM$range && is.na(IC))
{ GUESS <- c(GUESS,list(simplify.ctmm(MLE,"range"))) }
# consider if the mean could be more detailed
REFINE <- drift@refine(MLE)
# consider a bunch of new models and update best model without duplication
iterate(GUESS,REFINE)
}
# return the best or return the full list of models
if(verbose)
{
MODELS <- sort.ctmm(MODELS,IC=IC,MSPE=MSPE)
# remove redundant models
NAMES <- sapply(MODELS,name.ctmm) -> names(MODELS)
KEEP <- c(TRUE, NAMES[-1]!=NAMES[-length(NAMES)] )
MODELS <- MODELS[KEEP]
return(MODELS)
}
else
{ return(CTMM) }
}
################
# Build a human-readable name for a movement model from its autocorrelation
# structure (IID/BM/OU/OUF/OUf/OU-Omega/IOU), isotropy, circulation, error,
# and mean/trend. Used both for display and to de-duplicate candidate models
# in ctmm.select(). whole=TRUE returns one string; whole=FALSE returns
# c(autocovariance name, mean name) separately (used by sort.ctmm).
name.ctmm <- function(CTMM,whole=TRUE)
{
FEATURES <- CTMM$features
# base model
tau <- CTMM$tau
if(length(tau)==2)
{
if(tau[1]==Inf) { NAME <- "IOU" }
else if(tau[1]>tau[2]) { NAME <- "OUF" }
else if(CTMM$omega) { NAME <- "OU\u03A9" } # underdamped
else { NAME <- "OUf" } # identical timescales
}
else if(length(tau)==1)
{ if(tau[1]<Inf) { NAME <- "OU" } else { NAME <- "BM" } }
else if(length(tau)==0)
{
# no timescales: either uncorrelated motion or no motion at all
if(CTMM$sigma@par['major'] || "major" %in% FEATURES)
{ NAME <- "IID" }
else
{ NAME <- "inactive" }
}
# isotropy
if(CTMM$isotropic)
{ NAME <- c(NAME,"isotropic") }
else
{ NAME <- c(NAME,"anisotropic") }
# circulation
if(CTMM$circle || "circle" %in% FEATURES)
{ NAME <- c(NAME,"circulation") }
# error
if(CTMM$error || "error" %in% FEATURES)
{ NAME <- c(NAME,"error") }
# mean
drift <- get(CTMM$mean)
DNAME <- drift@name(CTMM)
NAME <- paste(NAME,sep="",collapse=" ")
if(whole && !is.null(DNAME))
{ NAME <- paste(NAME,DNAME) }
else if(!whole)
{
# two-component form: c(autocovariance, mean); NULL mean reads "stationary"
if(is.null(DNAME)) { DNAME <- "stationary" }
NAME <- c(NAME,DNAME)
}
return(NAME)
}
########
# Sort a list of fitted models by IC and/or MSPE.
# With only one criterion (the other NA), this is a flat sort. With both,
# models are grouped into blocks sharing the same autocovariance structure;
# within a block, identical model types are sorted by IC, mean models by
# MSPE, and the blocks themselves by the IC of their best member.
# flatten=FALSE keeps the nested block structure; INF=TRUE marks non-range
# (BM/IOU) models with IC = Inf, since their likelihoods are not comparable
# to range-resident models.
sort.ctmm <- function(x,decreasing=FALSE,IC="AICc",MSPE="position",flatten=TRUE,INF=FALSE,...)
{
# single-criterion case: plain sort on whichever of IC/MSPE is not NA
if(is.na(MSPE))
{ ICS <- sapply(x,function(m){get.IC(m,IC)}) }
else if(is.na(IC))
{ ICS <- sapply(x,function(m){get.MSPE(m,MSPE)}) }
if(is.na(MSPE) || is.na(IC))
{
IND <- sort(ICS,index.return=TRUE,decreasing=decreasing)$ix
x <- x[IND]
if(flatten) { return(x) }
# structure the same as below
if(is.na(IC)) { x <- list(x) }
if(is.na(MSPE)) { x <- lapply(x,list) }
return(x)
}
# model type names
NAMES <- sapply(x,function(fit) name.ctmm(fit,whole=FALSE) )
ACOV <- NAMES[1,]
MEAN <- NAMES[2,]
# group by ACOV
ACOVS <- unique(ACOV)
MEANS <- unique(MEAN)
# partition into ACF-identical blocks for MSPE sorting, and then all-identical blocks for likelihood sorting
y <- list()
ICS <- numeric(length(ACOVS)) # ICs of best MSPE models
for(i in 1:length(ACOVS))
{
# ACF-identical block
SUB <- (ACOV==ACOVS[i])
y[[i]] <- x[SUB]
MEAN.SUB <- MEAN[SUB]
MEANS.SUB <- unique(MEAN.SUB)
z <- list()
MSPES <- numeric(length(MEANS.SUB))
for(j in 1:length(MEANS.SUB)) # sort exactly same models by IC
{
# all-identical block
SUB <- (MEAN.SUB==MEANS.SUB[j])
z[[j]] <- sort.ctmm(y[[i]][SUB],IC=IC,MSPE=NA)
MSPES[j] <- get.MSPE(z[[j]][[1]],MSPE) # associate block with best's MSPE
}
IND <- sort(MSPES,index.return=TRUE,decreasing=decreasing)$ix
y[[i]] <- do.call(c,z[IND]) # flatten to ACF blocks
ICS[i] <- get.IC(y[[i]][[1]],IC) # associate block with best's IC
}
# sort blocks by IC and flatten
IND <- sort(ICS,index.return=TRUE,decreasing=decreasing)$ix
y <- y[IND]
# BM/IOU log-likelihood is infinitely lower than OU/OUF log-likelihood
RANGE <- sapply(y,function(Y){Y[[1]]$range})
if(!is.na(IC) && any(RANGE) && any(!RANGE))
{
# non-range models always rank after range-resident ones
if(INF) { for(i in which(!RANGE)) { for(j in 1:length(y[[i]])) { y[[i]][[j]][[IC]] <- Inf } } }
y <- c( y[RANGE] , y[!RANGE] )
}
if(flatten) { y <- do.call(c,y) }
return(y)
}
############
# Return the single best model from a candidate list: rank everything with
# sort.ctmm() under the given criteria and take the top-ranked element.
min.ctmm <- function(x, IC = "AICc", MSPE = "position", ...)
{
  ranked <- sort.ctmm(x, IC = IC, MSPE = MSPE, ...)
  ranked[[1]]
}
########
# Summary method for a list of fitted models: sorts the list by IC/MSPE and
# returns a table of delta-IC, delta-RMSPE (in human-readable units when
# units=TRUE), and the relevant degrees of freedom per model.
summary.ctmm.list <- function(object, IC="AICc", MSPE="position", units=TRUE, ...)
{
IC <- match.arg(IC,c("AICc","AIC","BIC",NA))
MSPE <- match.arg(MSPE,c("position","velocity",NA))
N <- length(object)
# keep block structure; INF=TRUE marks incomparable non-range models
object <- sort.ctmm(object,IC=IC,MSPE=MSPE,flatten=FALSE,INF=TRUE)
M <- length(object)
# if(N==M) { MSPE <- NA } # don't need to sort MSPE
# if(M==1) { IC <- NA } # don't need to sort IC
object <- do.call(c,object)
if(!is.na(IC))
{
ICS <- sapply(object,function(m){get.IC(m,IC)})
# show relative IC
ICS <- ICS - ICS[1]
ICS <- cbind(ICS)
colnames(ICS) <- paste0("\u0394",IC)
}
else { ICS <- NULL }
if(!is.na(MSPE))
{
MSPES <- sapply(object,function(m){get.MSPE(m,MSPE)})
# convert to meters/kilometers
CNAME <- paste0("\u0394","RMSPE")
MIN <- which.min(MSPES)
MSPES <- sqrt(MSPES)
if(MSPES[1]<Inf) { MSPES <- MSPES - MSPES[MIN] }
# pick display units from the smallest non-zero difference
MIN <- min(c(abs(MSPES[MSPES!=0]),Inf))
UNIT <- unit(MIN,if(MSPE=="position"){"length"}else{"speed"},concise=TRUE,SI=!units)
MSPES <- MSPES/UNIT$scale
CNAME <- paste0(CNAME," (",UNIT$name,")")
MSPES <- cbind(MSPES)
colnames(MSPES) <- CNAME
}
else { MSPES <- NULL }
ICS <- cbind(ICS,MSPES)
rownames(ICS) <- names(object)
# degrees-of-freedom column depends on which MSPE (if any) was requested
if(is.na(MSPE))
{
DOF <- sapply(object,DOF.mean)
DOF <- cbind(DOF)
colnames(DOF) <- "DOF[mean]"
}
else if(MSPE=="position")
{
DOF <- sapply(object,DOF.area)
DOF <- cbind(DOF)
colnames(DOF) <- "DOF[area]"
}
else if(MSPE=="velocity")
{
DOF <- sapply(object,DOF.speed)
DOF <- cbind(DOF)
colnames(DOF) <- "DOF[speed]"
}
# METH is computed but only attached inside the disabled block below
METH <- sapply(object,function(m){m$method})
if(FALSE) # only prints correctly in unicode locale (Windows R bug)
{
DOF <- data.frame(DOF,METH)
colnames(DOF) <- c("DOF[mean]","method")
}
ICS <- cbind(ICS,DOF)
return(ICS)
}
|
73c37452ef3386091fc349970103ef3b0677d9e1
|
85ef44500cf0794b7ccac66a5cf24f65112be3b4
|
/R/ograph_plot.R
|
771868da6cc974aa1099b2773a924b22e913f302
|
[] |
no_license
|
budkaja/ograph
|
9f8488bfd0c1f5b1b0fa63d1683397ce8566293f
|
8d72326602880e1d9acb716831a10fbc225c8caf
|
refs/heads/master
| 2020-03-28T01:57:36.412034
| 2016-05-18T21:44:39
| 2016-05-18T21:44:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,078
|
r
|
ograph_plot.R
|
##################################
##simple method to plot the tree with pre-defined parms
# Plot an ontology graph as a tree (Reingold-Tilford layout rooted at `root`).
# label=TRUE draws "name\ndefinition" labels; label.nodes restricts labeling
# to those nodes (plus the first three levels); show.genes appends the gene
# count per node (only.gene=TRUE shows just the count). Remaining arguments
# are forwarded to igraph's plot().
treeplot<-function(graph,label=TRUE,label.nodes=c(),
vertex.size=3,
vertex.label.cex=1,
edge.arrow.size=0.2,
edge.width=0.5,
vertex.label.dist=0,
vertex.label.degree=-pi/4,
show.genes=FALSE,
only.gene=FALSE,
root='all'){
# edges are reversed so the layout flows root -> leaves
graph<-reverseArch(graph)
#text=paste(V(graph)$name,V(graph)$def,sep="\n")
label.text=vector(mode='character',length(V(graph)))
if(label){
if(length(label.nodes)>0){
#always plot the first three level
default.nodes<-V(subGraphByLevel(reverseArch(graph),3))$name
label.nodes=unique(c(default.nodes,label.nodes))
index<-which(V(graph)$name %in% label.nodes)
label.text[index]=paste(V(graph)$name[index],V(graph)$def[index],sep="\n")
}else{
# no restriction: label every node
label.text=paste(V(graph)$name,V(graph)$def,sep="\n")
}
if(show.genes){
if(only.gene)
label.text=sapply(V(graph)$genes,length)
else
label.text=paste(label.text,sapply(V(graph)$genes,length),sep="\n")
}
}
if(label){
#plot(graph,vertex.size=vertex.size,vertex.label.cex=vertex.label.cex,vertex.label=paste(V(graph)$name,V(graph)$name,sapply(V(graph)$genes,length),sep="\n"),edge.arrow.size=edge.arrow.size,edge.width=edge.width,edge.color='black',layout=layout.reingold.tilford(graph,flip.y=TRUE,root=which(V(graph)$name=='all')))
plot(graph,vertex.size=vertex.size,vertex.label.dist=vertex.label.dist,vertex.label.degree=vertex.label.degree,
vertex.label.cex=vertex.label.cex,
vertex.label=label.text,
edge.arrow.size=edge.arrow.size,
edge.width=edge.width,
edge.color='black',
layout=layout.reingold.tilford(graph,flip.y=TRUE,root=which(V(graph)$name==root)))
}else{
# NOTE(review): the unlabeled branch ignores `root` and always roots at 'all'
plot(graph,vertex.size=vertex.size,vertex.label=NA,edge.arrow.size=edge.arrow.size,edge.width=edge.width,edge.color='black',layout=layout.reingold.tilford(graph,flip.y=TRUE,root=which(V(graph)$name=='all')))
}
}
##similar to tree plot, but use interactive plotting. work only with small graph.
##
# Interactive (Tcl/Tk) variant of treeplot(): renders the ontology tree in a
# tkplot window. Only works with small graphs, so graphs over 500 nodes are
# rejected. label=1 shows "name\ndefinition" labels (plus gene counts when
# show.genes=TRUE, or only the counts when only.gene=TRUE); any other value
# hides labels. `root` names the vertex used as the layout root (labeled
# branch only, matching treeplot()).
# FIX: the size guard called die(), which is not an R function and would
# itself error with "could not find function"; use stop() instead.
tktreeplot<-function(graph,label=0,vertex.size=3,vertex.label.cex=1,edge.arrow.size=0.2,edge.width=0.5,vertex.label.dist=0,vertex.label.degree=-pi/4,show.genes=FALSE,only.gene=FALSE,root='all'){
if(length(V(graph))>500){
stop('too many nodes!')
}
# edges are reversed so the layout flows root -> leaves
graph<-reverseArch(graph)
text=paste(V(graph)$name,V(graph)$def,sep="\n")
if(show.genes){
if(only.gene)
text=sapply(V(graph)$genes,length)
else
text=paste(text,sapply(V(graph)$genes,length),sep="\n")
}
if(label==1){
tkplot(graph,vertex.size=vertex.size,vertex.label.dist=vertex.label.dist,vertex.label.degree=vertex.label.degree,
vertex.label.cex=vertex.label.cex,
vertex.label=text,
edge.arrow.size=edge.arrow.size,
edge.width=edge.width,
edge.color='black',
layout=layout.reingold.tilford(graph,flip.y=TRUE,root=which(V(graph)$name==root)))
}else{
# unlabeled branch always roots at 'all', matching treeplot()
tkplot(graph,vertex.size=vertex.size,vertex.label=NA,edge.arrow.size=edge.arrow.size,edge.width=edge.width,edge.color='black',layout=layout.reingold.tilford(graph,flip.y=TRUE,root=which(V(graph)$name=='all')))
}
}
##################################
##simple method to plot the tree with pre-defined parms
# Plot the graph with a force-directed (Fruchterman-Reingold) layout.
# label=1 labels each node with "name\nname\n<gene count>"; any other value
# hides labels. (Name "nomalplot" appears to be a typo of "normalplot",
# kept for compatibility.)
nomalplot<-function(graph,label=0){
if(label==1){
plot(graph,vertex.size=3,vertex.label.cex=1,vertex.label=paste(V(graph)$name,V(graph)$name,sapply(V(graph)$genes,length),sep="\n"),edge.arrow.size=0.2,edge.width=0.5,edge.color='black',layout=layout.fruchterman.reingold)
}else{
plot(graph,vertex.size=3,vertex.label=NA,edge.arrow.size=0.2,edge.width=0.5,edge.color='black',layout=layout.fruchterman.reingold)
}
}
##################################
##value is a named vector with nodes and their values.
##plot the tree and color the significant nodes
##plot2file(filename,width=50,heigth=20)
##plotSig(graph=g@graph,testresult=resultElimFis,number_of_node=50,label=1)
##dev.off()
# Plot the subgraph of the most significant nodes, heat-colored by p-value.
# value: named vector of p-values (possibly the string "< 1e-30");
# number_of_node > 0 keeps only the N smallest p-values;
# only.plot.sig=TRUE labels only the significant nodes. Extra args go to
# treeplot().
plotSig<-function(graph,value,number_of_node=0,only.plot.sig=T,...){
#turn to numeric
tmp<-names(value)
value<-sub(pattern='< 1e-30',replacement='1e-30',x=value)
value<-as.numeric(value)
names(value)<-tmp
# small offset avoids log10(0) below
x=sort(value+10^-20)
#x=sort(score(testresult))
if(length(x)>number_of_node & number_of_node>0 )
x=x[1:number_of_node]
log.x=log10(x)
# map each distinct (rounded) log-p level to a heat color
color <- round(log.x - range(log.x)[1] + 1,3)
index=unique(color)
colorMap <- heat.colors(length(index))
color<-sapply(names(color),function(x){
colorMap[which(index==color[x])]
})
# NOTE(review): exists('label.nodes') looks up the *calling/global*
# environment, not a parameter of this function — so label.nodes is only
# predefined if a global variable by that name happens to exist. Likely a
# latent bug; confirm intent.
if(!exists('label.nodes'))
label.nodes=names(x)
g=subGraphByNodes(graph,nodes=c(names(x),label.nodes))
g=ograph::set.node.attribute(g,attr_name='color',attr_value=color,nodes=names(color))
if(only.plot.sig)
treeplot(g,label.nodes=names(x),...)
else
treeplot(g,...)
}
##################################
##function to plot the following graph into a file
##need to call dev.off when finish plotting
# Open a PNG graphics device writing to `filename`; all subsequent plotting
# goes to that file until the caller invokes dev.off(). The `heigth`
# parameter keeps its original (misspelled) name for backward compatibility;
# it is passed as the device height.
plot2file <- function(filename, width = 12, heigth = 8, units = 'in', res = 300) {
  png(filename = filename, width = width, height = heigth, units = units, res = res)
}
##################################
##plot the wordcloud base on p-value
##value is a named vector with nodes and their values.
# Draw a word cloud of term definitions weighted by significance.
# value: named vector of p-values (possibly "< 1e-30" strings); word size is
# proportional to -log(p) normalized over the selected terms. If filename is
# non-empty the cloud is written to a PNG of the given size instead of the
# current device. number_of_node limits how many (smallest-p) terms are used.
plotWordcloud<-function(value,number_of_node=Inf,scale=c(3,0.1),filename='',width=12,heigth=8,units='in',res=300){
require(wordcloud)
# term ID -> definition lookup; ONTTERM is a package-level annotation object
def=Term(ONTTERM)
ns<-names(value)
value<-sub(pattern='< 1e-30',replacement='1e-30',x=value)
value<-as.numeric(value)
names(value)<-ns
# small offset avoids log(0) below
x=sort(value+10^-20)
if(!is.infinite(number_of_node)){
x=x[1:number_of_node]
}
y=-log(x)
freq=y/sum(y)
# smallest positive frequency becomes the display threshold
min.freq=sort(freq[freq>0])[1]
if(filename!=''){
png(filename, width,heigth, units=units, res=res)
wordcloud(words=def[names(y)],freq=freq,scale=scale,min.freq=min.freq,random.order=FALSE, max.words=Inf,rot.per=0, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
dev.off()
}else{
wordcloud(words=def[names(y)],freq=freq,scale=scale,min.freq=min.freq,random.order=FALSE, max.words=Inf,rot.per=0, use.r.layout=FALSE, colors=brewer.pal(8, 'Dark2'))
}
}
###############################################
## save igraph object to format
# Save an igraph object under `loc`/`name` in up to three formats:
# graphml=TRUE writes <name>.graphml, e=TRUE writes the edge table to
# <name>_edges.txt, v=TRUE writes the node table to <name>_nodes.txt.
# The per-node `genes` attribute (a list of environments) is flattened to a
# comma-separated string before writing.
saveGraph<-function(graph,name,loc,graphml=TRUE,e=TRUE,v=TRUE){
if(graphml)
write.graph(graph,paste(loc,name,".graphml",sep=''), format="graphml")
tmp=get.data.frame(graph,what='both')
if(e)
write.table(tmp$edges, file = paste(loc,name,"_edges.txt",sep=''), sep = "\t",row.names = FALSE,quote=FALSE)
if(v){
##parse the genes coloum
# environments -> character vectors of gene names -> "a,b,c" strings
tmp$vertices$genes<-.listOfEnv2ListOfList(tmp$vertices$genes)
if(length(tmp$vertices$genes)>0){
tmp$vertices$genes<-sapply(tmp$vertices$genes,FUN=function(x){
paste(x,collapse=',')
})
}
write.table(tmp$vertices, file = paste(loc,name,"_nodes.txt",sep=''), sep = "\t",row.names = FALSE,quote=FALSE)
}
}
# Load a graph from tab-separated edge and node tables (the format written
# by saveGraph()) and return an undirected igraph object whose vertex
# metadata comes from the first two node-table columns.
# FIX: removed three dead statements (unique(union(...)), levels(...),
# setdiff(...)) whose results were computed and discarded — leftover
# debugging code with no effect.
loadGraph <- function(edges_file, nodes_file){
edges <- read.table(edges_file, header = TRUE, sep = '\t')
nodes <- read.table(nodes_file, header = TRUE, sep = '\t')
# keep only the name column plus one attribute column, as before
nodes <- nodes[, 1:2]
graph.data.frame(edges, directed = FALSE, vertices = nodes)
}
# Convert a list of environments into a list of character vectors, each
# holding the (alphabetically sorted) names of the objects defined in the
# corresponding environment. Names on the input list are preserved.
.listOfEnv2ListOfList <- function(listOfEnv) {
  out <- vector("list", length(listOfEnv))
  names(out) <- names(listOfEnv)
  for (i in seq_along(listOfEnv)) {
    out[[i]] <- ls(envir = listOfEnv[[i]])
  }
  out
}
##################################
##plot the graph in text format
##giving an ider how the structure looks like
# Print the graph structure as indented text, one node per line, starting at
# the root. `indent` is repeated once per level; `text` names the node
# attributes printed for each node (default just 'name').
plotGraphStructure<-function(graph,indent='--',text=c('name')){
root<-findRoot(graph)
levels<-buildLevels(graph)
# recursive worker: print `node` at its level, then descend into children
f<-function(graph,node){
level<-levels$nodes2level[[node]]
string=paste(rep(indent,level),collapse='')
t=''
for(i in text){
t = c(t,get.node.attribute(graph,i,c(node)))
}
t=paste(t,collapse=' ')
cat(paste(string,t,"\n",sep=' '))
cs<-findChildrenNodes(graph,node)
if(length(cs)>0){
for(i in cs){
f(graph,i)
}
}
}
f(graph,root)
}
################
#method to turn an igraph to graphNEL
# Convert an igraph object to a graphNEL (for Rgraphviz plotting), carrying
# over every vertex attribute except 'name' as a Graphviz node attribute.
# Returns list(graph = graphNEL, nodeAttrs = named attribute list).
.to.GraphNEL<-function(igraph){
nel<-igraph.to.graphNEL(igraph)
nAttrs<-list()
v.n <- list.vertex.attributes(igraph)
v.n <- v.n[v.n != "name"]
index<-get.vertex.attribute(igraph, 'name')
for (n in v.n) {
nAttrs[[n]]<-unlist(nodeData(nel, attr = n))
}
# rename igraph attribute names to their Graphviz equivalents
dic<-c('color'='fillcolor')
names(nAttrs)<-unname(sapply(names(nAttrs),function(x){
if(is.na(dic[x]))
x
else
dic[x]
}))
list(graph=nel,nodeAttrs=nAttrs)
}
################
#method to turn an igraph to graphNEL and plot
# Render an igraph ontology via Rgraphviz (dot layout, edges reversed so
# arrows point root -> leaves). term2def maps term IDs to definitions;
# label.only.def selects definition-only vs "name\ndef" labels;
# showEdges=FALSE hides edges by painting them white. Remaining args tune
# Graphviz node/edge appearance.
plot.graphNEL<-function(igraph,term2def,label=FALSE,showEdges = TRUE,node.shape='circle',node.fontsize = 9,edge.fontsize = 9,node.height = 0.45,label.only.def=T){
require(Rgraphviz)
r<-.to.GraphNEL(igraph)
nel<-r$graph
nodeAttrs<-r$nodeAttrs
node.names <- nodes(nel)
if(label.only.def==1)
nodeAttrs$label <- .getTermsDefinition(term2def,node.names,multipLines=T)
else
nodeAttrs$label <- paste(node.names,nodeAttrs$def, sep = "\\\n")
names(nodeAttrs$label) <- node.names
# nodeAttrs$shape<-'circle'
# nodeAttrs$height<-0.45
## we set the global Graphviz attributes
graphAttrs <- getDefaultAttrs(layoutType = 'dot')
graphAttrs$cluster <- NULL
#graphAttrs$graph$splines <- FALSE
graphAttrs$graph$size <- "6.99,3.99"
## set the node shape
# graphAttrs$node$shape <- 'ellipse'
graphAttrs$node$shape <- node.shape
## set the fontsize for the nodes labels
graphAttrs$node$fontsize <- node.fontsize
graphAttrs$edge$fontsize <- edge.fontsize
graphAttrs$node$style <- 'filled'
graphAttrs$node$height <- node.height
# graphAttrs$node$width <- '1.5'
if(!showEdges)
graphAttrs$edge$color <- 'white'
else
## if we want to differentiate between 'part-of' and 'is-a' edges
## 0 for a is_a relation, 1 for a part_of relation
## edgeAttrs$color <- ifelse(.getEdgeWeights(dag) == 0, 'black', 'red')
graphAttrs$edge$color <- 'black'
plot(reverseEdgeDirections(nel),nodeAttrs = nodeAttrs,attrs=graphAttrs)
}
# Print the ontology graph as an indented text tree, starting at its root.
# `indent.symbol` is repeated once per depth level in front of each node.
plot.text <- function(graph, indent.symbol = '.') {
  node.levels <- buildLevels(graph)
  tree.root <- findRoot(graph)
  .plot.text.node(graph, node.levels, tree.root, indent.symbol)
}
# Recursive worker for plot.text(): prints one node — indented by its level
# and followed by a tab and its 'def' attribute — then recurses into each of
# its children (depth-first, in the order findChildrenNodes returns them).
.plot.text.node <- function(graph, levels, node, indent.symbol = '-') {
  depth <- levels$nodes2level[[node]]
  cat(rep(indent.symbol, depth), node, "\t",
      get.node.attribute(graph, 'def', node), "\n", sep = '')
  for (child in findChildrenNodes(graph, node)) {
    .plot.text.node(graph, levels, child, indent.symbol)
  }
}
|
c177b901f4e5cc5d74fb50dbfacf2c1f14e88a06
|
3f277f9f4e034d6d47984bdc70a24ba8952e5a1d
|
/man/dtransform.Rd
|
d3180bfc8ef9cc625a225bd9c8f8664e57c6e6bd
|
[] |
no_license
|
kkholst/mets
|
42c1827c8ab939b38e68d965c27ffe849bcd6350
|
c1b37b8885d5a9b34688cb4019170a424b7bec70
|
refs/heads/master
| 2023-08-11T17:09:18.762625
| 2023-06-16T07:54:16
| 2023-06-16T07:54:16
| 28,029,335
| 16
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 656
|
rd
|
dtransform.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtransform.R
\name{dtransform}
\alias{dtransform}
\alias{dtransform<-}
\alias{dtrans}
\alias{dtrans<-}
\title{Transform that allows condition}
\usage{
dtransform(data, ...)
}
\arguments{
\item{data}{a data frame}
\item{...}{new variable definitions including possible if condition}
}
\description{
Defines new variables under condition for data frame
}
\examples{
data(mena)
xx <- dtransform(mena,ll=log(agemena)+twinnum)
xx <- dtransform(mena,ll=log(agemena)+twinnum,agemena<15)
xx <- dtransform(xx ,ll=100+agemena,ll2=1000,agemena>15)
dsummary(xx,ll+ll2~I(agemena>15))
}
|
409533370aef008e55953caa8db8092ecd378cda
|
927f36bd8494c82c19ee05b0b8c83477ce464e4b
|
/geneClustering/drought_cluster_R.r
|
a7e3b3a71f6bcacefe97ecf70b2e2ce465471c7a
|
[] |
no_license
|
Bo-UT/Drought_pCRE
|
d0fd045b99078c1df8371901522c0935300804d5
|
b20cc738bccc22f2e8d9f3730487cddfd06c942e
|
refs/heads/main
| 2023-07-29T10:32:46.708034
| 2021-09-15T15:39:44
| 2021-09-15T15:39:44
| 406,616,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,597
|
r
|
drought_cluster_R.r
|
# Cluster drought/dehydration differentially-expressed genes by their
# time-course expression profiles (row-wise z-scores, k-means with k = 40),
# plot every cluster's profiles, and export selected clusters' gene IDs.
library(tidyverse)
library(reshape2)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific;
# consider relative paths or the here package.
setwd('E:/Study/Courses/Projects/ResearchProjects/labDehydrationProject/ML_pipeline/geneClustering/scripts')
##load data
data <- read.csv('dehydration_allDE_cluster.csv',row.names=1)
# add a zero-expression baseline column for time 0h
data$d0h <- 0
colnames(data)
data <- data[,c('d0h',colnames(data)[1:7])]
# data[-7] to remove original cluster column
# z-score each gene (row) across time points; rownames become column GeneID
data2 <- t(scale(t(data[-8]))) %>% as.data.frame() %>% rownames_to_column(., "GeneID") # scale data and convert row name to firt column
## kmeans cluster
num_cluster <- 40 # set cluster number
set.seed(42)
kmeans_out <- kmeans(data2[-1],centers=num_cluster,iter.max = 500,nstart=50)
## add cluster info to orig matrix
data_with_cust_info <- data2 %>%
mutate(cluster = paste("cluster ", kmeans_out$cluster,sep = ""))
# data_with_cust_info = cbind(data2,data$cluster) %>% stats::setNames(c(colnames(data2),'cluster'))
# dim(data_with_cust_info)
## visualise each cluster
options(repr.plot.width = 300, repr.plot.height = 300) # set figure size
data_with_cust_info %>%
# filter(cluster %in% paste0('cluster ',c(12,21,27,30,4,13,5,1,2,10,9,12))) %>%
gather(key = "variable" , value = "value", -c(1,9)) %>% ### 1 is the index of column 'geneName' and 8 is the index of column 'clust'
group_by(variable) %>%
mutate(cluster=factor(cluster,levels = paste0('cluster ', seq(num_cluster)))) %>% # levlels name must be same with cluster
ggplot(aes(x = variable , y = value , group = GeneID)) +
geom_point(size=1.0) +
geom_line(alpha = 1 , aes(col = as.character(cluster))) +
theme_bw() +
theme(legend.position = "none" , axis.text.x = element_text(angle = 90 , vjust = 0.4)) +
labs(x='Time courses',y='Normalized expression')+
facet_wrap(~cluster,ncol=8)+
theme(text = element_text(size = 50),axis.title.x = element_text(margin = margin(t = 20, r = 20, b = 0, l = 0)))+
# NOTE(review): chaining ggsave() onto a ggplot with '+' is fragile and
# deprecated behavior; prefer p <- ...; ggsave(file, plot = p, ...).
ggsave('E:/Study/Courses/Projects/ResearchProjects/labDehydrationProject/findKmers/KmersFinding2/genecluster.tiff',
width = 100,height = 100,units = 'cm',limitsize = FALSE)
# quick bar chart of cluster sizes for the clusters selected below
data_with_cust_info %>% filter(cluster %in% paste0('cluster ',c(3,5,14,17,20,22,26,32,37))) %>%
ggplot(.)+geom_bar(aes(cluster))
# export the gene IDs of each selected cluster, one file per cluster
for (i in c(3,5,14,17,20,22,26,32,37)) {
data_with_cust_info %>% filter(cluster==paste0('cluster ', i)) %>% select(1) %>%
write.table(file = paste0("E:/Study/Courses/Projects/ResearchProjects/labDehydrationProject/findKmers/KmersFinding2/geneClusters/cluster_",i,".txt"),
sep = "\t",row.names = F, col.names = F,quote = F)
}
data_with_cust_info %>% select(1)
|
24c28d79726cf93a622a196cd80e95116c1e9fc4
|
6bfb407d6dbc79e672a3886eab30a38012888d71
|
/retired_functions/summariseTrees.R
|
0eb33ecf7e4919306fbfcfc6f9a4f448f13ff9af
|
[] |
no_license
|
hferg/bayestraitr
|
ca438050b7565e0d6bf752ad8a9a152bd568a0b0
|
308253d82d02ec0c51414b4608c74fbe0acd528d
|
refs/heads/master
| 2021-10-22T14:55:55.160296
| 2019-03-11T15:15:45
| 2019-03-11T15:15:45
| 107,663,993
| 0
| 0
| null | 2019-03-11T15:16:23
| 2017-10-20T10:16:43
|
R
|
UTF-8
|
R
| false
| false
| 3,241
|
r
|
summariseTrees.R
|
#' summariseTrees
#'
#' Summarise a posterior sample of trees from a rate-varible or RJ local transformation
#' BayesTraits MCMC analysis.
#' @param reftree A tree that provides the reference topology (in most cases this is
#' time-tree the analysis was run on). Can be a filename of a tree in the working
#' directory or an object of class "phylo"
#' @param trees The posterior sample of trees from a rate-variable or RJlocaltransformation
#' MCMC BayesTraits analysis. Typically will have the .Output.trees extension. Either
#' the filename of the posterior, or an object of class "multiPhylo".
#' @param verbose If TRUE a progress bar will be shown when ladderizing the posterior
#' trees (this step can be time consuming).
#' @param burnin Number of trees to discard as burnin (if, for example, the MCMC
#' chain had not converged until later in the run).
#' @param thinning If >1 then every nth tree will be sampled - useful if the sampling
#' interval of the original MCMC analysis was too small. Note that is it preferable
#' to ensure proper chain convergence prior to analysis of the results, in which case
#' the default settings of burnin and thinning will be appropriate.
#' @export
#' @name summariseTrees
# Summarise a posterior sample of trees against a fixed reference topology:
# after burnin/thinning and ladderizing, every tree's topology is checked
# against `reftree`, then per-branch mean/median/mode/sd/range of branch
# lengths are computed and returned both as summary trees and as a table.
# reftree/trees may be "phylo"/"multiPhylo" objects or NEXUS file paths.
# Fixes applied: inherits() instead of class()== comparisons; error-message
# typo "mactch" -> "match"; the duplicated class(trees) <- "multiPhylo"
# assignment hoisted out of the verbose if/else; tibble() namespaced so the
# function does not depend on tibble being attached.
summariseTrees <- function(reftree, trees, burnin = 0, thinning = 1, verbose = TRUE) {
  # accept a file path or an already-loaded tree for both inputs
  if (!inherits(reftree, "phylo")) {
    reftree <- ape::read.nexus(reftree)
  }
  reftree <- ape::ladderize(reftree)
  if (!inherits(trees, "multiPhylo")) {
    trees <- ape::read.nexus(trees)
  }
  # discard burnin and keep every `thinning`-th tree (index 0 is a no-op)
  trees <- trees[seq.int(burnin, length(trees), thinning)]
  # ladderize trees (with a progress bar when verbose)
  if (verbose) {
    print("Ladderizing posterior trees:")
    trees <- pbapply::pblapply(trees, ape::ladderize)
  } else {
    trees <- lapply(trees, ape::ladderize)
  }
  class(trees) <- "multiPhylo"
  # every posterior tree must share the reference tip labels and topology
  for (i in seq_along(trees)) {
    if (sum(reftree$tip.label == trees[[i]]$tip.label) != length(reftree$tip.label)) {
      stop(paste("Tip labels on tree", i, "do not match reference tree"))
    }
    if (sum(reftree$edge == trees[[i]]$edge) != length(reftree$edge)) {
      stop(paste("Tree", i, "has a different topology to reference tree"))
    }
  }
  # per-branch summaries across the posterior (branches x trees matrix)
  bls <- sapply(trees, function(x) x$edge.length)
  meanbl <- apply(bls, 1, mean)
  medianbl <- apply(bls, 1, median)
  modebl <- apply(bls, 1, modeStat)
  sdbl <- apply(bls, 1, sd)
  rangebl <- apply(bls, 1, function(x) max(x) - min(x))
  meantree <- mediantree <- modetree <- reftree
  meantree$edge.length <- meanbl
  mediantree$edge.length <- medianbl
  modetree$edge.length <- modebl
  # TODO(review): "meant_ree" and "branchlengt_info" below are typos
  # ("mean_tree", "branchlength_info"); kept verbatim since the names are
  # part of the returned interface.
  summarytrees <- list(original_tree = reftree,
                       meant_ree = meantree,
                       median_tree = mediantree,
                       mode_tree = modetree)
  class(summarytrees) <- c("trees_summary", "multiPhylo")
  bls <- tibble::tibble(original_bls = reftree$edge.length,
                        mean_bls = meanbl,
                        median_bls = medianbl,
                        mode_bls = modebl,
                        range_bls = rangebl,
                        sd_bls = sdbl)
  res <- list(tree_summaries = summarytrees,
              branchlengt_info = bls)
  return(res)
}
|
88240c477eb4b26a22c1c32e22d2e3e7b1b2a79b
|
1123842eaa78a7a9d61b217d0830f1e5eca20afb
|
/inst/doc/funneljoin.R
|
bdb4db1fec79e2806ea197dd2f7078ee42bb97d4
|
[] |
no_license
|
cran/funneljoin
|
99f362a7616c3d590542e028c2b2d6ac76006d26
|
efd762d08c3392688d8d30e5343c0a404272c171
|
refs/heads/master
| 2023-04-02T04:30:56.830919
| 2023-03-21T19:00:02
| 2023-03-21T19:00:02
| 236,600,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,603
|
r
|
funneljoin.R
|
# NOTE(review): this file is R code purled from the funneljoin vignette
# (inst/doc); regenerate it from the .Rmd source rather than editing by hand.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
message = FALSE
)
## -----------------------------------------------------------------------------
library(dplyr)
library(funneljoin)
## -----------------------------------------------------------------------------
# `landed` and `registered` are example event tables shipped with funneljoin
landed
## -----------------------------------------------------------------------------
registered
## -----------------------------------------------------------------------------
# first landing joined to each user's first subsequent registration
landed %>%
after_inner_join(registered,
by_user = "user_id",
by_time = "timestamp",
type = "first-first")
## -----------------------------------------------------------------------------
# any landing -> any later registration within a 4-day window; keep the gap
landed %>%
after_inner_join(registered,
by_user = "user_id",
by_time = "timestamp",
type = "any-any",
max_gap = as.difftime(4, units = "days"),
gap_col = TRUE)
## -----------------------------------------------------------------------------
# synthetic A/B-test data: experiment exposures and registrations per user
experiment_starts <- tibble::tribble(
~user_id, ~timestamp, ~ alternative.name,
1, "2018-07-01", "control",
2, "2018-07-01", "treatment",
3, "2018-07-02", "control",
4, "2018-07-01", "control",
4, "2018-07-04", "control",
5, "2018-07-10", "treatment",
5, "2018-07-12", "treatment",
6, "2018-07-07", "treatment",
6, "2018-07-08", "treatment"
) %>%
mutate(timestamp = as.Date(timestamp))
experiment_registrations <- tibble::tribble(
~user_id, ~timestamp,
1, "2018-07-02",
3, "2018-07-02",
4, "2018-06-10",
4, "2018-07-02",
5, "2018-07-11",
6, "2018-07-10",
6, "2018-07-11",
7, "2018-07-07"
) %>%
mutate(timestamp = as.Date(timestamp))
## -----------------------------------------------------------------------------
# left join keeps non-converters: first exposure -> first registration after it
experiment_starts %>%
after_left_join(experiment_registrations,
by_user = "user_id",
by_time = "timestamp",
type = "first-firstafter")
## -----------------------------------------------------------------------------
# conversion rate per experiment arm (timestamp.y is NA for non-converters)
experiment_starts %>%
after_left_join(experiment_registrations,
by_user = "user_id",
by_time = "timestamp",
type = "first-firstafter") %>%
group_by(alternative.name) %>%
summarize_conversions(converted = timestamp.y)
## -----------------------------------------------------------------------------
for_conversion <- tibble::tribble(
~"experiment_group", ~"first_event", ~"last_event", ~"type",
"control", "2018-07-01", NA, "click",
"control", "2018-07-02", NA, "click",
"control", "2018-07-03", "2018-07-05", "click",
"treatment", "2018-07-01", "2018-07-05", "click",
"treatment", "2018-07-01", "2018-07-05", "click",
"control", "2018-07-01", NA, "purchase",
"control", "2018-07-02", NA, "purchase",
"control", "2018-07-03", NA, "purchase",
"treatment", "2018-07-01", NA, "purchase",
"treatment", "2018-07-01", "2018-07-05", "purchase"
)
# conversion rates by event type and experiment arm
for_conversion %>%
group_by(type, experiment_group) %>%
summarize_conversions(converted = last_event)
## -----------------------------------------------------------------------------
tbl <- tibble::tribble(
~ experiment_group, ~nb_users, ~nb_conversions, ~type,
"control", 500, 200, "purchase",
"treatment", 500, 100, "purchase",
"control", 500, 360, "click",
"treatment", 500, 375, "click"
)
tbl %>%
group_by(type) %>%
summarize_prop_tests(alternative_name = experiment_group)
|
ed730034720cd31ec8e2ed83c9ade49e727a33a2
|
dfa7dc09fee4f6faec626f6aed78adde34237944
|
/infectChiriAnalysisJustin/scripts/getPolygons.R
|
7297e992e37d34d04bde69fac5e5103a5bc7995e
|
[] |
no_license
|
chirimacha/Laboratory
|
afaf0edcf087a8ec923a1333bf654ab582910e6e
|
6075fb99a65abc28209beeb64d09c085ac63efb3
|
refs/heads/master
| 2021-03-19T11:46:42.012352
| 2018-10-29T11:42:32
| 2018-10-29T11:42:32
| 34,410,633
| 0
| 2
| null | 2016-04-19T15:22:56
| 2015-04-22T19:17:36
|
R
|
UTF-8
|
R
| false
| false
| 6,870
|
r
|
getPolygons.R
|
#===================================================================================
# Code to make the polygons for each of the cameras
# mayo 2018 justin
#===================================================================================
#===================================================================================
# Part I: Dependencies, libraries
#===================================================================================
library("devtools")
library("sp")
library("videoplayR")
library("splancs")
#===================================================================================
# Part II: Function to make the polygons
#===================================================================================
makePolyExp <- function(dia, cam, csvFile) {
# Load video
if (is.na(csvFile)) {
# Create the polygon from the 1000th image of the first hour video of the camera
setwd(paste0("/Volumes/TOSHIBA_EXT/BACKUP_UPCH_AQP_21oct2015/JUSTIN_ASSAY/FIXED CSVs/day", dia, "/cam", cam))
vid <- readVid(paste0("DAY", dia, "_CAM", cam, "_1HR.mp4"))
} else {
setwd(paste0("/Volumes/TOSHIBA_EXT/BACKUP_UPCH_AQP_21oct2015/JUSTIN_ASSAY/VIDEOS_FIRST_ASSAY/day_", dia, "/cam", cam, "(preprocessed)"))
vid <- readVid(paste0(csvFile, ".mp4"))
}
# Manually make polygon
imshow(getFrame(vid, 1000))
poly <- getpoly(quiet=FALSE)
return(poly)
}
#===================================================================================
# Part III: Make the polygons for each of the 32 cameras
# IMPORTANT: Go from TL to TR to BR to BL when making the polygon
#===================================================================================
#===================================================================================
# Dia 1
#===================================================================================
POLYDAY1CAM1 <- makePolyExp(dia=1, cam=1, "2016_09_19 17_54_59.dur.60 min")
POLYDAY1CAM2 <- makePolyExp(dia=1, cam=2, "2016_09_19 18_00_04.dur.60.min")
POLYDAY1CAM3 <- makePolyExp(dia=1, cam=3, "2016_09_19 18_00_26.dur.60.min")
POLYDAY1CAM4 <- makePolyExp(dia=1, cam=4, "2016_09_19 17_59_39.dur.60.min")
#===================================================================================
# Dia 2
#===================================================================================
POLYDAY2CAM1 <- makePolyExp(dia=2, cam=1, "2016_09_20 18_59_49 dur 60 min")
POLYDAY2CAM2 <- makePolyExp(dia=2, cam=2, "2016_09_20 18_01_34 dur 60 min")
POLYDAY2CAM3 <- makePolyExp(dia=2, cam=3, "2016_09_20 18_01_53 dur 60 min")
POLYDAY2CAM4 <- makePolyExp(dia=2, cam=4, "2016_09_20 18_01_06 dur 60 min")
#===================================================================================
# Dia 3
#===================================================================================
POLYDAY3CAM1 <- makePolyExp(dia=3, cam=1, "2016_09_22 17_45_50 dur 60 min")
POLYDAY3CAM2 <- makePolyExp(dia=3, cam=2, "2016_09_22 17_46_48 dur 60 min")
POLYDAY3CAM3 <- makePolyExp(dia=3, cam=3, "2016_09_22 17_45_55 dur 60 min")
POLYDAY3CAM4 <- makePolyExp(dia=3, cam=4, "2016_09_22 17_46_05 dur 60 min")
#===================================================================================
# Dia 4
#===================================================================================
POLYDAY4CAM1 <- makePolyExp(dia=4, cam=1, "2016_09_23 17_50_07 dur 60 min")
POLYDAY4CAM2 <- makePolyExp(dia=4, cam=2, "2016_09_23 17_51_27 dur 60 min")
POLYDAY4CAM3 <- makePolyExp(dia=4, cam=3, "2016_09_23 17_51_48 dur 60 min")
POLYDAY4CAM4 <- makePolyExp(dia=4, cam=4, "2016_09_23 17_50_58 dur 60 min")
#===================================================================================
# Dia 5
#===================================================================================
POLYDAY5CAM1 <- makePolyExp(dia=5, cam=1, NA)
POLYDAY5CAM2 <- makePolyExp(dia=5, cam=2, NA)
POLYDAY5CAM3 <- makePolyExp(dia=5, cam=3, NA)
POLYDAY5CAM4 <- makePolyExp(dia=5, cam=4, NA)
#===================================================================================
# Dia 6
#===================================================================================
POLYDAY6CAM1 <- makePolyExp(dia=6, cam=1, NA)
POLYDAY6CAM2 <- makePolyExp(dia=6, cam=2, NA)
POLYDAY6CAM3 <- makePolyExp(dia=6, cam=3, NA)
POLYDAY6CAM4 <- makePolyExp(dia=6, cam=4, NA)
#===================================================================================
# Dia 7
#===================================================================================
POLYDAY7CAM1 <- makePolyExp(dia=7, cam=1, NA)
POLYDAY7CAM2 <- makePolyExp(dia=7, cam=2, NA)
POLYDAY7CAM3 <- makePolyExp(dia=7, cam=3, NA)
POLYDAY7CAM4 <- makePolyExp(dia=7, cam=4, NA)
#===================================================================================
# Dia 8
#===================================================================================
POLYDAY8CAM1 <- makePolyExp(dia=8, cam=1, NA)
POLYDAY8CAM2 <- makePolyExp(dia=8, cam=2, NA)
POLYDAY8CAM3 <- makePolyExp(dia=8, cam=3, NA)
POLYDAY8CAM4 <- makePolyExp(dia=8, cam=4, NA)
#===================================================================================
# Part IV: Combine everything so it can be saved and used in the variables file
#===================================================================================
left <- c(rep("POLYDAY1CAM1",4), rep("POLYDAY1CAM2",4), rep("POLYDAY1CAM3",4), rep("POLYDAY1CAM4",4),
rep("POLYDAY2CAM1",4), rep("POLYDAY2CAM2",4), rep("POLYDAY2CAM3",4), rep("POLYDAY2CAM4",4),
rep("POLYDAY3CAM1",4), rep("POLYDAY3CAM2",4), rep("POLYDAY3CAM3",4), rep("POLYDAY3CAM4",4),
rep("POLYDAY4CAM1",4), rep("POLYDAY4CAM2",4), rep("POLYDAY4CAM3",4), rep("POLYDAY4CAM4",4),
rep("POLYDAY5CAM1",4), rep("POLYDAY5CAM2",4), rep("POLYDAY5CAM3",4), rep("POLYDAY5CAM4",4),
rep("POLYDAY6CAM1",4), rep("POLYDAY6CAM2",4), rep("POLYDAY6CAM3",4), rep("POLYDAY6CAM4",4),
rep("POLYDAY7CAM1",4), rep("POLYDAY7CAM2",4), rep("POLYDAY7CAM3",4), rep("POLYDAY7CAM4",4),
rep("POLYDAY8CAM1",4), rep("POLYDAY8CAM2",4), rep("POLYDAY8CAM3",4), rep("POLYDAY8CAM4",4))
right <- rbind(POLYDAY1CAM1, POLYDAY1CAM2, POLYDAY1CAM3, POLYDAY1CAM4,
POLYDAY2CAM1, POLYDAY2CAM2, POLYDAY2CAM3, POLYDAY2CAM4,
POLYDAY3CAM1, POLYDAY3CAM2, POLYDAY3CAM3, POLYDAY3CAM4,
POLYDAY4CAM1, POLYDAY4CAM2, POLYDAY4CAM3, POLYDAY4CAM4,
POLYDAY5CAM1, POLYDAY5CAM2, POLYDAY5CAM3, POLYDAY5CAM4,
POLYDAY6CAM1, POLYDAY6CAM2, POLYDAY6CAM3, POLYDAY6CAM4,
POLYDAY7CAM1, POLYDAY7CAM2, POLYDAY7CAM3, POLYDAY7CAM4,
POLYDAY8CAM1, POLYDAY8CAM2, POLYDAY8CAM3, POLYDAY8CAM4)
output <- cbind(left, right)
filename <- file.path("~/Desktop/Laboratory/infectChiriAnalysis/polygons", paste0("polygons.csv"))
write.table(output, filename, row.names = F)
|
1c96821ed45496992b945135ddea51a2a8b82b41
|
e2fda898fdc902a606ac89d75198fdb7791ad4d3
|
/STA380_Homework1_Aldrich_Charles/Homework1MarketSegmentation.R
|
c9e211a692d7d694dc935548fa60d9fdb45e9a86
|
[] |
no_license
|
zaldri/general
|
1bfea7191ead046f6afce7fa0f9b3bf2c52d7a21
|
e9ef7f5185e612feb945bc94342d464845c33c8f
|
refs/heads/master
| 2016-09-08T02:40:25.479446
| 2015-08-19T00:47:54
| 2015-08-19T00:47:54
| 40,131,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,293
|
r
|
Homework1MarketSegmentation.R
|
social_marketing <- read.csv("~/GitHub/STA380/data/social_marketing.csv")
# removing "chatter", "spam", and "adult"
# transposing data frame in order to have users in the columns and categories in the rows
sm2 = social_marketing
n = sm2$X
sm2T = as.data.frame(t(sm[,3:35]))
colnames(sm2T) = n
names(sm2T)
rownames(sm2T)
# normalize all scores in order to redistribute weighting based on how often an individual tweets
sm2TN = sm2T/rowSums(sm2T)
# using k-means to assign all 33 categories across 10 groups
set.seed(35)
social2T_scaled_N = scale(sm2TN, center = T, scale = T)
social2T_clust_N = kmeans(social2T_scaled_N, 10, nstart = 500)
qplot(factor(rownames(sm2TN)), data= sm2TN, geom = 'bar', fill = factor(social2T_clust_N$cluster))
# distribution of categories across clusters
table(social2T_clust_N$cluster)
# categories in each cluster
which(social2T_clust_N$cluster == 1) # young professional
which(social2T_clust_N$cluster == 2)
which(social2T_clust_N$cluster == 3)
which(social2T_clust_N$cluster == 4)
which(social2T_clust_N$cluster == 5) # stay at home mom
which(social2T_clust_N$cluster == 6)
which(social2T_clust_N$cluster == 7)
which(social2T_clust_N$cluster == 8) # fitness
which(social2T_clust_N$cluster == 9) # college student
which(social2T_clust_N$cluster == 10)
|
f5d052123bab4803c2fec53c037c6bb069f10f8f
|
3a42630716521b58a20d5a9445fd3eb1007188aa
|
/man/pointer-events-presentationAttribute.Rd
|
b9e1c16b53aede03f9aa0308bc3b9c9e896a4d41
|
[
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
mslegrand/svgR
|
2a8addde6b1348db34dee3e5145af976008bf8f0
|
e781c9c0929a0892e4bc6e23e7194fb252833e8c
|
refs/heads/master
| 2020-05-22T01:22:16.991851
| 2020-01-18T03:16:30
| 2020-01-18T03:16:30
| 28,827,655
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,649
|
rd
|
pointer-events-presentationAttribute.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_PresAttrPages.R
\name{pointer-events-presentationAttribute}
\alias{pointer-events-presentationAttribute}
\title{pointer.events}
\description{
Governs the under what conditions this graphics element can be the targetof a pointer event.
}
\section{Available Attribute Values}{
\describe{
\item{\emph{'\emph{'\emph{'\emph{'all'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided pointer is over either the interior or border of this element.}
\item{\emph{'\emph{'\emph{'\emph{'fill'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided the pointer is over the interior of this element.}
\item{\emph{'\emph{'\emph{'\emph{'inherit'}'}'}'}}{Specifies to inherit the \emph{pointer-events} property from the parent.}
\item{\emph{'\emph{'\emph{'\emph{'none'}'}'}'}}{Specifies that this element cannot be targeted for a pointer event}
\item{\emph{'\emph{'\emph{'\emph{'painted'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided the pointer is over a painted area. (Area being either interior or border).}
\item{\emph{'\emph{'\emph{'\emph{'stroke'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided and pointer is over the border.}
\item{\emph{'\emph{'\emph{'\emph{'visible'}'}'}'}}{Specifies that element can be targeted for a pointer event provided visibilty='visible' and pointer is over either the interior or border.}
\item{\emph{'\emph{'\emph{'\emph{'visibleFill'}'}'}'}}{Speciries that this element can be targeted for a pointer event provided visibilty='visible' and pointer is over a painted interior. (thus fill!='none').}
\item{\emph{'\emph{'\emph{'\emph{'visiblePainted'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided visibilty='visible' and pointer is over a painted area (area being either the interior or border).}
\item{\emph{'\emph{'\emph{'\emph{'visibleStroke'}'}'}'}}{Specifies that this element can be targeted for a pointer event provided visibilty='visible' and pointer is over a painted border. (thus stroke!='none').}
}
}
\section{Used by the Elements}{
\describe{
\item{\emph{Graphics Referencing Elements}}{\code{\link[=image]{image}}, \code{\link[=use]{use}}}
\item{\emph{Shape Elements}}{\code{\link[=circle]{circle}}, \code{\link[=ellipse]{ellipse}}, \code{\link[=line]{line}}, \code{\link[=path]{path}}, \code{\link[=polygon]{polygon}}, \code{\link[=polyline]{polyline}}, \code{\link[=rect]{rect}}}
\item{\emph{Text Content Elements}}{\code{\link[=text]{text}}}
}
}
\keyword{internal}
|
8732c6c2ab10161eb76979ffbeb8f5002abc8f6e
|
7e531655408b520612c0ee8ad717fc48c80039d6
|
/R/generate_2D_peaks.R
|
4a48aaed3d316d86389a5994a8acc53d613e54a6
|
[
"MIT"
] |
permissive
|
atfrank/nmR
|
5e64ff557d6d508b077373e72d3ea9633aed52da
|
124bf3ed44701a80a51139c9a64716f635176431
|
refs/heads/master
| 2021-01-19T05:03:32.816526
| 2019-02-13T18:19:56
| 2019-02-13T18:19:56
| 64,267,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,619
|
r
|
generate_2D_peaks.R
|
create_residue_hmqc_peaks <- function(cs, protons=c("H1'", "H2'", "H3'", "H4'", "H5'", "H5''", "H2", "H5", "H6", "H8"), carbons=c("C1'", "C2'", "C3'", "C4'", "C5'", "C5'", "C2", "C5", "C6", "C8")){
#' HMQC Peaks Generation Helper Function
#'
#' This function allows you to convert chemical shift list to chemical shift peak table
#' @param cs input chemical shift dataframe. Should contain field: model, resid, nucleus, weight, and predCS
#' @param protons vector of proton nuclei.
#' @param carbons vector of carbon nuclei.
#' @export
#' @examples
#' create_residue_hmqc_peaks(cs)
if(length(protons)!=length(carbons)){stop("list of protons and carbons should be the same")}
if(length(unique(cs$model))!=1 || length(unique(cs$resid))!=1 ){stop("this function works only for a single model and a single resid. Try using create_peaks")}
peak_H <- NULL
peak_C <- NULL
type_H <- NULL
type_C <- NULL
weight_H <- NULL
weight_C <- NULL
for (i in 1:length(protons)){
if(length(cs$predCS[cs$nucleus==protons[i]])!=0){
peak_H <- c(peak_H, cs$predCS[cs$nucleus==protons[i]])
peak_C <- c(peak_C, cs$predCS[cs$nucleus==carbons[i]])
type_H <- c(type_H, protons[i])
type_C <- c(type_C, carbons[i])
weight_H <- c(weight_H, cs$weight[cs$nucleus==protons[i]])
weight_C <- c(weight_C, cs$weight[cs$nucleus==carbons[i]])
}
}
return(data.frame(type_H=type_H, type_C=type_C, peak_H=peak_H, peak_C=peak_C, weight_H=weight_H, weight_C=weight_C))
}
create_residue_tocsy_peaks <- function(cs){
#' TOCSY Peaks Generation Helper Function
#'
#' This function allows you to convert chemical shift list to chemical shift peak table
#' @param cs input chemical shift dataframe. Should contain field: model, resid, nucleus, weight, and predCS
#' @export
#' @examples
#' create_residue_tocsy_peaks(cs)
# generate a simulated 2D TOCSY spectrum from a list of assigned peaks
nuc <- c("H1'","H2'","H3'","H4'","H5'","H2","H5","H6","H8")
nuc_name <- c("h1p","h2p","h3p","h4p","h5p","h2","h5","h6","h8")
for (i in seq_along(nuc_name)){
if(nrow(subset(cs, nucleus==nuc[i]))<1){
assign(nuc_name[i], 9999)
}
else {
assign(nuc_name[i],subset(cs,nucleus==nuc[i])$predCS[1])
}
}
p1 <- p2 <- p1_nam <- p2_nam <- NULL
# "H1'" correlations
p1 <- c(p1,h1p,h1p,h1p,h1p,h2p,h3p,h4p,h5p)
p2 <- c(p2,h2p,h3p,h4p,h5p,h1p,h1p,h1p,h1p)
p1_nam <- c(p1_nam,"H1'","H1'","H1'","H1'","H2'","H3'","H4'","H5'")
p2_nam <- c(p2_nam,"H2'","H3'","H4'","H5'","H1'","H1'","H1'","H1'")
# "H2'" correlations
p1 <- c(p1,h2p,h2p,h2p,h2p,h1p,h3p,h4p,h5p)
p2 <- c(p2,h1p,h3p,h4p,h5p,h2p,h2p,h2p,h2p)
p1_nam <- c(p1_nam,"H2'","H2'","H2'","H2'","H1'","H3'","H4'","H5'")
p2_nam <- c(p2_nam,"H1'","H3'","H4'","H5'","H2'","H2'","H2'","H2'")
# "H3'" correlations
p1 <- c(p1,h3p,h3p,h3p,h3p,h1p,h2p,h4p,h5p)
p2 <- c(p2,h1p,h2p,h4p,h5p,h3p,h3p,h3p,h3p)
p1_nam <- c(p1_nam,"H3'","H3'","H3'","H3'","H1'","H2'","H4'","H5'")
p2_nam <- c(p2_nam,"H1'","H2'","H4'","H5'","H3'","H3'","H3'","H3'")
# "H4'" correlations
p1 <- c(p1,h4p,h4p,h4p,h4p,h1p,h2p,h3p,h5p)
p2 <- c(p2,h1p,h2p,h3p,h5p,h4p,h4p,h4p,h4p)
p1_nam <- c(p1_nam,"H4'","H4'","H4'","H4'","H1'","H2'","H3'","H5'")
p2_nam <- c(p2_nam,"H1'","H2'","H3'","H5'","H4'","H4'","H4'","H4'")
# "H5'" correlations
p1 <- c(p1,h5p,h5p,h5p,h5p,h1p,h2p,h3p,h4p)
p2 <- c(p2,h1p,h2p,h3p,h4p,h5p,h5p,h5p,h5p)
p1_nam <- c(p1_nam,"H5'","H5'","H5'","H5'","H1'","H2'","H3'","H4'")
p2_nam <- c(p2_nam,"H1'","H2'","H3'","H4'","H5'","H5'","H5'","H5'")
resname <- unique(cs$resname)
if (resname =="URA" || resname == "CYT"){
p1 <- c(p1,h5,h6)
p2 <- c(p2,h6,h5)
p1_nam <- c(p1_nam,"H5","H6")
p2_nam <- c(p2_nam,"H6","H5")
}
if ( resname == "ADE" ){
p1 <- c(p1,h2,h6,h8,h6)
p2 <- c(p2,h6,h2,h6,h8)
p1_nam <- c(p1_nam,"H2","H6","H8","H6")
p2_nam <- c(p2_nam,"H6","H2","H6","H8")
}
spectrum <- data.frame(pair=paste(p1_nam,p2_nam,sep=":"),cs1=p1,cs2=p2)
spectrum_H <- subset(spectrum,cs1!=9999 & cs2!=9999)
# generate a simulated 2D TOCSY spectrum from a list of assigned peaks
nuc <- c("C1'","C2'","C3'","C4'","C5'","C2","C5","C6","C8")
nuc_name <- c("c1p","c2p","c3p","c4p","c5p","c2","c5","c6","c8")
for (i in seq_along(nuc_name)){
if(nrow(subset(cs,nucleus==nuc[i]))<1){
assign(nuc_name[i], 9999)
}
else {
assign(nuc_name[i],subset(cs,nucleus==nuc[i])$predCS[1])
}
}
p1 <- p2 <- p1_nam <- p2_nam <- NULL
# "C1'" correlations
p1 <- c(p1,c1p,c1p,c1p,c1p,c2p,c3p,c4p,c5p)
p2 <- c(p2,c2p,c3p,c4p,c5p,c1p,c1p,c1p,c1p)
p1_nam <- c(p1_nam,"C1'","C1'","C1'","C1'","C2'","C3'","C4'","C5'")
p2_nam <- c(p2_nam,"C2'","C3'","C4'","C5'","C1'","C1'","C1'","C1'")
# "C2'" correlations
p1 <- c(p1,c2p,c2p,c2p,c2p,c1p,c3p,c4p,c5p)
p2 <- c(p2,c1p,c3p,c4p,c5p,c2p,c2p,c2p,c2p)
p1_nam <- c(p1_nam,"C2'","C2'","C2'","C2'","C1'","C3'","C4'","C5'")
p2_nam <- c(p2_nam,"C1'","C3'","C4'","C5'","C2'","C2'","C2'","C2'")
# "C3'" correlations
p1 <- c(p1,c3p,c3p,c3p,c3p,c1p,c2p,c4p,c5p)
p2 <- c(p2,c1p,c2p,c4p,c5p,c3p,c3p,c3p,c3p)
p1_nam <- c(p1_nam,"C3'","C3'","C3'","C3'","C1'","C2'","C4'","C5'")
p2_nam <- c(p2_nam,"C1'","C2'","C4'","C5'","C3'","C3'","C3'","C3'")
# "C4'" correlations
p1 <- c(p1,c4p,c4p,c4p,c4p,c1p,c2p,c3p,c5p)
p2 <- c(p2,c1p,c2p,c3p,c5p,c4p,c4p,c4p,c4p)
p1_nam <- c(p1_nam,"C4'","C4'","C4'","C4'","C1'","C2'","C3'","C5'")
p2_nam <- c(p2_nam,"C1'","C2'","C3'","C5'","C4'","C4'","C4'","C4'")
# "C5'" correlations
p1 <- c(p1,c5p,c5p,c5p,c5p,c1p,c2p,c3p,c4p)
p2 <- c(p2,c1p,c2p,c3p,c4p,c5p,c5p,c5p,c5p)
p1_nam <- c(p1_nam,"C5'","C5'","C5'","C5'","C1'","C2'","C3'","C4'")
p2_nam <- c(p2_nam,"C1'","C2'","C3'","C4'","C5'","C5'","C5'","C5'")
resname <- unique(cs$resname)
if (resname =="URA" || resname == "CYT"){
p1 <- c(p1,c5,c6)
p2 <- c(p2,c6,c5)
p1_nam <- c(p1_nam,"C5","C6")
p2_nam <- c(p2_nam,"C6","C5")
}
if ( resname == "ADE" ){
p1 <- c(p1,c2,c6,c8,c6)
p2 <- c(p2,c6,c2,c6,c8)
p1_nam <- c(p1_nam,"C2","C6","C8","C6")
p2_nam <- c(p2_nam,"C6","C2","C6","C8")
}
spectrum <- data.frame(pair=paste(p1_nam,p2_nam,sep=":"),cs1=p1,cs2=p2)
spectrum_C <- subset(spectrum,cs1!=9999 & cs2!=9999)
return(rbind(spectrum_C, spectrum_H))
}
create_residue_cosy_peaks<- function(cs){
#' COSY Peaks Generation Helper Function
#'
#' This function allows you to convert chemical shift list to chemical shift peak table
#' @param cs input chemical shift dataframe. Should contain field: model, resid, nucleus, weight, and predCS
#' @export
#' @examples
#' create_residue_cosy_peaks(cs)
# generate a simulated 2D COSY spectrum from a list of assigned peaks
nuc <- c("H1'","H2'","H3'","H4'","H5'","H2","H5","H6","H8")
nuc_name <- c("h1p","h2p","h3p","h4p","h5p","h2","h5","h6","h8")
for (i in seq_along(nuc_name)){
if(nrow(subset(cs,nucleus==nuc[i]))<1){
assign(nuc_name[i], 9999)
}
else {
assign(nuc_name[i],subset(cs,nucleus==nuc[i])$predCS[1])
}
}
p1 <- p2 <- NULL
p1 <- c(h1p,h2p,h2p,h3p,h3p,h4p,h4p,h5p)
p2 <- c(h2p,h1p,h3p,h2p,h4p,h3p,h5p,h4p)
p1_nam <- c("H1'","H2'","H2'","H3'","H3'","H4'","H4'","H5'")
p2_nam <- c("H2'","H1'","H3'","H2'","H4'","H3'","H5'","H4'")
resname <- unique(cs$resname)
if (resname =="URA" || resname == "CYT"){
p1 <- c(p1,h5,h6)
p2 <- c(p2,h6,h5)
p1_nam <- c(p1_nam,"H5","H6")
p2_nam <- c(p2_nam,"H6","H5")
}
spectrum <- data.frame(pair=paste(p1_nam,p2_nam,sep=":"),cs1=p1,cs2=p2)
spectrum_H <- subset(spectrum,cs1!=9999 & cs2!=9999)
# generate a simulated 2D COSY spectrum from a list of assigned peaks
nuc <- c("C1'","C2'","C3'","C4'","C5'","C2","C5","C6","C8")
nuc_name <- c("c1p","c2p","c3p","c4p","c5p","c2","c5","c6","c8")
for (i in seq_along(nuc_name)){
if(nrow(subset(cs,nucleus==nuc[i]))<1){
assign(nuc_name[i], 9999)
}
else {
assign(nuc_name[i],subset(cs,nucleus==nuc[i])$predCS[1])
}
}
p1 <- p2 <- NULL
p1 <- c(c1p,c2p,c2p,c3p,c3p,c4p,c4p,c5p)
p2 <- c(c2p,c1p,c3p,c2p,c4p,c3p,c5p,c4p)
p1_nam <- c("C1'","C2'","C2'","C3'","C3'","C4'","C4'","C5'")
p2_nam <- c("C2'","C1'","C3'","C2'","C4'","C3'","C5'","C4'")
resname <- unique(cs$resname)
if (resname =="URA" || resname == "CYT"){
p1 <- c(p1,c5,c6)
p2 <- c(p2,c6,c5)
p1_nam <- c(p1_nam,"C5","C6")
p2_nam <- c(p2_nam,"C6","C5")
}
spectrum <- data.frame(pair=paste(p1_nam,p2_nam,sep=":"),cs1=p1,cs2=p2)
spectrum_C <- subset(spectrum,cs1!=9999 & cs2!=9999)
return(rbind(spectrum_C, spectrum_H))
}
create_peaks <- function(cs, type = "hmqc", grouping = c("model", "resid", "resname"), protons=c("H1'", "H2'", "H3'", "H4'", "H5'", "H5''", "H2", "H5", "H6", "H8"), carbons=c("C1'", "C2'", "C3'", "C4'", "C5'", "C5'", "C2", "C5", "C6", "C8")){
#' Peaks Generation Function
#'
#' This function allows you to convert chemical shift list to chemical shift peak table
#' @param cs input chemical shift dataframe. Should contain field: model, resid, nucleus, weight, and predCS
#' @param type type of 2D spectra.
#' @param grouping variables used to group data.
#' @param protons vector of proton nuclei.
#' @param carbons vector of carbon nuclei.
#' @export
#' @examples
#' create_hmqc_peaks(cs)
require(plyr)
if(type == "hmqc"){
peaks <- plyr::ddply(.data = cs, .variables = grouping, .fun = create_residue_hmqc_peaks, protons, carbons)
}
if(type == "cosy"){
peaks <- plyr::ddply(.data = cs, .variables = grouping, .fun = create_residue_cosy_peaks)
}
if(type == "tocsy"){
peaks <- plyr::ddply(.data = cs, .variables = grouping, .fun = create_residue_tocsy_peaks)
}
return(peaks)
}
compass_score <- function (Q, P) {
#' COMPASS Scoring Function
#'
#' This function allows you to compare two multi-dimensional spectra
#' See: 10.1016/j.str.2015.07.019: Experimental Protein Structure Verification by Scoring with a Single, Unassigned NMR Spectrum
#' @param Q reference spectrum (experimental)
#' @param P comparison spectrum (simulated)
#' @export
#' @examples
#' compass_score(cs)
# computes the COMPASS score between points in 2D space
stopifnot(is.numeric(P), is.numeric(Q))
if (is.vector(P))
P <- matrix(P, ncol = 1)
if (is.vector(Q))
Q <- matrix(Q, ncol = 1)
if (ncol(P) != ncol(Q))
stop("'P' and 'Q' must have the same number of columns.")
D <- pracma::distmat(Q, P)
return(list(scores=median(apply(D, 1, min)), indices=apply(D, 1, which.min)))
}
|
922f8eb8fdc13c267b6238a66c03a1c2eff4582e
|
eb758cc52aa63406a54da5ee0864129ec9fd675e
|
/r/learn/s_map.R
|
1f00c9439982b6aeffb7dcc004826380706bc726
|
[
"Apache-2.0"
] |
permissive
|
kakaba2009/MachineLearning
|
35682dc06954494f3246f7106c0e80497611993f
|
26b389f8ffb5f3af939dfc9ebfdf2c6b2fc2ae33
|
refs/heads/master
| 2021-01-11T18:44:43.701118
| 2017-09-30T13:18:36
| 2017-09-30T13:18:36
| 79,616,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
s_map.R
|
library(rEDM)
source('./mylib/mcalc.R')
source('./mylib/mtool.R')
options(max.print=5.5E5)
df <- loadSymbol('JPY=X')
#df$Close.Chg <- df$Close - shift(df$Close, n=1)
df <- df$Close
df <- tail(df, 1000)
nr <- NROW(df)
lib <- c(1, 975)
pred <- c(976, nr)
BestE <- BestDimEDM(df, lib, pred)
BestTheta <- BestThetaEDM(df, lib, pred, BestE)
smap_output <- s_map(df, lib, pred, E=BestE, theta = c(BestTheta),
stats_only=FALSE, save_smap_coefficients=TRUE)
observed <- smap_output[[1]]$model_output$obs
predicted <- smap_output[[1]]$model_output$pred
print(tail(observed, 5))
print(tail(predicted, 5))
par(mar = c(4, 4, 1, 1), pty = "s")
plot_range <- range(c(observed, predicted), na.rm = TRUE)
plot(observed, predicted, xlim = plot_range, ylim = plot_range, xlab = "Observed", ylab = "Predicted")
abline(a = 0, b = 1, lty = 2, col = "blue")
|
57e630d41cde1c87f2b2facc28e2b4c29dcdf763
|
b2e2f737bee0614571ecce3743438fd0d92f5d53
|
/man/Bayesian.Rd
|
f48e43c4bbaf4fb32b1895feb824c70f62d02ab1
|
[] |
no_license
|
mistletoe999/RandomFields
|
9bfceaf4ba855abb5f878ee57282a995d81492fd
|
e5a7a2f272b7834f96c925ced7acfa0c6456a87f
|
refs/heads/master
| 2020-03-10T14:34:03.979780
| 2017-04-17T22:09:51
| 2017-04-17T22:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,242
|
rd
|
Bayesian.Rd
|
\name{Hierarchical Modelling}
\alias{bayesian}
\alias{Bayesian}
\alias{Bayesian Modelling}
\alias{Hierarchical}
\alias{Hierarchical Modelling}
\title{Bayesian Spatial Modelling}
\description{
\pkg{RandomFields} provides Bayesian modelling to some extend:
(i) simulation of hierarchical models at arbitrary depth;
(ii) estimation of the parameteres of a hierarchical model of depth 1
by means of maximizing the likelihood.
}
\details{
A Bayesian approach can be taken for scalar, real valued model
parameters, e.g. the shape parameter \code{nu} in the
\link{RMmatern} model.
A random parameter can be passed through a distribution
of an existing family, e.g. (\code{dnorm}, \code{pnorm},
\code{qnorm}, \code{rnorm}) or self-defined.
It is passed without the leading letter
\code{d}, \code{p}, \code{q}, \code{r}, but as a function call
e.g \code{norm()}.
This function call may contain arguments that must be
named, e.g. \code{norm(mean=3, sd=5)}.
Usage:
\itemize{
\item \code{exp()} denotes the exponential distribution family
with rate 1,
\item \code{exp(3)} is just the scalar \eqn{e^3} and
\item \code{exp(rate=3)} is the exponential
distribution family with rate \eqn{3}.
}
The family can be passed in three ways:
\itemize{
\item implicitelty, e.g. \code{RMwhittle(nu=exp())} or
\item explicitely through \command{\link{RRdistr}}, e.g.
\code{RMwhittle(nu=RRdistr(exp()))}.
\item by use of \code{\link[=RR]{RRmodels}} of the package
}
The first is more convenient, the second more flexible and slightly safer.
}
\note{
\itemize{
\item
While simulating any depth of hierarchical modelling is possible,
estimation is currently restricted to one level of hierarchy.
\item
The effect of the distribution family varies between the different processes:
\itemize{
\item in Max-stable fields and
\command{\link{RPpoisson}}, a new realisation of the prior
distribution(s) is drawn for each shape function
\item in all the other cases: a realisation of the prior(s)
is only drawn once.
This effects, in particular, Gaussian fields with argument
\code{n>1}, where all the realisations are based on the same
realisation out of the prior distribution(s).
}
Note that checking the validity of the
arguments is rather limited for such complicated models, in general.
}
}
%\references{Ribeiro}
\seealso{
\link{RMmodelsAdvanced}
For hierarchical modelling see \link{RR}
}
\examples{
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
## See 'RRmodels'for hierarchical models
## the following model defines the argument nu of the Whittle-Matern
## model to be an expontential random variable with rate 5.
model <- ~ 1 + RMwhittle(scale=NA, var=NA, nu=exp(rate=5)) + RMnugget(var=NA)
\dontshow{if (!interactive()) model <- 1 + RMwhittle(scale=NA, var=NA, nu=exp(rate=5))}%ok
data(soil)
fit <- RFfit(model, x=soil$x, y=soil$y, data=soil$moisture, modus="careless")
print(fit)
\dontshow{FinalizeExample()}
}
\author{Martin Schlather, \email{schlather@math.uni-mannheim.de}
}
\keyword{spatial}
|
cd205fde004e671a55e640be07ea29e4eaf4be3f
|
2622abb38c964e4537c038e898ba43d1fb496efb
|
/cachematrix.R
|
a5e02e791634509a8ee4c75e9a628fb8b3fd8f57
|
[] |
no_license
|
nh2kimo/ProgrammingAssignment2
|
1f03e39aad3eac44341002a31ef81f25e0c9d4aa
|
0ed1f6f4a1115483d3d45ec2932238efc6b0652d
|
refs/heads/master
| 2021-01-20T18:39:35.463701
| 2014-12-10T14:37:24
| 2014-12-10T14:37:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## make a matrix first
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
### next 3 function are used for cachesolve
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x ### return original vector
setinverse <- function(solve) m <<- solve(x) ### called by cachesolve
getinverse <- function() m ### return the cached value to cachesolve
### return a list
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## make a function to retrive cache value of matrix
cacheSolve <- function(x) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data) ##if no value in cache, then inverse matrix here
x$setinverse(m)
m
}
|
cd92b1f3de8ea1541fd314d864523ff6340a0883
|
e4ebb6b4c8ae14b08b4b958b49750ba92ab5d9f1
|
/SVM.r
|
a9d3a141f0a1c9a530b74399b4bf026f3b4c64fc
|
[] |
no_license
|
windaarism/kaggle
|
804847b42f0d64bb00332c7e204af5a38522d605
|
f61258260fb02934a26782adf2bba05a8bbc0a01
|
refs/heads/master
| 2021-01-13T16:19:47.969302
| 2017-04-07T07:21:38
| 2017-04-07T07:21:38
| 79,875,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
SVM.r
|
library(e1071)
library(ggplot2)
library(caret)
pathData = 'D:/kaggle/titanic/logistic'
setwd(pathData)
Data = read.csv('data.csv')
dataa<- Data[,1:7]
#melakukan standarisasi
dataa$Age<- (dataa$Age- min(dataa$Age))/(max(dataa$Age)-min(dataa$Age))
dataa$Pclass<- (dataa$Pclass- min(dataa$Pclass))/(max(dataa$Pclass)-min(dataa$Pclass))
dataa$SibSp<- (dataa$SibSp- min(dataa$SibSp))/(max(dataa$SibSp)-min(dataa$SibSp))
dataa$Fare<- (dataa$Fare- min(dataa$Fare))/(max(dataa$Fare)-min(dataa$Fare))
#menambah variabel dummy pada data kategorik
dataa$Embarked<- class.ind(dataa$Embarked)
dataa$Sex<- class.ind(dataa$Sex)
dataclean<- as.matrix(dataa)
#definisikan data training dan testing
training_data <- dataclean[1:889,]
testing_data <- dataclean[890:nrow(dataclean),]
output_testing <- data.frame(dataclean[890:nrow(dataclean),1])
#tuning gamma and cost
svm_tune <- tune(svm, Survived~.,data=training_data,kernel="radial", ranges=list(cost=10^(-1:2), gamma=c(.5,1,2)))
print(svm_tune)
#model svm dengan cost dan gamma hasil tuning
model <- svm(Survived~.,data=training_data, kernel="radial", cost=1,gamma=1)
predicted <- predict(model,testing_data,type='raw')
predicted<- round(predicted)
predic<- data.frame(predicted)
cek<- cbind(predic,output_testing)
confusionMatrix(cek$predicted,cek$dataclean.890.nrow.dataclean...1.)
#akurasi 78.4 %
|
2a18a0687d8a53180939250e5827bdaeaba0a6a2
|
5434a6fc0d011064b575b321e93a3519db5e786a
|
/man/getCurrentSandboxName.Rd
|
398566e6c8b353abd2585f1ee11ae5ab4f90d39a
|
[
"MIT"
] |
permissive
|
cytoscape/RCy3
|
4813de06aacbaa9a3f0269c0ab8824a6e276bad9
|
18d5fac035e1f0701e870150c55231c75309bdb7
|
refs/heads/devel
| 2023-09-01T18:23:28.246389
| 2023-08-23T07:57:19
| 2023-08-23T07:57:19
| 118,533,442
| 47
| 22
|
MIT
| 2023-04-03T17:52:34
| 2018-01-23T00:21:43
|
R
|
UTF-8
|
R
| false
| true
| 346
|
rd
|
getCurrentSandboxName.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-sandbox.R
\name{getCurrentSandboxName}
\alias{getCurrentSandboxName}
\title{getCurrentSandboxName}
\usage{
getCurrentSandboxName()
}
\value{
current sandbox name
}
\description{
Return the current sandbox name.
}
\examples{
\donttest{
getCurrentSandboxName()
}
}
|
7fbf0193b63dfb614cd7f385d09eaf85dc1eb8c4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RevEcoR/vignettes/RevEcoR.R
|
5d2552dc18d26b182a0326e7bb18a76435e60420
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,529
|
r
|
RevEcoR.R
|
## ----setup, include=FALSE------------------------------------------------
library(knitr)
library(RevEcoR)
opts_chunk$set(fig.width=8, fig.height=5)
set.seed(60823316)
## ----eval=FALSE----------------------------------------------------------
# install.packages("RevEcoR")
## ----eval=FALSE----------------------------------------------------------
# if (!require(devtools)
# install.packages("devtools")
# devtools::install_github("yiluheihei/RevEcoR")
## ----eval=TRUE-----------------------------------------------------------
library(RevEcoR)
## ----eval=FALSE----------------------------------------------------------
# ## download sample metabolic data from remote KEGG database
# buc <- getOrgMetabolicData("buc")
# data(kegg_buc)
# head(buc)
## ----eval=TRUE, htmlcap="Figure 1 Reconstruction metabolic network of *Buchnera aphidicola APS*", fig.lp="Figure 1", fig.width=8, fig.height=8----
## species in KEGG
buc.net <- reconstructGsMN(kegg_buc, RefData = NULL)
igraph::print.igraph(buc.net)
igraph::plot.igraph(buc.net, vertex.label=NA, vertex.size=5, edge.arrow.size=0.1)
## ko annotation profile species detected in a human microbiome in IMG (not in KEGG)
annodir <- system.file("extdata/koanno.tab",package = "RevEcoR")
metabolic.data <- read.delim(annodir,stringsAsFactors=FALSE)
##load the reference metabolic data
data(RefDbcache)
g2 <- reconstructGsMN(metabolic.data, RefData = RefDbcache)
## ----eval=TRUE, htmlcap="Figure 2The node colored with red represents the species' seed set",fig.lp="Figure 2", fig.width=8, fig.height=8----
## seed set prediction
seed.set <- getSeedSets(buc.net, 0.2)
show(seed.set)
head(seed.set@seeds)
## The node colored with red represents the species' seed set
nodes <- igraph::V(buc.net)$name
seeds <- unlist(seed.set@seeds)
seed.index <- match(seeds,nodes)
node.color <- rep("SkyBlue2",length(nodes))
node.color[seed.index] <- "red"
igraph::plot.igraph(buc.net,
vertex.label=NA, vertex.size=5, edge.arrow.size=0.1,
vertex.color = node.color)
## ------------------------------------------------------------------------
# ptr metabolic network
data(kegg_ptr)
##ptr.net <- reconstructGsMN(getOrgMetabolicData("ptr"))
ptr.net <- reconstructGsMN(kegg_ptr)
# cooperation analysis between buc and ptr
cooperation.index <- calculateCooperationIndex(buc.net,ptr.net)
cooperation.index
## ---- eval = FALSE, echo=TRUE--------------------------------------------
# ##metabolic network reconstruction of these seven species
# net <- lapply(anno.species, reconstructGsMN)
## ---- eval=FALSE, echo=TRUE----------------------------------------------
# ## caculate interactions among vious species
# interactions <- calculateCooperationIndex(net, p = TRUE)
# ## competition index
# $competition.index
# Aa Ao Fn Pg Sg So Va
# Aa 1.0000000 0.4736842 0.3157895 0.2280702 0.4210526 0.4385965 0.2456140
# Ao 0.4736842 1.0000000 0.3684211 0.3333333 0.4736842 0.4736842 0.2456140
# Fn 0.5000000 0.5833333 1.0000000 0.4166667 0.5833333 0.5555556 0.4166667
# Pg 0.4193548 0.6129032 0.4838710 1.0000000 0.6129032 0.5161290 0.3870968
# Sg 0.5454545 0.6136364 0.4772727 0.4318182 1.0000000 0.9090909 0.3863636
# So 0.5813953 0.6046512 0.4651163 0.3720930 0.9302326 1.0000000 0.3953488
# Va 0.4827586 0.4827586 0.5172414 0.4137931 0.5862069 0.5862069 1.0000000
# ## p value of competition index
# $competition.index.p
# Aa Ao Fn Pg Sg So Va
# Aa 0.000 0.001 0.001 0.001 0.001 0.001 0.001
# Ao 0.001 0.000 0.001 0.001 0.001 0.001 0.001
# Fn 0.001 0.001 0.000 0.001 0.001 0.001 0.001
# Pg 0.001 0.001 0.001 0.000 0.001 0.001 0.001
# Sg 0.001 0.001 0.001 0.001 0.000 0.001 0.001
# So 0.001 0.001 0.001 0.001 0.001 0.000 0.001
# Va 0.001 0.001 0.001 0.001 0.001 0.001 0.000
# ## complementarity index
# $complementarity.index
# Aa Ao Fn Pg Sg So Va
# Aa 0.0000000 0.1052632 0.1228070 0.07017544 0.0877193 0.08771930 0.1228070
# Ao 0.1403509 0.0000000 0.1403509 0.07017544 0.1228070 0.12280702 0.1403509
# Fn 0.1944444 0.1666667 0.0000000 0.16666667 0.1111111 0.11111111 0.1388889
# Pg 0.2258065 0.2258065 0.1612903 0.00000000 0.1612903 0.19354839 0.2258065
# Sg 0.2272727 0.1818182 0.1590909 0.09090909 0.0000000 0.04545455 0.1590909
# So 0.1860465 0.1395349 0.1860465 0.09302326 0.0000000 0.00000000 0.1395349
# Va 0.2068966 0.1724138 0.1379310 0.17241379 0.1379310 0.13793103 0.0000000
# ## p value of complementarity index
# $complementarity.index.p
# Aa Ao Fn Pg Sg So Va
# Aa 0.000 0.001 0.001 0.001 0.001 0.001 0.001
# Ao 0.001 0.000 0.001 0.001 0.001 0.001 0.001
# Fn 0.001 0.001 0.000 0.001 0.001 0.001 0.001
# Pg 0.001 0.001 0.001 0.000 0.001 0.001 0.001
# Sg 0.001 0.001 0.001 0.001 0.000 0.001 0.001
# So 0.001 0.001 0.001 0.001 0.001 0.000 0.001
# Va 0.001 0.001 0.001 0.001 0.001 0.001 0.000
## ------------------------------------------------------------------------
data(gut_microbiome)
## summary(gut_microbiome)
## ---- eval = FALSE, echo = TRUE------------------------------------------
# gut.nets <- lapply(gut_microbiome,reconstructGsMN)
# seed.sets <- lapply(gut.nets,getSeedSets)
# ## Since calculation is on large scale, species interactions prediction may take several hours
# gut.interactions <- calculateCooperationIndex(gut.nets)
# competition.index <- gut.interactions$competition.index
# complementarity.index <- gut.interactions$complementarity.index
## ---- eval = TRUE, echo = TRUE-------------------------------------------
occurrence.score <- read.delim(system.file("extdata/occurrence.tab",
package = "RevEcoR"),stringsAsFactors = FALSE, quote = "")
## ---- eval=FALSE,echo=TRUE-----------------------------------------------
# competition.index <- (competition.index + t(competition.index))/2
# complementarity.index <- (complementarity.index + t(complementarity.index))/2
## ---- eval=FALSE,echo=TRUE-----------------------------------------------
# ## upper triangles, which is used to calculate the correlation
# competition.upper <- competition.index[upper.tri(competition.index)]
# occurrence.upper <- occurrence.score[upper.tri(occurrence.score)]
# complementarity.upper <- complementarity.index[upper.tri(complementarity.index)]
#
# ## calculate the spearman correlation betwwen co-occurrence scores and two
# ## interactions indices
# competition.cor <- cor(competition.upper,occurrence.upper,method="spearman")
# complementarity.cor <- cor(complementarity.upper,occurrence.upper,method="spearman")
#
# ## permutation-based mantel test. Random permutation the co-occurance score
# ## 10000 times, P value is the fraction of correlations as high as or higher
# ## than the original
# if (require(magrittr)){
# null.stat <- replicate(10000,
# sample(1:116) %>% occurrence.score[.,.] %>%
# .[upper.tri(.)]
# )
# competition.null <- cor(competition.upper,null.stat)
# complementarity.null <- cor(complementarity.upper,null.stat)
# length(which(competition.null >= competition.cor)) ## 0 p.competition < 0.00001
# length(which(complementarity.null <= complementarity.cor)) ## 0 p.complementarity< 0.00001
# }
## ---- eval=TRUE----------------------------------------------------------
sessionInfo()
|
f1a4c2b05f59af8f856e5d0d0b755874f08291bd
|
ba03e694880c7567b1f74c381b6dd16e1f2efa21
|
/script1.R
|
c922ee9989bce8b58a0761f6ad5ff06906b50bac
|
[] |
no_license
|
stochasticTreat/R_data_vis_lecture
|
96941093f0ffdd70d5ff1dd5618ef40b4a1c8396
|
df63470917976aff9cb9c427d6d8d40227781aa7
|
refs/heads/master
| 2020-05-19T08:51:11.010165
| 2014-11-22T22:10:47
| 2014-11-22T22:10:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,726
|
r
|
script1.R
|
# Simple assignment operators
# These three operations do the same thing:
x = "a"
x <- "a"
"a" -> x
c
#vectors:
# - one or more elements
# - one data type
v0 = 1:4
v0
#everything is a vector
#weak typed language, can check class type with:
class(x)
class(v0)
is.vector(x)
is.vector(v0)
#getting the length
length(v0)
v1 = c(1,2,3,8,4) #c() is the concatenation function, makes a vector
v1[3:5]
v1[c(1,3,4)]
v1[c(T,F,F,F,T)]
#matrix examples
m1 = matrix( data=1:12, nrow=3 )
m2 = cbind( 1:10, (1:10)^2 )
class(m2)
#finding the dimensions:
dim(m2)
#naming dimensions:
colnames(m2)<-c("James","Ted")
#gotchya:
test1 = m2[,"James"] #The output from this is a vector. To keep the matrix data type, use drop=FALSE
#error
test1[1:2,"James"]
#the type changed:
print(test1)
is.vector(test1)
#the correct way
test2 = m2[,"James",drop=FALSE]
print(test2)
test2[1:2,"James"]
#basic plotting
plot(x=1:10,y=10:1)
plot(m2)
#if the columns were not named:
colnames(m2)<-NULL
plot(m2)
#name the dimensions
plot(m2, xlab="j numbers", ylab="t numbers", main="Plot 2")
sdat = read.table("./injuriesVsWrithingTimeVsRegion.txt", header=T, sep="\t") #see also read.csv()
#ggplot example:
install.packages('ggplot2')
library('ggplot2')
ggplot(data=sdat, aes(x=Injuries, y=Time))+
geom_point()
#map data to visual elements
ggplot(data=sdat, aes(x=Injuries, y=Time, color=Region))+
geom_point()
sdat$Region <- factor(sdat$Region, levels=c("Australia", "Middle.East","Africa", "Asia",
"C.America","N.America","S.America", "Europe"))
ggplot(data=sdat, aes(x=Injuries, y=Time, label=Team))+
stat_smooth()+
geom_point()+
geom_text()+
ggtitle("Number of injuries vs time spent writhing on the ground")
|
e0de8e4721b97637cb2497fa58c414927d5fc531
|
20fb140c414c9d20b12643f074f336f6d22d1432
|
/man/NISTkilowattHourTOmegajoule.Rd
|
8a6d7947471cde9809acaf7ed4ad419b0e472322
|
[] |
no_license
|
cran/NISTunits
|
cb9dda97bafb8a1a6a198f41016eb36a30dda046
|
4a4f4fa5b39546f5af5dd123c09377d3053d27cf
|
refs/heads/master
| 2021-03-13T00:01:12.221467
| 2016-08-11T13:47:23
| 2016-08-11T13:47:23
| 27,615,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 829
|
rd
|
NISTkilowattHourTOmegajoule.Rd
|
\name{NISTkilowattHourTOmegajoule}
\alias{NISTkilowattHourTOmegajoule}
\title{Convert kilowatt hour to megajoule }
\usage{NISTkilowattHourTOmegajoule(kilowattHour)}
\description{\code{NISTkilowattHourTOmegajoule} converts from kilowatt hour (kW * h) to megajoule (MJ) }
\arguments{
\item{kilowattHour}{kilowatt hour (kW * h) }
}
\value{megajoule (MJ) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTkilowattHourTOmegajoule(10)
}
\keyword{programming}
|
9b7859628cf85eaefd82455601241f58621fded6
|
06509fe0b8452dcef48fc75d989e1cb7aecb4bc0
|
/data/tutorial/R/generate_more_phenotypes.R
|
34a6fe62a9033a69c4a06d4cadd98b7f8f7d0ba5
|
[
"Apache-2.0"
] |
permissive
|
ibm-bioinformatics/BlueSNP
|
f43e338ada67d3b620ff13fb0d660625d795c802
|
16d4c79dc5a1c8742823685c14072e954c0583ce
|
refs/heads/master
| 2016-09-06T11:48:53.991714
| 2014-04-14T17:46:01
| 2014-04-14T17:46:01
| 5,087,261
| 3
| 2
| null | 2021-12-09T01:08:10
| 2012-07-17T19:58:40
|
R
|
UTF-8
|
R
| false
| false
| 781
|
r
|
generate_more_phenotypes.R
|
# helper function to generate more (fake) phenotypes
generate.more.phenotypes <- function(input, output, N=10) {
if (!file.exists("./tmp")) {
dir.create("./tmp")
} else {
system("rm -r ./tmp")
dir.create("./tmp")
}
rhinit.singleton()
rhget(input, "./tmp")
infile = paste("./tmp", stripPath(input), sep="/")
load(infile)
A = matrix(NA, nrow=nrow(Y), ncol=N)
colnames(A) = paste("pheno", 1:ncol(A), sep="")
rownames(A) = rownames(Y)
for (i in seq(1,ncol(A),2)) { # odd ones are copies of the original
A[,i] = Y[,1]
}
for (i in seq(2,ncol(A),2)) { # even ones are random
A[,i] = sample(Y[,1])
}
Y = A
tmpfile = paste("./tmp", stripPath(output), sep="/")
save(Y, file=tmpfile)
rhput(tmpfile, output)
}
|
356a358a3719e6e118871672be917de2c824c09d
|
0013279b19c620f5ca367b767cc1d0e15b59b480
|
/R/sampling.R
|
fa5eaf4da426022f6cea5605707762be3a709d3a
|
[
"MIT"
] |
permissive
|
jolars/euclidr
|
7ef6d11a986ede6d5e06d8b2ca2616f373ef0cf5
|
df7358c030eb6aee5cec2ab236810a7fd0fde478
|
refs/heads/master
| 2021-01-12T13:43:03.533644
| 2016-11-05T14:34:57
| 2016-11-05T14:34:57
| 72,221,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
sampling.R
|
#' Find the farthest points
#'
#' Select a subset of points that maximize the smallest pairwise distance
#' between the points.
#'
#' This function begins with a random sample of n points and iterately puts them
#' back into the mother set and tries to see if there is a better candidate than
#' the point that was put back.
#'
#' @param data A matrix of points to choose n from, either in two or three
#' dimensions.
#' @param n The number of points to select.
#' @return A vector of indices for the points in the subset.
#' @seealso \code{\link[stats]{dist}}
#' @examples
#' xy <- matrix(runif(200), ncol = 2)
#' id <- farthest_points(data = xy, n = 5)
#' xy[id, ]
#'
#' plot(xy)
#' points(xy[id, ], pch = 16)
#'
#' @export
#' @import assertthat
farthest_points <- function(data, n) {
assert_that(
is.numeric(data),
ncol(data) == 2 | ncol(data) == 3,
nrow(data) >= n,
is.count(n)
)
dmat <- as.matrix(stats::dist(data))
r <- sample.int(nrow(dmat), n)
repeat {
r_old <- r
for (i in 1:n) {
mm <- dmat[r[-i], -r[-i], drop = FALSE]
k <- which.max(mm[(1:ncol(mm) - 1) * nrow(mm) + max.col(t(-mm))])
r[i] <- as.numeric(dimnames(mm)[[2]][k])
}
if (identical(r_old, r)) return(r)
}
}
|
b7ddcc454cc2cd1ec7a77c4dfb1d0142b4afc287
|
8450cd8d46322e46964eaeabb27e299d5d417ca6
|
/inst/R_old/newPlot2.R
|
631fa1fcdae7c75b3ac83cac6d4b17d570cc972c
|
[] |
no_license
|
WillFox/TBsim
|
0251b444b8247796ed11d56283344b88e4329099
|
d304c5957dd1199e2ad08ba00fe054b8c6e30366
|
refs/heads/master
| 2020-07-07T04:07:23.609484
| 2018-07-05T21:52:44
| 2018-07-05T21:52:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
r
|
newPlot2.R
|
########################################################
# General purpose plot function for TB data
# updated to use ggplot package
# Dec 23, 2012 by John Fors
########################################################
newPlot2 <- function(yin, names, logG, timePeriods, mainTitle, subTitle, ytext)
{
# Prepare data
timeSelect <- drugStart:(drugStart+360)
yint <- t(yin[drugStart:(drugStart+360)])
yset <- data.frame(timeSelect, yint)
colnames(yset) <- names
dfm <- melt(yset, id='time')
dfm <- dfm[seq(1, nrow(dfm), 10), ]
plot.main <- mainTitle
plot.sub <- subTitle
labx <- c(1, 60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840, 900, 960, 1020)
namesx <- c(-180, -120, -60, 0, 60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840)
laby <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
namesy <- c(10, 20, 30, 40, 50, 60, 70, 80, 90, 100)
# Generate plot
#dev.new()
if(logG==1) {
pl <- ggplot(data = dfm, aes(x = time, y = value, color = variable, group=variable)) +
geom_line(size=1) +
#stat_smooth(span=0.1, se=FALSE, size=1, method="loess") +
#scale_y_log10() +
scale_x_continuous(breaks = labx, labels = namesx)
}
pl + xlab("Time (Days since first drug start)") +
ylab(ytext) +
theme(axis.title = element_text(size=10)) +
theme(legend.position="none") +
geom_vline(xintercept = drugStart, colour = "darkgreen", linetype = "dotted") +
expand_limits(y=0)
#ggtitle(bquote(atop(.(plot.main), atop(italic(.(plot.sub)), ""))))
}
|
8d1fe00bd84f8aa674a6ff5b3f59a2a8baab941f
|
685eae3ccd6b3d81e04287cdf8f83d61256f41fd
|
/R/baseSpe.plot.R
|
e5eb1df6d9d893411af8553a4fa06796163ca117
|
[] |
no_license
|
mxdeluca/KataegisPortal
|
23304fc212259ca7b1997fb3ba0af6ec1a07ab98
|
8645c38e35b19dbeb1da9755407c2ae686af98da
|
refs/heads/master
| 2022-11-17T16:18:29.282933
| 2020-07-15T07:05:17
| 2020-07-15T07:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,604
|
r
|
baseSpe.plot.R
|
baseSpe.plot <- function(plot.data, sample = "sample", chr = NULL,
arm = NULL, color = NULL, k = NULL) {
build = plot.data$build[1]
genome.opts = c("hg19", "hg18", "hg38")
if (!build %in% genome.opts) {
stop("Available reference builds: hg18, hg19, hg38")
}
if (build == "hg19") {
chr.arm = c(1.25e+08, 93300000, 9.1e+07, 50400000, 48400000,
6.1e+07, 59900000, 45600000, 4.9e+07, 40200000, 53700000,
35800000, 17900000, 17600000, 1.9e+07, 36600000, 2.4e+07,
17200000, 26500000, 27500000, 13200000, 14700000, 60600000,
12500000)
} else if (build == "hg18") {
chr.arm = c(124300000, 93300000, 91700000, 50700000, 47700000,
60500000, 59100000, 45200000, 51800000, 40300000, 52900000,
35400000, 1.6e+07, 15600000, 1.7e+07, 38200000, 22200000,
16100000, 28500000, 27100000, 12300000, 11800000, 59500000,
11300000)
} else if (build == "hg38") {
chr.arm = c(123400000, 93900000, 90900000, 5e+07, 48800000,
59800000, 60100000, 45200000, 4.3e+07, 39800000, 53400000,
35500000, 17700000, 17200000, 1.9e+07, 36800000, 25100000,
18500000, 26200000, 28100000, 1.2e+07, 1.5e+07, 6.1e+07,
10400000)
} else {
stop("Available reference builds: hg18, hg19, hg38")
}
if (is.null(color)) {
col = c("darkgreen", "darkblue", "grey", "darkred")
} else {
col = color
}
names(col) = c("A", "C", "G", "T")
plot.data = plot.data[which(plot.data$ref %in% c("C", "G")), ]
if (is.null(chr)) {
seq = c(1:24)
seq0 = "whole genome"
} else {
plot.data$pos.updated = plot.data$pos
seq0 = gsub(pattern = "chr", replacement = "", x = chr, fixed = TRUE)
seq = gsub(pattern = "X", replacement = "23", x = seq0, fixed = TRUE)
seq = gsub(pattern = "Y", replacement = "24", x = seq, fixed = TRUE)
seq = as.numeric(seq)
plot.data = plot.data[which(plot.data$seq %in% seq), ]
if (!is.null(arm)) {
if (arm == "p") {
plot.data = plot.data[which(plot.data$pos <= chr.arm[seq]),
]
} else {
plot.data = plot.data[which(plot.data$pos > chr.arm[seq]),
]
}
}
}
if (is.null(k)) {
k = (nchar(as.character(plot.data$context[1])) - 1)/2
} else {
build = plot.data$build[1]
genome.opts = c("hg19", "hg18", "hg38")
if (!build %in% genome.opts) {
stop("Available reference builds: hg18, hg19, hg38")
}
if (build == "hg19") {
bsg = BSgenome.Hsapiens.UCSC.hg19
} else if (build == "hg18") {
bsg = BSgenome.Hsapiens.UCSC.hg18
} else if (build == "hg38") {
bsg = BSgenome.Hsapiens.UCSC.hg38
} else {
stop("Available reference builds: hg18, hg19, hg38")
}
conv.start = plot.data$pos - k
conv.end = plot.data$pos + k
context = getSeq(bsg, plot.data$chr, start = conv.start, end = conv.end)
if (TRUE) {
idx = DNAStringSet(plot.data$ref) %in% c("A", "G")
context[idx] = reverseComplement(context[idx])
}
plot.data$context = context
}
base <- rbind(data.frame(strsplit(as.character(plot.data$context),
"")))
n = 2 * k + 1
nBase <- matrix(0, nrow = 4, ncol = n, dimnames = list(c("A", "C",
"G", "T"), c(-k:k)))
for (i in 1:n) {
nBase[1, i] = length(base[i, ][base[i, ] == "A"])
nBase[2, i] = length(base[i, ][base[i, ] == "C"])
nBase[3, i] = length(base[i, ][base[i, ] == "G"])
nBase[4, i] = length(base[i, ][base[i, ] == "T"])
}
par(mai = c(1, 1, 1, 1.5))
if (seq0 == "whole genome") {
barplot(nBase, col = col, space = 0, yaxt = "n", main = paste("C>X mutations in",
sample, "on whole genome", sep = " "), xlab = "Flanking bases",
ylab = "Number of bases")
} else {
barplot(nBase, col = col, space = 0, yaxt = "n", main = paste("C>X mutations in",
sample, "on chr", seq0, arm, sep = " "), xlab = "Flanking bases",
ylab = "Number of bases")
}
box()
axis(2, las = 1, lwd.tick = 0.5, mgp = c(2, 1, 0))
legend("right", names(col), col = col, pch = 15, inset = c(-0.12,
0), bty = "n", xpd = TRUE)
}
|
39ab6703cdc65312529e964e25e7de2567c77fa5
|
17194ebc19e62d72987f1dcdc9d9824200b60d34
|
/R/inspecters.R
|
4c540f1016136ce8329ea084e53aa1d261dea826
|
[] |
no_license
|
sherrisherry/cleandata
|
34f2e5dffeee8c59cc456abbb74f14c1828f19f4
|
191ecc80a658868e9f2caee80a3b06802983ecdf
|
refs/heads/master
| 2021-08-08T21:53:03.706033
| 2018-12-02T03:39:48
| 2018-12-02T03:39:48
| 141,081,630
| 3
| 0
| null | 2018-07-27T23:23:39
| 2018-07-16T03:23:17
|
HTML
|
UTF-8
|
R
| false
| false
| 2,519
|
r
|
inspecters.R
|
inspect_map<-function(x,common=0,message=TRUE){
factor_cols<-list()
factor_levels<-list()
char_cols<-c()
ordered_cols<-c()
num_cols<-c()
other_cols<-c()
for(i in colnames(x)){
classifier <- paste(class(x[,i]), collapse = '.')
switch(classifier,
factor={
l<-levels(x[,i])
if(!length(factor_cols)){
factor_levels[[i]]<-l
factor_cols[[i]]<-i
}
else{
for(j in 1:length(factor_levels)){
if(ifelse(common,sum(l %in% factor_levels[[j]])>common,all(l==factor_levels[[j]]))){
factor_cols[[j]]<-append(factor_cols[[j]],i)
if(common)factor_levels[[j]]<-union(l,factor_levels[[j]])
j<--5
break}
}
if(j!=-5){
factor_cols[[i]]<-i
factor_levels[[i]]<-l
}
}},
character={char_cols<-append(char_cols,i)},
ordered.factor={ordered_cols<-append(ordered_cols,i)},
{if(mode(x[,i])=='numeric')num_cols<-append(num_cols,i)
else other_cols<-append(other_cols,i)})
if(message)cat(paste(i,classifier,'factors:',length(unlist(factor_cols)),'nums:',length(num_cols),'chars:',length(char_cols),'ordered:',length(ordered_cols),'others:',length(other_cols),"\n",sep = ' '))
}
return(list(factor_cols=factor_cols,factor_levels=factor_levels,num_cols=num_cols,char_cols=char_cols,ordered_cols=ordered_cols,other_cols=other_cols))
}
inspect_na<-function(x, top=ncol(x)){
a<-sort(apply(is.na(x),2,sum),decreasing = TRUE)
return(a[1:top])
}
inspect_smap <- function(x, message = TRUE){
factor_cols<-c()
char_cols<-c()
ordered_cols<-c()
num_cols<-c()
other_cols<-c()
for(i in colnames(x)){
classifier <- paste(class(x[,i]), collapse = '.')
switch(classifier,
factor={factor_cols<-append(factor_cols,i)},
character={char_cols<-append(char_cols,i)},
ordered.factor={ordered_cols<-append(ordered_cols,i)},
{if(mode(x[,i])=='numeric')num_cols<-append(num_cols,i)
else other_cols<-append(other_cols,i)})
if(message)cat(paste(i,classifier,'factors:',length(factor_cols),'nums:',length(num_cols),'chars:',length(char_cols),'ordered:',length(ordered_cols),'others:',length(other_cols),"\n",sep = ' '))
}
return(list(factor_cols=factor_cols,num_cols=num_cols,char_cols=char_cols,ordered_cols=ordered_cols,other_cols=other_cols))
}
|
9d1e5966e5e0cec8e459da8eb30b229a26ecac71
|
2491ce3e1bd5762df5d129b7f4826c66723780df
|
/R/fun.gld.slope.fixed.int.vary.R
|
b526aa991498d25923d6ae45f8805c35a4c53170
|
[] |
no_license
|
cran/GLDreg
|
60e3cf2d6890604d98aad9cd09080bdc7758cb25
|
4d6ad22ceada267cf3a46ef8b4cab5b9006ae022
|
refs/heads/master
| 2022-06-04T22:22:08.379317
| 2022-05-13T06:30:09
| 2022-05-13T06:30:09
| 26,482,634
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,048
|
r
|
fun.gld.slope.fixed.int.vary.R
|
fun.gld.slope.fixed.int.vary <-
function(q,fit,fit.simu,fun,param,maxit=20000,method="Brent"){
# Use a default lower and upper limit
lubound<-quantile(subset(fit.simu,select=c("(Intercept)")),c(0,1))
lbound<-min(lubound)
ubound<-max(lubound)
# First find empirical solution
x<-fit$x
y<-fit$y
k1<-apply(fit.simu,2,function(x,q) quantile(x,q),q)[1]
fit1<-fit[[3]][-c((length(fit[[3]])-3):length(fit[[3]]))]
r1<-optim(k1,function(k1,x,y,fit1,q){resid<-y-data.matrix(x)%*%c(k1,fit1[-1])
return((sum(resid<=0)/length(resid)-q)^2)
},x=x,y=y,fit1=fit1,q=q,control=list(maxit=maxit),method=method,lower=lbound,upper=ubound)
# Then find parametric solution
k2<-r1$par
r2<-optim(k2,function(k2,x,y,fit1,q){gld.fit<-fun(y-data.matrix(x)%*%c(k2,fit1[-1]))
return((pgl(0,gld.fit,param=param)-q)^2)
},x=x,y=y,fit1=fit1,q=q,control=list(maxit=maxit),method=method,lower=lbound,upper=ubound)
r.val<- setNames(c(r2$par, fit1[-1], r2$value, r2$convergence),c(names(fit1),"Objective Value","Convergence"))
return(list(r2,r.val))
}
|
e6993b2aacdec4c0d61445cc87f49f5ab545b246
|
2dc3ebb7462aab578ab71bbddb3b0bf9c00417a5
|
/(Coursera) Reproducible research.R
|
e1a194af543a0e411fd31d2f9f079152872756b8
|
[] |
no_license
|
ankristov/R
|
36451b6c305b448bbadba3329a5df72a3d46a715
|
c2445105837f2c5ada595ae8c3ee9dc491819384
|
refs/heads/master
| 2023-07-09T22:38:32.030107
| 2023-06-25T22:04:54
| 2023-06-25T22:04:54
| 196,532,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
r
|
(Coursera) Reproducible research.R
|
# Sweave: main web site http://www.statistik.lmu.de/~leisch/Sweave
# knitr: main web site http://yihui.name/knitr/
# http://search.r-project.org/library/kernlab/html/spam.html
install.packages("kernlab")
library(kernlab)
data("spam")
head(spam)
str(spam[,1:5])
# Perform the subsetting
set.seed(3435)
trainIndicator = rbinom(4601, size = 1, prob = 0.5)
trainIndicator[1:20]
table(trainIndicator)
trainSpam <- spam[trainIndicator == 1,]
testSpam <- spam[trainIndicator == 0,]
names(trainSpam)
table(trainSpam$type)
plot(trainSpam$capitalAve ~ trainSpam$type)
plot(log10(trainSpam$capitalAve + 1) ~ trainSpam$type)
plot(log10(trainSpam[,1:4] + 1))
hClust = hclust(dist(t(trainSpam[,1:57])))
plot(hClust)
hClustUpdated = hclust(dist(t(log10(trainSpam[,1:20] +1))))
plot(hClustUpdated)
# statistical prediction and modeling
trainSpam$numType <- as.numeric(trainSpam$type) - 1
costFunction <- function(x,y) {sum(x != (y > 0.5))}
cvError <- rep(NA,55)
library(boot)
for (i in 1:55) {
lmFormula <- reformulate(names(trainSpam)[i], response = "numType")
glmFit <- glm(lmFormula, family = "binomial", data = trainSpam)
cvError[i] <- cv.glm(trainSpam, glmFit, costFunction, 2)$delta[2]
}
names(trainSpam)[which.min(cvError)]
# Use the best model from the group
predictionModel <- glm(numType ~ charDollar, family = "binomial", data = trainSpam)
# Get prediction on the thest set
predictionTest <- predict(predictionModel, testSpam)
predictedSpam <- rep("nonspam", dim(testSpam)[1])
# Classify as spam those with prob > 0.5
predictedSpam[predictionModel$fitted > 0.5] <- "spam"
# Classification table
table(predictedSpam, testSpam$type)
# Error rate
(61 + 458) / (1346 + 458 + 61 + 449)
|
4a9596554c4948c069fd80442e48c5de8b42aa6f
|
cc178630c66d061a6bc754d96b77aa5600eb72b7
|
/man/write_styles.Rd
|
aa430806048c398c470411af3f6e2289089a1ad0
|
[
"MIT"
] |
permissive
|
niszet/stylex
|
2073716d22a07479c741e316826e4a1e20d00b89
|
54c04abe95eeba103e41ac783e12e5144c4cf725
|
refs/heads/master
| 2022-11-28T18:26:55.797390
| 2020-08-10T03:37:23
| 2020-08-10T03:37:23
| 282,142,004
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 677
|
rd
|
write_styles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{write_styles}
\alias{write_styles}
\title{Write docx file with original reference.docx and updated style xml}
\usage{
write_styles(styles_xml, ref_file, new_file)
}
\arguments{
\item{styles_xml}{\code{xml_node} object with \code{styles}}
\item{ref_file}{reference docx file name as a \code{character}. Use files except \code{word/styles.xml}.}
\item{new_file}{new docx file name as a \code{character}.}
}
\value{
None
}
\description{
Write docx file with original reference.docx and updated style xml
}
\examples{
\dontrun{
write_styles(xml, "reference.docx", "updated.docx")
}
}
|
bd8499e19b1b677e8e113970e08ea6673667acb7
|
58bbae05372d92b197078e2dc457a3bca7f21401
|
/R/RcppExports.R
|
48725f2c17bb70c0f503dbc609660ddcb4c9c8c0
|
[
"MIT"
] |
permissive
|
jonatanrg/fglm_intern
|
f964cfb7f0913af4af92499842dc7c476294a540
|
bdf5765931d7f17e6fbe94857723dbbd10a3c53b
|
refs/heads/main
| 2023-06-04T22:55:18.714871
| 2021-06-24T08:05:26
| 2021-06-24T08:05:26
| 379,848,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
fista_ee <- function(y, X, lam1, lam2, b, maxit, tol, L, verbose, acc) {
.Call(`_fglm_fista_ee`, y, X, lam1, lam2, b, maxit, tol, L, verbose, acc)
}
obj_diff_cpp <- function(y, X, b, lam1, lam2, order) {
.Call(`_fglm_obj_diff_cpp`, y, X, b, lam1, lam2, order)
}
soft_t <- function(x, lam) {
.Call(`_fglm_soft_t`, x, lam)
}
log1mexp <- function(x) {
.Call(`_fglm_log1mexp`, x)
}
prox_newt <- function(y, X, lam1, lam2, b, maxit, tol, verbose, linsearch) {
.Call(`_fglm_prox_newt`, y, X, lam1, lam2, b, maxit, tol, verbose, linsearch)
}
|
436ac9ed53f67776fd55ba83469c440888ffce12
|
0084c6a9973b2f484c6cbe57d77929bd42a7d2fc
|
/deprecated/ukfsst/Rpack/ukfsst/man/kfsst.Rd
|
236e0c21d4d9f35ff35c370e12e944a5578736ce
|
[] |
no_license
|
positioning/kalmanfilter
|
db680c7e9cfd8c6edcbab6d1f12a44e153a149ff
|
501ec9a28ae05802287aadcf8ca2cfb9229137cb
|
refs/heads/master
| 2023-02-10T00:36:28.576957
| 2023-01-29T13:16:15
| 2023-01-29T13:16:15
| 40,897,081
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,098
|
rd
|
kfsst.Rd
|
\name{kfsst}
\alias{kfsst}
\title{Kalman Filter based optimization of the tracking model including Sea Surface Temperature}
\description{
After the track has been read, and SST data retrieved via the function
\code{\link{get.sst.from.server}}, this function does the actual
optimization of the model and reconstruction of the track.
Basically this function only needs the raw track as the reference to
the SST-source is stored elsewhere. It has a lot of options to modify
the model, which are presented here. Quite often it is necessary to
simplify the model to get a converging fit - especially for short tracks.
}
\usage{
kfsst(data, fix.first = TRUE, fix.last = TRUE,
theta.active = c(u.active, v.active, D.active, bx.active,
by.active, bsst.active, sx.active, sy.active, ssst.active,
a0.active, b0.active, r.active),
theta.init = c(u.init, v.init, D.init, bx.init, by.init,
bsst.init, sx.init, sy.init, ssst.init, a0.init, b0.init,
r.init),
u.active = TRUE, v.active = TRUE, D.active = TRUE,
bx.active = TRUE, by.active = TRUE, bsst.active = TRUE,
sx.active = TRUE, sy.active = TRUE, ssst.active = TRUE,
a0.active = TRUE, b0.active = TRUE, r.active=FALSE,
u.init = 0, v.init = 0, D.init = 100, bx.init = 0,
by.init = 0, bsst.init = 0, sx.init = 0.1, sy.init = 1,
ssst.init = 0.1, a0.init = 0.001, b0.init = 0, r.init=200,
var.struct = "solstice", save.dir = NULL, admb.string = "",
from.ystr=c(3,6), from.dstr=c(7,9), to.ystr=c(11,14),
to.dstr=c(15,17), localsstfolder=NULL)
}
\arguments{
\item{data}{A data.frame consisting of six columns. The first three columns should contain
\code{day}, \code{month} and \code{year} corresponding to valid dates. The dates
must be sorted in ascending order. Column four and five should contain the
\code{longitude} and \code{latitude} in degrees. The final column should contain
the SST measurement derived from the tag on the fish.}
\item{fix.first}{\code{TRUE} (default) if the first position in the data set is the true
release position (known without error), \code{FALSE} otherwise.
}
\item{fix.last}{\code{TRUE} (default) if the last position in the data set is the true
recapture/popoff position (known without error), \code{FALSE} otherwise.
}
\item{theta.active}{A logical vector of twelve elements, each corresponding to a model
parameter. If an element is set to \code{TRUE} the value of the corresponding
parameter is optimized, otherwise it is kept at its initial value.
The default value is \code{TRUE} for all parameters except \code{r.active},
which defaults to \code{FALSE}. The values \code{1/0} can be used instead
of \code{TRUE/FALSE}. The order of the elements in this vector is
\code{c(u.active, v.active, D.active, bx.active, by.active, bsst.active,
sx.active, sy.active, ssst.active, a0.active, b0.active, r.active)}, hence
a value of \code{c(0,0,1,1,1,1,1,1,1,1,1,0)} would result in a model where
\eqn{u}, \eqn{v} and \eqn{r} were fixed at their initial values.
}
\item{theta.init}{A numeric vector of twelve elements, each corresponding to a model
parameter. The order of the elements in this vector is
\code{c(u.init, v.init, D.init, bx.init, by.init, bsst.init, sx.init,
sy.init, ssst.init, a0.init, b0.init, r.init)}
and the default value is \code{c(0, 0, 100, 0, 0, 0, 0.1, 1.0, 0.1,
0.001, 0, 200)}. It is unwise to initialize the elements \code{D.init},
\code{sx.init}, \code{sy.init}, and \code{ssst.init} below zero,
as they correspond to standard deviations.
}
\item{u.active}{\code{TRUE} (default) if \eqn{u} should be optimized, \code{FALSE} if
it should be fixed at its initial value.}
\item{v.active}{\code{TRUE} (default) if \eqn{v} should be optimized, \code{FALSE} if
it should be fixed at its initial value.}
\item{D.active}{\code{TRUE} (default) if \eqn{D} should be optimized, \code{FALSE} if
it should be fixed at its initial value.}
\item{bx.active}{\code{TRUE} (default) if \eqn{b_x}{b[x]} should be optimized, \code{FALSE}
if it should be fixed at its initial value.}
\item{by.active}{\code{TRUE} (default) if \eqn{b_y}{b[y]} should be optimized, \code{FALSE}
if it should be fixed at its initial value.}
\item{bsst.active}{\code{TRUE} (default) if \eqn{b_{sst}}{b[sst]} should be optimized,
\code{FALSE} if it should be fixed at its initial value.}
\item{sx.active}{\code{TRUE} (default) if \eqn{\sigma_x}{sigma[x]} should be optimized,
\code{FALSE} if it should be fixed at its initial value.}
\item{sy.active}{\code{TRUE} (default) if \eqn{\sigma_y}{sigma[y]} should be optimized,
\code{FALSE} if it should be fixed at its initial value.}
\item{ssst.active}{\code{TRUE} (default) if \eqn{\sigma_{sst}}{sigma[sst]} should be
optimized, \code{FALSE} if it should be fixed at its initial value.}
\item{a0.active}{If the variance structure \code{var.struct="solstice"} is chosen this
flag should be set to \code{TRUE} (default) if \eqn{a_0}{a[0]} should be
optimized, \code{FALSE} if it should be fixed at its initial value. If a
different variance structure is selected this flag is ignored.}
\item{b0.active}{If the variance structure \code{var.struct="solstice"} is chosen this
flag should be set to \code{TRUE} (default) if \eqn{b_0}{b[0]} should be
optimized, \code{FALSE} if it should be fixed at its initial value.
If a different variance structure is selected this flag is ignored.}
\item{r.active}{If the radius is to be estimated from data. The flag should be set to
\code{TRUE} if the radius should be optimized and \code{FALSE} (default)
if it should be fixed at its initial value.}
\item{u.init}{The initial value of \eqn{u}. Default is 0.}
\item{v.init}{The initial value of \eqn{v}. Default is 0.}
\item{D.init}{The initial value of \eqn{D}. Default is 100.}
\item{bx.init}{The initial value of \eqn{b_x}{b[x]}. Default is 0.}
\item{by.init}{The initial value of \eqn{b_y}{b[y]}. Default is 0.}
\item{bsst.init}{The initial value of \eqn{b_{sst}}{b[sst]}. Default is 0.}
\item{sx.init}{The initial value of \eqn{\sigma_x}{sigma[x]}. Default is 0.1.}
\item{sy.init}{The initial value of \eqn{\sigma_y}{sigma[y]}. Default is 1.0.}
\item{ssst.init}{The initial value of \eqn{\sigma_{sst}}{sigma[sst]}. Default is 0.1.}
\item{a0.init}{If the variance structure \code{var.struct="solstice"} is chosen this sets
the initial value of \eqn{a_0}{a[0]}. Default is 0.001. If a different
variance structure is selected this is ignored.}
\item{b0.init}{If the variance structure \code{var.struct="solstice"} is chosen this sets
the initial value of \eqn{b_0}{b[0]}. Default is 0. If a different variance
structure is selected this is ignored.
}
\item{r.init}{The initial value for the radius (in nautical miles) around each track point where
the SST is to be used (the default is 200).}
\item{var.struct}{Two options are available: \code{"uniform"}, \code{"solstice"}(default).}
\item{save.dir}{\code{NULL} (default) if the estimation should be done in a temporary
directory, otherwise the quoted name of the directory where the estimation
should be saved.}
\item{admb.string}{Additional command line arguments to the underlying AD Model Builder
program can be passed as a string. For instance "-est". The available
command line arguments can be found in the AD Model Builder documentation
(see \url{http://otter-rsch.com})}
\item{from.ystr}{Is an integer vector with two elements describing
what part of the file name describe the year of the
first date the data file represents. For instance if
the names of the data files all have the format
\code{RSyyyyddd_YYYYDDD.dat}, where \code{yyyy} is
the year of the first date the argument should be
\code{c(3,6)}.}
\item{from.dstr}{Is an integer vector with two elements describing
what part of the file name describe the 'number of
days into the year' of the first date the data file
represents.}
\item{to.ystr}{Is similar to \code{from.ystr}, but here for the year
of the last date the data file represents.}
\item{to.dstr}{Is similar to \code{from.dstr}, but here for the 'number
of days into the year' of the last date the data file
represents.}
\item{localsstfolder}{If the SST source is a bunch of files in a local folder this is
where the folder name is given as a string}
}
\details{
A brief description of the model should eventually appear here; for the
time being, please consult the reference given below for a full account.
}
\value{
An object of class \code{kfsst} is returned. This object contains
information about the fit and estimated tracks.
}
\author{Anders Nielsen \email{anielsen@dina.kvl.dk},
John Sibert \email{sibert@hawaii.edu}, and
Chi Lam \email{chihinl@usc.edu}}
\seealso{\code{\link{road.map}}, \code{link{blue.shark}}}
\examples{
# No example supplied here, but check out the example
# in the blue.shark dataset documentation
}
\keyword{models}
|
67f0816f1700bcc7a9506ce0ec4bafcd0380b319
|
ae6015cad9d32a2599b0c6e5fe24b89b67909d3e
|
/project/run_analysis.R
|
4559b36d54558eca3378e2e2abb6b52758a1bb4f
|
[] |
no_license
|
DarraghMcL/getting_and_cleaning_data_project
|
f906eb819f989efbd17dbea07de996c7041c483e
|
d2ab229ce72adbeec9165e962516333cd9ea56bc
|
refs/heads/master
| 2021-01-10T18:02:09.330884
| 2015-11-23T01:15:44
| 2015-11-23T01:16:42
| 46,687,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,702
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: assemble a tidy data set from the
# UCI HAR accelerometer data (test + train partitions), keep only the
# mean/std feature columns, and label activities with descriptive names.
# setwd("~/Desktop/getting_and_cleaning_data_project/project/")
library(dplyr)

# File locations inside the extracted "UCI HAR Dataset" folder.
test_set_path <- "./UCI HAR Dataset/test/X_test.txt"
train_set_path <- "./UCI HAR Dataset/train/X_train.txt"
train_labels_path <- "./UCI HAR Dataset/train/y_train.txt"
test_labels_path <- "./UCI HAR Dataset/test/y_test.txt"
activity_labels_path <- "./UCI HAR Dataset/activity_labels.txt"
features_path <- "./UCI HAR Dataset/features.txt"

# Column names for the label tables; measurement column names come from
# features.txt (second column holds the feature name).
activity_label_cols <- c("activity_id", "activity_name")
label_cols <- c("activity_id")
features <- read.table(features_path)

# Read measurements and labels, applying sensible column names as we go.
test_data <- read.csv(test_set_path, header = FALSE, sep = "", col.names = features[, 2])
train_data <- read.csv(train_set_path, header = FALSE, sep = "", col.names = features[, 2])
test_labels <- read.csv(test_labels_path, header = FALSE, sep = "", col.names = label_cols)
train_labels <- read.csv(train_labels_path, header = FALSE, sep = "", col.names = label_cols)
activity_labels <- read.csv(activity_labels_path, header = FALSE, sep = "", col.names = activity_label_cols)

# Attach the activity id to each measurement row, then stack the test and
# train partitions into a single table.
complete_test <- cbind(test_data, test_labels)
complete_train <- cbind(train_data, train_labels)
complete_data <- rbind(complete_test, complete_train)

# Keep only mean and standard-deviation feature columns (plus activity_id) ...
complete_data <- select(complete_data, contains("mean"), contains("std"), activity_id)
# ... and replace the activity id with a descriptive activity name.
complete_data <- merge(complete_data, activity_labels)

# Write the tidy output data set.
write.table(complete_data, file = "./tidy_data.txt", row.names = FALSE)
|
63c1471c12877d1c69750d0483a473cecc99ccb2
|
b2d782ed6c89fcc16c65d2e0e0d8e9fa950174c6
|
/man/vol_heat_capacity_soil.Rd
|
d1c7977cdc6f862a8eb2f03b8de86861927eb0ec
|
[] |
no_license
|
jmigueldelgado/micrometeo
|
4e01c837560ea9b4a481e2111e1311e43c845085
|
8f6b0fa25ba4c0b86ce6e1e8ecbbb63fe7c2f5f6
|
refs/heads/master
| 2021-01-23T10:07:00.388237
| 2018-12-18T14:47:56
| 2018-12-18T14:47:56
| 93,037,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 596
|
rd
|
vol_heat_capacity_soil.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bowen.R
\name{vol_heat_capacity_soil}
\alias{vol_heat_capacity_soil}
\title{volumetric heat capacity of a soil. From the encyclopedia of soil science, page 306. Unit is J.m^-3.K^-1}
\usage{
vol_heat_capacity_soil(xw, xorg, xsolid)
}
\arguments{
\item{xw}{is the volumetric water content}
\item{xorg}{is the organic carbon content of the soil}
\item{xsolid}{is the mineral content of the soil}
}
\description{
volumetric heat capacity of a soil. From the encyclopedia of soil science, page 306. Unit is J.m^-3.K^-1
}
|
c5a558515ece7e8a1770611a8aa402fbd651265c
|
9a09c50db1663711eadc2870d88cb4bd1adbe172
|
/man/Primo_modT.Rd
|
0b689dda77c77acf2db634d9f4b37434bccd8f1e
|
[] |
no_license
|
xtmgah/Primo
|
02dc433726c7de1d4017112677c3145d0c139c16
|
044b3c409d0a8aa57e14df56fdc3f4dc62d972b0
|
refs/heads/master
| 2020-05-03T09:48:52.167452
| 2019-03-28T18:32:58
| 2019-03-28T18:32:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,525
|
rd
|
Primo_modT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{Primo_modT}
\alias{Primo_modT}
\title{Estimate posterior probabilities of association patterns, using moderated t-statistics.}
\usage{
Primo_modT(Tstat_mod, mdfs, V_mat, Gamma, tol = 0.001, par_size = 1)
}
\arguments{
\item{Tstat_mod}{matrix of moderated t-statistics.}
\item{mdfs}{matrix of moderated degrees of freedom.}
\item{V_mat}{matrix of scaling factors.}
\item{Gamma}{correlation matrix.}
\item{tol}{numeric value specifying the tolerance threshold for convergence.}
\item{par_size}{numeric value specifying the number of workers for
parallel computing (1 for sequential processing).}
}
\value{
A list with the following elements:
\tabular{ll}{
\code{post_prob} \tab matrix of posterior probabilities
(each column corresponds to an association pattern).\cr
\code{pis} \tab vector of estimated proportion of observations
belonging to each association pattern.\cr
\code{D_mat} \tab matrix of densities under each association pattern.\cr
\code{Gamma} \tab correlation matrix.\cr
\code{Tstat_mod} \tab matrix of moderated t-statistics.\cr
\code{V_mat} \tab matrix of scaling factors under the alternative distribution.\cr
\code{mdf_sd_mat} \tab matrix of standard deviation adjustment according to
moderated degrees of freedom: df/(df-2).\cr
}
The main element of interest for inference is the posterior probabilities matrix, \code{post_prob}.
The estimated proportion of observations belonging to each association pattern, \code{pis}, may
also be of interest. The remaining elements are returned primarily for use by other functions.
}
\description{
This version of the main \code{Primo} function uses moderated \eqn{t}-statistics
and parameters previously calculated under the limma framework
(i.e. using \code{\link{estimate_densities_modT}}).
It is useful for cases where the same statistic from one study
(e.g. gene-SNP pair) may be mapped to multiple statistics from
another study (e.g. multiple gene-CpG pairings). For each observation
(e.g. SNP), it estimates the posterior probability for each association pattern.
Utilizes parallel computing, when available.
}
\details{
The following are additional details describing the input arguments
(for \eqn{m} SNPs/observations measured in \eqn{d} studies):
\tabular{ll}{
\code{Tstat_mod} \tab \eqn{m} x \eqn{d} matrix.\cr
\code{mdfs} \tab \eqn{m} x \eqn{d} matrix.\cr
\code{V_mat} \tab \eqn{m} x \eqn{d} matrix.\cr
\code{Gamma} \tab \eqn{d} x \eqn{d} matrix.\cr
}
}
|
a5a375210b3fd4dc59e6cf1e543203e3349cc49c
|
59ea89f1162f8048d9f7f10f6e6a3a1567c56607
|
/rstudio/prep21_plot_plv.R
|
0bf7196b8facce6eb7657b4ea274a14dcb2f2791
|
[] |
no_license
|
elshafeh/own
|
a9b8199efb3511aa1b30b53755be9337d572b116
|
ef3c4e1a444b1231e3357c4b25b0ba1ba85267d6
|
refs/heads/master
| 2023-09-03T01:23:35.888318
| 2021-11-03T09:56:33
| 2021-11-03T09:56:33
| 314,668,569
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
prep21_plot_plv.R
|
# Plot mean PLV-derived power per channel and cue condition, with +/- 1 SE
# error bars.
#
# Fix: ggplot2 was attached twice (once on the first line and again after
# plotly); the duplicate call has been removed. The remaining library calls
# are kept in their original attach order — one per line — because later
# packages mask earlier ones.
library(car)
library(ggplot2)
library(dae)
library(nlme)
library(effects)
library(psych)
library(interplot)
library(plyr)
library(devtools)
library(ez)
library(Rmisc)
library(wesanderson)
library(lme4)
library(lsmeans)
library(plotly)
library(ggpubr)
# NOTE(review): clearing the workspace inside a script is an anti-pattern;
# kept for compatibility with how this script is run interactively.
rm(list = ls())
# One colour per cue condition (colour-blind-friendly palette).
cbPalette <- c("#56B4E9", "#999999", "#E69F00")
# Input table of PLV values prepared upstream.
ext1 <- "~/GoogleDrive/PhD/Fieldtripping/documents/4R/"
ext2 <- "prep21_plv2plot.txt"
pat <- read.table(paste0(ext1, ext2), header = TRUE)
# Mean power and standard error per cue condition x channel (Rmisc::summarySE).
tgc <- summarySE(pat, measurevar = "POW", groupvars = c("CUE_COND", "CHAN"))
# Grouped bar chart with error bars.
ggplot2::ggplot(tgc, aes(x = CHAN, y = POW, fill = CUE_COND)) +
  geom_bar(position = position_dodge(), stat = "identity") +
  geom_errorbar(aes(ymin = POW - se, ymax = POW + se), width = .2, position = position_dodge(.9)) +
  ylim(-0.25, 0.25) +
  scale_fill_manual(values = cbPalette) +
  theme_classic()
|
c2366f425b39d6ea402d76fec6348eb4b96f1124
|
9874fc9b629c8893efe000a35ccf27390bece5f6
|
/02_R_Analyses/compare_NK_project.R
|
9ebfc6ea78fc67107788e7747f3a6f5fb8933fbb
|
[
"MIT"
] |
permissive
|
SchSascha/manuscript_tripleRNAseq
|
b7a15c2cc835dbeae0b5e8893ebccedd3052a2c2
|
9f35ed2d75041e6c87f26afd0c9c1e068ec57e19
|
refs/heads/master
| 2022-12-23T02:47:37.489128
| 2020-10-01T13:48:58
| 2020-10-01T13:48:58
| 299,326,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,773
|
r
|
compare_NK_project.R
|
#!/bin/env R
library("plyr")
library("data.table")
library("futile.logger")
library("tidyverse")
library("VennDiagram")
library("ggplot2")
library("UpSetR")
library("gplots") # to calculate venn overlap
library("AnnotationDbi")
library("org.Hs.eg.db")
# Extract genes called significant by EVERY DEG tool present in `x`.
#
# `x` is a data frame with one "<tool>_adj_pval" column per tool, gene ids
# as rownames, and (when with_fold is used) a `log2_fc_mrn` fold-change
# column. With with_fold = FALSE the bare gene ids are returned; with
# with_fold = TRUE each id gets a "+"/"-" suffix for the fold-change sign.
get_sign_genes_geo2 <- function(x, with_fold = F, sigP = 0.01) {
  # Columns holding adjusted p-values, one per tool.
  pval_cols <- grep("_adj_pval", colnames(x), value = TRUE, fixed = TRUE)
  # TRUE where a gene passes the significance cutoff under a given tool.
  sig_by_tool <- x[, pval_cols] < sigP
  # Keep only genes significant under every tool.
  hit_idx <- which(rowSums(sig_by_tool) == length(pval_cols))
  if (with_fold) {
    is_up <- x$log2_fc_mrn[hit_idx] >= 0
    paste0(rownames(x)[hit_idx], ifelse(is_up, "+", "-"))
  } else {
    names(hit_idx)
  }
}
# Apply the all-tools significance filter to every DEG table in `deg_res`
# (a named list of data frames shaped like the input of get_sign_genes_geo2,
# but with the fold change stored in `log2_rpkm`), returning a list of gene
# vectors with the same names.
get_sign_genes_nk_list <- function(deg_res, with_fold = F, sigP = 0.01) {
  extract_one <- function(tbl) {
    # Columns holding adjusted p-values, one per tool.
    pval_cols <- grep("_adj_pval", colnames(tbl), value = TRUE, fixed = TRUE)
    sig_by_tool <- tbl[, pval_cols] < sigP
    # Genes significant under every tool.
    hit_idx <- which(rowSums(sig_by_tool) == length(pval_cols))
    if (with_fold) {
      is_up <- tbl$log2_rpkm[hit_idx] >= 0
      paste0(rownames(tbl)[hit_idx], ifelse(is_up, "+", "-"))
    } else {
      names(hit_idx)
    }
  }
  out <- lapply(deg_res, extract_one)
  names(out) <- names(deg_res)
  out
}
# Translate gene identifiers via an AnnotationDbi database, falling back to
# the original id wherever no mapping exists.
#
# Fix: the `orgdb` argument was accepted but silently ignored — the body
# hard-coded `org.Hs.eg.db`. It is now actually used. Existing call sites
# (which pass org.Hs.eg.db) are unaffected.
#
# @param genes     character vector of ids of type `from_type`.
# @param orgdb     an AnnotationDbi annotation database (e.g. org.Hs.eg.db).
# @param from_type key type of `genes` (default "ENSEMBL").
# @param to_type   target id type (default "SYMBOL").
# @return named character vector; names are the input ids, values the mapped
#   ids (or the input id itself where mapIds() returned NA).
map_gene_names <- function(genes, orgdb, from_type = "ENSEMBL", to_type = "SYMBOL") {
  mapped_gene <- mapIds(orgdb, keys = genes, keytype = from_type, column = to_type)
  # mapIds() keeps the input ids as names and yields NA for unmapped keys;
  # substitute the original id in those positions.
  use_original <- is.na(mapped_gene)
  mapped_gene[use_original] <- names(mapped_gene)[use_original]
  return(mapped_gene)
}
# Per-region element lists of a Venn partition: gplots::venn() attaches them
# as the "intersections" attribute of its (invisible) return value.
get_items_per_intersection <- function(l) {
  attr(venn(l, show.plot = FALSE), "intersections")
}
# Strip the trailing "+"/"-" fold-change sign from a gene id, if present.
# Fix: the original character class was "[-|+]", which also stripped a
# literal trailing "|" (inside [] the pipe is not alternation).
rem_sign <- function(s) sub("[+-]$", "", s)
# Draw (and optionally save to PDF) a Venn diagram of up to five named sets.
# Returns the VennDiagram grob so callers can re-draw it later.
make_venn_plot <- function(l, file = "", save = F, main = "Venn Diagram") {
  # Saving requires an explicit file name — refuse silently-dropped output.
  if (save && file == "") {
    stop("Please supply file name if you want to save the figure!")
  }
  n_sets <- length(l)
  if (n_sets > 5) {
    stop("Cannot make Venn diagram with more than 5 sets! Use UpSet instead!")
  }
  # no log files: silence VennDiagram's per-call logger output.
  futile.logger::flog.threshold(futile.logger::ERROR, name = "VennDiagramLogger")
  n_items <- length(unique(unlist(l)))
  venn_grob <- VennDiagram::venn.diagram(
    x = l,
    filename = NULL,
    main = main,
    main.fontface = "bold",
    sub = paste0("total number of Items: ", n_items),
    fill = rainbow(n_sets),
    height = 3000,
    width = 3000,
    resolution = 500,
    # Percentages get crowded at five sets, so show raw counts only then.
    print.mode = if (n_sets < 5) c("raw", "percent") else "raw"
  )
  if (save) {
    pdf(file)
    grid::grid.draw(venn_grob)
    dev.off()
  } else {
    grid::grid.newpage()
    grid::grid.draw(venn_grob)
  }
  return(venn_grob)
}
##############
# Part 1: DC
# basically, we want to know if there is a difference between treating DC cells with AFu compared to treating NK cells with AFu
############
#>#># MOL 4
#######
deg_res_sp1 <- readRDS("../results/DEG_overlaps/deg_res_sp1.rds")
xf_deg <- deg_res_sp1$DEGs$SingleInfection_Afu_0h_vs_SingleCulture_DC
# Signed ("id+"/"id-") list of DC DEGs significant under every tool.
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
nk_df <- data.table::fread("../other_data/NK92_4_VS_HS.csv") %>% as.data.frame
# NK hits: |log2FC| >= 1, with a "+"/"-" suffix for the fold-change sign.
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0)
signs <- ifelse(nk_best$log2FC >= 0, "+", "-")
nk_best <- paste(nk_best$id, signs, sep = "")
# general overlap of annotation
l <- list(NK = nk_df$id, DC = xf_deg$id)
inter <- get_items_per_intersection(l)
# NOTE(review): this bare sapply() result only prints when run interactively,
# not when the script is source()d.
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/gene_list_overlap.DC.pdf", main = "Overlap of Annotation", save = T)
# best of both using signed genes
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/DC.NK_noRep_best_sign_overlap.pdf", main = "Overlap of top genes with fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .) %>% map_gene_names(org.Hs.eg.db),
            "../results/NK_compare/DC.NK_noRep_best_sign_overlap.genelist.txt")
# best of both using unsigned genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = F)
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0) %>% .$id
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/DC.NK_noRep_best_noSign_overlap.pdf", main = "Overlap of top genes without fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .) %>% map_gene_names(org.Hs.eg.db),
            "../results/NK_compare/DC.NK_noRep_best_noSign_overlap.genelist.txt")
#># For Testing: using data with lower incubation time
xf_deg <- deg_res_sp1$DEGs$SingleInfection_Afu_4h30min_vs_SingleCulture_DC
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
# NOTE(review): xf_best here is SIGNED while nk_best still holds the UNSIGNED
# id list from the previous block — the two can never match, so the signed
# overlap below is necessarily empty. Confirm this is intended.
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/DC.AFU4.5h.NK_noRep_best_sign_overlap.pdf", main = "Overlap of top genes with fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .) %>% map_gene_names(org.Hs.eg.db),
            "../results/NK_compare/DC.AFU4.5h.NK_noRep_best_sign_overlap.genelist.txt")
############
#>#># MOL 0.5
#######
deg_res_sp1 <- readRDS("../results/DEG_overlaps/deg_res_sp1.rds")
xf_deg <- deg_res_sp1$DEGs$SingleInfection_Afu_0h_vs_SingleCulture_DC
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
nk_df <- data.table::fread("../other_data/NK92_0.5_VS_HS.csv") %>% as.data.frame
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0)
signs <- ifelse(nk_best$log2FC >= 0, "+", "-")
nk_best <- paste(nk_best$id, signs, sep = "")
# best of both using signed genes
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/mol_0.5.DC.NK_noRep_best_sign_overlap.pdf", main = "Overlap of top genes with fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .) %>% map_gene_names(org.Hs.eg.db),
            "../results/NK_compare/mol_0.5.DC.NK_noRep_best_sign_overlap.genelist.txt")
# best of both using unsigned genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = F)
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0) %>% .$id
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/mol_0.5.DC.NK_noRep_best_noSign_overlap.pdf", main = "Overlap of top genes without fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .) %>% map_gene_names(org.Hs.eg.db),
            "../results/NK_compare/mol_0.5.DC.NK_noRep_best_noSign_overlap.genelist.txt")
##############
# Part 2: AFu
# basically, we want to know if there is a difference between treating AFu with NK cells or with DC
deg_res_sp2 <- readRDS("../results/DEG_overlaps/deg_res_sp2.rds")
xf_deg <- deg_res_sp2$DEGs$SingleInfection_Afu_0h_vs_SingleCulture_Afu_0h
# Signed ("id+"/"id-") list of AFu DEGs significant under every tool.
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
#########################
## No Replicate Version
#####
nk_df <- data.table::fread("../other_data/NK92_4_VS_AF.csv") %>% as.data.frame
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0)
signs <- ifelse(nk_best$log2FC >= 0, "+", "-")
nk_best <- paste(nk_best$id, signs, sep = "")
# general overlap of annotation
l <- list(NK = nk_df$id, DC = xf_deg$id)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/gene_list_overlap.AFU.pdf", main = "Overlap of Annotation", save = T)
# best of both using signed genes
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/AFU.NK_noRep_best_sign_overlap.pdf", main = "Overlap of top genes with fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
            "../results/NK_compare/AFU.NK_noRep_best_sign_overlap.genelist.txt")
# best of both using unsigned genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = F)
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0) %>% .$id
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/AFU.NK_noRep_best_noSign_overlap.pdf", main = "Overlap of top genes without fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
            "../results/NK_compare/AFU.NK_noRep_best_noSign_overlap.genelist.txt")
############
#>#># MOL 0.5  (header previously said "MOL 4"; the file below is NK92_0.5_VS_AF.csv)
#######
nk_df <- data.table::fread("../other_data/NK92_0.5_VS_AF.csv") %>% as.data.frame
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0)
signs <- ifelse(nk_best$log2FC >= 0, "+", "-")
nk_best <- paste(nk_best$id, signs, sep = "")
# best of both using signed genes
# NOTE(review): xf_best was last recomputed above with with_fold = F, so the
# DC list is UNSIGNED here while nk_best is SIGNED — the signed overlap below
# is necessarily empty. Confirm whether xf_best should be recomputed with
# with_fold = T first.
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/mol_0.5.AFU.NK_noRep_best_sign_overlap.pdf", main = "Overlap of top genes with fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
            "../results/NK_compare/mol_0.5.AFU.NK_noRep_best_sign_overlap.genelist.txt")
# best of both using unsigned genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = F)
nk_best <- nk_df %>% filter(abs(log2FC) >= 1.0) %>% .$id
l <- list(DC = xf_best, NK = nk_best)
inter <- get_items_per_intersection(l)
sapply(inter, length)
make_venn_plot(l, file = "../results/NK_compare/mol_0.5.AFU.NK_noRep_best_noSign_overlap.pdf", main = "Overlap of top genes without fc sign", save = T)
write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
            "../results/NK_compare/mol_0.5.AFU.NK_noRep_best_noSign_overlap.genelist.txt")
#########################
## Replicate Version
#####
deg_res_sp2 <- readRDS("../results/DEG_overlaps/deg_res_sp2.rds")
xf_deg <- deg_res_sp2$DEGs$SingleInfection_Afu_0h_vs_SingleCulture_Afu_0h
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
# ASP_US aspergillus unstimulated for control
# DC_asp1 for direct comparison
# NK_asp1 for NK comparison
# NKDC_Asp1 for ? comparison
# Load every sheet of the replicate-statistics workbook, then keep only the
# three Asp-vs-unstimulated comparisons.
sheets <- readxl::excel_sheets("../other_data/Statistics.xlsx")
nk_df <- lapply(sheets, function(s) readxl::read_xlsx("../other_data/Statistics.xlsx", sheet = s))
names(nk_df) <- sheets
sheets
nk_df <- nk_df[c("DC_Asp_VS_Asp_US", "NK_Asp_VS_Asp_US", "NKDC_Asp_VS_Asp_US")]
# Drop the first (index) column, then select and rename the columns so each
# table matches the layout expected by get_sign_genes_nk_list().
nk_df <- lapply(nk_df, function(x) x[,-1])
nk_df <- lapply(nk_df, function(x) x[,c("id", "log2FC", "deseqpadj", "deseqpadj2", "res_limma$adjpvalues", "p_edgeR")])
cn <- c("id", "log2_rpkm", "DESeq_adj_pval", "DESeq2_adj_pval", "limma_adj_pval", "edgeR_adj_pval") # NOTE: fc is RPKM, not MRN. But I do not want to rewrite the stuff above...
nk_df <- lapply(nk_df, function(x) {x <- as.data.frame(x); colnames(x) <- cn; rownames(x) <- x$id; x})
# with signed genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
nk_best <- get_sign_genes_nk_list(nk_df, with_fold = T)
for(test in names(nk_best)) {
  l <- list(DC = xf_best, NK = nk_best[[test]])
  inter <- get_items_per_intersection(l)
  sapply(inter, length)
  make_venn_plot(l,
                 save = T,
                 file = paste0("../results/NK_compare/AFU.NK_Rep_best_sign.", test, ".pdf"),
                 main = paste0("Overlap Top Rep NK | XF - ", test))
  write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
              paste0("../results/NK_compare/AFU.NK_Rep_best_sign.", test, ".genelist.txt"))
}
# with unsigned genes
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = F)
nk_best <- get_sign_genes_nk_list(nk_df, with_fold = F)
for(test in names(nk_best)) {
  l <- list(DC = xf_best, NK = nk_best[[test]])
  inter <- get_items_per_intersection(l)
  sapply(inter, length)
  make_venn_plot(l,
                 save = T,
                 file = paste0("../results/NK_compare/AFU.NK_Rep_best_noSign.", test, ".pdf"),
                 main = paste0("Overlap Top Rep NK | XF - no sign - ", test))
  write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
              paste0("../results/NK_compare/AFU.NK_Rep_best_noSign.", test, ".genelist.txt"))
}
# no deseq version
# Columns 12/13 of xf_deg and column 3 of each NK table are dropped —
# NOTE(review): presumably the DESeq p-value columns; confirm the indices.
xf_best <- get_sign_genes_geo2(xf_deg[,c(-12,-13)], with_fold = F)
nk_df <- lapply(nk_df, function(x) x[,-3])
nk_best <- get_sign_genes_nk_list(nk_df, with_fold = F)
for(test in names(nk_best)) {
  l <- list(DC = xf_best, NK = nk_best[[test]])
  inter <- get_items_per_intersection(l)
  sapply(inter, length)
  make_venn_plot(l,
                 save = T,
                 file = paste0("../results/NK_compare/AFU.NK_Rep_best_noSign.nodeseq.", test, ".pdf"),
                 main = paste0("Overlap Top Rep NK | XF - no DESeq - no sign - ", test))
  write_lines(inter$`DC:NK` %>% sub("[+-]", "", .),
              paste0("../results/NK_compare/AFU.NK_Rep_best_noSign.nodeseq.", test, ".genelist.txt"))
}
# NK load version - for proof checking
nk_df <- readxl::read_xlsx("../other_data/Statistics.xlsx", sheet = "NK_Asp_VS_Asp_US")
nk_df <- nk_df[,c("id", "log2FC", "deseqpadj", "deseqpadj2", "res_limma$adjpvalues", "p_edgeR")]
cn <- c("id", "log2_rpkm", "DESeq_adj_pval", "DESeq2_adj_pval", "limma_adj_pval", "edgeR_adj_pval") # NOTE: fc is RPKM, not MRN. But I do not want to rewrite the stuff above...
nk_df <- lapply(list(nk_df), function(x) {x <- as.data.frame(x); colnames(x) <- cn; rownames(x) <- x$id; x})
nk_best <- get_sign_genes_nk_list(nk_df, with_fold = T)[[1]]
xf_best <- get_sign_genes_geo2(xf_deg, with_fold = T)
l <- list(DC = xf_best, NK = nk_best)
# NOTE(review): filename below is garbled ("besasdfrlap") and the title says
# "without fc sign" although both lists are signed; harmless here since
# save = F means no file is written, but worth cleaning up.
make_venn_plot(l, file = "../results/NK_compare/AFU.NK_noRep_besasdfrlap.pdf", main = "Overlap of top genes without fc sign", save = F)
|
515c0d1e822e98f89860ba139e04a61efc65fb02
|
4a3590a0e1a45b765cea07fecc415ccae2f54e1f
|
/R/eval-style.R
|
28d48df03956b69a27824291d36763422409f13b
|
[] |
no_license
|
cran/cascadess
|
f6c76a15347a5bc703ae032a7ae31d2bb229b434
|
3afd9853ecef99c7b660ef908c848031c2f6fa82
|
refs/heads/master
| 2023-01-23T04:34:26.785457
| 2020-11-30T08:00:08
| 2020-11-30T08:00:08
| 317,813,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,178
|
r
|
eval-style.R
|
# Package-private mutable state: holds the currently active style pronoun.
.global <- new_environment()

# Return the currently active pronoun environment (NULL when none is set).
pronoun_peek <- function() {
  .global$pronoun
}

# Install `new` as the active pronoun, replacing any previous one.
pronoun_set <- function(new) {
  env_poke(.global, "pronoun", new)
}

# Look up the prefix registered under `ns` in the active pronoun.
# NOTE(review): yields NULL both when no pronoun is set and when `ns`
# has no entry — callers apparently rely on that.
pronoun_get_prefix <- function(ns) {
  pronoun_peek()[[ns]]
}
#' @export
print.cascadess_style_pronoun <- function(x, ...) {
  # S3 print method: the pronoun carries no useful printable state, so just
  # show a short tag; return `x` invisibly, as print methods conventionally do.
  cat("<pronoun>\n")
  invisible(x)
}
#' @export
str.cascadess_style_pronoun <- function(object, ...) {
  # S3 str() method: mirror the print method's compact tag instead of
  # dumping the (empty) underlying list.
  cat("<pronoun>\n")
  invisible(NULL)
}
#' Style pronoun
#'
#' @description
#'
#' The `.style` pronoun allows you to define styles for a tag element within the
#' context of the element. Without the `.style` pronoun tag styles are applied
#' outside and after constructing a tag element.
#'
#' ```R
#' div(". . .") %>% background("primary") %>% display("flex")
#' ```
#'
#' However, once the content of a tag element grows to more than a few lines,
#' associating the element's styles with the element becomes less and less
#' intuitive. In these situations, make use of the `.style` pronoun.
#'
#' ```R
#' div(
#' .style %>%
#' border("primary") %>%
#' font("primary"),
#' p(". . ."),
#' p(". . .")
#' )
#' ```
#'
#' @section Prefixing:
#'
#' Complex components such as `shiny::radioButtons()` or
#' `yonder::listGroupInput()` may need a non-standard prefix for the CSS
#' classes applied by cascadess' functions.
#'
#' @name style-pronoun
#' @format NULL
#' @export
# The exported pronoun object itself: an empty list tagged with class
# "cascadess_style_pronoun" so package functions can recognise it and defer
# style application to the surrounding tag context.
.style <- structure(list(), class = "cascadess_style_pronoun")
#' Style pronoun contexts
#'
#' The `local_style()` function establishes new prefixes and is used to
#' overload the `.style` pronoun's defaults. The `with_style()` function
#' provides a different approach to achieve the same utility.
#'
#' @param ... Named values used to build the replacement pronoun
#'   environment; presumably per-namespace prefixes — confirm against
#'   `pronoun_get_prefix()` callers.
#'
#' @param .env Environment whose exit triggers restoration of the previous
#'   pronoun; defaults to the caller's frame.
#'
#' @keywords internal
#' @export
local_style <- function(..., .env = caller_env()) {
  # Swap in a fresh pronoun built from the supplied values, remembering the
  # one it replaces.
  new_pronoun <- new_environment(list(...))
  prev_pronoun <- pronoun_peek()
  pronoun_set(new_pronoun)
  # Defer restoring the previous pronoun until `.env` exits, giving the
  # override withr-style local_*() scoping.
  pronoun_restore <- call2(pronoun_set, prev_pronoun)
  local_exit(!!pronoun_restore, .env)
  invisible(prev_pronoun)
}
#' @rdname local_style
#' @export
with_style <- function(.expr, ...) {
  # Install the override first; `.expr` is evaluated lazily afterwards, so it
  # sees the new pronoun. current_env() scopes the restoration to this call.
  local_style(..., .env = current_env())
  .expr
}
|
60fb9808d864a1e05551fb6cf5e8b757372f00e9
|
92f1952b04b1bcee44a159c2876aafd2f2cf8a0e
|
/common/constants.r
|
de6809f3f9a3d604204f060fb7ef0d096e9ff011
|
[] |
no_license
|
isbur/dissertation
|
8a7f982f235cb1aa014e81d2eb8e7648e37602f4
|
fd7a19289d9db7856f38498b3737336cbdf412a5
|
refs/heads/master
| 2023-07-19T00:33:31.892154
| 2019-08-24T14:58:41
| 2019-08-24T14:58:41
| 152,059,369
| 0
| 1
| null | 2023-07-18T09:51:30
| 2018-10-08T10:11:13
|
Java
|
UTF-8
|
R
| false
| false
| 151
|
r
|
constants.r
|
# Tunable simulation parameters (meanings inferred from names — confirm
# against the code that reads them).
# Fix: use `<-` for assignment per R convention; values are unchanged.
ENERGY_MINIMIZER_ITERATIONS_LIMIT <- 1
ENERGY_DROP_LIMIT <- 0.05
MYPUSH_QUEUE_DEPTH <- 5
NUMBER_OF_ENERGY_QUANTS_TO_DECREASE <- 1
QUESTION_TO_LEAVE <- 4
|
492ee014368d0e6e3e82f55d7aa2f2db31a6bfc3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/traitdataform/examples/get_gbif_taxonomy.Rd.R
|
22fe27d6de8fbd3472eb9987b48dc1aaa8774ef6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
get_gbif_taxonomy.Rd.R
|
# Auto-extracted package example (genthat); exercises
# traitdataform::get_gbif_taxonomy.
# NOTE(review): resolving names against the GBIF backbone presumably
# requires network access — confirm before running offline.
library(traitdataform)
### Name: get_gbif_taxonomy
### Title: Get accepted canonical names and taxonomy for a given species
###   name
### Aliases: get_gbif_taxonomy
### ** Examples
# Orthoptera species list; "Spec" is a deliberate placeholder/unresolvable
# name kept from the original example.
get_gbif_taxonomy(c("Chorthippus albomarginatus", "Chorthippus apricarius",
"Chorthippus biguttulus", "Chorthippus dorsatus", "Chorthippus montanus",
"Chorthippus parallelus", "Chrysochraon dispar", "Conocephalus dorsalis",
"Conocephalus fuscus", "Decticus verrucivorus", "Euthystira brachyptera",
"Gomphocerippus rufus", "Gryllus campestris", "Metrioptera roeselii",
"Omocestus viridulus", "Phaneroptera falcata", "Platycleis albopunctata",
"Spec", "Stenobothrus lineatus", "Stenobothrus stigmaticus",
"Stethophyma grossum", "Tetrix kraussi", "Tetrix subulata",
"Tetrix tenuicornis", "Tetrix undulata", "Tettigonia cantans",
"Tettigonia viridissima")
)
# A genus-level query.
get_gbif_taxonomy("Vicia")
|
c2eb9db31d9fe4042c49e1fcb655abb7da0ef1c1
|
799a6667bd1e2492fc87894a0482452cabbe6443
|
/Ch.4.R
|
5032ed0444e72a6ae5f55aa82e14de0aff0e9a9b
|
[] |
no_license
|
TechJamieShin/Must-R-Basic
|
1b013b224bc6ed28265507704893b7e294e92c39
|
3ab98b40e7474a13b775063efc2f5e992b683d5b
|
refs/heads/master
| 2023-02-27T05:52:00.413834
| 2021-02-08T21:27:47
| 2021-02-08T21:27:47
| 333,222,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
r
|
Ch.4.R
|
# Exploratory summary of the HR data set.
# NOTE(review): assumes a data frame `hr` with columns `salary` and
# `satisfaction_level` is already loaded — confirm against the loading step.
# Convert salary from character to factor so summary() tabulates levels.
hr$salary <-as.factor(hr$salary)
str(hr$salary)
str(hr$satisfaction_level)
# Summary of salary -> the count of each factor level.
summary(hr$salary)
summary(hr$satisfaction_level)
# Quantiles at 10%, 30%, 60%, 90%.
quantile(hr$satisfaction_level, probs = c(.1, .3, .6, .9))
# Basic descriptive statistics.
sum(hr$satisfaction_level)
mean(hr$satisfaction_level)
sd(hr$satisfaction_level)
colMeans(hr[1:5]) # means of columns (first to fifth)
colSums(hr[1:5]) # sums of columns (first to fifth)
|
48844b51c5e1bc70a4221d652c18a7a4dec83fc3
|
45f72ad9b009bd924d9bdac674dfbebca890d1bc
|
/cachematrix.R
|
0991dd70ed5a8676f27affe60d943ca46cdf3122
|
[] |
no_license
|
vaibhav73/ProgrammingAssignment2
|
e6690a5ced1b499a38daf74b5deb4a81a2a314a8
|
ba4332253cd618e55c9afac68b19c331bf26242f
|
refs/heads/master
| 2021-01-12T04:19:32.598328
| 2016-12-29T06:06:44
| 2016-12-29T06:06:44
| 77,584,904
| 0
| 0
| null | 2016-12-29T05:20:29
| 2016-12-29T05:20:28
| null |
UTF-8
|
R
| false
| false
| 1,201
|
r
|
cachematrix.R
|
## makeCacheMatrix() and cacheSolve() together implement a matrix wrapper
## that caches its inverse, so repeated inversions are computed only once.
# This function creates a list of functions for:
# 1) setting the value of the matrix
# 2) getting the value of the matrix
# 3) setting the value of the inverse of the matrix
# 4) getting the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
in_verse <- NULL
set <- function(y) {
x <<- y
in_verse <<- NULL
}
get <- function() x
setinverse <- function(inverse) in_verse <<- inverse
getinverse <- function() in_verse
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, reusing a previously cached inverse when available.
## On a cache hit a message is emitted and the computation is skipped;
## otherwise solve() is applied to the stored matrix and the result is
## saved via x$setinverse() for subsequent calls. The ... argument is
## accepted for interface compatibility but not forwarded.
cacheSolve <- function(x, ...) {
    cached <- x$getinverse()
    if (!is.null(cached)) {
        message("getting cached data.")
        return(cached)
    }
    fresh <- solve(x$get())
    x$setinverse(fresh)
    fresh
}
|
a0a4e157a0263ed1fca12e7db41587ec4a2ed74b
|
80b3a7af905335d4e04bc1644c93589a881e3634
|
/R/scale_throughputs_by.R
|
2e29619f5f3157c4a5aabb09ee4d210f9cc1275d
|
[] |
no_license
|
BAAQMD/qtytools
|
53a19738fcce07c9aa71d3a9cb3f605bc8eddf58
|
cf396ec102f1b071f65ee0588f9b1bc0c906c14a
|
refs/heads/master
| 2022-07-12T13:21:17.251252
| 2022-04-05T17:54:25
| 2022-04-05T17:54:25
| 131,063,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
scale_throughputs_by.R
|
#' Scale throughput quantities in a dataset
#'
#' Thin wrapper around \code{apply_scalars} that fixes the value column
#' to \code{"tput_qty"}.
#'
#' @param input_data Data containing a \code{tput_qty} column to scale.
#' @param using Scalars to apply; passed straight through to
#'   \code{apply_scalars} (see that function for the expected structure).
#' @param ... Further arguments forwarded to \code{apply_scalars}.
#' @return Whatever \code{apply_scalars} returns -- presumably
#'   \code{input_data} with \code{tput_qty} rescaled; confirm against
#'   \code{apply_scalars}.
#' @export
scale_throughputs_by <- function (input_data, using, ...) {
  apply_scalars(input_data, using, value_col = "tput_qty", ...)
}
|
6ccf245500e8b071608c79eafe18c81f537cd9e1
|
6f2bc1d4b594ebbca495507f9fd9712acbddd722
|
/man/Trentino_hourly_T.Rd
|
109774f46704c2c6c765953ce3fe55e8167a63a0
|
[] |
no_license
|
cran/Interpol.T
|
b617239535b29b57f2d12da8c24de76e26a36d53
|
e31eef0cb3dc250f62b94784a9c58921227502dc
|
refs/heads/master
| 2021-01-01T06:55:04.137814
| 2012-12-20T00:00:00
| 2012-12-20T00:00:00
| 17,680,038
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,176
|
rd
|
Trentino_hourly_T.Rd
|
\name{Trentino_hourly_T}
\docType{data}
\alias{Trentino_hourly_T}
\alias{Tn}
\alias{Tx}
\alias{h_d_t}
\alias{mo_bias}
\alias{Th_int_list}
\alias{Tm_list}
\alias{calibration_l}
\alias{calibration_shape}
\title{
Dataset of hourly temperature for Trentino, Italy
}
\usage{
data(Trentino_hourly_T)
}
\description{
Contains the following objects:
\describe{
\item{\code{Tn}}{Data frame with \code{year},\code{month}, \code{day} and daily minimum temperature in 39 stations in Trentino, Italy. This series has been directly calculated from the hourly one (\code{h_d_t}), so it contains the daily minima and maxima of hourly measurements, not daily absolute minima and maxima.}
\cr
\item{\code{Tx}}{Data frame containing \code{year}, \code{month}, \code{day} and daily maximum temperature in 39 stations in Trentino, Italy. This series has been directly calculated from the hourly one (\code{h_d_t}), so it contains the daily maxima of hourly measurements, not daily absolute maxima.}
\cr
\item{\code{h_d_t}}{Data frame containing station id, date (yyyy/mm/dd), hour (integer 0,...23), hourly temperature, and a quality flag (not used) in 39 stations in Trentino.}
\cr
\item{\code{mo_bias}}{Data frame containing the monthly (lines 1-12) and annual (line 13) average bias between mean daily temperatures calculated as (Tmin + Tmax)/2 and the corresponding 24-value mean, for 39 stations in Trentino.}
\cr
\item{\code{Th_int_list}}{List containing the simulated (interpolated) hourly values. The first element (\code{Date}) is a data frame of \code{year}, \code{month}, \code{day}, and \code{hour}. All other elements, each having the name of one station id, are numeric vectors of temperature.}
\cr
\item{\code{Tm_list}}{List containing the daily means, calculated from the hourly interpolations. The first element (\code{Date}) is a data frame of \code{year}, \code{month}, and \code{day}. All other elements, each having the name of one station id, are numeric data frames of daily temperature.}
\cr
\item{\code{calibration_l}}{List containing the "hour" calibration parameters. Each element (name: station id) reports the following parameters, for each month: \code{time_min}: mode value of the time of occurrence of minimum temperature; \code{time_max}: mode value of the time of occurrence of maximum temperature; \code{time_suns}: mode value of the time of occurrence of sunset; \code{C_m}: value of "c" (see \code{\link{par_calibration}} and the quoted reference). An unreported station id means insufficient data for calibration.}
\cr
\item{\code{calibration_shape}}{List containing the "shape" calibration parameters for the night portion of the curve. The list has one data frame (name: \code{ratio}). It has in every line (one line per station id) the following: 1. the value of \code{ratio_dtr} that minimizes the mean error. 2., 3., 4. the mean error, mean absolute error, root mean square error, respectively, corresponding to this value (all in "deg C"). For details see \code{\link{shape_calibration}}.}
\cr
}
}
\details{
Dataset from Trentino, Italy, with examples of hourly temperature series, the daily series (min and max) obtained from these, and the results of the calibration and application of the interpolation algorithm: the calibration lists (parameters), the interpolated list of series, and the bias between interpolated 24-hour daily means and the means obtained by (Tmin + Tmax)/2.
The user can easily use the package with his/her own data after replacing the values of such variables.
}
\format{
Data frames and lists.
}
\source{
Original data are provided by Provincia Autonoma di Trento, Italy (\url{http://www.meteotrentino.it/}).
This dataset is intended for research purposes only, being distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY.
}
\references{
Eccel, E., 2010: What we can ask to hourly temperature recording. Part II: hourly interpolation of temperatures for climatology and modelling. Italian Journal of Agrometeorology XV(2):45-50
\url{http://www.agrometeorologia.it/documenti/Rivista2010_2/AIAM\%202-2010_pag45.pdf},\url{www.agrometeorologia.it}
}
\keyword{dataset}
|
0d9c9fc54e206ebc71040a462ef62eb402a743aa
|
d2e5663edcdb1a96ffe7a0144a2e1920292d0f9e
|
/tests/testthat.R
|
30f74eb5d4ac3185698de3dbaa065aee0300d262
|
[] |
no_license
|
mrgsolve/knobs
|
4608c2b8c7d66183ccf912f4e2d44119cc029e87
|
d9adf4d8cb5edc87a195a9d2411d59b355c1a91b
|
refs/heads/master
| 2022-05-28T02:17:30.399348
| 2020-04-25T18:31:26
| 2020-04-25T18:31:26
| 258,772,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
testthat.R
|
# Clear R_TESTS -- a common workaround so testthat runs correctly when
# invoked as a subprocess of R CMD check.
Sys.setenv("R_TESTS" = "")
library(mrgsolve)
library(testthat)
# Run the test suite of the "knobs" package with the summary reporter.
test_check("knobs", reporter="summary")
|
d5af782d8fb390531f111e01a0b72e3904645a4b
|
b61e785a5f4ceeed9e5084ad2e2e242b295a5402
|
/R/sectionCOP.R
|
9a331b98c1d8ce1124e64f690253f54c0d0a078d
|
[] |
no_license
|
surajitdb/copBasic
|
284315c43b2d232122cabcb030ba5e6d45089244
|
8c950b0cc7cc616cbf712f2f8c5f88a62c8f8afd
|
refs/heads/master
| 2023-07-17T14:28:11.905504
| 2021-09-02T20:10:25
| 2021-09-02T20:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,050
|
r
|
sectionCOP.R
|
"sectionCOP" <-
function(f, cop=NULL, para=NULL, wrtV=FALSE, dercop=FALSE, delt=0.005,
ploton=TRUE, lines=TRUE, xlab="NONEXCEEDANCE PROBABILITY", ...) {
if(wrtV) {
#message("Triggering Horizontal Section logic: v = constant")
txt <- "horizontal section"
} else {
#message("Triggering Vertical Section logic: u = constant")
txt <- "vertical section"
}
if(ploton) plot(c(0,1), c(0,1), type="n", xlab=xlab, ...)
T <- seq(0+delt,1-delt,delt)
C <- vector(mode="numeric", length=length(T))
if(dercop) {
C <- sapply(T, function(x) {
ifelse(wrtV, return( derCOP2(x,f, cop=cop, para=para) ),
return( derCOP(f,x, cop=cop, para=para) )) } )
} else {
C <- sapply(T, function(x) {
ifelse(wrtV, return( cop(x,f, para=para) ),
return( cop(f,x, para=para) )) } )
}
if(lines & ! is.null(dev.list())) lines(T,C, ...)
return(list(t=T, seccop=C, wrt=txt, fvalue=f, isderivative=dercop))
}
|
27d47eda1349bbc81b58b966a25e8fe44b870569
|
d28cd754e3a641b0ed45ecb3e3fd03d8c55c2650
|
/man/fars_map_state.Rd
|
62145c1b728be7d835968531a9b3eead9406df05
|
[
"CC0-1.0"
] |
permissive
|
Centurione/FARS_Package
|
d37d169ae8c0524db92efce7b2cb7b50fd7d4e6f
|
dc68f55887bab9937d2ef59f664d7b4e8fce87f3
|
refs/heads/master
| 2020-12-01T04:15:05.507138
| 2019-12-29T21:21:30
| 2019-12-29T21:21:30
| 230,555,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 807
|
rd
|
fars_map_state.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documents.R
\name{fars_map_state}
\alias{fars_map_state}
\title{fars_map_state}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{An integer value which identifies a state in the FARS dataset}
\item{year}{An integer value which for one of the years in the FARS dataset}
}
\value{
A plot of the states map with the coordinates of each fatality that year in
that state plotted as a point
}
\description{
\code{fars_map_state} returns a plot of the coordinates of each fatal
collision by the state and year in which they occurred.
}
\note{
If the state number is not included in the dataset's STATE column, an error is returned.
}
\examples{
library(mapdata)
fars_map_state(12, 2014)
fars_map_state(36, 2014)
}
|
0b8ad86f5cc8ad391629a8c6dd1316bffdb2a831
|
01e5965414dd0153193d348e7ea43d7bfb2f1bc7
|
/fibonacci.R
|
6bdf3ac6d1d12862bad2dfa74585747158b1bd7e
|
[] |
no_license
|
CLG-Kind-ADC/thefailhouse
|
ead420487dce8231df7a65e53dc5a44757478a54
|
834401d548bff3e2117fc73244f0d15c5f167fe1
|
refs/heads/master
| 2021-01-17T20:01:24.253706
| 2019-06-05T05:27:17
| 2019-06-05T05:27:17
| 62,250,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 153
|
r
|
fibonacci.R
|
# Return the n-th Fibonacci number (1, 1, 2, 3, 5, ...) by naive double
# recursion. Runs in exponential time, so it is suitable for small n only.
#
# Args:
#   n: a positive number; values in (0, 2] return 1 (base case).
# Returns the n-th Fibonacci number; stops with an error for n <= 0
# (the previous version recursed without bound on such input).
happyfunction <- function(n){
  if (n <= 2 && n > 0) {
    return(1)
  }
  if (n <= 0) {
    # Fail fast instead of infinite recursion on non-positive input.
    stop("n must be positive")
  }
  happyfunction(n - 1) + happyfunction(n - 2)
}
|
9ee87710338bdb8422f032fbdb3a3d36211989c7
|
21a45648823ff5252eade28c6ab6d7d562a126da
|
/Intermediate of R/4.0.2 Personalizado.r
|
19200f485976069c9416ed5ee960addf935f3783
|
[] |
no_license
|
TopicosSelectos/tutoriales-2019-2-al150422
|
061163562e663901c5aa6e856fefe1166b0183b4
|
853520a4211f974fe4c1412dcba6dd77eb7072c6
|
refs/heads/master
| 2020-09-14T09:12:47.625966
| 2019-11-21T18:16:18
| 2019-11-21T18:16:18
| 223,086,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 152
|
r
|
4.0.2 Personalizado.r
|
# Daily usage values for two apps, one value per day of the week.
snapchat <- c(16, 9, 13, 5, 2, 17, 14)
youtube <- c(17, 7, 5, 16, 8, 13, 14)

# Average daily usage for each app.
avg_sc <- mean(snapchat)
avg_yt <- mean(youtube)

# Print the two averages.
avg_sc
avg_yt
|
2bd400a04c93780848822ac039b12e3d3e7a687d
|
cff5a02449f0794a8c1a88558c274f81eae89ad9
|
/data_read.R
|
a925352da6aa7b952b94e1e2183f991f1ecf356d
|
[] |
no_license
|
maoyuexin/R_plot_example
|
9b8cdd4d4f4de15b655d50e031077951e0623d73
|
1e7fa4486d87a02c5fbc9ed1e1145953febfe848
|
refs/heads/master
| 2020-12-25T06:43:50.552294
| 2016-02-17T02:43:47
| 2016-02-17T02:43:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 461
|
r
|
data_read.R
|
# Read the full household power consumption dataset: semicolon-separated
# text with a header row. NOTE(review): assumes the file exists at this
# hard-coded path -- confirm before running.
mydata = read.table("~/Desktop/git_repo/household_power_consumption.txt", sep=";",header=TRUE)
# get the sub data set whose Date (stored as dd/mm/yyyy text) falls
# between "2007-02-01" and "2007-02-02" inclusive
subdata=mydata[which(as.Date(mydata$Date,format="%d/%m/%Y")>=as.Date("2007-02-01") & as.Date(mydata$Date,format="%d/%m/%Y")<=as.Date("2007-02-02")),];
# save the subset as CSV next to the source data
x <- data.frame(subdata)
write.csv(x, "~/Desktop/git_repo/household_power_consumption_subdata.csv")
|
1073b6b24107400fe169d8009710e4616d7af09b
|
6ed58d0c61899aeb5e4870621dc7412b3eaa9d6f
|
/PracticalMachineLearning/Semana2/PreprocessingPrincipalComponentsAnalysis.R
|
8495a3b10cd7cd42962a0839244d0cf8ec4ffe6b
|
[] |
no_license
|
jspaz/DataScience
|
3be2c9497bf11af41168acdef83c764188cf68e2
|
b8bd27c4cc967c4127ef421585864f0f9de17b68
|
refs/heads/master
| 2020-04-06T06:19:28.585471
| 2017-12-25T03:58:48
| 2017-12-25T03:58:48
| 55,121,880
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,430
|
r
|
PreprocessingPrincipalComponentsAnalysis.R
|
# Example: handling correlated predictors with Principal Components
# Analysis (PCA), using the kernlab spam data and caret.
# NOTE(review): no set.seed() before createDataPartition, so the
# train/test split differs between runs.
library(caret); library(kernlab); data("spam")
inTrain <- createDataPartition(y=spam$type, p=0.75, list = FALSE)
training <- spam[inTrain,]
testing <- spam[-inTrain,]
# Absolute pairwise correlations between predictors; column 58 is dropped
# (presumably the outcome 'type' -- confirm). Zero the diagonal so
# self-correlations are ignored, then list highly correlated pairs.
M <- abs(cor(training[,-58]))
diag(M) <- 0
which(M > 0.8, arr.ind = T)
names(spam)[c(34, 32)]
plot(spam[,34],spam[,32])
# Rotate the plot: sum/difference axes of the two correlated predictors
X <- 0.71*training$num415 + 0.71*training$num857
Y <- 0.71*training$num415 - 0.71*training$num857
plot(X, Y)
# Using base R - prcomp
smallSpam <- spam[,c(34, 32)]
prComp <- prcomp(smallSpam)
plot(prComp$x[,1], prComp$x[,2])
prComp$rotation
# PCA on the full SPAM predictor set (log10(x + 1) transformed)
typeColor <-((spam$type=="spam")*1+1)
prComp <- prcomp(log10(spam[,-58]+1))
plot(prComp$x[,1],prComp$x[,2], col=typeColor,xlab="PC1",ylab="PC2")
# PCA using caret's preProcess
preProc <- preProcess(log10(spam[,-58]+1), method = "pca", pcaComp = 2)
spamPC <- predict(preProc, log10(spam[,-58]+1))
plot(spamPC[,1], spamPC[,2], col=typeColor)
# Preprocessing with PCA, then fitting a GLM on the components
preProc <- preProcess(log10(training[,-58]+1), method = "pca", pcaComp = 2)
trainPC <- predict(preProc, log10(training[,-58]+1))
modelFit <- train(training$type ~., method = "glm", data = trainPC) # may show an error
testPC <- predict(preProc, log10(testing[,-58]+1))
confusionMatrix(testing$type, predict(modelFit, testPC))
# Alternative: let train() perform the PCA preprocessing itself
modelFit <- train(training$type ~., method='glm', preProcess='pca', data = training) # may show an error
confusionMatrix(testing$type, predict(modelFit,testing))
|
2cd46865e04c7a9a007cf1027fa24d62450f8603
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/onion/tests/aaa.R
|
3bf66ca1c12b9cc097cde968a7e74807822a734a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,002
|
r
|
aaa.R
|
require(onion)
## test(): assert that x is numerically zero -- every component must have
## modulus below TOL -- stopping otherwise; returns TRUE on success.
test <- function(x, TOL = 1e-10) {
    stopifnot(Mod(x) < TOL)
    TRUE
}
# Verify the complete multiplication tables and basic additive identities
# of the quaternion (H*) and octonion (O*) basis elements supplied by the
# onion package. Stops at the first failing identity; returns TRUE when
# every check holds. The ... argument is accepted but unused.
f <- function(...){
    ## First the quaternions:
    stopifnot(Hi*Hj == Hk)
    stopifnot(Hj*Hi == -Hk)
    stopifnot(Hj*Hk == Hi)
    stopifnot(Hk*Hj == -Hi)
    stopifnot(Hk*Hi == Hj)
    stopifnot(Hi*Hk == -Hj)
    stopifnot(Hi*Hi == -H1)
    stopifnot(Hj*Hj == -H1)
    stopifnot(Hk*Hk == -H1)
    stopifnot(H1*H1 == H1)
    stopifnot(H1*Hi == Hi)
    stopifnot(H1*Hj == Hj)
    stopifnot(H1*Hk == Hk)
    # NOTE(review): H1*H1 is checked twice (also a few lines above).
    stopifnot(H1*H1 == H1)
    stopifnot(Hi*H1 == Hi)
    stopifnot(Hj*H1 == Hj)
    stopifnot(Hk*H1 == Hk)
    stopifnot(Hi*Hj*Hk == -H1)
    ## Quaternion zero times table:
    stopifnot(H0*H1 == H0)
    stopifnot(H0*Hi == H0)
    stopifnot(H0*Hj == H0)
    stopifnot(H0*Hk == H0)
    stopifnot(H1*H0 == H0)
    stopifnot(Hi*H0 == H0)
    stopifnot(Hj*H0 == H0)
    stopifnot(Hk*H0 == H0)
    ## And some quaternion additions:
    stopifnot(H1 + Him == Hall)
    stopifnot(Hi + Hj + Hk == Him)
    stopifnot(H1 + Hi + Hj + Hk == Hall)
    ## And some quaternion subtractions:
    stopifnot(Hi - Hi == H0)
    stopifnot(Hall - Hi - Hj - Hk == H1)
    stopifnot(Hall - Him == H1)
    ## Now all 64 of the octonions:
    stopifnot(O1*O1 == O1 )
    stopifnot(O1*Oi == Oi )
    stopifnot(O1*Oj == Oj )
    stopifnot(O1*Ok == Ok )
    stopifnot(O1*Ol == Ol )
    stopifnot(O1*Oil == Oil)
    stopifnot(O1*Ojl == Ojl)
    stopifnot(O1*Okl == Okl)
    stopifnot(Oi*O1 == Oi )
    stopifnot(Oi*Oi == -O1 )
    stopifnot(Oi*Oj == Ok )
    stopifnot(Oi*Ok == -Oj )
    stopifnot(Oi*Ol == Oil)
    stopifnot(Oi*Oil == -Ol )
    stopifnot(Oi*Ojl == -Okl)
    stopifnot(Oi*Okl == Ojl)
    stopifnot(Oj*O1 == Oj )
    stopifnot(Oj*Oi == -Ok )
    stopifnot(Oj*Oj == -O1 )
    stopifnot(Oj*Ok == Oi )
    stopifnot(Oj*Ol == Ojl)
    stopifnot(Oj*Oil == Okl)
    stopifnot(Oj*Ojl == -Ol )
    stopifnot(Oj*Okl == -Oil)
    stopifnot(Ok*O1 == Ok )
    stopifnot(Ok*Oi == Oj )
    stopifnot(Ok*Oj == -Oi )
    stopifnot(Ok*Ok == -O1 )
    stopifnot(Ok*Ol == Okl)
    stopifnot(Ok*Oil == -Ojl)
    stopifnot(Ok*Ojl == Oil)
    stopifnot(Ok*Okl == -Ol )
    stopifnot(Ol*O1 == Ol )
    stopifnot(Ol*Oi == -Oil)
    stopifnot(Ol*Oj == -Ojl)
    stopifnot(Ol*Ok == -Okl)
    stopifnot(Ol*Ol == -O1 )
    stopifnot(Ol*Oil == Oi )
    stopifnot(Ol*Ojl == Oj )
    stopifnot(Ol*Okl == Ok )
    stopifnot(Oil*O1 == Oil)
    stopifnot(Oil*Oi == Ol )
    stopifnot(Oil*Oj == -Okl)
    stopifnot(Oil*Ok == Ojl)
    stopifnot(Oil*Ol == -Oi )
    stopifnot(Oil*Oil == -O1 )
    stopifnot(Oil*Ojl == -Ok )
    stopifnot(Oil*Okl == Oj )
    stopifnot(Ojl*O1 == Ojl)
    stopifnot(Ojl*Oi == Okl)
    stopifnot(Ojl*Oj == Ol )
    stopifnot(Ojl*Ok == -Oil)
    stopifnot(Ojl*Ol == -Oj )
    stopifnot(Ojl*Oil == Ok )
    stopifnot(Ojl*Ojl == -O1 )
    stopifnot(Ojl*Okl == -Oi )
    stopifnot(Okl*O1 == Okl)
    stopifnot(Okl*Oi == -Ojl)
    stopifnot(Okl*Oj == Oil)
    stopifnot(Okl*Ok == Ol )
    stopifnot(Okl*Ol == -Ok )
    stopifnot(Okl*Oil == -Oj )
    stopifnot(Okl*Ojl == Oi )
    stopifnot(Okl*Okl == -O1 )
    ## And the zero octonion times table:
    stopifnot(O0*O0 == O0)
    stopifnot(O0*O1 == O0)
    stopifnot(O0*Oi == O0)
    stopifnot(O0*Oj == O0)
    stopifnot(O0*Ok == O0)
    stopifnot(O0*Ol == O0)
    stopifnot(O0*Oil == O0)
    stopifnot(O0*Ojl == O0)
    stopifnot(O0*Okl == O0)
    stopifnot(O1*O0 == O0)
    stopifnot(Oi*O0 == O0)
    stopifnot(Oj*O0 == O0)
    stopifnot(Ok*O0 == O0)
    stopifnot(Ol*O0 == O0)
    stopifnot(Oil*O0 == O0)
    stopifnot(Ojl*O0 == O0)
    stopifnot(Okl*O0 == O0)
    ## And some octonion additions:
    stopifnot(O1 + Oim == Oall)
    stopifnot(Oi + Oj + Ok + Ol + Oil + Ojl + Okl == Oim)
    # NOTE(review): H1 (a quaternion) is mixed with octonions below --
    # presumably exercises coercion; confirm it is not a typo for O1
    # (compare the O1 line just above).
    stopifnot(H1 + Oi + Oj + Ok + Ol + Oil + Ojl + Okl == Oall)
    ## And some subtractions:
    stopifnot(Oil - Oil == O0)
    stopifnot(Oall - Oim == O1)
    ## Dummy return value:
    return(TRUE)
}
## Numerical identity checks on hand-picked quaternions and octonions:
## associativity of quaternions, distributivity of both algebras, and the
## *power*-associativity and alternativity of octonions. Each check calls
## test(), which stops unless its argument is numerically zero. Returns
## TRUE when everything passes; ... is accepted but unused.
g <- function(...){
    ## Three fixed quaternions with awkward components:
    q1 <- as.quaternion(c(pi,sqrt(2),-3,10.1),single=TRUE)
    q2 <- as.quaternion(c(exp(1),-2.22222,1/4,-1),single=TRUE)
    q3 <- as.quaternion(c(exp(-0.1), 0.1122, -2, -0.001),single=TRUE)

    ## Quaternions associate, so their associator must vanish:
    test(associator(q1,q2,q3))

    ## Distributivity over addition:
    test(q1*(q2+q3) - (q1*q2+q1*q3))

    ## Build an octonion out of the quaternions above:
    o1 <- q1 + Oil*q2 + Oj*q3

    ## Octonions are *power* associative:
    test( o1*(o1*o1) - (o1*o1)*o1)

    ## Two further octonions:
    o2 <- as.octonion(pi+1:8,single=TRUE)
    o3 <- as.octonion(1.123^(1:8) ,single=TRUE)

    ## Distributivity holds for octonions too:
    test(o1*(o2+o3) - (o1*o2+o1*o3))

    ## Alternativity: x(xy) == (xx)y and x(yx) == (xy)x:
    test( o1*(o1*o2) - (o1*o1)*o2 )
    test( o1*(o2*o1) - (o1*o2)*o1 )

    ## Dummy return value
    return(TRUE)
}
# Run the identity batteries under both settings of the use.R option --
# presumably it selects an R-level vs a compiled arithmetic backend;
# confirm against the onion package documentation.
options(use.R=TRUE)
f()
g()
options(use.R=FALSE)
f()
g()
# The associator of three fixed octonions must agree (to tolerance)
# between the two backends:
x <- as.octonion(c(1,4,sqrt(2),pi,pi/3, 1e-2, -4,1-pi),single=TRUE)
y <- as.octonion(1:8,single=TRUE)
z <- as.octonion(sqrt(17:10),single=TRUE)
options(use.R = TRUE)
jj.T <- associator(x,y,z)
options(use.R = FALSE)
jj.F <- associator(x,y,z)
test(jj.T-jj.F)
# Now some randomish checks that verify vectorized addition:
# Run a batch of elementwise algebraic identities on 'a', a vector of
# quaternions or octonions; each test() stops unless its argument is
# numerically zero. Ends with an associator check appropriate to the
# algebra, or stops if 'a' is neither type.
h <- function(a){
    test(a-a)                    # additive inverse
    test(a + (-1)*a)
    test((-1)*a + a)
    test( (a+a )-2*a)            # scalar multiplication vs repeated addition
    test( (a+a )-a*2)
    test( (a+a+a)-3*a)
    test( (a+a+a)-a*3)
    test(a+1-a-1)                # mixing with numeric scalars
    test(a+a[1]-a-a[1])          # mixing with a single-element slice
    test(a/a - 1)                # division identities
    test(a^2/a - a)
    test(a^3/a^2 - a)
    test(a^4/a^2 - a^2)
    test( (a+a)/a - 2)
    test( (a+a)/(a*a) - 2/a)
    test(a*a - a^2)              # powers vs repeated multiplication
    test(a*a*a - a^3) #recall that octonions are *power* associative
    test(a*a*a*a - a^4)
    test(1/a - a^(-1))           # reciprocal identities
    test(1/a^2 - a^(-2))
    test(1/(a^2) - (1/a)^2)
    test(1/(a^3) - (1/a)^3)
    test( (a/a[1])*a[1] - a)
    if(is.quaternion(a)){
        # quaternions associate, so the associator vanishes
        test(associator(a,a+1,a+Hi))
    } else if (is.octonion(a)){
        # octonions are alternative: the associator of elements in the
        # subalgebra generated by a and (Ok, Oil) combinations vanishes
        test(associator(a*(3*Ok+4*Oil),a*(Ok+Oil),a*(Ok+2*Oil)))
    } else {
        stop("a must be quaternion or octonion")
    }
}
# Run the identity batch on whole vectors of octonions and quaternions:
h(as.octonion(matrix(1:5,nrow=8,ncol=10)))
h(as.quaternion(matrix(1:5,nrow=4,ncol=20)))
## tests for a correct bugfix to a bug spotted (and patched!) by Adam
## Muschielok. Regression check: presumably Conj() followed by taking the
## reciprocal should run without error on random quaternions/octonions.
x <- rquat(3)
x <- Conj(x)
x <- 1/x
x <- roct(7)
x <- Conj(x)
x <- 1/x
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.