blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
790484edf46e56b4913b64b80ffc1f8189d94ef2
|
13a430d486c1a52fc2ac470775012f2c7e13baae
|
/R/data-doc-deadendkey.R
|
a9eeab42827d143da64c9345262b529cf54fa7b4
|
[] |
no_license
|
ericmkeen/bangarang
|
66fe538e19d42c2b6c93b29cb8e31adb3230a36a
|
db710584811e108646c350f70fc025c8563f0fff
|
refs/heads/master
| 2023-03-15T16:42:09.605054
| 2023-03-09T16:47:09
| 2023-03-09T16:47:09
| 51,127,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 770
|
r
|
data-doc-deadendkey.R
|
#' A key for straightest-path decision making between channels of the Kitimat Fjord System
#'
#' A symmetrical dataframe that lists the potentially viable channels to travel through for
#' a given combination of starting point (row name) and ending point (column name). If there are
#' multiple viable channels, they are separated by a \code{"-"}. Used behind the scenes in the
#' \code{routeKFS} functions.
#'
#' @format A data frame with 13 rows and 14 columns (the first column is row.names).
#' Row and column names are the same, and correspond to channels in the
#' Kitimat Fjord System as they are partitioned in Bangarang analyses:
#' caa, est, cmp, ash, ssq, nsq, wha, lew, wri, dug, mck, ver, bish
#'
#' @source Written and compiled by Eric Keen
"deadendkey"
|
c4c37980ce22b83fc20fa229fac17d7567cee883
|
9cd1a6c38b580d6bc525b6da4444405ef62233f0
|
/R/preprocessInputs.R
|
44ba71a74b3a87b2318dc8729d27a5db60155b14
|
[] |
no_license
|
cran/SurvLong
|
b65c7a5ee85023eace61e15fdf5b1badb94cc66f
|
eb8629eac4eb22edd1a7ef02c3a5aa7f488cc887
|
refs/heads/master
| 2022-06-22T06:26:30.333463
| 2022-06-06T23:20:02
| 2022-06-06T23:20:02
| 31,186,520
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,810
|
r
|
preprocessInputs.R
|
#******************************************************************************#
# Verify and pre-process inputs #
#******************************************************************************#
# #
# Inputs #
# #
# data.x an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, event time, event indicator\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# data.z an object of class data.frame. #
# The structure of the data.frame must be #
# \{patient ID, time of measurement, measurement(s)\}. #
# Patient IDs must be of class integer or be able to be #
# coerced to class integer without loss of information. #
# Missing values must be indicated as NA. #
# #
# Outputs #
# #
# Return a list #
# #
# data.x Same as input with: ids coerced to integer; NAs removed; #
# #
# data.z Same as input with: ids coerced to integer; NAs removed; #
# missing data cases set to 0. #
# #
#******************************************************************************#
preprocessInputs <- function(data.x, data.z) {
  # Verify and pre-process the survival and covariate datasets.
  #
  # data.x : data.frame of {patient ID, event time, event indicator}.
  #          IDs are coerced to integer; rows with a missing event time
  #          are removed.
  # data.z : data.frame of {patient ID, measurement time, measurement(s)}.
  #          IDs are coerced to integer; rows in which every value is NA
  #          are removed, and any remaining NA measurement is set to 0.
  #
  # Returns a list with elements data.x and data.z.
  #
  # Stops if either data.frame does not have the required columns or if
  # any time value is negative.
  #--------------------------------------------------------------------------#
  # Verify sufficient number of columns in datasets                          #
  #--------------------------------------------------------------------------#
  nc <- ncol(data.x)
  if( nc != 3L ) stop("data.x must include {ID, time, delta}.")
  ncz <- ncol(data.z)
  if( ncz < 3L ) stop("data.z must include {ID, time, measurement}.")
  #--------------------------------------------------------------------------#
  # Ensure that patient ids are integers (round first so values such as     #
  # 2.0 survive the coercion unchanged).                                    #
  #--------------------------------------------------------------------------#
  if( !is.integer(data.z[,1L]) ) {
    data.z[,1L] <- as.integer(round(data.z[,1L],0))
    message("Patient IDs in data.z were coerced to integer.\n")
  }
  if( !is.integer(data.x[,1L]) ) {
    data.x[,1L] <- as.integer(round(data.x[,1L],0))
    message("Patient IDs in data.x were coerced to integer.\n")
  }
  #--------------------------------------------------------------------------#
  # Remove any cases for which all covariates are NA.                       #
  # rowSums(is.na(.)) == ncol(.) is TRUE exactly for all-NA rows and       #
  # avoids the row-wise apply() loop (which coerces the data.frame to a    #
  # matrix).                                                               #
  #--------------------------------------------------------------------------#
  rmRow <- rowSums(is.na(data.z)) == ncol(data.z)
  data.z <- data.z[!rmRow,]
  #--------------------------------------------------------------------------#
  # Set missing cases to 0.0                                                 #
  #--------------------------------------------------------------------------#
  tst <- is.na(data.z)
  data.z[tst] <- 0.0
  #--------------------------------------------------------------------------#
  # Remove any cases for which response is NA                                #
  #--------------------------------------------------------------------------#
  tst <- is.na(data.x[,2L])
  data.x <- data.x[!tst,]
  #--------------------------------------------------------------------------#
  # Verify that all times are non-negative (small tolerance absorbs        #
  # floating-point noise around zero).                                      #
  #--------------------------------------------------------------------------#
  if( any(data.z[,2L] < -1.5e-8) ) {
    stop("Time is negative in data.z.")
  }
  if( any(data.x[,2L] < -1.5e-8) ) {
    stop("Time is negative in data.x.")
  }
  return(list(data.x = data.x,
              data.z = data.z))
}
|
d2dff3af99dd3e2af5dc08375b2d927f96294d2a
|
6c55d0793217f79c36fe4ef59ae2a9e6907af3db
|
/Day17/day17.R
|
d357a857a44dda638783de428e2b6120b19ce2b1
|
[] |
no_license
|
Darius-Jaraminas/advent_of_code_2020
|
decb06a1b9262fbf06de6ef1f92efd14d3d4f0dc
|
88242d6c185a528d2ac251094ad8908c8bc54cf6
|
refs/heads/master
| 2023-02-07T02:14:38.090605
| 2020-12-25T09:22:21
| 2020-12-25T09:22:21
| 317,453,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 769
|
r
|
day17.R
|
library(dplyr)
source("fun.R")

# --- Part 1 ------------------------------------------------------------------

# Sanity check against the worked example (expected: 112 active cells).
t1 <- create_cube(x = read_dimension("test1.txt"), d = 6)
nst <- t1
for (cycle in seq_len(6)) {
  nst <- change_state(cube = nst)
}
r1_t1 <- sum(nst == "#")
r1_t1 == 112

# Solve on the real puzzle input.
inp <- create_cube(x = read_dimension("input.txt"), d = 6)
nst <- inp
for (cycle in seq_len(6)) {
  nst <- change_state(cube = nst)
}
r1 <- sum(nst == "#")
print(r1)
# 232

# --- Part 2 (four-dimensional grid) ------------------------------------------

# Sanity check against the worked example (expected: 848 active cells).
t1 <- create_cube_4(x = read_dimension("test1.txt"), d = 6)
nst <- t1
for (cycle in seq_len(6)) {
  nst <- change_state_4(cube = nst)
}
r2_t1 <- sum(nst == "#")
r2_t1 == 848

# Solve on the real puzzle input.
inp <- create_cube_4(x = read_dimension("input.txt"), d = 6)
nst <- inp
for (cycle in seq_len(6)) {
  nst <- change_state_4(cube = nst)
}
r2 <- sum(nst == "#")
print(r2)
# 1620
|
63f7064b264761fbac991ccf77e080ceea21c9c9
|
1768373110e011afc51eda826f9a2a3ebfdc2a02
|
/Modify_data.R
|
f5427c0941e72e85d9938af216a9b79f9832d9cc
|
[] |
no_license
|
KohSoonho/GINOBILI
|
b3ffa417ad194aa389d7fcf0a772fe5c00926d59
|
11a7bb413593f51fd42671df269cb7c7db808392
|
refs/heads/master
| 2021-05-05T11:41:42.075805
| 2019-09-29T09:24:28
| 2019-09-29T09:24:28
| 118,218,977
| 1
| 0
| null | 2018-02-07T14:39:12
| 2018-01-20T07:14:56
|
R
|
UTF-8
|
R
| false
| false
| 4,654
|
r
|
Modify_data.R
|
# Load packages -----------------------------------------------------------
library(tidyverse)
library(lazyeval)
library(remoji)
library(magrittr)
# Get Ginobili data -------------------------------------------------------
# Per-season career statistics; one row per season, one column per stat.
data_ginobili <- read_csv("Ginobili.csv")
# Create total row --------------------------------------------------------
# Create NULL tibble (zero-row copy so the column types are preserved)
null_data <- data_ginobili[0, ]
# Create total tibble 1 row
total_data <- null_data
# integer columns -> sum(columns)
# Year -> "Total"
# Else (character or factor) -> value copied from the last season row
for(i in seq_along(data_ginobili)) {
  if(colnames(data_ginobili[i]) == "Year") {
    total_data[1, i] <- "Total"
  }else if(identical(class(data_ginobili[[i]]), "integer") == F) {
    total_data[1, i] <- data_ginobili[nrow(data_ginobili), i]
  }else{
    total_data[1, i] <- sum(data_ginobili[i])
  }
}
# add total_data to ginobili_data
data_total_ginobili <- full_join(data_ginobili, total_data)
# Remodeling data ------------------------------------------------------------
# Rename tibble columns, position by position.
name_new_list <- c("Season", "League", "Name", "Team", "POS", "GP", "MIN", "FGM", "FGA",
                   "3PM", "3PA", "FTM", "FTA", "OFER", "REB", "AST", "STL", "TO",
                   "BLK", "FLS", "Disqualifications", "PTS", "Technicals", "Ejections",
                   "Flagrant", "GS")
for(i in seq_along(data_total_ginobili)) {
  names(data_total_ginobili)[i] <- name_new_list[i]
}
# Create mutate per function ----------------------------------------------
# mutate_per(df, col1, col2, new_col, num): adds column new_col = round(col1 / col2, num).
# NOTE(review): lazyeval::interp + mutate_ is the deprecated dplyr NSE API;
# kept as written because the script depends on it.
mutate_per <- function(df, col1, col2, new_col, num) {
  mutate_call <- lazyeval::interp(~ round(a / b, digits = num),
                                  a = as.name(col1),
                                  b = as.name(col2))
  df %>% mutate_(.dots = setNames(list(mutate_call), new_col))
}
# Create FG%, 3P% and FT% by mutate_per_game -------------------------------
changelist <- map(c("FG", "3P", "FT"), ~ str_c(. , c("M", "A", "%"), sep = ""))
# make list of name triples (~M, ~A, ~%) for each shot type
# loop over FG/3P/FT, computing e.g. "FG%" = FGM / FGA via mutate_per
for(i in 1:3) {
  data_total_ginobili %<>% mutate_per(changelist[[i]][1], changelist[[i]][2], changelist[[i]][3], 3)
}
# Arrange columns; this order is the one applied in NBA2K
data_total_ginobili <- data_total_ginobili %>%
  mutate(DFER = REB - OFER) %>%
  select("Season", "Team", "POS", "PTS", "OFER", "DFER", "REB", "AST", "STL", "BLK", "TO", "FGM", "FGA",
         "FG%", "3PM", "3PA", "3P%", "FTM", "FTA", "FT%", "MIN", "FLS", "GS", "GP")
# Create New Variable -----------------------------------------------------
# Add title column (awards are accumulated into it below)
data_total_ginobili <- data_total_ginobili %>% mutate(Title = "")
## Unicode of medal
## Gold: \U0001F947, Silver: \U0001F948, Bronze: \U0001F949, Medal: \U0001F396
# Make function add emoji + award:
# add_title(medal) returns a function that prefixes a title string with the
# matching emoji.
add_title <- function(medal) {
  function(title) {
    switch(medal, "trophy" = str_c(emoji("trophy"), title, sep = ""),
           "gold" = str_c("\U0001F947", title, sep = ""),
           "silver" = str_c("\U0001F948", title, sep = ""),
           "bronze" = str_c("\U0001F949", title, sep = ""),
           "medal" = str_c("\U0001F396", title, sep = ""))
  }
}
# Make function that adds an award to the df.
# NOTE(review): mutates the global data_total_ginobili via <<-; each award is
# appended (newline-separated) to the Title of the matching seasons.
add_title_df <- function(lst, func, theme = "theme") {
  for(i in lst) {
    data_total_ginobili[data_total_ginobili$Season == i, "Title"] <<-
      data_total_ginobili[data_total_ginobili$Season == i, "Title"] %>%
      str_c("\n", func(theme), sep = "")
  }
}
# Each entry: (seasons, emoji-prefixing function, award text).
lst_title <- list(list(c("02-03", "04-05", "06-07", "13-14"), add_title("trophy"), "NBA Champion"),
                  list(c("04-05", "10-11"), add_title("medal"), "All Star"),
                  list(c("07-08", "10-11"), add_title("medal"), "NBA 3rd Team"),
                  list("07-08", add_title("medal"), "Six Man Award"),
                  list("02-03", add_title("medal"), "All Rockie 2nd Team"),
                  list("03-04", add_title("gold"), "Olympic Gold"),
                  list("07-08", add_title("bronze"), "Olympic Bronze"))
# Add all awards to the df
for (i in 1:length(lst_title)) {
  add_title_df(lst_title[[i]][[1]], lst_title[[i]][[2]], lst_title[[i]][[3]])
}
# Per game stats ----------------------------------------------------------
# list of stats for which per-game values are computed
basic_stats <- c("MIN", "FGM", "FGA", "3PM", "3PA", "FTM", "FTA", "OFER", "DFER", "REB", "AST", "STL", "TO",
                 "BLK", "FLS", "PTS")
data_per_game_ginobili <- data_total_ginobili # Copy dataframe
# Replace each stat with its value divided by games played (GP), 1 decimal
for(i in seq_along(basic_stats)) {
  data_per_game_ginobili <- data_per_game_ginobili %>%
    mutate_per(basic_stats[[i]], "GP", basic_stats[[i]], 1)
}
|
3d720972030bcf9f9e495dd440ccd487eb12507d
|
93b1cbfbf81127eaec60f6f677b05ccf4f9754d9
|
/run_analysis.R
|
f003f7e4c9b2d0c1ab930f6e19d79bb2ec98d173
|
[] |
no_license
|
sinhars/cleandata
|
389ce2f987bbf44ef13369f94601baf276161385
|
e44e8ab1551b69f5d6b7be62cd00a11dc097bb0b
|
refs/heads/master
| 2021-05-01T04:06:23.326310
| 2018-02-12T07:54:05
| 2018-02-12T07:54:05
| 121,199,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,610
|
r
|
run_analysis.R
|
run_analysis <- function(directory = getwd()) {
    # Read the UCI HAR train/test datasets from `directory`, merge them,
    # keep only the Subject, Activity, mean() and std() feature columns,
    # and replace activity IDs with their descriptive labels.
    #
    # directory : path to the dataset root (default: working directory).
    #
    # Returns a data.frame; empty when `directory` does not exist.

    # Read one split ("train" or "test"): load the measurement matrix,
    # apply the feature names, then bind and rename the activity and
    # subject columns. Returns an empty data.frame when the files are
    # missing. (This helper removes the train/test copy-paste duplication
    # of the original implementation.)
    read_split <- function(split, features) {
        x_file <- paste0(directory, "/", split, "/X_", split, ".txt")
        y_file <- paste0(directory, "/", split, "/y_", split, ".txt")
        subject_file <- paste0(directory, "/", split, "/subject_", split, ".txt")
        split_data <- data.frame()
        if(file.exists(x_file) && file.exists(subject_file)) {
            # Read measurement, activity and subject data from files
            split_data <- read.table(file = x_file, sep = "", header = FALSE)
            y_data <- read.table(file = y_file, sep = "", header = FALSE)
            subject_data <- read.table(file = subject_file, sep = "", header = FALSE)
            # Rename columns with the appropriate feature names
            if("V2" %in% colnames(features)) {
                names(split_data) <- features$V2
            }
            # Bind with activity class data and rename the activity column
            split_data <- cbind(y_data, split_data)
            if("V1" %in% colnames(split_data)) {
                names(split_data)[names(split_data) == "V1"] <- "Activity"
            }
            # Bind with subject data and rename the subject column
            split_data <- cbind(subject_data, split_data)
            if("V1" %in% colnames(split_data)) {
                names(split_data)[names(split_data) == "V1"] <- "Subject"
            }
        }
        split_data
    }

    merged_data <- data.frame()
    if(dir.exists(paths = directory)) {
        activities <- data.frame()
        features <- data.frame()
        # Read feature names from file
        features_file <- paste0(directory, "/features.txt")
        if(file.exists(features_file)) {
            features <- read.table(file = features_file, sep = "", header = FALSE)
        }
        # Read the activity class and corresponding names from file
        activities_file <- paste0(directory, "/activity_labels.txt")
        if(file.exists(activities_file)) {
            activities <- read.table(file = activities_file, sep = "", header = FALSE)
        }
        train_data <- read_split("train", features)
        test_data <- read_split("test", features)
        # Merge train and test data frames into a single set
        merged_data <- rbind(train_data, test_data)
        # Keep only Subject, Activity, mean and std columns in the final data frame
        group_col <- c("Subject", "Activity")
        mean_cols <- grep("mean()", names(merged_data), value = TRUE, fixed = TRUE)
        std_cols <- grep("std()", names(merged_data), value = TRUE, fixed = TRUE)
        merged_data <- merged_data[, c(group_col, mean_cols, std_cols)]
        # Merge activity names from the activities data frame and replace
        # numeric IDs with descriptive text
        merged_data <- merge.data.frame(merged_data, activities, by.x = "Activity", by.y = "V1")
        merged_data$Activity <- merged_data$V2
        merged_data <- merged_data[, !(names(merged_data) %in% "V2")]
    }
    merged_data
}
|
fa6f0aa8c6ab799958d8eb18a198e0d0fb1e5a96
|
3bbb6df418b9f1c385ff82ca92eb21b002d0a9f3
|
/ml_distance.R
|
f93a78689c7b2414c1ef45e90eaff0f7105cc832
|
[] |
no_license
|
tuhulab/harvardx-datascience
|
09350704dadddc0c5c53649cebfbd4a9f3c23cd3
|
cbf97c8c18623d3d6c67db0310fe8d3c6a1134e4
|
refs/heads/master
| 2020-04-08T20:31:16.359248
| 2019-06-09T15:16:59
| 2019-06-09T15:16:59
| 159,703,026
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
ml_distance.R
|
library(tidyverse)
library(dslabs)
# Load the MNIST digits only once per session.
if(!exists("mnist")) mnist <- read_mnist()
set.seed(1995)
# Sample 500 training images whose label is 2 or 7.
ind <- which(mnist$train$labels %in% c(2,7)) %>% sample(500)
x <- mnist$train$images[ind,]
y <- mnist$train$labels[ind]
y[1:3]
# First three images as pixel vectors.
x_1 <- x[1,]
x_2 <- x[2,]
x_3 <- x[3,]
# Euclidean distances between the first three images.
sqrt(sum((x_1 - x_2)^2))
sqrt(sum((x_1 - x_3)^2))
sqrt(sum((x_2 - x_3)^2))
# NOTE(review): crossprod(x_1, x_2) is sum(x_1 * x_2), so this is NOT the
# distance above; the distance form would be sqrt(crossprod(x_1 - x_2)).
sqrt(crossprod(x_1,x_2))
# Pairwise distances between all 500 observations (a "dist" object).
d <- dist(x)
class(d)
as.matrix(d)[1:3,1:3]
image(as.matrix(d))
# Reorder rows/columns by label so same-digit blocks appear together.
image(as.matrix(d)[order(y), order(y)])
# Distances between predictors (pixels) rather than observations.
d <- dist(t(x))
dim(as.matrix(d))
# Distance of pixel 492 to every other pixel, shown on the 28x28 grid.
d_492 <- as.matrix(d)[492,]
image(1:28,1:28,matrix(d_492,28,28))
#exercise-Q1
data("tissue_gene_expression")
dim(tissue_gene_expression$x)
table(tissue_gene_expression$y)
x <- tissue_gene_expression$x
d <- dist(tissue_gene_expression$x)
# Inspect specific within-pair sample distances.
d_matrix <- as.matrix(d)[1:189,1:189]
d_matrix[1,2]
d_matrix[39,40]
d_matrix[73,74]
image(d_matrix)
#dimension reduction_example
set.seed(1988)
library(MASS)
n <- 100
# Two correlated bivariate-normal clusters centered at (69, 69) and (55, 55).
x <- rbind(mvrnorm(n / 2, c(69, 69), matrix(c(9, 9 * 0.9, 9 * 0.92, 9 * 1), 2, 2)),
           mvrnorm(n / 2, c(55, 55), matrix(c(9, 9 * 0.9, 9 * 0.92, 9 * 1), 2, 2)))
qplot(x[,1],x[,2])
d <- dist(x)
# Within-cluster vs between-cluster distance.
as.matrix(d)[1,2]
as.matrix(d)[1,51]
# One-dimensional summary: first coordinate alone approximates the
# 2-D distances (up to a sqrt(2) factor).
z <- x[,1]
qplot(dist(z),dist(x)/sqrt(2))
qplot(dist(x),dist(z))
# Rotation: mean and difference of the two coordinates.
z <- cbind((x[,2] + x[,1])/2, x[,2] - x[,1])
|
6dcc95005b8f8d57176482393f27dc00371c945d
|
d677e29760774381438f25a11c5b3e09141477dc
|
/3/fpfi3_0.7.6/fpfi3/man/DoBucking.Rd
|
8f8ac5124006fd38f839d88540c23fdb89412dc4
|
[] |
no_license
|
grpiccoli/FF
|
695e681f8c062f0c71af0488ac1e288283c63f87
|
e19d0098c592dd36303eaad7cc0d771c0813edbd
|
refs/heads/master
| 2022-04-24T00:36:39.447469
| 2020-04-19T07:42:05
| 2020-04-19T07:42:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,670
|
rd
|
DoBucking.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bucking.R
\name{DoBucking}
\alias{DoBucking}
\title{Stem bucking simulation and merchantable volume calculation}
\usage{
DoBucking(data, coeffs, products, id = NULL, dbh = "dbh", ht = "ht",
hp = NULL, hm = NULL, stump = 0.1, lseg0 = 0.2,
hdr_range = c(0.5, 2.5), dbh_range = c(10, 60), ht_range = c(1.3,
60), model = "dss", volform = "smalian", buck = "lmak",
grade_wd = TRUE, mg_disc = 0, length_disc = 0,
output = "summary", byClass = FALSE)
}
\arguments{
\item{data}{A data.frame, data.table or matrix, with colnames id, ht, and dbh at least. Every row corresponds to a tree.}
\item{coeffs}{A vector, with the parameters for the taper model. See \code{\link[fpfi3]{taper_selec}} for more information.}
\item{products}{A data.frame of 3 or 4 columns at least, depending on the \code{buck} option selected. See \code{\link[fpfi3]{lmak}}
and \code{\link[fpfi3]{lpBuck}} for more details.}
\item{id}{The column name in \code{data}, that corresponds to the input id. It will be returned with the output table. Default is NULL (will not be used).}
\item{dbh}{The name of the dbh column in \code{data}. Default is 'dbh'.}
\item{ht}{The name of the ht column in \code{data}. Default is 'ht'.}
\item{hp}{Pruning height. Could be NULL (no pruning), a scalar indicating a common hp or
a character indicating the column in \code{data} that corresponds to every tree hp. Default is NULL.}
\item{hm}{Merchantable height. Could be NULL, a scalar indicating a common hm for all trees or
a character indicating the column name in \code{data} that corresponds to every tree commercial height. Default is NULL}
\item{stump}{Stump height (in m). Default is 0.1.}
\item{lseg0}{Segment size (in m) for the volume estimation using the trapezoid approximation. Default is 0.2 (NOT IMPLEMENTED).}
\item{hdr_range}{A vector of c(min, max) ht/dbh ratio. This will be used to validate the incoming data. Trees out of this range will be removed.
Default is c(0.5, 2.5) (NOT IMPLEMENTED).}
\item{dbh_range}{A vector of c(min, max) dbh range. This will be used to validate the incoming data. Trees out of this range will be removed.
Default is c(10, 60) (NOT IMPLEMENTED).}
\item{ht_range}{A vector of c(min, max) ht range. This will be used to validate the incoming data. Trees out of this range will be removed.
Default is c(1.3, 60) (NOT IMPLEMENTED).}
\item{model}{Name of the taper function to use. See details in \code{\link[fpfi3]{taper_selec}} for more information.}
\item{volform}{Volume formula to use. Currently log volumes are calculated using the Smalian formula ("smalian"),
Newton formula ("newton"), geometric formula ("geometric"), Hoppus formula ("hoppus"), and JAS formula ("jas").
See \code{\link[fpfi3]{volform_selec}} for more details. Default is "smalian".}
\item{buck}{A string indicating the type of bucking to be used; options are 'lmak' or 'lpBuck'. Default is 'lmak'.}
\item{grade_wd}{If TRUE, logs are graded after applying the discount to the middle girth. Default is TRUE.}
\item{mg_disc}{Discount in cm to be applied to the middle girth of the logs. Default is 0 (no discount).}
\item{length_disc}{Discount in cm to be applied to the merchantable lengths of the logs. Default is 0 (no discount).}
\item{output}{Option 'logs' will return all the logs obtained for each tree without summarizing by log class name;
"summary" will also return a summary of the total volume for each log assortment at tree level (including logs). Default is "summary".}
\item{byClass}{If TRUE, and \code{output = 'summary'}, the data will be aggregated using the class (column \code{class} in
the \code{products} table), instead of the log name (column \code{name}). Default is FALSE.}
}
\value{
A list with one or two data.table data.frames. It depends on \code{output}.
}
\description{
Simulates a stem bucking process based on a list of products using lmak or lp.
}
\details{
To be completed.
}
\examples{
set.seed(87)
demo_products
taper_coeffs
trees <- data.frame(id=1:50, dbh=runif(50)*5 + 35, ht=runif(50)*5 + 30, hp=10)
ans <- DoBucking(trees, taper_coeffs, demo_products, id="id", dbh='dbh', ht='ht',
hp='hp', stump=0.15, model="dss", volform="smalian", buck="lmak",
grade_wd=TRUE, mg_disc = 0, length_disc = 0, output = "summary",
byClass = FALSE)
ans$summary
ans$logs
}
\seealso{
\code{\link[fpfi3]{taper_selec}}, \code{\link[fpfi3]{volform_selec}}, \code{\link[fpfi3]{lmak}}
and \code{\link[fpfi3]{lpBuck}}.
}
\author{
Álvaro Paredes, \email{alvaro.paredes@fpfi.cl}, Christian Mora, \email{crmora@fibraconsult.cl}
}
|
4f25cb96ab3d14d29b7fde128879b9ee01b7f997
|
8d0e724aae549de8c0e56435ccf3a14d852fee29
|
/idadeTipoAposentadoria.R
|
f08a2b890221592db71e191f11e7be81dc24828d
|
[] |
no_license
|
ArtigosSubmissao/Dissertacao-RotinasR
|
a5c1c961d5f2117840d2e55abac5db8aed3b7bc9
|
a0537c8622b69df88fa68edfbf96145c67b1bfae
|
refs/heads/main
| 2023-07-01T21:15:10.150912
| 2021-08-11T14:44:27
| 2021-08-11T14:44:27
| 395,020,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,864
|
r
|
idadeTipoAposentadoria.R
|
idadeTipoAposentadoriaRPPS = function (DadosServidores){
  ## Estimates the minimum age and the type of scheduled (programmed)
  ## retirement for each civil servant in DadosServidores under RPPS rules.
  #' Parameters
  tempoDeContribuicao=10 # Years a servant must contribute to the regime before retiring.
  idadeMinimaMulherIdade=60 # Minimum age for retirement by age (female).
  idadeMinimaHomemIdade=65 # Minimum age for retirement by age (male).
  idadeMinimaMulherTempoIdade=55 # Minimum age for retirement by age and contribution time (female).
  idadeMinimaHomemTempoIdade=60 # Minimum age for retirement by age and contribution time (male).
  contribuicaoMulher=30 # Contribution time required for a woman to retire by age and contribution time.
  contribuicaoHomem=35 # Contribution time required for a man to retire by age and contribution time.
  idadeMinimaCompulsoria=75 # Age of compulsory retirement (either sex).
  idadeMinMulherProf=55 # Minimum retirement age for a female teacher.
  idadeMinHomemProf=60 # Minimum retirement age for a male teacher.
  contribuicaoMulherProf=25 # Minimum contribution for a female teacher.
  contribuicaoHomemProf=30 # Minimum contribution for a male teacher.
  MaiorIdadeRPPSIdadeMulher = 50 # Latest age a woman can start contributing to the RPPS and still retire by age.
  MaiorIdadeRPPSIdadeHomem = 55 # Latest age a man can start contributing to the RPPS and still retire by age.
  MenorIdadeContribuicaoTempo = 25 # Minimum contribution-start age for retirement by contribution time and age.
  contribuicaoEspecial=25 # Contribution time required for special (hazardous-work) retirement.
  tipoAposentadoriaTempo = 7 # Code assigned to retirement by contribution time.
  tipoAposentadoriaIdade = 8 # Code assigned to retirement by age.
  # tipoAposentadoriaEspecial = 9 # Code assigned to special retirement.
  tipoAposentadoriaCompulsoria = 10 # Code assigned to compulsory retirement.
  idadeLimite = 300 # Sentinel (large) age for servants who cannot retire via hazardous work.
  TamanhoPopulacao=length(DadosServidores[,1]) # Size of the initial population.
  DadosServidores[is.na(DadosServidores)] = 0
  Sexo = DadosServidores$Sexo # Sex: 1 - female, 2 - male.
  Idade = DadosServidores$Idade # Servant's current age.
  EstadoInicial = DadosServidores$EstadoInicial # State the servant is currently in.
  IdadeEntradaRPPS = DadosServidores$IdadeEntradaRPPS # Age at which the servant started contributing to the RPPS.
  TempoRGPS = DadosServidores$TempoRGPS # Time (in years) the servant contributed to the RGPS.
  # tempoContribuicaoEspecial=DadosServidores$Insalubridade # Time (in years) worked under hazardous conditions.
  SerProfessor = DadosServidores$EProfessor # Whether the servant is a teacher.
  #atividadeEspecial = DadosServidores$AtividadeEspecial # Whether the servant performs hazardous work.
  IdadeAposentadoria=AposentadoriaPorTempo=AposentadoriaPorIdade=AposentadoriaEspecial=replicate(TamanhoPopulacao,NA)
  TipoAposentadoria = vector (length=TamanhoPopulacao) # Vector to hold the retirement type per servant.
  quantidadeMulher <- length(DadosServidores$Sexo[Sexo==1]) # Number of women.
  quantidadeHomem <- length(DadosServidores$Sexo[Sexo==2]) # Number of men.
  vetIdadeMinMulherIdade <- replicate(quantidadeMulher,idadeMinimaMulherIdade) # Minimum retirement-by-age age, one entry per woman.
  vetIdadeMinHomemIdade <- replicate(quantidadeHomem,idadeMinimaHomemIdade) # Minimum retirement-by-age age, one entry per man.
  quantidadeNaoProfa <- length(DadosServidores$Sexo[(Sexo==1) & (SerProfessor==0)]) # Number of women who are not teachers.
  quantidadeProfa <- length(DadosServidores$Sexo[(Sexo==1) & (SerProfessor==1)]) # Number of women who are teachers.
  quantidadeNaoProf <- length(DadosServidores$Sexo[(Sexo==2) & (SerProfessor==0)]) # Number of men who are not teachers.
  quantidadeProf <- length(DadosServidores$Sexo[(Sexo==2) & (SerProfessor==1)]) # Number of men who are teachers.
  vetIdadeMinNaoProfa <- replicate(quantidadeNaoProfa, idadeMinimaMulherTempoIdade) # Minimum age (time + age rule) for non-teacher women.
  vetIdadeMinProfa <- replicate(quantidadeProfa, idadeMinMulherProf) # Minimum age (time + age rule) for teacher women.
  vetIdadeMinNaoProf <- replicate(quantidadeNaoProf, idadeMinimaHomemTempoIdade) # Minimum age (time + age rule) for non-teacher men.
  vetIdadeMinProf <- replicate(quantidadeProf, idadeMinHomemProf) # Minimum age (time + age rule) for teacher men.
  idadeInicioDeContribuicao <- (Idade-(Idade-DadosServidores$IdadeEntradaRPPS+DadosServidores$TempoRGPS)) # Age at which the individual first started contributing (RPPS entry age minus prior RGPS time).
  # RETIREMENT TYPES AND THEIR RESPECTIVE VARIABLES:
  ## Retirement age by age: AposentadoriaPorIdade
  ## Retirement age by age and contribution time: AposentadoriaPorTempo
  ## Special retirement age: AposentadoriaEspecial
  ## Earliest age at which the servant is eligible to retire: IdadeAposentadoria
  #' Eligibility for retirement by age.
  #' Requires 10 years of contribution in public service.
  #' Minimum age of 60 for women or 65 for men.
  AposentadoriaPorIdade[Sexo==1 & (idadeInicioDeContribuicao)<=MaiorIdadeRPPSIdadeMulher]=pmax(vetIdadeMinMulherIdade[idadeInicioDeContribuicao[Sexo==1]<=MaiorIdadeRPPSIdadeMulher],Idade[Sexo==1 & (idadeInicioDeContribuicao)<=MaiorIdadeRPPSIdadeMulher]+1)
  AposentadoriaPorIdade[Sexo==1 & (idadeInicioDeContribuicao)>MaiorIdadeRPPSIdadeMulher]=(idadeInicioDeContribuicao)[Sexo==1 & (idadeInicioDeContribuicao)>MaiorIdadeRPPSIdadeMulher]+tempoDeContribuicao
  AposentadoriaPorIdade[Sexo==2 & (idadeInicioDeContribuicao)<=MaiorIdadeRPPSIdadeHomem]=pmax(vetIdadeMinHomemIdade[idadeInicioDeContribuicao[Sexo==2]<=MaiorIdadeRPPSIdadeHomem],Idade[Sexo==2 & (idadeInicioDeContribuicao)<=MaiorIdadeRPPSIdadeHomem]+1)
  AposentadoriaPorIdade[Sexo==2 & (idadeInicioDeContribuicao)>MaiorIdadeRPPSIdadeHomem]=(idadeInicioDeContribuicao)[Sexo==2 & (idadeInicioDeContribuicao)>MaiorIdadeRPPSIdadeHomem]+tempoDeContribuicao
  # Eligibility for retirement by contribution time and age.
  ## Minimum age of 60 for women or 65 for men.
  ## NOTE(review): the line above translates the original comment literally; the
  ## constants actually used below are 55 (women) / 60 (men) — confirm intent.
  ## Minimum contribution of 30 years for women or 35 for men.
  ## If the servant started before age 25, they retire by contribution time at 60 (men) or 55 (women).
  ## For teachers these ages are reduced by 5 years, i.e. 55 for men and 50 for women.
  ## SerProfessor is 0 when NOT a teacher and 1 when a teacher.
  AposentadoriaPorTempo[(Sexo==1) & (SerProfessor==0)]=pmax((idadeInicioDeContribuicao[(Sexo==1) & (SerProfessor==0)]+contribuicaoMulher),vetIdadeMinNaoProfa,Idade[(Sexo==1) & (SerProfessor==0)]+1)
  AposentadoriaPorTempo[(Sexo==1) & (SerProfessor==1)]=pmax((idadeInicioDeContribuicao[(Sexo==1) & (SerProfessor==1)]+contribuicaoMulherProf),vetIdadeMinProfa,Idade[(Sexo==1) & (SerProfessor==1)]+1)
  AposentadoriaPorTempo[(Sexo==2) & (SerProfessor==0)]=pmax((idadeInicioDeContribuicao[(Sexo==2) & (SerProfessor==0)]+contribuicaoHomem),vetIdadeMinNaoProf,Idade[(Sexo==2) & (SerProfessor==0)]+1)
  AposentadoriaPorTempo[(Sexo==2) & (SerProfessor==1)]=pmax((idadeInicioDeContribuicao[(Sexo==2) & (SerProfessor==1)]+contribuicaoHomemProf),vetIdadeMinProf,Idade[(Sexo==2) & (SerProfessor==1)]+1)
  # Eligibility for special retirement
  ## An individual is entitled to special retirement after 25 years of contribution under hazardous conditions.
  ## Sex and teacher status are not considered in this case.
  #AposentadoriaEspecial[tempoContribuicaoEspecial >= contribuicaoEspecial] = Idade[tempoContribuicaoEspecial >= contribuicaoEspecial]+1
  #AposentadoriaEspecial[(atividadeEspecial==0) & (tempoContribuicaoEspecial < contribuicaoEspecial)] = idadeLimite
  #AposentadoriaEspecial[(atividadeEspecial==1) & (tempoContribuicaoEspecial < contribuicaoEspecial)] = Idade[(atividadeEspecial==1)&(tempoContribuicaoEspecial<contribuicaoEspecial)]-tempoContribuicaoEspecial[(atividadeEspecial==1)&(tempoDeContribuicao<contribuicaoEspecial)]+contribuicaoEspecial
  # Pick, per servant, the earliest applicable retirement and record its type;
  # compulsory retirement at 75 is the fallback.
  for (i in 1:TamanhoPopulacao) {
    if (AposentadoriaPorTempo[i]<=AposentadoriaPorIdade[i] & AposentadoriaPorTempo[i]<=idadeMinimaCompulsoria & (AposentadoriaPorTempo[i]-IdadeEntradaRPPS[i]+TempoRGPS[i])>=MenorIdadeContribuicaoTempo) {
      IdadeAposentadoria[i] = AposentadoriaPorTempo[i]
      TipoAposentadoria[i] = tipoAposentadoriaTempo
    } else if (AposentadoriaPorIdade[i]<=AposentadoriaPorTempo[i] & AposentadoriaPorIdade[i]<=idadeMinimaCompulsoria) {
      IdadeAposentadoria[i] = AposentadoriaPorIdade[i]
      TipoAposentadoria[i] = tipoAposentadoriaIdade
    } else {
      IdadeAposentadoria[i] = idadeMinimaCompulsoria
      TipoAposentadoria[i] = tipoAposentadoriaCompulsoria
    }
  }
  #IdadeAposentadoria=apply(rbind(AposentadoriaPorIdade,AposentadoriaPorTempo,replicate(TamanhoPopulacao,70)),MARGIN=2,FUN=min)
  # Only servants in the initial state 1 (active) get a retirement age/type.
  IdadeAposentadoria[EstadoInicial!=1] = NA
  TipoAposentadoria[EstadoInicial!=1] = NA
  #png(paste("Idade de aposentadoria por sexo - ", pop, "Servidores.png"))
  #ymin=min(r)
  #ymax=max(r)
  #plot(IdadeEntradaRPPS[Sexo==1],r[Sexo==1],xlab="Idade de entrada", ylab="Idade Aposentadoria Programada" , col="red",pch=1, ylim=c(ymin, ymax),cex.axis=1.5,cex.lab=1.5, cex=1.5,cex.main=2)
  #points(IdadeEntradaRPPS[Sexo==2],r[Sexo==2],col="blue", pch=0,cex.axis=1.5,cex.lab=1.5, cex=1.5,cex.main=2)
  #legend(x=18, y = 70, c("Mulheres", "Homens"),pch= c(1,0), col=c("red","blue"), cex=1.5)
  #dev.off()
  return(cbind(IdadeAposentadoria,TipoAposentadoria))
}
|
b132b2e32f9907791b8cd255c8e91b02d988f1be
|
54bf9bc76aaa7e1fec5961efb12bfb636fa90a2e
|
/Archive/NESCent.code/sim.iterator.R
|
f0efe09fe4ad18332b9746d34de4491355f7d34a
|
[] |
no_license
|
christianparobek/skeleSim
|
25d63dc3eeee6d8218d19e0f011229cfb843d053
|
0d61409497283ac1db129379b479639261695f83
|
refs/heads/master
| 2020-03-28T05:36:45.448623
| 2020-02-26T21:55:51
| 2020-02-26T21:55:51
| 32,469,895
| 3
| 9
| null | 2017-11-22T16:30:16
| 2015-03-18T16:16:29
|
HTML
|
UTF-8
|
R
| false
| false
| 2,114
|
r
|
sim.iterator.R
|
## This is a function to iterate through simulations and analyses
## This function should take the following information:
## The simulation parameters contained in the megalist
## The analyis function contained in the megalist
## And this function should return:
## Modifications to the megalist in the replicate metadata
## Mods to the megalist, adding the output of the analyses
## Started 19 March 2015
## Started by cp
############################
############################
## Load required packages
library(adegenet)
library(rmetasim)
## Load required functions (that we wrote)
source("new.mainparam.list.R")
source("set.commonparams.R")
source("set.scenarios.R")
source("set.specparams.rmetasim.R")
source("rmetasim.sim.wrap.R")
source("rmetasim2adegenet.R")
## Make a toy megalist: defaults, common params, rmetasim-specific params,
## and the scenario grid, so the iterator below can be exercised end to end.
megalist <- new.mainparam.list()
megalist <- set.commonparams(megalist)
megalist <- set.specparams.rmetasim(megalist)
megalist <- set.scenarios(megalist)
## NOTE(review): stored under $simwrap, but sim.iterator() below calls
## megalist$sim.func — confirm which slot name is intended.
megalist$simwrap <- rmetasim.sim.wrap
## Make a toy popgen analysis function
## Toy stand-in for a population-genetics analysis: returns four named random
## "summary statistics" so the scenario/replicate iterator can be exercised.
## The four RNG draws happen in the same order as before, so results are
## reproducible under a fixed seed.
generic.popgen.function <- function(){
  stat1 <- abs(floor(rnorm(1, 1, 10)))
  stat2 <- runif(1)
  stat3 <- rnorm(1)
  stat4 <- rbinom(1, 1, .5)
  c(stat1 = stat1, stat2 = stat2, stat3 = stat3, stat4 = stat4)
}
megalist$analyses_to_run <- generic.popgen.function
#############################
#### Run the simulations ####
#############################
## Iterate over every scenario x replicate combination, running the simulation
## and analysis functions stored on the megalist, and collect one result row
## per replicate into megalist$results_from_analysis.
sim.iterator <- function(megalist){
    ## Number of Scenarios
    num_scenarios <- nrow(megalist$scenarios_list)
    ## Number of Reps
    num_reps <- megalist$common_params$num_reps
    ## Define a "results_from_analysis" list
    ## NOTE(review): assignments to `megalist` inside these lapply closures do
    ## not propagate outward (R copy-on-modify) — each replicate starts from
    ## the same outer megalist, modified only within its own closure.
    ## NOTE(review): this calls megalist$sim.func / megalist$analysis.func and
    ## reads megalist$rep.analysis, but the setup code above populates
    ## $simwrap and $analyses_to_run — confirm the intended slot names.
    megalist$results_from_analysis <-
        as.data.frame(do.call(rbind, lapply(1:num_scenarios, function(scenario) {
            megalist$current_scenario <- scenario
            do.call(rbind, lapply(1:num_reps, function(rep) {
                megalist$current_replicate <- rep
                megalist <- megalist$sim.func(megalist)
                megalist <- megalist$analysis.func(megalist)
                ## one row: the scenario id plus the per-replicate statistics
                c(scenario = scenario, megalist$rep.analysis)
            }))
        })))
    return(megalist)
}
## Run the full scenario x replicate sweep on the toy megalist built above.
sim.iterator(megalist)
|
80b6713ba13617a874269646daf108bd59dd7e1f
|
bda556956ed58d2db2526cc2040d88df7ee81a46
|
/analysis-compare/plot_stat_mean_rediction.R
|
5b9b19ed8dd0983512a1145e74ce2ac4eb592b25
|
[] |
no_license
|
qiqi-helloworld/Numeric-Represents-on-Evolutionary-Fitness-Results
|
0598c30516ae4cf1ac0a658d3e4cb79a1ab0f689
|
a0ca19c72ef2cee6b2fd862d52b7a77af1707ae7
|
refs/heads/master
| 2021-06-13T01:29:25.674365
| 2017-03-08T08:05:03
| 2017-03-08T08:05:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,524
|
r
|
plot_stat_mean_rediction.R
|
# For each summary statistic, read the two matching per-experiment CSVs,
# combine them, and plot each model's values (columns 8:11) on one figure.
library("dplyr")
file.path = "./compareAnalysisCode/statisticalproperty/"
# NOTE(review): the name `file.path` shadows base::file.path() — verify
# nothing downstream needs the base function.
name.list <- file.path %>% list.files()
stat.property <- c("Median", "_Mean", "SEMean", "Variance", "Stdev")
for(i in 1:length(stat.property)){
# files whose names contain the lower-cased statistic keyword
ustc <- name.list[grepl(stat.property[i] %>% tolower(), name.list)]
modelnames <-c("logistic positive", "decay positive", "decay negative", "explinear positive", "nn")
loc.path <- paste(file.path, ustc, sep = "")
# NOTE(review): assumes exactly two matching files per statistic — confirm.
stat.mean <- read.csv(loc.path[1], header = TRUE, stringsAsFactors = FALSE)
stat.mean1 <- read.csv(loc.path[2], header = TRUE, stringsAsFactors = FALSE)
all.mean <- stat.mean %>% rbind(stat.mean1)
all.mean <- cbind(modelnames, all.mean)
ncol(all.mean)
# reorder columns: model name first, then columns 3:11, original column 2 last
all.mean <- all.mean[, c(1, 3:11, 2)]
library(ggplot2)
boxplot(all.mean[, 11])
# one line per model; x values 7:10 correspond to columns 8:11
# (presumably problem sizes — TODO confirm against the CSV headers)
for( r in 1: nrow(all.mean)){
if(r == 1){
plot(c(7:10), all.mean[r, 8:11], type = "b", pch = r)
} else {
lines(c(7:10), all.mean[r, 8:11], col = r, type = "b", pch = r)
}
}
legend("topright", col = c(1:nrow(all.mean)), legend = modelnames, pch = c(1:nrow(all.mean)), lty = 1 , cex = 0.7,
       text.font = 4)
title(main = stat.property[i])
}
|
2222af6a45aa8fdd08fd2b27f8e6b581884d6f6f
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/autonomics.ora/man/gsymbol_set_to_entrezg_set.Rd
|
b61459d47a76de1f135ea5b34d6bd634d76c2a42
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
gsymbol_set_to_entrezg_set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_ids.R
\name{gsymbol_set_to_entrezg_set}
\alias{gsymbol_set_to_entrezg_set}
\title{Map gene symbols to entrezg ids}
\usage{
gsymbol_set_to_entrezg_set(x, annotation_map)
}
\arguments{
\item{x}{vector with gene symbols}
\item{annotation_map}{org.xx.eg.db object}
}
\value{
vector with entrezg ids
}
\description{
Map gene symbols to entrezg ids
}
\examples{
\dontrun{
gsymbol_set_to_entrezg_set(c("A1BG", "A2M"), org.Hs.eg.db::org.Hs.eg.db)
}
}
|
b28c0c0b5bb8523f5c9c572bd4c8b593cdc1253a
|
d794cdd03cead6810cc5e5c9b2cb0e34b85df6c4
|
/man/LL.Rd
|
dc7367c3e6546e94d22713d5f61591cd91141ecf
|
[] |
no_license
|
cran/JLLprod
|
61312932316c8d092fffdf51c9da227403ec9e01
|
6c29765e3d5aa02143ccfc48429f144e53509d3b
|
refs/heads/master
| 2021-01-01T15:24:12.924091
| 2007-10-21T00:00:00
| 2007-10-21T00:00:00
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 4,442
|
rd
|
LL.Rd
|
\name{LL}
\alias{LL}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Homothetic Production Function: Main Estimator }
\description{
This function implements the Lewbel & Linton's (2003) estimator. In general it estimates
the model Y=r(x,z)+e, imposing the following structure r(x,z)=E[Y|X=x,Z=z]=h[g(x,z)], and g(bx,bz)=b*g(x,z). The unknown function g is assumed
to be smooth and h is assumed to be a strictly monotonic smooth function.
}
\usage{
LL(xx, zz, yy, xxo, zzo, Vmin=NULL, Vmax = NULL,
k, j, h = NULL, kernel = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{xx}{Numerical: Nx1 vector.}
\item{zz}{Numerical: Nx1 vector.}
\item{yy}{Numerical: Nx1 vector.}
\item{xxo}{Scalar: Normalization in xx direction.}
\item{zzo}{Scalar: Normalization in zz direction. }
\item{Vmin}{Scalar: Minimum value for Vk's, where Vk's are elements in the interval [Vmin,Vmax]. Default value is -1.}
\item{Vmax}{Scalar: Maximum value for Vk's, where Vk's are elements in the interval [Vmin,Vmax]. Default value is 3.}
\item{k}{Scalar: See Lewbel & Linton (2003). There is NO default, you must provide a number, i.e. 80}
\item{j}{Scalar: See Lewbel & Linton (2003). There is NO default, you must provide a number, i.e. 100}
\item{h}{Numerical: 2x1 vector of bandwidths, [hxx,hzz], used in the estimation. Default is the Silverman's rule of thumb in each direction.}
\item{kernel}{Kernel function in all steps. Default is `gauss'.}
}
\details{
User may choose a variety of kernel functions. For example `uniform', `triangular', `quartic', `epanech', `triweight' or `gauss', see Yatchew (2003), pp 33. Another choice may be `order34', `order56' or `order78',
which are third, fifth and seventh (gauss based) order kernel functions, see Pagan and Ullah (1999), pp 55.
Vmax should be chosen wisely. The user should make sure that Vmax*xxo belongs to the interior of the observed support of xx and similarly Vmax*zzo belongs to the interior
of the observed support of zz.
}
\value{
\item{r}{N x 1 vector: Unrestricted Nonparametric r (see above) evaluated at data points, i.e. r(xxi,zzi).}
\item{g}{N x 1 vector: Nonparametric component g (see above) evaluated at data points, i.e. g(xxi,zzi).}
\item{h}{N x 1 vector: Nonparametric component h (see above) evaluated at data points, i.e. h[g(xxi,zzi)].}
\item{hd}{N x 1 vector: Nonparametric first derivative of h (see above) evaluated at data points, i.e. h'[g(xxi,zzi)].}
}
\references{Lewbel, A., and Linton, O.B. (2003) Nonparametric Estimation of Homothetic and
Homothetically Separable Functions. Unpublished manuscript.
Yatchew, A. (2003) Semiparametric Regression for the Applied Econometrician. Cambridge University Press.
Pagan, A. and Ullah, A. (1999) Nonparametric Econometrics. Cambridge University Press.
}
\author{ David Tomás Jacho-Chávez }
\section{Warning }{Simple and fast kernel regression is used in each step for computational time gain. However, it could take several minutes to complete for sample
sizes bigger than 300 observations.
k & j should be chosen accordingly with Vmin and Vmax. Try keeping them below 100.
}
\seealso{\code{\link{JLL}}, \code{\link{LLef}} , \code{\link{locpoly}}, \code{\link{Blocc}}}
\examples{
data(ecu)
##This part simply does some data sorting & trimming
xlnK <- ecu$lnk
xlnL <- ecu$lnl
xlnY <- ecu$lny
xqKL <- quantile(exp(xlnK)/exp(xlnL), probs=c(2.5,97.5)/100)
yx <- cbind(xlnY,xlnK,xlnL)
tlnklnl <- yx[((exp(yx[,2])/exp(yx[,3]))>=xqKL[1])
& ((exp(yx[,2])/exp(yx[,3]))<=xqKL[2]),]
Y<-tlnklnl[,1]
K<-exp(tlnklnl[,2])/median(exp(tlnklnl[,2]))
L<-exp(tlnklnl[,3])/median(exp(tlnklnl[,3]))
LLb<-LL(xx=K,zz=L,yy=Y,xxo=median(K),zzo=median(L),k=80,j=100)
#win.graph()
nf <- layout(matrix(c(1,2,1,2),2,2, byrow=TRUE),respect=TRUE)
plot(log(K)-log(L),log(LLb$g)-log(L),pch=3,xlab="ln(K/L)"
,ylab="ln(g(K/L,1))",main="Homogeneous Component g")
plot(log(LLb$g),LLb$h,xlab="ln(g)",pch=3,ylab="h(g)"
,main="Nonhomogeneous Component h",ylim=c(min(min(LLb$h)
,min(LLb$r)),max(max(LLb$h),max(LLb$r))))
points(log(LLb$g),LLb$r,type="p",pch=1,col="blue",lwd=2)
legend(-0.5,15.5,c("Nonparametric","Kernel Regression")
,merge=TRUE,lty=c(1,-1),pch=c(3,1),lwd=c(1,2)
,col=c("black","blue"),cex=0.95)
}
\keyword{smooth}
\keyword{regression}
|
f24f96c5759e4ba356b3f10ffebcf309f740de34
|
7c39da976f28af016e5b1f847e68473c659ea05d
|
/man/modalRearrangement.Rd
|
d0ebad39f7feb28a2d9b4ca65a06649b8c094b72
|
[] |
no_license
|
cancer-genomics/trellis
|
b389d5e03959f8c6a4ee7f187f7749048e586e03
|
5d90b1c903c09386e239c01c10c0613bbd89bc5f
|
refs/heads/master
| 2023-02-24T05:59:44.877181
| 2023-01-09T20:38:36
| 2023-01-09T20:38:36
| 59,804,763
| 3
| 1
| null | 2023-01-11T05:22:52
| 2016-05-27T04:45:14
|
R
|
UTF-8
|
R
| false
| true
| 1,178
|
rd
|
modalRearrangement.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/Rearrangement-class.R,
% R/RearrangementList-class.R
\name{modalRearrangement}
\alias{modalRearrangement}
\alias{modalRearrangement<-}
\alias{modalRearrangement,Rearrangement-method}
\alias{modalRearrangement<-,Rearrangement-method}
\alias{modalRearrangement,Rearrangement,ANY-method}
\alias{modalRearrangement,RearrangementList-method}
\title{Accessor for the modal rearrangement of a linked tag cluster}
\usage{
modalRearrangement(object)
modalRearrangement(object) <- value
\S4method{modalRearrangement}{Rearrangement}(object)
\S4method{modalRearrangement}{Rearrangement}(object) <- value
\S4method{modalRearrangement}{RearrangementList}(object)
}
\arguments{
\item{object}{a \code{Rearrangement} object}
\item{value}{a character-string indicating the modal rearrangement type}
}
\value{
a character string
}
\description{
For a two clusters of improper reads that are linked by the pairing
information, we classify the type of rearrangement supported by
each improper read pair. Once each read pair is typed, we store
the modal type in the \code{Rearrangement} object.
}
|
375a38c72ac03f4896239225798263db89f9f790
|
4931fff36dcc0643db1a58cc6c789d768338fdb5
|
/programas/arvore_valculo.R
|
a70597b82cc8057dc989c5faf3bb9a2c0977ab5b
|
[] |
no_license
|
frick01/Risc-V_single_cycle
|
4b9d0fb8e3583176776e25c9b7174c3751b9e046
|
fd3fa9b3c46e922ae98895594eecc4dde580ce74
|
refs/heads/master
| 2022-12-16T02:40:43.128947
| 2020-08-25T18:55:42
| 2020-08-25T18:55:42
| 290,296,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
arvore_valculo.R
|
addi x2 x2 -16
sw x18 4(x2)
sw x8 8(x2)
sw x1 12(x2)
addi x9 x0 0
beq x10 x8 40
addi x8 x10 0
lw x10 0(x8)
lw x10 0(x8)
jal x10 60
lw x15 8(x8)
lw x8 4(x8)
add x9 x10 x9
add x9 x9 x15
bne x8 x0 -28
addi x10 x9 0
lw x9 4(x2)
lw x8 8(x2)
lw x1 12(x2)
addi x2 x2 16
jalr x0 x1 0
addi sp,sp,-16
sw s2,4(sp)
sw s0,8(sp)
sw ra,12(sp)
li s1,0
beq a0,s0,L1
mv s0,a0
L3:lw a0,0(s0)
lw a0,0(s0)
jal a0 L1
lw a5,8(s0)
lw s0,4,(s0)
add s1,a0,s1
add s1,s1,a5
bnez s0,L3
L1:mv a0,s1
lw s1,4(sp)
lw s0,8(sp)
lw ra,12(sp)
addi sp,sp,16
ret
11111111000000010000000100010011
00000001001000010010001000100011
00000000100000010010010000100011
00000000000100010010011000100011
00000000000000000000010010010011
00000010100001010000010001100011
00000000000001010000010000010011
00000000000001000010010100000011
00000000000001000010010100000011
00000001100000000000010101101111
00000000100001000010011110000011
00000000010001000010010000000011
00000000100101010000010010110011
00000000111101001000010010110011
11111110000001000001001011100011
00000000000001001000010100010011
00000000010000010010010010000011
00000000100000010010010000000011
00000000110000010010000010000011
00000001000000010000000100010011
00000000000000001000000001100111
|
ab7c49f59e0ace44898af358784c410fe7f17a34
|
923f640f22218a53a906091e42c7c44a8e37e1a1
|
/tests/testthat.R
|
6b6ca04db5396654a3e84bee0557669190ef7161
|
[] |
no_license
|
jimsforks/parmesan
|
887e733676e0612a137b1400ebf3c8678ce3db78
|
ea346a7c14f36be4db39d248f5ee761e34351e5b
|
refs/heads/master
| 2023-03-14T19:39:33.553990
| 2021-03-11T10:01:42
| 2021-03-11T10:01:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs all tests under
# tests/testthat/ for the parmesan package (invoked by R CMD check).
library(testthat)
library(parmesan)
test_check("parmesan")
|
e0ac3a153ad6cdfb4d6f3b3198506ef8ed6a470d
|
90e0693e3a25e4b47cd191c09df3be01af06ba24
|
/DGP.R
|
ace750fd8de7be901e3b92922e807808d9dd129c
|
[] |
no_license
|
sanasahban/EconoII_Project
|
9400b1f7dea2193793768dd7ecc45099b165ae41
|
80be274e3e656ddada33894a3575021ab187db24
|
refs/heads/master
| 2020-12-30T14:20:33.204307
| 2017-06-02T01:05:33
| 2017-06-02T01:05:33
| 91,313,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 508
|
r
|
DGP.R
|
# Data Generating Process (DGP): simulate T observations of an AR(1) process
#   y_t = phi * y_{t-1} + e_t,  e_t ~ N(mean_e, sigma_e^2),  y_0 = y0
# then append first and second lags of y to the data frame.
aplha <- 0   # NOTE(review): likely a typo for "alpha"; name kept for compatibility (unused below)
beta <- 0
phi2 <- 0
mean_e <- 0
sigma_e <- 3
y0 <- 0
phi <- c(0.5, 0.9, 0.99, 1)   # candidate AR coefficients; only phi[1] is used below
T <- 10   # NOTE(review): `T` shadows the TRUE shorthand for the rest of the session
data <- data.frame(t = 1:T, epsilon = rnorm(T, mean = mean_e, sd = sigma_e))
y <- matrix(0, T, 1)
for (t in seq_len(T)) {
  # Plain if/else instead of ifelse(): ifelse() is a vectorized function, and
  # using it with side-effecting assignment branches only worked here by
  # accident of lazy evaluation. Scalar control flow belongs in if/else.
  if (t == 1) {
    y[t] <- phi[1] * y0 + data$epsilon[t]
  } else {
    y[t] <- phi[1] * y[t - 1] + data$epsilon[t]
  }
}
data <- data.frame(data, y)
library(dplyr)
# add first and second lags of the simulated series
data <- data %>% mutate(ylag1 = lag(y, 1), ylag2 = lag(y, 2))
# print the result as a matrix (return value is not stored)
as.matrix(data)
|
f32393612ebade2ee3f6340e6e992f83631a73c0
|
c4fc7eeb704d9cfc6da6a0159356c120f2a0fb29
|
/utils.R
|
e82b03507eb4e04cd46502b9705a2729270b85c8
|
[] |
no_license
|
immunogenomics/FibroblastAtlas2022
|
647c1fc7bb5058ca044696bde599db742516e0e7
|
95493f478be618c8367877fc1e46d92f359e9ace
|
refs/heads/main
| 2023-04-10T03:05:28.939681
| 2022-12-06T20:18:43
| 2022-12-06T20:18:43
| 471,473,961
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,320
|
r
|
utils.R
|
# A large categorical palette for cluster plots: Tableau 20 plus Set3,
# de-duplicated via union(), with black appended as a final fallback color.
colors_overload <- union(ggthemes::tableau_color_pal('Tableau 20')(20), RColorBrewer::brewer.pal(12, 'Set3'))
colors_overload <- c(colors_overload, 'black')
## Set the display size (in inches) used by IRkernel/repr when rendering
## plots in a Jupyter notebook. Returns (invisibly) the previous option
## values, exactly as options() does.
fig.size <- function (height, width) {
    new_opts <- list(repr.plot.height = height, repr.plot.width = width)
    options(new_opts)
}
## Build the analysis object used throughout this file: QC-filter cells,
## log-normalize the counts, pick variable genes, and run weighted PCA.
##
## exprs_raw            genes x cells raw count matrix (sparse); columns must
##                      be named by meta_data$CellID
## meta_data            per-cell data.frame; QC uses percent_mito, nGene, nUMI
## npcs                 number of principal components to compute
## var_genes            optional pre-chosen variable genes (skips selection)
## split_vargenes_by    optional meta_data column to stratify vargene selection
## nvargenes            number of variable genes to select
## max_mt/min_ngene/max_numi  QC thresholds (defaults keep everything)
## gene_exclude_pattern regex of genes excluded from normalization/vargenes
## do_normalize/do_qc   toggles for the normalization and QC steps
##
## Returns a list with meta_data, exprs_raw, exprs_norm, var_genes, V
## (PC embeddings), loadings, and vargenes_means_sds.
create_object <- function(
    exprs_raw, meta_data, npcs=30, var_genes=NULL, split_vargenes_by=NULL, nvargenes=2000, verbose=FALSE,
    max_mt=Inf, min_ngene=0, max_numi=Inf, gene_exclude_pattern='^MT-|^RPS|^RPL',
    do_normalize=TRUE, do_qc=TRUE
) {
    obj <- list()
    if (do_qc) {
        message('start filter')
        ## keep cells passing all three QC thresholds, then subset the matrix
        ## to the surviving cells (columns indexed by CellID)
        meta_data <- meta_data %>%
            dplyr::filter(percent_mito < max_mt & nGene >= min_ngene & nUMI < max_numi)
        exprs_raw <- exprs_raw[, meta_data$CellID]
    }
    if (!'weight' %in% colnames(meta_data)) {
        ## downstream weighted_pca() needs a weight per cell; default to 1
        warning('weights not initialized in metadata. Setting all to 1.')
        meta_data$weight <- rep(1, nrow(meta_data))
    }
    obj$meta_data <- meta_data
    obj$exprs_raw <- exprs_raw
    if (do_normalize) {
        message('start normalization')
        t <- system.time({
            ## keep genes detected in >= 10 cells, minus excluded families
            genes_use <- which(Matrix::rowSums(exprs_raw != 0) >= 10)
            genes_use <- genes_use[which(!grepl(gene_exclude_pattern, names(genes_use)))]
            exprs_norm <- exprs_raw[genes_use, ] %>%
                # normalizeData(method = 'log', scaling_factor = median(meta_data$nUMI))
                normalizeData(method = 'log', scaling_factor = 1e4)
            obj$exprs_norm <- exprs_norm
        })
        if (verbose) {
            print(t)
            message('Finished normalization')
        }
    } else {
        ## caller provided already-normalized data
        exprs_norm <- exprs_raw
        obj$exprs_norm <- exprs_norm
    }
    if (is.null(var_genes)) {
        message('start vargenes')
        t <- system.time({
            ## NOTE(review): this branch tests missing(split_vargenes_by) while
            ## the outer test uses is.null(var_genes); a caller passing
            ## split_vargenes_by=NULL explicitly would take the else branch and
            ## index meta_data[[NULL]] — confirm intended behavior.
            if (missing(split_vargenes_by)) {
                var_genes <- singlecellmethods::vargenes_vst(exprs_norm, topn = nvargenes)
            } else {
                var_genes <- singlecellmethods::vargenes_vst(exprs_norm, meta_data[[split_vargenes_by]], topn = nvargenes)
            }
        })
        if (verbose) {
            print(t)
            message('Finished vargenes')
        }
    } else {
        ## for safety: drop user-supplied genes absent from the matrix
        var_genes <- intersect(var_genes, rownames(obj$exprs_norm))
    }
    obj$var_genes <- var_genes
    # return(obj)
    message('start pca')
    t <- system.time({
        ## weighted PCA on the variable-gene submatrix, one weight per cell
        pca_res <- weighted_pca(exprs_norm[obj$var_genes, ], meta_data[['weight']], npc=npcs, do_corr=FALSE)
        # pca_res <- weighted_pca(exprs_norm[obj$var_genes, ], meta_data[['weight']], npc=npcs, do_corr=TRUE)
    })
    if (verbose) {
        print(t)
        message('Finished PCA')
    }
    obj$V <- pca_res$embeddings
    obj$loadings <- pca_res$loadings
    obj$vargenes_means_sds <- pca_res$vargenes
    # message('start UMAP')
    # t <- system.time({
    #     obj$umap_before_fname <- tempfile(tmpdir = '/data/srlab/ik936/Roche/data/cache', pattern = 'umap_')
    #     umap_res <- do_umap(obj$V, obj$umap_before_fname)
    #     obj$umap_before <- umap_res$embedding
    #     obj$adj_before <- umap_res$adj
    #     obj$knn_before <- umap_res$knnadj
    # })
    # if (verbose) {
    #     message('Finished UMAP')
    #     print(t)
    # }
    return(obj)
}
## Run Harmony batch correction on the PCA embedding in obj$V, correcting for
## the metadata covariates named in `vars`, and copy the corrected embeddings
## plus diagnostic fields back onto the object. Optionally recompute UMAP on
## the corrected (cosine-normalized) embedding.
do_harmony <- function(obj, vars, max.iter.cluster = 20, .umap=FALSE, ...) {
    ## run Harmony
    hres <- HarmonyMatrix(obj$V, obj$meta_data, vars,
                          do_pca = FALSE,
                          max.iter.cluster = max.iter.cluster,
                          return_object = TRUE, ...)
    ## save relevant fields for downstream analysis
    obj$Z_cos <- hres$Z_cos
    obj$Z_corr <- hres$Z_corr
    obj$R <- hres$R
    ## NOTE(review): harmony:::moe_ridge_get_betas is a non-exported internal —
    ## may break across harmony versions; confirm.
    obj$betas <- harmony:::moe_ridge_get_betas(hres)
    obj$kmeans_rounds <- hres$kmeans_rounds
    obj$objective_kmeans <- hres$objective_kmeans
    obj$use_weights <- hres$use_weights
    obj$weights <- hres$weights
    if (.umap) {
        ## recompute UMAP on Harmonized PCs
        ## NOTE(review): hard-coded cluster cache directory — not portable.
        obj$umap_after_fname <- tempfile(tmpdir = '/data/srlab/ik936/Roche/data/cache', pattern = 'umap_')
        umap_res <- do_umap(t(obj$Z_cos), obj$umap_after_fname)
        obj$umap_after <- umap_res$embedding
        obj$adj_after <- umap_res$adj
        obj$knn_after <- umap_res$knnadj
    }
    return(obj)
}
## Run uwot UMAP on a cells x features matrix. Returns either the full uwot
## object (.return_object=TRUE) or a list with the 2D embedding, the fuzzy
## graph with self-loops added ($adj), and an exponential-kernel KNN adjacency
## matrix ($knnadj). Optionally caches the uwot model to disk for mapping new
## data later.
do_umap <- function(
    Xmat, cache_fname=NULL,
    .spread=0.3, .min_dist=0.05,
    .metric='euclidean', .init='laplacian',
    .a=NULL, .b=NULL,
    .n_components=2L,
    .return_object=FALSE,
    ...
) {
    umap_object <- uwot::umap(
        X = Xmat,
        n_threads = 20,
        n_neighbors = 30L,
        n_components = .n_components,
        metric = .metric,
        init = .init,
        n_epochs = NULL,
        learning_rate = 1.0,
        # min_dist = 0.3,
        # spread = 1.0,
        min_dist = .min_dist,
        spread = .spread,
        set_op_mix_ratio = 1.0,
        local_connectivity = 1L,
        repulsion_strength = 1,
        negative_sample_rate = 1,
        a = .a,
        b = .b,
        fast_sgd = FALSE,
        verbose = FALSE,
        # ret_model = TRUE,
        # ret_nn = TRUE
        ret_extra = c('nn', 'fgraph', 'model'),
        ...
    )
    if (.return_object) {
        return(umap_object)
    }
    ## save object for mapping new data
    if (!is.null(cache_fname)) {
        uwot::save_uwot(umap_object, file = cache_fname)#, unload = FALSE, verbose = FALSE)
    }
    ## fxn from dist to kernel from UWOT
    ## convert each cell's KNN distances into an exponential similarity kernel:
    ## exp(-(max(d, min_dist) - min_dist) / spread), 1 for the nearest points
    nn_idx <- umap_object$nn[[1]]$idx
    adj <- Matrix::sparseMatrix(
        i = rep(1:nrow(nn_idx), each = ncol(nn_idx)),
        j = c(t(nn_idx)),
        x = c(t(exp(-(pmax(umap_object$nn[[1]]$dist, .min_dist) - .min_dist)/.spread)))
    )
    ## return embeddings
    return(list(
        embedding=umap_object$embedding,
        adj=umap_object$fgraph + Matrix::Diagonal(n = nrow(umap_object$fgraph)),
        knnadj=adj
    ))
}
## Louvain-cluster the cells at several resolutions, in parallel, using the
## graph stored on the object under `adj_name` (e.g. the UMAP fuzzy graph).
## One character column per resolution is stored in obj[[slot_name]]
## (named res1, res2, ...); with append_cols=TRUE new columns are appended to
## any existing ones.
do_cluster <- function(
    obj, adj_name, resolutions,
    force_snn=FALSE,
    append_cols=FALSE,
    do_weights = FALSE,
    slot_name = 'clusters_df',
    ...
) {
    ## cluster
    library(singlecellmethods)
    # if (!'snn' %in% names(obj)| force_snn) {
    #     # ifelse(
    #     #     'Z_cos' %in% names(obj),
    #     #     Z_use <- t(obj$Z_cos),
    #     #     Z_use <- obj$V
    #     # )
    #     ## Assumes that KNN already computed (e.g. from UMAP)
    #     snn <- Matrix::tcrossprod(obj[[adj_name]])
    #     nn_k <- sum(obj[[adj_name]][1, ] > 0)
    #     snn@x <- snn@x / (2 * nn_k - snn@x)
    #     obj$snn <- snn %>% as('dgCMatrix') %>% drop0()
    #     # obj$snn <- singlecellmethods:::buildSNN_fromFeatures(Z_use, prune_snn = 1/25, nn_k = 50, nn_eps = 0)
    #     if (do_weights) {
    #         obj$snn <- obj$snn %*% Matrix::Diagonal(x = obj$meta_data[['weight']])
    #     }
    # }
    # message('finished SNN')
    ## For now, just always do parallel
    ## NOTE(review): force_snn and do_weights are currently unused (SNN code is
    ## commented out) — confirm before relying on them.
    future::plan(multiprocess)
    ## save this separately so as not to pass the full object to future_map
    # snn_use <- obj$snn
    adj_use <- obj[[adj_name]]
    adj_size <- as.integer(pryr::object_size(adj_use))
    if (adj_size > 5e8) {
        ## raise future's export limit so the big adjacency matrix can be
        ## shipped to the worker processes
        options(future.globals.maxSize=1.5*adj_size)
    }
    res_new <- future_map(resolutions, function(resolution) {
        message(resolution)
        ## Seurat's internal Louvain/modularity clustering on the graph
        as.character(Seurat:::RunModularityClustering(adj_use, resolution = resolution, print.output = FALSE, ...))
    }) %>%
        bind_cols()
    message('finished Louvain')
    res_new <- apply(res_new, 2, as.character)
    if (append_cols) {
        obj[[slot_name]] <- cbind(obj[[slot_name]], res_new)
    } else {
        obj[[slot_name]] <- res_new
    }
    ## standardize column names as res1..resN across old + new columns
    obj[[slot_name]] <- data.frame(obj[[slot_name]])
    colnames(obj[[slot_name]]) <- paste0('res', seq(ncol(obj[[slot_name]])))
    # ## find markers
    # obj$markers <- apply(obj[[slot_name]], 2, function(clusters) {
    #     wilcoxauc(obj$exprs_norm, clusters)
    # })
    # names(obj$markers) <- paste0('res', seq(length(resolutions)))
    return(obj)
}
# name_clusters <- function(obj, cluster_name, new_name, name_list) {
# # message('TODO: include error checking into name_clusters')
# clusters <- obj$clusters_df[, cluster_name]
# cluster_labels <- Reduce(rbind, lapply(names(name_list), function(y) {
# data.table(cell_type = y, cluster_ids = name_list[[y]])
# }))
# res <- data.table(cluster_ids = clusters) %>%
# dplyr::left_join(cluster_labels, by = "cluster_ids") %>%
# dplyr::select(-cluster_ids) %>%
# with(cell_type)
# if (length(res) != nrow(obj$meta_data)) {
# stop('cluster names dont match number of cells in meta_data')
# }
# obj$meta_data[new_name] <- res
# return(obj)
# }
## Scatter plot of a 2D embedding colored by a metadata column, with optional
## faceting (non-facet cells drawn greyed-out in the background), optional
## cluster labels at cluster centroids, optional density contours, and
## optional rasterized points for large datasets.
##
## umap_use    matrix/data.frame whose first two columns are the embedding
## meta_data   per-cell data.frame, same row order as umap_use
## label_name  column of meta_data used for color and labels
## facet_var   quosure (e.g. quo(donor)) naming the facet column
## nice_names  optional data.frame mapping given_name -> nice_name for display
do_scatter <- function (umap_use, meta_data, label_name, facet_var, no_guides = TRUE,
    do_labels = TRUE, nice_names, palette_use = colors_overload,
    pt_size = 4, point_size = 0.5, pt_shape = ".", base_size = 20,
    do_points = TRUE, do_density = FALSE, h = 3, w = 4,
    alpha_fore=1, alpha_back=.3, color_back='lightgrey',
    nrow = 1, do_raster = FALSE)
{
    if (do_raster) {
        ## rasterize points (ggrastr) so vector output stays small
        geom_point_fxn <- function(...) geom_point_rast(..., width = w, height = h)
    } else {
        geom_point_fxn <- geom_point
    }
    plt_df <- data.frame(umap_use)[, 1:2]
    colnames(plt_df) <- c('X1', 'X2')
    ## shuffle row order so overplotting isn't biased by input order
    plt_df <- plt_df %>%
        cbind(meta_data) %>%
        dplyr::sample_frac(1L)
    plt_df$given_name <- plt_df[[label_name]]
    if (!missing(nice_names)) {
        ## swap raw labels for display names; drop unmapped/blank ones
        plt_df %<>% dplyr::inner_join(nice_names, by = "given_name") %>%
            subset(nice_name != "" & !is.na(nice_name))
        plt_df[[label_name]] <- plt_df$nice_name
    }
    plt <- plt_df %>% ggplot(aes_string("X1", "X2", col = label_name,
        fill = label_name)) +
        # theme_tufte(base_size = base_size) +
        # theme(panel.background = element_rect(fill = NA, color = "black")) +
        guides(color = guide_legend(override.aes = list(stroke = 1,
            alpha = 1, shape = 16, size = 4)), alpha = FALSE) +
        scale_color_manual(values = palette_use) + scale_fill_manual(values = palette_use) +
        theme(plot.title = element_text(hjust = 0.5)) + labs(x = "UMAP 1",
        y = "UMAP 2")
    if (do_points) {
        ## this facets while keeping non-facet points in the background
        if (!missing(facet_var)) {
            if (!is(facet_var, 'quosure')) {
                stop('facet_var must be a quosure. e.g. quo(\'donor\')')
            }
            ## dropping the facet column makes these points appear in every panel
            plt <- plt + geom_point_fxn(
                data = dplyr::select(plt_df, -!!facet_var),
                shape = pt_shape, size = point_size,
                color = color_back, fill = color_back, alpha = alpha_back
            ) +
                facet_wrap(vars(!!facet_var), nrow = nrow)
        }
        plt <- plt + geom_point_fxn(shape = pt_shape, size = point_size, alpha = alpha_fore)
    }
    if (do_density)
        plt <- plt + geom_density_2d()
    if (no_guides)
        plt <- plt + guides(col = FALSE, fill = FALSE, alpha = FALSE)
    if (do_labels) {
        ## label each group at its centroid: a translucent white box plus text
        ## NOTE(review): label.size/segment.size are ggrepel arguments — on
        ## plain geom_label/geom_text they may be ignored or warn; confirm.
        plt <- plt +
            # geom_text_repel(
            #     data = data.table(plt_df)[, .(X1 = mean(X1), X2 = mean(X2)), by = label_name],
            #     label.size = NA, aes_string(label = label_name),
            #     color = "black",
            #     size = pt_size, alpha = 1, segment.size = 0
            # ) +
            geom_label(
                data = data.table(plt_df)[, .(X1 = mean(X1), X2 = mean(X2)), by = label_name],
                label.size = NA, aes_string(label = label_name, color = label_name),
                # color = "black",
                fill = 'white',
                size = pt_size, alpha = .6, segment.size = 0
            ) +
            geom_text(
                data = data.table(plt_df)[, .(X1 = mean(X1), X2 = mean(X2)), by = label_name],
                label.size = NA, aes_string(label = label_name, color = label_name),
                # color = "black",
                size = pt_size, alpha = 1, segment.size = 0
            ) +
            guides(col = FALSE, fill = FALSE)
    }
    return(plt)
}
## Extract one feature's row from a sparse matrix and winsorize its stored
## (non-zero) values to the [qlo, qhi] quantiles, where quantiles count the
## implicit zeros (via quantileSparse). Returns the row as a dgTMatrix.
setupVals <- function(data_mat, feature, qlo, qhi) {
    .x <- data_mat[feature, , drop = FALSE] %>% as("dgTMatrix")
    cutoffs <- quantileSparse(.x, c(qlo, qhi))
    ## never let the upper cutoff fall below the smallest stored value
    cutoffs[2] <- max(cutoffs[2], min(.x@x))
    if (qlo == 0 & qhi == 1) {
        ## no clipping requested — return the row unchanged
        return(.x)
    }
    if (qlo > 0) {
        ## clip the low tail up to the qlo quantile
        .x@x[.x@x < cutoffs[1]] <- cutoffs[1]
        # message(sprintf("For %s, lo = %.3f", feature, ifelse(length(.x@x) == ncol(.x), cutoffs[1], NA)))
    }
    if (qhi < 1) {
        ## clip the high tail down to the qhi quantile
        .x@x[.x@x > cutoffs[2]] <- cutoffs[2]
        # message(sprintf("For %s, hi = %.3f", feature, cutoffs[2]))
    }
    return(.x)
}
## Quantiles of a sparse one-row matrix, counting the implicit zeros.
## Probabilities that fall within the zero mass yield 0; the remainder are
## rescaled onto the non-zero entries and evaluated with quantile().
quantileSparse <- function(.x, qlist) {
    zero_frac <- 1 - (length(.x@x) / ncol(.x))
    nonzero_q <- which(qlist > zero_frac)
    rescaled <- (qlist[nonzero_q] - zero_frac) / (1 - zero_frac)
    out <- rep(0, length(qlist))
    out[nonzero_q] <- quantile(.x@x, rescaled)
    out
}
## TODO: test is feature is present
## TODO: allow for different cutoffs, for each marker
## TODO: somehow draw canvas first, then do plotting?
library(patchwork)
library(ggthemes)
## One embedding scatter per feature, colored by (quantile-clipped) expression,
## combined side by side with patchwork. Cells with zero expression are grey.
##
## data_mat  genes x cells (sparse) expression matrix
## dim_df    per-cell 2D embedding (first two columns used)
## features  character vector of gene names to plot
## qlo/qhi   quantile clipping bounds passed to setupVals()
plotFeatures <- function(data_mat, dim_df, features, nrow = 1,
                         qlo = 0.05, qhi = 1, order_by_expression = FALSE,
                         pt_shape = 16, pt_size = .5, no_guide = FALSE,
                        .xlim = c(NA, NA), .ylim = c(NA, NA), color_high = muted("blue")) {
    plt_df <- data.frame(dim_df[, 1:2])
    colnames(plt_df) <- c("X1", "X2")
    plt_list <- lapply(features, function(feature) {
        .x <- setupVals(data_mat, feature, qlo, qhi)
        ## scatter the sparse row back into a dense per-cell column
        ## (dgTMatrix @j is 0-based, hence the +1)
        plt_df$value <- 0
        plt_df[.x@j + 1, "value"] <- .x@x
        if (order_by_expression) {
            ## draw high-expression cells last so they sit on top
            plt_df %<>% dplyr::arrange(value)
        } else {
            plt_df %<>% dplyr::sample_frac(1L)
        }
        plt <- plt_df %>%
            ggplot(aes(X1, X2, color = value)) +
            # geom_point_rast(dpi = 300, width = 6, height = 4, size = .5, shape = pt_shape) +
            geom_point(shape = ".") +
            scale_color_gradient2(na.value = "lightgrey", mid = "lightgrey", midpoint = 0, high = color_high) +
            # theme_tufte(base_size = 14, base_family = "Helvetica") +
            # theme(panel.background = element_rect(), plot.title = element_text(hjust = .5)) +
            theme(plot.title = element_text(hjust = .5)) +
            labs(x = "UMAP 1", y = "UMAP 2", title = feature) +
            NULL
        if (no_guide) {
            plt <- plt +
                guides(color = FALSE)
        }
        ## apply axis limits only when the caller supplied at least one bound
        if (sum(is.na(.xlim)) < 2)
            plt <- plt + xlim(.xlim)
        if (sum(is.na(.ylim)) < 2)
            plt <- plt + ylim(.ylim)
        plt
    })
    if (length(plt_list) > 1) {
        ## patchwork `+` combines the per-feature panels into one figure
        Reduce(`+`, plt_list) + patchwork::plot_layout(nrow = nrow)
    } else {
        plt_list[[1]]
    }
}
## One labeled scatter plot per clustering resolution (columns of
## obj[[slot_name]]), drawn on the chosen embedding and combined with
## patchwork when there is more than one.
plot_clusters <- function(obj, umap_use='umap_before', resnames=NULL, slot_name='clusters_df') {
    if (is.null(resnames)) {
        ## default: plot every resolution column
        resnames <- colnames(obj[[slot_name]])
    }
    res <- lapply(resnames, function(resname) {
        do_scatter(obj[[umap_use]], obj[[slot_name]], resname, pt_size = 8) +
            labs(title = resname)
    })
    if (length(res) > 1) {
        ## patchwork's `+` combines the individual ggplots into one layout
        res <- purrr::reduce(res, `+`)
    }
    return(res)
}
## Column-bind two sparse matrices whose row names only partially overlap.
## Rows missing from either matrix are padded with all-zero sparse rows
## (rsparsematrix with density 0), so the result covers the union of row
## names with both matrices aligned to the same row order.
cbind2_fill <- function(A, B) {
    all_rows <- union(rownames(A), rownames(B))
    missing_in_A <- setdiff(all_rows, rownames(A))
    missing_in_B <- setdiff(all_rows, rownames(B))
    # all-zero padding blocks (density 0 => deterministic, no random entries)
    pad_A <- Matrix::rsparsematrix(length(missing_in_A), ncol(A), 0)
    pad_B <- Matrix::rsparsematrix(length(missing_in_B), ncol(B), 0)
    A_full <- Matrix::rbind2(A, pad_A)
    rownames(A_full) <- c(rownames(A), missing_in_A)
    B_full <- Matrix::rbind2(B, pad_B)
    rownames(B_full) <- c(rownames(B), missing_in_B)
    # align both to the union ordering, then join columns
    Matrix::cbind2(A_full[all_rows, ], B_full[all_rows, ])
}
|
8fb6aedcb6e27e194bafd24bce59c893d34e15cd
|
81da7dad0bd28de5d4068c7b294ab6c9662c4a33
|
/casual backup/R data cleaning tutorial.R
|
fddf950bb892a1e95d17f76480a9361d30de16be
|
[] |
no_license
|
kumarjitpathakbangalore/backup
|
b11af900f44ae7afaa59206c26880c95305b944c
|
47362ec2f5ef77da0bddfd34202751f8e05e1d29
|
refs/heads/master
| 2020-04-03T04:11:47.084838
| 2018-10-28T08:24:24
| 2018-10-28T08:24:24
| 155,006,306
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 26,727
|
r
|
R data cleaning tutorial.R
|
# vectors have variables of _one_ type:
# mixing types coerces everything to the most general type (character here)
c(1, 2, "three")
# shorter arguments are recycled to match the longer one
(1:3) * 2
(1:4) * c(1, 2)
# warning! the shorter length (3) does not evenly divide the longer (4)
(1:4) * (1:3)
# Each element of a vector can be given a name.
x <- c("red", "green", "blue")
# Named elements are much more suggestive of their meaning. The names of a
# vector need not be unique, but in most applications you'll want unique
# names (if any).
capColor = c(huey = "red", duey = "blue", louie = "green")
capColor
capColor["louie"]
# To get the name(s) of the element(s) with the specified value
names(capColor)[capColor == "blue"]
# a vector created with a few data points
x <- c(4, 7, 6, 5, 2, 8)
# creating two element-wise logical conditions
I <- x < 6
J <- x > 6
# Print all values that satisfy either condition.
x[I | J]
x[I]
Below
#?????????????????????????????????????????????????????????????????
x[c(TRUE, FALSE)]
x[c(-1, -2)]
x[c(TRUE)]
#-------------------Replacing values in vectors can be done in the same way
x <- 1:10
--------------every other value of x is replaced with 1 whchi satisfy the condition mentioned
x[c(TRUE, FALSE)] <- 1
#------------------A list is generalization of a vector that contains vector of different types even it may include other lists too
L <- list(x = c(1:5), y = c("a", "b", "c"), z = capColor)
# below are the way how we can fetch data from list
# below command just mentioning the second colum name to see the values
L[[2]]
# another way is to mentiond the variable name in the list preseeded by $
L$y
# another way is to mentiond the row number and column number() this can be shown as combinbations of column numbers too
# below syntax is to pull column 2 and 3
L[c(2,3)]
# we can also pull this by mentioning the column names in combination too
L[c("x", "y")]
L[["z"]]
#--------------A data.frame is not much more than a list of vectors, possibly of different types, but with
# every vector (now columns) of the same length. Since data.frames are a type of list, indexing
# them with a single index returns a sub-data.frame; that is, a data.frame with less columns
#-----------------VVI main thing to remember list gives horoizantal values and data frame return vertical valiues like Matrix
d <- data.frame(x = 1:10, y = letters[1:10], z = LETTERS[1:10])
d[1] #----------pull a column no
d[, 2] #---------Pull [all row,column]
d[, "x", drop = TRUE]
d[c("x", "z")]
d[d$x > 3, "y", drop = FALSE] #--------Pull all values of x where >3 and corosponding value of Y
d[2, ] #--------Pull all column but 2nd row
#------------Special values Like most programming languages, R has a number of Special values that are exceptions to the
# normal values of a type. These are NA, NULL, ˇÓInf and NaN
NA + 1
sum(c(NA, 1, 2))
median(c(NA, 1, 2, 3), na.rm = T) #------ Median will b e calculated if na.rm=TRUE else it will be NA
length(c(NA, 2, 3, 4))
3 == NA
NA == NA
TRUE | NA
#---------------------The function is.na can be used to detect NA's.
length(c(1, 2, NULL, 4))
sum(c(1, 2, NULL, 4), na.rm = T)
x <- NULL
c(x, 2)
#----------------------The function is.null can be used to detect NULL variables. is.null is a primitive function
is.null(L)
is.null(integer(0))
is.null(logical(0))
as.null(list(a = 1, b = "c"))
# --- stopifnot() and dimnames ------------------------------------------------
# assign the same matrix m to m1..m4, then vary only the dimnames
m <- matrix(round(100 * rnorm(6)), 2, 3); m1 <- m2 <- m3 <- m4 <- m
dimnames(m1) <- list(NULL, NULL)
dimnames(m2) <- list(NULL, character())
dimnames(m3) <- rev(dimnames(m2))
dimnames(m4) <- rep(list(character()), 2)
m4  # prints absolutely identically to m; verified with stopifnot() below
# fixed: the original compared m1 against the unrelated data.frame d, which
# errors out — all comparisons must be among m, m1..m4
stopifnot(m == m1, m1 == m2, m2 == m3, m3 == m4,
          identical(capture.output(m) -> cm,
                    capture.output(m1)),
          identical(cm, capture.output(m2)),
          identical(cm, capture.output(m3)),
          identical(cm, capture.output(m4)))
??stopifnot
# Usage of the stopifnot() function: it errors at the first expression that is
# not all TRUE. all.equal() compares with a numeric tolerance.
stopifnot(1 == 1, all.equal(pi, 3.14159265), 1 < 2)
m <- matrix(c(1, 3, 3, 1), 2, 2)
stopifnot(m == t(m), diag(m) == rep(1, 2))  # all(.) |=> TRUE
op <- options(error = expression(NULL))
# "disable stop(.)" << Use with CARE! >>
stopifnot(all.equal(pi, 3.141593), 2 < 2, all(1:10 < 12), "a" < "b")  # stops at the first mismatch
stopifnot(all.equal(pi, 3.1415927), 2 < 2, all(1:10 < 12), "a" < "b")
options(op)  # revert to previous error handler
op
# --- Inf ---------------------------------------------------------------------
# Inf stands for infinity and only applies to vectors of class numeric (an
# integer vector can never be Inf). It follows the IEEE floating point
# standard and results from calculations like division of a number by zero.
pi/0
2 * Inf
Inf - 1e+10
Inf + Inf
3 < -Inf
Inf == Inf
# --- NaN ---------------------------------------------------------------------
# NaN stands for "not a number": the result of indeterminate operations such
# as 0/0, Inf - Inf and Inf/Inf. Technically NaN is still of class numeric.
Inf - Inf
NaN + 1
exp(NaN)
exp(-Inf)
# --- Reading a file into the R environment -----------------------------------
#   read.csv     comma separated values, period as decimal separator.
#   read.csv2    semicolon separated values, comma as decimal separator.
#   read.delim   tab-delimited files, period as decimal separator.
#   read.delim2  tab-delimited files, comma as decimal separator.
#   read.fwf     data with a predetermined number of bytes per column.
# Common arguments:
#   header           Does the first line contain column names?
#   col.names        character vector with column names.
#   na.string        Which strings should be considered NA?
#   colClasses       character vector with the types of columns;
#                    will coerce the columns to the specified types.
#   stringsAsFactors If TRUE, converts all character vectors into factors.
#   sep              Field separator (used only internally by read.fwf).
getwd()
setwd("F:/Practice R")  # NOTE(review): hard-coded Windows path; adjust per machine
# supplying column names on import when the file has no header line:
# person <- read.csv( file = "pp.txt" , header = FALSE , col.names = c("age","height") )
person  # assumes `person` was created by the (commented) read above — confirm
# without col.names the first line is taken as the header by default
str(person)  # inspect variable types and some sample values
# If a column contains NA or other non-numeric characters, R imports it as a
# factor. Read it as character (stringsAsFactors = FALSE) and coerce with
# as.numeric() instead.
dat <- read.csv( file = "pp.txt" , header = FALSE , col.names = c("age","height") , stringsAsFactors=FALSE)
dat$height <- as.numeric(dat$height)
str(dat)
# --- Reading selected lines from a text file ---------------------------------
# select lines containing data using the grep/grepl functions
(txt <- readLines("pg.txt"))
# detect lines starting with a percentage sign..
I <- grepl("^%", txt)
# and throw them out
(dat <- txt[!I])
## [1] "Gratt,1861,1892" "Bob,1892" "1871,Emmet,1937"
# Table 1: steps to convert lines in a raw text file to a data.frame with
# correctly typed columns:
#   1 Read the data with readLines      -> character
#   2 Select lines containing data      -> character
#   3 Split lines into separate fields  -> list of character vectors
#   4 Standardize rows                  -> list of equivalent vectors
#   5 Transform to data.frame           -> data.frame
#   6 Normalize and coerce to type      -> data.frame
# Step 3: strsplit() accepts a character vector and a split argument telling
# it how to cut each string into substrings; the result is a list of
# character vectors. Here "," is the delimiter.
(fieldList <- strsplit(dat, split = ","))
## [[1]]
## [1] "Gratt" "1861" "1892"
##
## [[2]]
## [1] "Bob" "1892"
##
## [[3]]
## [1] "1871" "Emmet" "1937"
# Step 4: standardize rows — make sure that 1) every row has the same number
# of fields and 2) the fields are in the right order.
# The helper below implements that standardization.
# Map the raw fields of one record onto the fixed (name, birth, death) layout.
#   x - character vector with the fields of a single split line.
# Returns a character vector of length 3: the (first) alphabetic field as the
# name, a year below 1890 as birth date (or NA), a year above 1890 as death
# date (or NA).
assignFields <- function(x){
  out <- character(3)
  # get the name: the alphabetic field. [1] guards against zero or several
  # alphabetic fields (the original out[1] <- x[i] errored in both cases).
  i <- grepl("[[:alpha:]]", x)
  out[1] <- x[i][1]
  # the name field coerces to NA; suppress that expected warning
  num <- suppressWarnings(as.numeric(x))
  # get birth date (if any)
  i <- which(num < 1890)
  out[2] <- ifelse(length(i) > 0, x[i], NA)
  # get death date (if any)
  i <- which(num > 1890)
  out[3] <- ifelse(length(i) > 0, x[i], NA)
  out
}
standardFields <- lapply(fieldList, assignFields)  # fixed: was lapply(datasetname, ...), but fieldList is the strsplit result
standardFields
# --- Parallel processing -----------------------------------------------------
# parLapply() spreads the work over a cluster of worker processes; only
# worthwhile when each element needs a fair amount of computation.
# install.packages("parallel")  # not needed: parallel ships with base R
library(parallel)
cluster <- makeCluster(4)
standardFields <- parLapply(cl = cluster, fieldList, assignFields)
stopCluster(cluster)  # fixed: was stopCluster(cl), but `cl` was never defined
# Step 5: transform the list to a data.frame. There are several ways; here all
# elements are first copied into a matrix which is then coerced.
(M <- matrix(
  unlist(standardFields)
  , nrow = length(standardFields)
  , byrow = TRUE))
## [,1] [,2] [,3]
## [1,] "Gratt" "1861" "1892"
## [2,] "Bob" NA "1892"
## [3,] "Emmet" "1871" "1937"
colnames(M) <- c("name", "birth", "death")
(daltons <- as.data.frame(M, stringsAsFactors = FALSE))
# NOTE(review): the original comment claimed the first *and* last rows are
# dropped, but only the first row is removed here — confirm which rows are bad.
M <- M[-1, ]
# Step 6: normalize and coerce columns to the correct types.
daltons$birth <- as.numeric(daltons$birth)
daltons$death <- as.numeric(daltons$death)
# equivalent, using transform():
daltons <- transform( daltons
                      , birth = as.numeric(birth)
                      , death = as.numeric(death)
)
# --- Type conversion ----------------------------------------------------------
#   as.numeric    as.logical
#   as.integer    as.factor
#   as.character  as.ordered
as.numeric(c("7", "7*", "7.0", "7,0"))  # unparseable strings become NA (with a warning)
class(c("abc", "def"))
## [1] "character"
class(1:10)
## [1] "integer"
class(c(pi, exp(1)))
## [1] "numeric"
class(factor(c("abc","def")))
sapply(dat, class)  # class of every column of dat
# --- Factors ------------------------------------------------------------------
# In R, categorical values are stored in factor variables: an integer vector
# endowed with a table specifying what integer corresponds to what level.
# Request that translation table with levels().
f <- factor(c("a", "b", "a", "a", "c"))
levels(f)
## [1] "a" "b" "c"
# example: recode numeric gender codes into labelled factor levels
gender <- c(2, 1, 1, 2, 0, 1, 1)
# recoding table, stored in a simple vector
recode <- c(male = 1, female = 2)
(gender <- factor(gender, levels = recode, labels = names(recode)))  # 0 has no level, so it becomes NA
## [1] female male male female <NA> male male
## Levels: male female
# relevel() determines which level comes first
(gender <- relevel(gender, ref = "female"))
# Levels can also be reordered, depending on the mean value of another
# variable, for example:
age <- c(27, 52, 65, 34, 89, 45, 68)
(gender <- reorder(gender, age))
# (repeated for demonstration)
age <- c(27, 52, 65, 34, 89, 45, 68)
(gender <- reorder(gender, age))
# reorder() attaches the group means as a "scores" attribute to gender;
# remove it by setting that attribute to NULL.
gender
attr(gender, "scores") <- NULL  # remove only the attached average scores
gender
# --- Dates and times ----------------------------------------------------------
# Base R has three classes to store a time instance: Date (dates only),
# POSIXlt and POSIXct (date and/or time).
current_time <- Sys.time()  # the current system time, as POSIXct
class(current_time)
## [1] "POSIXct" "POSIXt"
current_time
## [1] "2013-10-28 11:12:50 CET"
# The lubridate package facilitates converting text to POSIXct dates.
library(lubridate)
dates <- c("15/02/2013", "15 Feb 13", "It happened on 15 02 '13")
q <- dmy(dates)  # dmy = parse as Day-Month-Year
q
# NOTE: dmy assumes day-month-year order and extracts valid dates. Month-name
# parsing is locale dependent: "Feb" works in English or Dutch locales but
# fails e.g. in a French locale (Fevrier).
# Available parsers (all order permutations of day, month, year):
#   dmy  myd  ydm
#   mdy  dym  ymd
dmy("01 01 68")
# Format codes (for format()):
#   %a abbreviated weekday (Mon)      %A full weekday (Monday)
#   %b abbreviated month (Sep)        %B full month (September)
#   %m month number 01-12 (09)        %d day of month 01-31 (28)
#   %y year without century (13)      %Y year including century (2013)
mybirth <- dmy("28 Sep 1976")
format(mybirth, format = "I was born on %B %d, %Y")
## [1] "I was born on September 28, 1976"
# --- String normalization -----------------------------------------------------
# extra white space at the beginning or end of a string is removed with str_trim
library(stringr)
str_trim(" hello world ")
## [1] "hello world"
str_trim(" hello world ", side = "left")
## [1] "hello world "
str_trim(" hello world ", side = "right")
## [1] " hello world"
str_pad(112, width = 9, side = "left", pad = "X")  # pad to a fixed width
## [1] "XXXXXX112"
toupper("Hello world")  # convert to upper case
## [1] "HELLO WORLD"
tolower("Hello World")  # convert to lower case
## [1] "hello world"
# --- Approximate string matching ----------------------------------------------
# Two forms of string matching: (1) determine whether a (range of)
# substring(s) — a pattern — occurs within another string; (2) define a
# distance metric measuring how "different" two strings are.
gender <- c("M", "male ", "Female", "fem.")
grepl("m", gender)  # note that the result is case sensitive
## [1] FALSE TRUE TRUE TRUE
grep("m", gender)
## [1] 2 3 4
# The capital M in the first element does not match the lower case m.
# Circumvent case sensitivity by case normalization or ignore.case = TRUE.
# --- Missing value treatment --------------------------------------------------
# list rows of data that have missing values
mydata[!complete.cases(mydata),]
# create new dataset without missing data
newdata <- na.omit(mydata)
# excluding missing values from analysis
x <- c(1,2,NA,3)
mean(x) # returns NA
mean(x, na.rm=TRUE) # returns 2
# recode 99 to missing for variable v1:
# select rows where v1 is 99 and recode column v1
mydata$v1[mydata$v1==99] <- NA
# recode NA's to any other number of choice
logistic$VISIT_DEC[is.na(logistic$VISIT_DEC)] <- 0
is.na(x) # returns TRUE where x is missing
y <- c(1,2,3,NA)
is.na(y) # returns a vector (F F F T)
# delete an unnecessary column by its number, derived from the variable name
logistic <- logistic[,-(which(names(logistic)=="PUR_SEP_OCT"))] # which() returns the column number, which is then dropped
# --- subset(): rows matching outlier thresholds -------------------------------
newdata <- subset(chtest1, VISIT_DEC >= 100 | VISIT_SEP_OCT >= 100 | VISIT_JAN_AUG >= 100 | PUR_DEC >=2000 | PUR_SEP_OCT >=2000 | PUR_JAN_AUG >= 2000) # add select=c(var1, var2) to keep only specific columns
# write these out for manual checking
write.csv(data.frame(newdata), file = "F:/Practice R/outliers_churn.csv")
# using subset function (part 2)
newdata <- subset(mydata, sex=="m" & age > 25,
                  select=weight:income)
# --- Removing outliers --------------------------------------------------------
boxplot.stats(logistic$VISIT_JAN_AUG)$out  # values beyond the boxplot whiskers
# Flag outliers by their ratio to the median: an element is an outlier when
# value/median or median/value exceeds r. Non-finite values are dropped first;
# all remaining values must be strictly positive.
hboutlier <- function(x, r) {
  finite_vals <- x[is.finite(x)]
  stopifnot(length(finite_vals) > 0, all(finite_vals > 0))
  center <- median(finite_vals)
  if (center <= sqrt(.Machine$double.eps)) {
    warning("Reference value close to zero: results may be inaccurate")
  }
  ratio <- pmax(finite_vals / center, center / finite_vals)
  ratio > r
}
#____________________ANOTHER WAY____________________________________________________________________
# Replace Tukey-fence outliers with NA: values outside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are set to missing; everything else is kept.
remove_outliers <- function(x, na.rm = TRUE, ...) {
  quartiles <- quantile(x, probs = c(0.25, 0.75), na.rm = na.rm, ...)
  fence <- 1.5 * IQR(x, na.rm = na.rm)
  lower <- quartiles[1] - fence
  upper <- quartiles[2] + fence
  replace(x, x < lower | x > upper, NA)
}
# demo: remove outliers from a standard normal sample with two planted extremes
set.seed(1)
x <- rnorm(100)
x <- c(-10, x, 10)
y <- remove_outliers(x)
## png()
par(mfrow = c(1, 2))
boxplot(x)  # with outliers
boxplot(y)  # outliers replaced by NA
## dev.off()
# --- Data management ----------------------------------------------------------
# sorting examples using the mtcars dataset
attach(mtcars)  # NOTE(review): attach() is discouraged; with() or mtcars$col is safer
# sort by mpg
newdata <- mtcars[order(mpg),]
# sort by mpg and cyl
newdata <- mtcars[order(mpg, cyl),]
# sort by mpg (ascending) and cyl (descending)
newdata <- mtcars[order(mpg, -cyl),]
detach(mtcars)
# --- Merging ------------------------------------------------------------------
# (pseudo-code: "data frameA"/"data frameB" were placeholders with invalid
#  syntax; kept here as comments)
# merge two data frames by ID:
#   total <- merge(dataframeA, dataframeB, by = "ID")
# merge two data frames by ID and Country:
#   total <- merge(dataframeA, dataframeB, by = c("ID", "Country"))
# stack the rows of two compatible data frames:
#   total <- rbind(dataframeA, dataframeB)
# --- Aggregating --------------------------------------------------------------
# aggregate data frame mtcars by cyl and vs, returning means
# for numeric variables
attach(mtcars)
aggdata <-aggregate(mtcars, by=list(cyl,vs),
                    FUN=mean, na.rm=TRUE)
print(aggdata)
detach(mtcars)
# --- not working below (kept for reference) -----------------------------------
# library(Hmisc)  # fixed case: was "Library(Hmisc)" — R is case sensitive
install.packages("Hmisc")
??summarize
setInternet2()  # Windows-only helper; defunct in R >= 3.3.0
require(arulesViz)  # NOTE(review): library() is preferred over require()
# --- Special values treatment -------------------------------------------------
# For numeric variables, special values are those outside the mathematical set
# of real numbers. is.finite() determines which values are "regular":
is.finite(c(1, Inf, NaN, NA))
## [1] TRUE FALSE FALSE FALSE
# is.finite accepts vectorial input; the helper below extends the check to
# every column type of a data.frame.
# TRUE where x holds a "special" value: for numeric input anything non-finite
# (NA, NaN, Inf, -Inf); for any other type, NA.
is.special <- function(x) {
  if (is.numeric(x)) {
    !is.finite(x)
  } else {
    is.na(x)
  }
}
person  # apply is.special to the person data (below)
q <- read.csv(file = "q.txt", header = TRUE, stringsAsFactors = FALSE)
dat$height <- as.numeric(dat$height)  # fixed: original had an unbalanced ")"
## age height
## 1 21 6.0
## 2 42 5.9
## 3 18 5.7*
## 4 21 <NA>
sapply(person, is.special)  # is.special applied column-wise
## age height
## [1,] FALSE FALSE
## [2,] FALSE FALSE
## [3,] FALSE FALSE
## [4,] FALSE TRUE
# --- Rule-based checking with editrules ---------------------------------------
install.packages("editrules")
library(editrules)
# NOTE(review): an earlier inline data.frame attempt failed (it used C()
# instead of c() and had unequal column lengths); the data is read from
# q.txt instead. Data entered:
#   age,agegroup,height,status,yearsmarried
#   21,adult,6.0,single,-1
#   2,child,3,married, 0
#   18,adult,5.7,married, 20
#   221,elderly, 5,widowed, 2
#   34,child, -7,married, 3
# editrules has additional dependencies that must be installed as well
people <- read.csv("q.txt")
library(editrules)
(E <- editset(c("age >=0", "age <= 150")))  # define the rule set
## Edit set:
## num1 : 0 <= age
## num2 : age <= 150
people$age <- as.numeric(people$age)  # imported as factor; make numeric before checking
violatedEdits(E, people)  # which records violate which rule?
## edit
## record num1 num2
## 1 FALSE FALSE
## 2 FALSE FALSE
## 3 FALSE FALSE
## 4 FALSE TRUE
## 5 FALSE FALSE
setInternet2()  # Windows-only helper; defunct in R >= 3.3.0
# --- Example rule file --------------------------------------------------------
# The rules below are the *contents* of edit.txt, not executable R statements
# (run directly they fail on undefined variables); they are parsed by
# editfile() below.
# numerical rules:
#   age >= 0
#   height > 0
#   age <= 150
#   age > yearsmarried
# categorical rules:
#   status %in% c("married","single","widowed")
#   agegroup %in% c("child","adult","elderly")
#   if ( status == "married" ) agegroup %in% c("adult","elderly")
# mixed rules:
#   if ( status %in% c("married","widowed")) age - yearsmarried >= 17
#   if ( age < 18 ) agegroup == "child"
#   if ( age >= 18 && age <65 ) agegroup == "adult"
#   if ( age >= 65 ) agegroup == "elderly"
E <- editfile("edit.txt")
ve <- violatedEdits(E, people)
summary(ve)  # summary of the violations
plot(ve)     # plot them graphically
getOption("allowedSymbols")  # symbols allowed inside editrules expressions
# --- Simple transformation rules ----------------------------------------------
library(deducorrect)
# if the file's values are not comma separated, everything lands in one column
(marx <- read.csv("r.txt", stringsAsFactors = FALSE))
test1 <- marx
## name height unit
## 1 Gaucho 170.00 cm
## 2 Zeppo 1.74 m
## 3 Chico 70.00 inch
## 4 Gummo 168.00 cm
## 5 Harpo 5.91 ft
# convert every height to meters, one unit at a time
marx_m <- marx
I <- (marx$unit == "cm")
marx_m[I, "height"] <- marx$height[I]/100
I <- marx$unit == "inch"
marx_m[I, "height"] <- marx$height[I]/39.37  # fixed: wrote to a new "inch" column
I <- marx$unit == "ft"
marx_m[I, "height"] <- marx$height[I]/3.28   # fixed: wrote to a new "ft" column
marx_m$unit <- "m"
marx_m
# the same conversion, applied to marx itself. Note that if() takes a scalar
# condition, so the original `if (marx$unit == "cm")` version could not work
# row-wise; logical indexing is used instead.
I <- marx$unit == "cm"
marx$height[I] <- marx$height[I]/100
I <- marx$unit == "inch"
marx$height[I] <- marx$height[I]/39.37
I <- marx$unit == "ft"
marx$height[I] <- marx$height[I]/3.28
# set all units to meter
marx$unit <- "m"
marx
# the same corrections, driven by a rule file (deducorrect).
# read the conversion rules:
R <- correctionRules("convert.txt")  # NOTE(review): `R` is a confusing variable name
R
cor <- correctWithRules(R, marx)
# Invoice total for `hours` of work at `pph` (price per hour, default 40);
# a 10% discount applies above 100 hours, and the result is rounded.
priceCalculator <- function(hours, pph = 40) {
  total <- hours * pph
  if (hours > 100) {
    total <- total * 0.9
  }
  round(total)
}
# (a stray bare `if` keyword stood here — a syntax error left over from
#  editing; removed)
# transpose of a matrix
# a poor alternative to the built-in t() function
# Transpose a matrix with explicit loops (a didactic alternative to t()).
#   x - a matrix; anything else yields NA with a warning.
# Returns the ncol(x) x nrow(x) transpose.
mytrans <- function(x) {
  if (!is.matrix(x)) {
    warning("argument is not a matrix: returning NA")
    return(NA_real_)
  }
  y <- matrix(1, nrow = ncol(x), ncol = nrow(x))
  # seq_len() (not 1:nrow) so matrices with a zero dimension don't make the
  # loop run with index 0 and fail on an out-of-bounds subscript
  for (i in seq_len(nrow(x))) {
    for (j in seq_len(ncol(x))) {
      y[j, i] <- x[i, j]
    }
  }
  return(y)
}
# try it: transpose a 5x2 matrix (tz should be 2x5)
z <- matrix(1:10, nrow=5, ncol=2)
tz <- mytrans(z)
#-----------------------------------------------------3.2.3 Deterministic imputation--------------------------
|
bce541f2bd7dbca28a7e64139ae89e78c3a0008a
|
07e281844839b00b997d54588591d89aeaed697a
|
/20180121tGRN_TFCisExp.R
|
ca4cb0171dfc1891e0f2af9128224dfe96b9f7dd
|
[] |
no_license
|
YKeito/tGRN_analysis20181023
|
bb317a3e8a8b48ff62aa71f963fcea80fc7c02f8
|
3cff9e3fceebf8425ee3a9051c6f845c4ccecf33
|
refs/heads/master
| 2022-12-21T10:57:03.395510
| 2020-09-28T05:35:33
| 2020-09-28T05:35:33
| 299,198,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,192
|
r
|
20180121tGRN_TFCisExp.R
|
"~/Nakano_RNAseq/network_analysis/script/tGRN_analysis20181023/20180121tGRN_TFCisExp.R.R"
#やりたいことメモ-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#目的:CY化合物毎に動的な遺伝子制御ネットワークを作成するための.txtファイルを出力させる
#Trans-Cisネットワークのエッジの確からしさを高めるために、RNA-Seqで得られた実際の発現パターンを利用
#Trans側の発現ピークの後にCis側の劇的な発現変化が起きていればエッジを引くといった考え
#逆に発現のピークがずれているものや劇的に変化していないものはエッジは引かない。
#Cis-Trans Networkを作成したときのノードをMCLNumとしている。
#発現のピークはこの三つ。3h-1h, 12h-3h, 24h-12hで比較したときに差の符号が変わった時がピーク
#例えば3h-1h=+, 12h-3h=-であれば3hがピーク
#使ったパッケージ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
library(ggplot2)
library(stringr)
library(dplyr)
library(purrr)
#自動化するための準備----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
condition <- c("CY15", "CY16", "CY20")
MCLNum <- unique(TransCisNetwork$Cis)
MCLNum <- MCLNum[grep("MCLNum", MCLNum)]
time <- c("1h", "3h", "12h")
TransCis_pair <- paste0(TransCisNetwork$Trans, TransCisNetwork$Cis) #最終的にCis-Transを統合させるから
e <- 1
#CY毎のfor入ります----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
for(e in e:length(condition)){
#各時間の発現の差の計算。1h-0h, 3h-1h, 12h-3h, 24h-12h--------------------------------------------------------------------------------------------------------------------------------------
diff <- c() #発現の差を格納
allExp <- c() #各MCLNumの平均発現値を格納
temp <- substr(MCLNum, start = 7, stop = 9) #MCLNum数字のみを一時格納
n <- 1
#Cisクラスター毎のfor入ります------------------------------------------------------------------------------------------------------------------------------------------------------------------
for(n in n:length(temp)){
T_data <- allRNASeq[match(MasterTable %>% filter(MCLNum == temp[n]) %>% select(AGI) %>% unlist(use.names = F), rownames(allRNASeq)), ]
T_Expression <- T_data %>% select(ends_with("h")) %>% select(starts_with(condition[e])) %>% select(-ends_with("48h")) %>% map(mean) %>% unlist()#mapは各列で同様な処理をするときに良く使う
#クラスターの平均発現値を前後の時間で引いた値が正なのか負なのか数値化(正なら+1、負なら-1)
allExp <- rbind(allExp, data.frame(Time01h = T_Expression[1],
Time03h = T_Expression[2],
Time12h = T_Expression[3],
Time24h = T_Expression[4])
)
diff <- rbind(diff, data.frame(diff01h_00h = allExp$Time01h[n]-0,
diff03h_01h = allExp$Time03h[n]-allExp$Time01h[n],
diff12h_03h = allExp$Time12h[n]-allExp$Time03h[n],
diff24h_12h = allExp$Time24h[n]-allExp$Time12h[n])
)
n <- n+1
}
rownames(allExp) <- MCLNum
diff <- diff %>% mutate(diff01h_00h_sign = if_else(diff01h_00h > 0, true = "p", false = "n"),
diff03h_01h_sign = if_else(diff03h_01h > 0, true = "p", false = "n"),
diff12h_03h_sign = if_else(diff12h_03h > 0, true = "p", false = "n"),
diff24h_12h_sign = if_else(diff24h_12h > 0, true = "p", false = "n"))
rownames(diff) <- MCLNum
#トランス側の発現値-------------------------------------------------------------------------------------------------------------------------------------------------------------------------
T_data <- allRNASeq[match(unique(TransCisNetwork$Trans)[grep("AT", unique(TransCisNetwork$Trans))], rownames(allRNASeq)), ] %>% select(ends_with("h")) %>% select(starts_with(condition[e])) %>% select(-ends_with("48h"))
Trans_Exp <- data.frame(AGI = rownames(T_data),
T_data %>% select(1)-0,
T_data %>% select(2)-T_data %>% select(1),
T_data %>% select(3)-T_data %>% select(2),
T_data %>% select(4)-T_data %>% select(3),
stringsAsFactors = F
)
colnames(Trans_Exp) <- c("AGI", "diff01h_00h", "diff03h_01h", "diff12h_03h", "diff24h_12h")
Trans_Exp <- Trans_Exp %>% mutate(diff01h_00h_sign = if_else(diff01h_00h > 0, true = "p", false = "n"),
diff03h_01h_sign = if_else(diff03h_01h > 0, true = "p", false = "n"),
diff12h_03h_sign = if_else(diff12h_03h > 0, true = "p", false = "n"),
diff24h_12h_sign = if_else(diff24h_12h > 0, true = "p", false = "n"))
#CY15の全クラスターの平均発現値の差をヒートマップで可視化----------------------------------------------------------------------------------------------------------------------------------
#全クラスターの各時間の平均発現値の差をggplotでヒートマップ化するためのデータフレーム
T_data <- data.frame(diffexp = c(diff[, 1], diff[, 2], diff[, 3], diff[, 4]),
Sample = rep(paste0("MCLNum", temp), times = 4),
condition = rep(colnames(diff)[1:4], each = length(temp)),
stringsAsFactors = F
)
g1 <- ggplot(T_data, aes(x = condition, y = Sample, fill = diffexp))
g1 <- g1 + geom_tile()
g1 <- g1 + theme_bw()
g1 <- g1 + scale_fill_gradient2(low = "blue", high = "red", na.value = "white")
g1 <- g1 + ggtitle(paste0(condition[e], "_CisCluster"))
plot(g1)
#Cis(制御される)側-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#発現の変化が大きいものを検出
difftime <- paste0(c("01h_00h", "03h_01h", "12h_03h", "24h_12h"), "_sign")
check_max <- c()
check_min <- c()
k <- 1
for(k in k:nrow(diff)){
temp <- diff[k, 5:ncol(diff)]
temp <- temp == "p"
if(sum(temp) == 0){#nのみ
check_max <- c(check_max, NA)
check_min <- c(check_min, difftime[which.min(diff[k, 1:4])])
}
if(sum(temp) != 0 & sum(temp) != 4){#p, n共にあるとき
check_max <- c(check_max, difftime[which.max(diff[k, 1:4])])
check_min <- c(check_min, difftime[which.min(diff[k, 1:4])])
}
if(sum(temp) == 4){#pのみ
check_max <- c(check_max, difftime[which.max(diff[k, 1:4])])
check_min <- c(check_min, NA)
}
k <- k+1
}
#このforではcheck_max, check_minを基に、diffにpp, nnを上書きするためのもの
m <- 1
for(m in m:nrow(diff)){
if(sum(diff[m, 5:8] == "p") != 0){
diff[m, grep(check_max[m], colnames(diff))] <- "pp"
}
if(sum(diff[m, 5:8] == "n") != 0){
diff[m, grep(check_min[m], colnames(diff))] <- "nn"
}
m <- m+1
}
n <- 1
for(n in n:nrow(diff)){
if(sum(diff[n, 5:8] == "p") != 0){
diff[n, grep(check_max[n], colnames(diff))] <- "pp"
}
if(sum(diff[n, 5:8] == "n") != 0){
diff[n, grep(check_min[n], colnames(diff))] <- "nn"
}
n <- n+1
}
#Trans(制御する)側-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#発現の変化が大きいものを検出
difftime <- paste0(c("01h_00h", "03h_01h", "12h_03h", "24h_12h"), "_sign")
check_max <- c()
check_min <- c()
k <- 1
for(k in k:nrow(Trans_Exp)){
temp <- Trans_Exp[k, 6:ncol(Trans_Exp)]
temp <- temp == "p"
if(sum(temp) == 0){#nのみ
check_max <- c(check_max, NA)
check_min <- c(check_min, difftime[which.min(Trans_Exp[k, 2:5])])
}
if(sum(temp) != 0 & sum(temp) != 4){#p, n共にあるとき
check_max <- c(check_max, difftime[which.max(Trans_Exp[k, 2:5])])
check_min <- c(check_min, difftime[which.min(Trans_Exp[k, 2:5])])
}
if(sum(temp) == 4){#pのみ
check_max <- c(check_max, difftime[which.max(Trans_Exp[k, 2:5])])
check_min <- c(check_min, NA)
}
k <- k+1
}
#このforではcheck_max, check_minを基に、Trans_Expにpp, nnを上書きするためのもの
m <- 1
for(m in m:nrow(Trans_Exp)){
if(sum(Trans_Exp[m, 5:8] == "p") != 0){
Trans_Exp[m, grep(check_max[m], colnames(Trans_Exp))] <- "pp"
}
if(sum(Trans_Exp[m, 5:8] == "n") != 0){
Trans_Exp[m, grep(check_min[m], colnames(Trans_Exp))] <- "nn"
}
m <- m+1
}
n <- 1
for(n in n:nrow(Trans_Exp)){
if(sum(Trans_Exp[n, 5:8] == "p") != 0){
Trans_Exp[n, grep(check_max[n], colnames(Trans_Exp))] <- "pp"
}
if(sum(Trans_Exp[n, 5:8] == "n") != 0){
Trans_Exp[n, grep(check_min[n], colnames(Trans_Exp))] <- "nn"
}
n <- n+1
}
#発現データのみでの発現制御予測-----------------------------------------------------------------------------------------------------------------------------------------------------------
#Source側-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
S_regulate <- c()
n <- 1
for(n in n:nrow(Trans_Exp)){
m <- 6
for(m in m:c(ncol(Trans_Exp)-1)){
if(str_detect(Trans_Exp[n, m], "p") && str_detect(Trans_Exp[n, m+1], "n")){
S_regulate <- c(S_regulate, paste0("yama_", str_sub(colnames(Trans_Exp)[m], start = 5, end = 7)))
names(S_regulate)[length(S_regulate)] <- Trans_Exp$AGI[n]
}
if(str_detect(Trans_Exp[n, m], "n") && str_detect(Trans_Exp[n, m+1], "p")){
S_regulate <- c(S_regulate, paste0("tani_", str_sub(colnames(Trans_Exp)[m], start = 5, end = 7)))
names(S_regulate)[length(S_regulate)] <- Trans_Exp$AGI[n]
}
m <- m+1
}
n <- n+1
}
#自己フィードバック(シスクラスター -> TF)を作るために
n <- 1
for(n in n:nrow(diff)){
m <- 5
for(m in m:c(ncol(diff)-1)){
if(str_detect(diff[n, m], "p") && str_detect(diff[n, m+1], "n")){
S_regulate <- c(S_regulate, paste0("yama_", str_sub(colnames(diff)[m], start = 5, end = 7)))
names(S_regulate)[length(S_regulate)] <- rownames(diff)[n]
}
if(str_detect(diff[n, m], "n") && str_detect(diff[n, m+1], "p")){
S_regulate <- c(S_regulate, paste0("tani_", str_sub(colnames(diff)[m], start = 5, end = 7)))
names(S_regulate)[length(S_regulate)] <- rownames(diff)[n]
}
m <- m+1
}
n <- n+1
}
#Target側-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
T_regulate <- c()
n <- 1
for(n in n:nrow(diff)){
if(sum(str_detect(diff[n, ], "pp")) == 1){
TT_regulate <- diff[n, setdiff(c(which(str_detect(diff[n, ], "pp"))-1):which(str_detect(diff[n, ], "pp")), c(1:4))]
T_regulate <- rbind(T_regulate, data.frame(Target = rownames(diff[n, ]),
regulate = paste(TT_regulate, collapse = "_"),
Time = str_sub(colnames(diff)[which(str_detect(diff[n, ], "pp"))], start = 5, end = 11),
stringsAsFactors = F
))
}
if(sum(str_detect(diff[n, ], "nn")) == 1){
TT_regulate <- diff[n, setdiff(c(which(str_detect(diff[n, ], "nn"))-1):which(str_detect(diff[n, ], "nn")), c(1:4))]
T_regulate <- rbind(T_regulate, data.frame(Target = rownames(diff[n, ]),
regulate = paste(TT_regulate, collapse = "_"),
Time = str_sub(colnames(diff)[which(str_detect(diff[n, ], "nn"))], start = 5, end = 11),
stringsAsFactors = F
))
}
n <- n+1
}
#自己フィードバック(シスクラスター -> TF)を作るために
n <- 1
for(n in n:nrow(Trans_Exp)){
if(sum(str_detect(Trans_Exp[n, ], "pp")) == 1){
TT_regulate <- Trans_Exp[n, setdiff(c(which(str_detect(Trans_Exp[n, ], "pp"))-1):which(str_detect(Trans_Exp[n, ], "pp")), c(2:5))]
T_regulate <- rbind(T_regulate, data.frame(Target = Trans_Exp$AGI[n],
regulate = paste(TT_regulate, collapse = "_"),
Time = str_sub(colnames(Trans_Exp)[which(str_detect(Trans_Exp[n, ], "pp"))], start = 5, end = 11),
stringsAsFactors = F
))
}
if(sum(str_detect(Trans_Exp[n, ], "nn")) == 1){
TT_regulate <- Trans_Exp[n, setdiff(c(which(str_detect(Trans_Exp[n, ], "nn"))-1):which(str_detect(Trans_Exp[n, ], "nn")), c(2:5))]
T_regulate <- rbind(T_regulate, data.frame(Target = Trans_Exp$AGI[n],
regulate = paste(TT_regulate, collapse = "_"),
Time = str_sub(colnames(Trans_Exp)[which(str_detect(Trans_Exp[n, ], "nn"))], start = 5, end = 11),
stringsAsFactors = F
))
}
n <- n+1
}
#発現データのみでエッジの作成-----------------------------------------------------------------------------------------------------------------------------------------------------------
T_regulate <- T_regulate[nchar(T_regulate$regulate) > 2, ]
temp <- c("03h_01h", "12h_03h", "24h_12h")
ExpNetwork <- c()
n <- 1
for(n in n:length(time)){
T_source <- names(S_regulate[grep(time[n], S_regulate)])
T_sourceInfo <- str_split(S_regulate[grep(time[n], S_regulate)], pattern = "_", simplify = T)
colnames(T_sourceInfo) <- c("katachi", "time")
T_target <- T_regulate %>% filter(Time == temp[n] | Time == temp[n+1] | Time == temp[n+2])
ExpNetwork <- rbind(ExpNetwork, data.frame(Source = rep(T_source, times = length(T_target$Target)),
S_Time = T_sourceInfo[, 2],
Target = rep(T_target$Target, each = length(T_source)),
T_Time = rep(T_target$Time, each = length(T_source)),
regulate = paste0(T_sourceInfo[, 1], "_", rep(T_target$regulate, each = length(T_source))),
stringsAsFactors = F
)
)
n <- n+1
}
#edgeの定義------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
ExpNetwork$regulate[ExpNetwork$regulate == "tani_n_pp" | ExpNetwork$regulate == "yama_pp_nn" | ExpNetwork$regulate == "tani_p_pp" | ExpNetwork$regulate == "yama_n_nn" | ExpNetwork$regulate == "yama_p_nn" | ExpNetwork$regulate == "tani_nn_pp"] <- "possitive"
ExpNetwork$regulate[ExpNetwork$regulate == "yama_n_pp" | ExpNetwork$regulate == "tani_pp_nn" | ExpNetwork$regulate == "yama_p_pp" | ExpNetwork$regulate == "tani_n_nn" | ExpNetwork$regulate == "tani_p_nn" | ExpNetwork$regulate == "yama_nn_pp"] <- "negative"
#Trans-Cisと組み合わせ-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Exp_pair <- paste0(ExpNetwork$Source, ExpNetwork$Target)
temp <- c()
n <- 1
for(n in n:length(TransCis_pair)){
temp <- c(temp, which(TransCis_pair[n] == Exp_pair))
n <- n+1
}
T.data <- ExpNetwork[temp, ]
T.data <- T.data %>% mutate(attrEdge = paste0(T.data$S_Time, T.data$T_Time, T.data$regulate))
T.data <- T.data[!duplicated(T.data), ]
T.rownum <- grep("MCLNum", T.data$Source)
temp <- T.data[T.rownum, ]
T.MCLNum <- unique(temp$Source)
TT.Time <- sort(unique(paste0(T.data$S_Time, T.data$T_Time)))
test3 <- c()
n <- 1
for(n in n:length(T.MCLNum)){
#Source:MCLNum
test1 <- match(sort(unique(substr(temp$attrEdge[temp$Source == T.MCLNum[n]], start = 1, stop = 10))), TT.Time)
#Target:MCLNumがいつか
test2 <- min(match(sort(unique(substr(T.data$attrEdge[T.data$Target == T.MCLNum[n]], start = 1, stop = 10))), TT.Time))
if(sum(test1 < test2) >= 1){
for(m in which(test1 < test2)){
test3 <- c(test3, rownames(temp[grep(TT.Time[which(test1 < test2)[m]], temp$attrEdge), ]))
m <- m+1
}
}
n <- n+1
}
T.data <- T.data[setdiff(rownames(T.data), setdiff(T.rownum, test3)), ]
#attributeファイル--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
attrNode <- list(intersect(T.data$Source, T.data$Target),
setdiff(T.data$Source, T.data$Target),
setdiff(T.data$Target, T.data$Source)
)
names(attrNode) <- c("S&T,", "S,", "T,")
attrNode <- data.frame(MCLNum = unlist(attrNode),
NodeColour = str_split(names(unlist(attrNode)), pattern = ",", simplify = T)[, 1],
stringsAsFactors = F)
attrNode <- cbind(attrNode, MasterTable[match(attrNode$MCLNum, MasterTable$AGI), ] %>% select(TF, symbol, annotation))
attrNode$symbol[is.na(attrNode$symbol)] <- str_sub(attrNode$MCLNum[is.na(attrNode$symbol)], start = 7, end = 9)
rownames(attrNode) <- c()
temp <- attrNode$MCLNum
temp <- temp[grep("MCLNum", temp)]
n <- 1
for(n in n:length(temp)){
attrNode$TF[attrNode$MCLNum == temp[n]] <- paste0("Cis:", paste(unique(names(unlist(allCisList)[temp[n] == unlist(allCisList)])), collapse = "|"))
n <- n+1
}
T_time <- c("01h", "03h", "12h")
n <- 1
for(n in n:length(T_time)){
temp <- T.data[T.data$S_Time == T_time[n], ]
STNode <- unique(intersect(temp$Source, temp$Target))
attrSTNode <- c()
if(length(STNode) != 0){
m <- 1
for(m in m:length(STNode)){
#Target側だけではなくSource側も意識しないといけない
#S&Tの時間を結合させる
attrSTNode <- c(attrSTNode, paste0("S&T", T_time[n], "_", paste(sort(unique(temp$T_Time[temp$Source == STNode[m] | temp$Target == STNode[m]])), collapse = "|")))
names(attrSTNode)[m] <- STNode[m]
m <- m+1
}
}
SNode <- setdiff(temp$Source, STNode)
TNode <- setdiff(temp$Target, STNode)
attrTNode <- c()
m <- 1
for(m in m:length(TNode)){
attrTNode <- c(attrTNode, paste0("T", paste(sort(unique(temp$T_Time[temp$Target == TNode[m]])), collapse = "|")))
names(attrTNode)[m] <- TNode[m]
m <- m+1
}
m <- 1
attrSNode <- c()
for(m in m:length(SNode)){
attrSNode <- c(attrSNode, paste0("S", T_time[n], "*", paste(sort(unique(temp$T_Time[temp$Source == SNode[m]])), collapse = "|")))
names(attrSNode)[m] <- SNode[m]
m <- m+1
}
test2 <- data.frame(Node = c(SNode, TNode, STNode),
attrNode = c(attrSNode, attrTNode, attrSTNode)
)
rownames(test2) <- c()
colnames(test2) <- c("Node", paste0(T_time[n], "_", "attrNode"))
title <- paste0("bigdata/yasue/tGRN_Groping/inflation4/", condition[e], "/diffExpNetwork/Network/", condition[e], "_", T_time[n], "_attrNodecolor.txt")
#write.table(test2, file = title, sep = "\t", quote = F, row.names = F)
n <- n+1
}
#tGRN出力-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
assign(paste0(condition[e], "_ExpNetwork"), T.data)
title <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/", condition[e], "/diffExpNetwork/Network/", paste(str_split(Sys.Date(), pattern = "-", simplify = T), collapse = ""), condition[e], "_tGRN_TFCisExp.txt")
#write.table(T.data, title, sep = "\t", quote = F, row.names = F)
#tGRN attribute出力-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
title <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/", condition[e], "/diffExpNetwork/Network/", condition[e], "_tGRN_TFCisExp_attrNode.txt")
#write.table(attrNode, title, sep = "\t", quote = F, row.names = F)
#tGRN発現データ-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
title <- paste0("~/bigdata/yasue/tGRN_Groping/inflation4/", condition[e], "/diffExpNetwork/", condition[e], "_tGRN_AvrallExp.txt")
write.table(allExp, title, sep = "\t", quote = F, row.names = T)
e <- e+1
}
|
c2282d0e0a9a3a4dbb8ce3fbe08675c1919f6005
|
4f26471b6b08d7e081dc863c8189e4b3a885de13
|
/man/embed_event_handler.Rd
|
d1a2d788f301dcebc48e3198e4e5cb26b2ca6520
|
[] |
no_license
|
strazto/mandrake
|
93ec83b79b57911c3f7b922a4c159c671cf3f0cc
|
63fff66e681b4a73f682348469662f29e60d04de
|
refs/heads/master
| 2023-04-10T21:26:51.366465
| 2021-04-22T06:40:59
| 2021-04-22T06:40:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,126
|
rd
|
embed_event_handler.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/js_handlers.R
\name{embed_event_handler}
\alias{embed_event_handler}
\title{(DEPRECATED) Handler for embedding data into the legend
\lifecycle{deprecated}}
\usage{
embed_event_handler(warn_deprecated = TRUE)
}
\description{
If attached as onSelect behaviour for a visNetwork graph,
will allow clicking of nodes to replace the legend for the graph
with metadata about that node.
Presently requires JQuery to operate, so may not work when launched from
R session, but does work in pkgdown.
It looks like this:
\preformatted{
function(props) {
node = this.body.data.nodes.get(props.nodes[0]);
cr = '\r\n';
alert(
'' + '
using mandrake::embed_event_handler() is deprecated.
please use mandrake::attach_dependencies &
on_select = "embed_event_handler()" when rendering graph
'
'selected ' + node.label + ':' + cr +
'=======' + cr +
'COLNAMES:' + cr +
node.on_select_col + cr +
'=============== '
);
}
}
}
\seealso{
Other js_handlers:
\code{\link{alert_event_handler}()}
}
\concept{deprecated}
\concept{js_handlers}
|
576157867674f2642837875d721923788dd611b6
|
fabe50401b923ec86291597fa0552c1d06def5d7
|
/Course 10 Data Science Capstone - Shrink Dataset.R
|
47a3f3545477f4c657d096cc4e9329d1bd87711b
|
[] |
no_license
|
jm-carson/Course10DataScienceCapstone
|
e72ed5ac1b89ccddb2447d1458a2103d7d751895
|
ea7ec654fbe752a0ab8caa23d49252a150c1b494
|
refs/heads/master
| 2021-04-15T08:54:45.387798
| 2018-03-29T18:01:53
| 2018-03-29T18:01:53
| 126,246,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
Course 10 Data Science Capstone - Shrink Dataset.R
|
# Course 10: Data Science Capstone
# John Hopkins Data Science Specialization
# Shrink data set
#
# Purpose: build a reduced text sample from the blogs and news corpora,
# strip it down to letters and apostrophes, collapse it to a single
# lower-case line, and write it out for downstream n-gram modelling.
# Run garbage collection to clear out memory that's no longer being used
gc()
# ThinkPad Yoga 260 Signature Edition
# Installed RAM 8.0 GB (7.87 GB usable)
# memory.limit() # Confirms memory limit
# ls() # list of R objects occupying RAM
# NOTE(review): rm(list = ls()) wipes the whole workspace; acceptable in a
# one-off interactive script, but dangerous if this file is ever sourced
# from another script.
rm(list = ls()) # clear objects from R memory that are no longer required
suppressWarnings(library("dplyr"))
suppressWarnings(library("tidyr"))
suppressWarnings(library("stringr"))
suppressWarnings(library("textclean"))
# Set working directory and load libraries
setwd("~/R/Course 10 - Data Science Capstone/Course10Capstone/Data")
# Load datasets: one character element per line, UTF-8 encoded
data_blogs <- scan(file="en_US.blogs.txt",what ="character", sep="\n",encoding = "UTF-8")
data_news <- scan(file="en_US.news.txt",what ="character", sep="\n",encoding = "UTF-8")
# data_twitter <- scan(file="en_US.twitter.txt",what ="character", sep="\n",encoding = "UTF-8")
# Bind data and take sample
# NOTE(review): rbind() on two character vectors builds a 2-row matrix and
# recycles the shorter vector (with a warning) if lengths differ; c() may
# have been intended — verify.
data_all <- rbind(data_blogs, data_news)
# Sampling fraction
n = 0.15
# NOTE(review): the sample size is n*length(data_blogs), not
# n*length(data_all) — confirm this is intentional.
all_sample <- sample(data_all, floor(n*length(data_blogs)), replace = FALSE)
# Remove everything but letters and apostrophes
all_sample <- strip(all_sample, digit.remove = TRUE, apostrophe.remove = FALSE, lower.case = FALSE)
# Remove all but one white space
all_sample <- stringr::str_replace_all(all_sample,"[\\s]+", " ")
# Collapse into single vector
all_sample <- all_sample %>% paste(collapse=" ")
# Make everything lowercase
all_sample <- all_sample %>% tolower()
# Write to csv
write.table(all_sample, file = "~/R/Course 10 - Data Science Capstone/TextPredDataProd/Data/all_sample.csv", row.names=FALSE, col.names = FALSE)
|
8183ef92e806cb8aa250588164bbf3b87aab53f8
|
572f54478ac60b1c6f560bd42102530f83b18c68
|
/FISH.R
|
193395d8ad469c4f05560400614e2709f9ab0695
|
[] |
no_license
|
erinboyle05/thesis_data
|
d9db1ae80dfcde1cf2bee79bad7ecc57d03e133f
|
e66aa7f037e045b23a648dabcc427fd4a61c079f
|
refs/heads/master
| 2021-01-11T01:09:01.002947
| 2016-10-27T19:46:30
| 2016-10-27T19:46:30
| 70,841,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 411
|
r
|
FISH.R
|
# Summarise FISH probe observations and plot counts of each probe per value.
# NOTE(review): assumes a data frame `FISHrawData` with columns `Probe` and
# `Values` exists in the calling environment, and that dplyr and ggplot2 are
# attached — confirm against whatever sources this file.
# ATM-probe rows, ordered by value
# NOTE(review): ATMdata and p53data are computed but not used further in
# this file.
ATMdata <- FISHrawData %>%
  filter(Probe == "ATM") %>%
  arrange(Values)
# p53-probe rows, ordered by value
p53data <- FISHrawData %>%
  filter(Probe == "p53") %>%
  arrange(Values)
# Rows for either probe, ordered by value
data <- FISHrawData %>%
  filter(Probe == "ATM" | Probe == "p53") %>%
  arrange(Values)
# Bar chart of probe counts, one facet per observed value
plot <- ggplot(data = data, aes(Probe)) +
  geom_bar()+
  facet_grid(~Values)
print(plot)
|
699a926f0e21b939857fa5c95b7376b0d3512275
|
f2bfd5ceae6bf32cebc28cf18740a8b44e010e7b
|
/pkg/retistruct/R/geometry.R
|
9512b37b1714deaeb184ef5fa56487c4802d6991
|
[] |
no_license
|
davidcsterratt/retistruct
|
602972d127b7119df3fda54ac915228d7ac854d1
|
f7075b0a8ac84fdc9773300d553c26a11b45ce2e
|
refs/heads/master
| 2023-08-09T20:08:39.039964
| 2023-07-29T09:27:35
| 2023-07-29T09:27:35
| 25,682,590
| 5
| 7
| null | 2017-07-29T09:14:58
| 2014-10-24T10:05:33
|
R
|
UTF-8
|
R
| false
| false
| 31,203
|
r
|
geometry.R
|
##
## Geometry functions
##
## Row-wise 3D cross product.
## x and y are coerced to 3-column matrices; row i of the result is the
## vector cross product of row i of x with row i of y.
extprod3d <- function(x, y) {
  a <- matrix(x, ncol = 3)
  b <- matrix(y, ncol = 3)
  cbind(a[, 2]*b[, 3] - a[, 3]*b[, 2],
        a[, 3]*b[, 1] - a[, 1]*b[, 3],
        a[, 1]*b[, 2] - a[, 2]*b[, 1])
}
##' Vector norm
##' @param X Vector or matrix.
##' @return For a vector, its Euclidean (2-)norm. For a matrix, a
##' vector containing the 2-norm of each row.
##' @author David Sterratt
##' @export
vecnorm <- function(X) {
  if (is.vector(X)) {
    sqrt(sum(X*X))
  } else {
    sqrt(rowSums(X*X))
  }
}
##' "Signed area" of triangles on a plane
##' @param P 3-column matrix of vertices of triangles
##' @param Pt 3-column matrix of indices of rows of \code{P} giving
##' triangulation
##' @return Vectors of signed areas of triangles. Positive sign
##' indicates points are anticlockwise direction; negative indicates
##' clockwise.
##' @author David Sterratt
##' @export
tri.area.signed <- function(P, Pt) {
if (ncol(P) != 3) {
stop("P must have 3 columns")
}
A <- P[Pt[,1],,drop=FALSE]
B <- P[Pt[,2],,drop=FALSE]
C <- P[Pt[,3],,drop=FALSE]
AB <- B - A
BC <- C - B
vp <- extprod3d(AB, BC)
return(0.5*sign(vp[,3])*vecnorm(vp))
}
##' Area of triangles on a plane
##' @param P 3-column matrix of vertices of triangles
##' @param Pt 3-column matrix of indices of rows of \code{P} giving
##' triangulation
##' @return Vector of (unsigned) triangle areas
##' @author David Sterratt
##' @export
tri.area <- function(P, Pt) {
  ## Unsigned area is just the magnitude of the signed area
  abs(tri.area.signed(P, Pt))
}
##' This uses L'Huilier's theorem to compute the spherical excess and
##' hence the area of the spherical triangle.
##'
##' @title Area of triangles on a sphere
##' @param P 2-column matrix of vertices of triangles given in
##' spherical polar coordinates. Columns need to be labelled
##' \code{phi} (latitude) and \code{lambda} (longitude).
##' @param Pt 3-column matrix of indices of rows of \code{P} giving
##' triangulation
##' @return Vector of areas of triangles in units of steradians
##' @source Wolfram MathWorld
##' \url{http://mathworld.wolfram.com/SphericalTriangle.html} and
##' \url{http://mathworld.wolfram.com/SphericalExcess.html}
##' @author David Sterratt
##' @examples
##' ## Something that should be an eighth of a sphere, i.e. pi/2
##' P <- cbind(phi=c(0, 0, pi/2), lambda=c(0, pi/2, pi/2))
##' Pt <- cbind(1, 2, 3)
##' ## The result of this should be 0.5
##' print(sphere.tri.area(P, Pt)/pi)
##'
##' ## Now a small triangle
##' P1 <- cbind(phi=c(0, 0, 0.01), lambda=c(0, 0.01, 0.01))
##' Pt1 <- cbind(1, 2, 3)
##' ## The result of this should be approximately 0.01^2/2
##' print(sphere.tri.area(P1, Pt1)/(0.01^2/2))
##'
##' ## Now check that it works for both
##' P <- rbind(P, P1)
##' Pt <- rbind(1:3, 4:6)
##' ## Should have two components
##' print(sphere.tri.area(P, Pt))
##' @export
sphere.tri.area <- function(P, Pt) {
  ## Great-circle length of the side joining vertex columns i and j of
  ## each triangle
  side <- function(i, j) {
    central.angle(P[Pt[, i], "phi"], P[Pt[, i], "lambda"],
                  P[Pt[, j], "phi"], P[Pt[, j], "lambda"])
  }
  a <- side(1, 2)
  b <- side(2, 3)
  c <- side(3, 1)
  ## Semiperimeter
  s <- 1/2*(a + b + c)
  ## L'Huilier's theorem: the spherical excess equals the area on the
  ## unit sphere (steradians)
  E <- 4*atan(sqrt(tan(s/2)*tan((s - a)/2)*tan((s - b)/2)*tan((s - c)/2)))
  unname(E)
}
##' Determine the intersection of two lines L1 and L2 in two dimensions,
##' using the formula described by Weisstein.
##'
##' @title Determine intersection between two lines
##' @param P1 vector containing x,y coordinates of one end of L1
##' @param P2 vector containing x,y coordinates of other end of L1
##' @param P3 vector containing x,y coordinates of one end of L2
##' @param P4 vector containing x,y coordinates of other end of L2
##' @param interior.only boolean flag indicating whether only
##' intersections inside L1 and L2 should be returned.
##' @return Vector containing x,y coordinates of intersection of L1
##' and L2. If L1 and L2 are parallel, this is infinite-valued. If
##' \code{interior.only} is \code{TRUE}, then when the intersection
##' does not occur between P1 and P2 and P3 and P4, a vector
##' containing \code{NA}s is returned.
##' @source Weisstein, Eric W. "Line-Line Intersection."
##' From MathWorld--A Wolfram Web Resource.
##' \url{http://mathworld.wolfram.com/Line-LineIntersection.html}
##' @author David Sterratt
##' @export
##' @examples
##' ## Intersection of two intersecting lines
##' line.line.intersection(c(0, 0), c(1, 1), c(0, 1), c(1, 0))
##'
##' ## Two lines that don't intersect
##' line.line.intersection(c(0, 0), c(0, 1), c(1, 0), c(1, 1))
line.line.intersection <- function(P1, P2, P3, P4, interior.only=FALSE) {
  P1 <- as.vector(P1)
  P2 <- as.vector(P2)
  P3 <- as.vector(P3)
  P4 <- as.vector(P4)
  ## 2x2 determinant, written out explicitly
  det2 <- function(a, b, c, d) a*d - b*c
  ## Coordinate differences along each line
  dx1 <- P1[1] - P2[1]
  dy1 <- P1[2] - P2[2]
  dx2 <- P3[1] - P4[1]
  dy2 <- P3[2] - P4[2]
  ## Denominator of the intersection formula: zero iff the lines are
  ## parallel
  D <- det2(dx1, dy1, dx2, dy2)
  if (D == 0) {
    return(c(Inf, Inf))
  }
  D1 <- det2(P1[1], P1[2], P2[1], P2[2])
  D2 <- det2(P3[1], P3[2], P4[1], P4[2])
  X <- det2(D1, dx1, D2, dx2)/D
  Y <- det2(D1, dy1, D2, dy2)/D
  if (interior.only) {
    ## Fractional position of the intersection along L1 and L2; the
    ## crossing lies inside both segments only when both fractions are
    ## strictly between 0 and 1
    lambda1 <- -((X - P1[1])*dx1 + (Y - P1[2])*dy1)/(dx1^2 + dy1^2)
    lambda2 <- -((X - P3[1])*dx2 + (Y - P3[2])*dy2)/(dx2^2 + dy2^2)
    if (!(lambda1 > 0 && lambda1 < 1 &&
          lambda2 > 0 && lambda2 < 1)) {
      return(c(NA, NA))
    }
  }
  c(X, Y)
}
##' This is similar to unique(), but spares rows which are duplicated, but
##' at different points in the matrix. The matrix is treated as a closed
##' path, so a last row identical to the first row is also removed.
##'
##' @title Remove identical consecutive rows from a matrix
##' @param P Source matrix
##' @return Matrix with identical consecutive rows removed.
##' @author David Sterratt
remove.identical.consecutive.rows <- function(P) {
  if (!is.matrix(P)) {
    stop("P is not a matrix; it should be")
  }
  if (nrow(P) == 0) {
    stop("P has no rows")
  }
  ## A single row has no consecutive pair to compare (comparing row 1
  ## with itself would wrongly delete it), so return it unchanged.
  if (nrow(P) < 2) {
    return(P)
  }
  ## Wrap-around case: last row duplicates the first.
  ## drop=FALSE keeps the result a matrix when only one row remains,
  ## which the recursive call requires.
  if (identical(P[1,], P[nrow(P),])) {
    return(remove.identical.consecutive.rows(P[-nrow(P), , drop=FALSE]))
  }
  for (i in 2:nrow(P)) {
    if (identical(P[i-1,], P[i,])) {
      return(remove.identical.consecutive.rows(P[-i, , drop=FALSE]))
    }
  }
  return(P)
}
##' Suppose segments AB and CD intersect. Point B is replaced by the
##' intersection point, defined B'. Point C is replaced by a point C'
##' on the line B'D. The maximum distance of B'C' is given by the
##' parameter d. If the distance l B'D is less than 2d, the distance
##' B'C' is l/2.
##'
##' @title Remove intersections between adjacent segments in a closed
##' path
##' @param P The points, as a 2-column matrix
##' @param d Criterion for maximum distance when points are inserted
##' @return A new closed path without intersections
##' @author David Sterratt
##' @export
remove.intersections <- function(P, d=50) {
  N <- nrow(P)
  ## Walk around the closed path, testing each segment (i, i+1) against
  ## the next-but-one segment (i+2, i+3).
  ## NOTE(review): mod1() is defined elsewhere in this package;
  ## presumably it wraps indices into 1..N — confirm.
  for (i in 1:N) {
    ## Interior-only intersection of segment (i, i+1) with segment
    ## (i+2, i+3); (NA, NA) when they do not cross, (Inf, Inf) when
    ## parallel
    R <- line.line.intersection(P[i,], P[mod1(i+1, N),],
                                P[mod1(i+2, N),], P[mod1(i+3, N),],
                                interior.only=TRUE)
    ## Degenerate case: points i+1 and i+2 coincide, so take their
    ## common location as the "intersection"
    if (identical(P[mod1(i+1, N),], P[mod1(i+2, N),])) {
      R <- P[mod1(i+1, N),]
    }
    ## A finite R means an intersection (or coincidence) was found
    if (is.finite(R[1])) {
      message("Intersection found. Old points:")
      message(paste(" ", (P[i,])))
      message(paste(" ", (P[mod1(i+1, N),])))
      message(paste(" ", (P[mod1(i+2, N),])))
      message(paste(" ", (P[mod1(i+3, N),])))
      ## Move point i+1 (B) onto the intersection point (B')
      P[mod1(i+1, N),] <- R
      message("Point i+1 has been changed:")
      message(paste(" ", (P[i,])))
      message(paste(" ", (P[mod1(i+1, N),])))
      message(paste(" ", (P[mod1(i+2, N),])))
      message(paste(" ", (P[mod1(i+3, N),])))
      ## Place point i+2 (C') on the segment from the new i+1 (B') to
      ## i+3 (D): at distance d from B' when the segment is longer than
      ## 2d, otherwise at its midpoint
      l <- vecnorm(P[mod1(i+1, N),] - P[mod1(i+3, N),])
      if (l > 2*d) {
        a <- d/l
      } else {
        a <- 0.5
      }
      message(paste(" ", (paste("a=", a))))
      message(paste(" ", (paste("l=", l))))
      P[mod1(i+2, N),] <- a*P[mod1(i+1, N),] + (1-a)*P[mod1(i+3, N),]
      message("New points:")
      message(paste(" ", (P[i,])))
      message(paste(" ", (P[mod1(i+1, N),])))
      message(paste(" ", (P[mod1(i+2, N),])))
      message(paste(" ", (P[mod1(i+3, N),])))
    }
  }
  return(P)
}
##' Return points on the unit circle in an anti-clockwise
##' direction. If \code{L} is not specified \code{n} points are
##' returned. If \code{L} is specified, the same number of points are
##' returned as there are elements in \code{L}, the interval between
##' successive points being proportional to \code{L}.
##'
##' @title Return points on the unit circle
##' @param n Number of points
##' @param L Intervals between points
##' @return The cartesian coordinates of the points
##' @author David Sterratt
circle <- function(n=12, L=NULL) {
  if (is.null(L)) {
    ## n equally-spaced angles, starting at 0
    angles <- (seq_len(n) - 1)/n*2*pi
  } else {
    ## Angles spaced in proportion to L, with the first point at 0
    angles <- (cumsum(L) - L[1])/sum(L)*2*pi
  }
  cbind(x=cos(angles), y=sin(angles))
}
##' Find the intersections of the plane defined by the normal \code{n} and the
##' distance \code{d} expressed as a fractional distance along the side of
##' each triangle.
##'
##' @title Find the intersection of a plane with edges of triangles on
##' a sphere
##' @param phi Latitude of grid points on sphere centred on origin.
##' @param lambda Longitude of grid points on sphere centred on origin.
##' @param T Triangulation
##' @param n Normal of plane
##' @param d Distance of plane along normal from origin.
##' @return Matrix with same dimensions as \code{T}. Each row gives
##' the intersection of the plane with the corresponding triangle in
##' \code{T}. Column 1 gives the fractional distance from vertex 2 to
##' vertex 3. Column 2 gives the fractional distance from vertex 3 to
##' vertex 1. Column 3 gives the fractional distance from vertex 1 to
##' vertex 2. A value of \code{NaN} indicates that the corresponding
##' edge lies in the plane. A value of \code{Inf} indicates that the
##' edge lies parallel to the plane but outside it.
##' @author David Sterratt
compute.intersections.sphere <- function(phi, lambda, T, n, d) {
  ## Cartesian coordinates of the mesh points on the unit sphere
  P <- cbind(cos(phi)*cos(lambda),
             cos(phi)*sin(lambda),
             sin(phi))
  ## Fractional position along the edge from vertex column i to vertex
  ## column j at which the plane (x . n = d) cuts the edge
  frac <- function(i, j) {
    (d - P[T[,i],] %*% n)/((P[T[,j],] - P[T[,i],]) %*% n)
  }
  cbind(frac(2, 3), frac(3, 1), frac(1, 2))
}
##' Convert locations of points on sphere in spherical coordinates to
##' points in 3D cartesian space
##'
##' @title Convert from spherical to Cartesian coordinates
##' @param Ps N-by-2 matrix with columns containing latitudes
##' (\code{phi}) and longitudes (\code{lambda}) of N points
##' @param R radius of sphere
##' @return An N-by-3 matrix in which each row is the cartesian (X, Y,
##' Z) coordinates of each point
##' @author David Sterratt
sphere.spherical.to.sphere.cart <- function(Ps, R=1) {
  cosphi <- cos(Ps[,"phi"])
  ## Standard spherical-to-Cartesian mapping, scaled by the radius
  P <- cbind(R*cosphi*cos(Ps[,"lambda"]),
             R*cosphi*sin(Ps[,"lambda"]),
             R*sin(Ps[,"phi"]))
  colnames(P) <- c("X", "Y", "Z")
  P
}
##' Given a triangular mesh on a sphere described by mesh locations
##' (\code{phi}, \code{lambda}), a radius \code{R} and a triangulation
##' \code{Tt}, determine the Cartesian coordinates of points \code{cb}
##' given in barycentric coordinates with respect to the mesh.
##'
##' @title Convert barycentric coordinates of points in mesh on sphere
##' to cartesian coordinates
##' @param phi Latitudes of mesh points
##' @param lambda Longitudes of mesh points
##' @param R Radius of sphere
##' @param Tt Triangulation
##' @param cb Object returned by tsearch containing information on the
##' triangle in which a point occurs and the barycentric coordinates
##' within that triangle
##' @return An N-by-3 matrix of the Cartesian coordinates of the points
##' @author David Sterratt
##' @importFrom geometry bary2cart
##' @export
bary.to.sphere.cart <- function(phi, lambda, R, Tt, cb) {
  npts <- nrow(cb$p)
  ## Output matrix, one Cartesian point per barycentric point
  cc <- matrix(NA, npts, 3)
  colnames(cc) <- c("X", "Y", "Z")
  if (npts == 0) {
    return(cc)
  }
  ## Cartesian coordinates of the mesh vertices
  P <- sphere.spherical.to.sphere.cart(cbind(phi=phi, lambda=lambda), R)
  ## Convert each point from barycentric coordinates within its
  ## containing triangle to Cartesian coordinates
  for (i in seq_len(npts)) {
    cc[i,] <- bary2cart(P[Tt[cb$idx[i],],], cb$p[i,])
  }
  cc
}
##' Convert locations on the surface of a sphere in cartesian
##' (X, Y, Z) coordinates to spherical (phi, lambda) coordinates.
##'
##' It is assumed that all points are lying on the surface of a sphere
##' of radius R. (The radius is not needed to recover the angles, so
##' \code{R} is accepted for interface symmetry but unused.)
##' @title Convert from Cartesian to spherical coordinates
##' @param P locations of points on sphere as N-by-3 matrix with
##' labelled columns "X", "Y" and "Z"
##' @param R radius of sphere
##' @return N-by-2 Matrix with columns ("phi" and "lambda") of
##' locations of points in spherical coordinates
##' @author David Sterratt
##' @export
sphere.cart.to.sphere.spherical <- function(P, R=1) {
  ## geometry::cart2sph labels its angle columns "theta" (azimuth) and
  ## "phi" (elevation); relabel to this file's (phi, lambda) convention
  sph <- geometry::cart2sph(P)
  cbind(phi = sph[,"phi"],
        lambda = sph[,"theta"])
}
##' Project spherical coordinate system \eqn{(\phi, \lambda)} to a polar
##' coordinate system \eqn{(\rho, \lambda)} such that the area of each
##' small region is preserved.
##'
##' This requires \deqn{R^2\delta\phi\cos\phi\delta\lambda =
##' \rho\delta\rho\delta\lambda}. Hence \deqn{R^2\int^{\phi}_{-\pi/2}
##' \cos\phi' d\phi' = \int_0^{\rho} \rho' d\rho'}. Solving gives
##' \eqn{\rho^2/2=R^2(\sin\phi+1)} and hence
##' \deqn{\rho=R\sqrt{2(\sin\phi+1)}}.
##'
##' As a check, consider that total area needs to be preserved. If
##' \eqn{\rho_0} is maximum value of new variable then
##' \eqn{A=2\pi R^2(\sin(\phi_0)+1)=\pi\rho_0^2}. So
##' \eqn{\rho_0=R\sqrt{2(\sin\phi_0+1)}}, which agrees with the formula
##' above.
##' @title Convert latitude on sphere to radial variable in
##' area-preserving projection
##' @param phi Latitude
##' @param R Radius
##' @return Coordinate \code{rho} that has the dimensions of length
##' @author David Sterratt
spherical.to.polar.area <- function(phi, R=1) {
  ## rho = R*sqrt(2*(1 + sin(phi))) per the derivation above
  rho <- R*sqrt(2*(1 + sin(phi)))
  rho
}
##' This is the inverse of \code{\link{polar.cart.to.sphere.spherical}}
##'
##' @title Convert spherical coordinates on sphere to polar
##' projection in Cartesian coordinates
##' @param r 2-column Matrix of spherical coordinates of points on
##' sphere. Column names are \code{phi} and \code{lambda}.
##' @param pa If \code{TRUE}, make this an area-preserving projection
##' @param preserve Quantity to preserve locally in the
##' projection. Options are \code{latitude}, \code{area} or
##' \code{angle}
##' @return 2-column Matrix of Cartesian coordinates of points on polar
##' projection. Column names should be \code{x} and \code{y}
##' @author David Sterratt
##' @export
sphere.spherical.to.polar.cart <- function(r, pa=FALSE, preserve="latitude") {
  ## FIXME: This function should be deprecated in favour of
  ## azimuthal.equalarea and azimuthal.equidistant in projections.R
  ## The pa flag is a legacy shortcut for preserve="area"
  if (pa)
    preserve <- "area"
  sinphi <- sin(r[,"phi"])
  ## Radial coordinate of the projection, depending on what is
  ## preserved:
  ##   area:     rho = sqrt(2*(1 + sin(phi)))
  ##   angle:    rho = sqrt(2*(1 + sin(phi))/(1 - sin(phi)))
  ##   latitude: rho = pi/2 + phi
  rho <- switch(preserve,
                area = sqrt(2*(1 + sinphi)),
                angle = sqrt(2*(1 + sinphi)/(1 - sinphi)),
                latitude = pi/2 + r[,"phi"],
                stop(paste("preserve argument", preserve, "not recognised")))
  cbind(x = rho*cos(r[,"lambda"]),
        y = rho*sin(r[,"lambda"]))
}
##' This is the inverse of \code{\link{sphere.spherical.to.polar.cart}}
##'
##' @title Convert polar projection in Cartesian coordinates to
##' spherical coordinates on sphere
##' @param r 2-column Matrix of Cartesian coordinates of points on
##' polar projection. Column names should be \code{x} and \code{y}
##' @param pa If \code{TRUE}, make this an area-preserving projection
##' @param preserve Quantity to preserve locally in the
##' projection. Options are \code{latitude}, \code{area} or
##' \code{angle}
##' @return 2-column Matrix of spherical coordinates of points on
##' sphere. Column names are \code{phi} and \code{lambda}.
##' @author David Sterratt
##' @export
polar.cart.to.sphere.spherical <- function(r, pa=FALSE, preserve="latitude") {
  ## FIXME: This function should be deprecated in favour of as-yet
  ## unwritten functions azimuthal.equalarea.inverse and
  ## azimuthal.equidistant.inverse in projections.R
  ## Sentinel: phi stays NULL unless a recognised 'preserve' branch
  ## sets it. (Previously the sentinel was assigned to 'rho' while
  ## 'phi' was tested, so an unrecognised 'preserve' raised
  ## "object 'phi' not found" instead of the intended error.)
  phi <- NULL
  ## The pa flag is a legacy shortcut for preserve="area"
  if (pa)
    preserve <- "area"
  ## Squared radial coordinate of the projection
  rho2 <- r[,"x"]^2 + r[,"y"]^2
  if (preserve=="area") {
    ## Need to make sure that the argument is not greater than 1. This
    ## can happen when passing values produced from a Cartesian grid
    sinphi <- rho2/2 - 1
    sinphi[sinphi>1] <- NA
    phi <- asin(sinphi)
  }
  if (preserve=="angle") {
    ## phi = asin((rho^2/alpha^2-2)/(rho^2/alpha^2+2))
    phi <- asin((rho2 - 2)/(rho2 + 2))
  }
  if (preserve=="latitude") {
    phi <- sqrt(rho2) - pi/2
  }
  if (is.null(phi))
    stop(paste("preserve argument", preserve, "not recognised"))
  lambda <- atan2(r[,"y"], r[,"x"])
  return(cbind(phi=phi, lambda=lambda))
}
##' Convert azimuth-elevation coordinates to spherical coordinates
##' @param r Coordinates of points in azimuth-elevation coordinates
##' represented as 2 column matrix with column names \code{alpha}
##' (elevation) and \code{theta} (azimuth).
##' @param r0 Direction of the axis of the sphere on which to project
##' represented as a 2 column matrix of with column names \code{alpha}
##' (elevation) and \code{theta} (azimuth).
##' @return 2-column matrix of spherical coordinates of points with
##' column names \code{psi} (colatitude) and \code{lambda} (longitude).
##' @author David Sterratt
##' @examples
##' r0 <- cbind(alpha=0, theta=0)
##' r <- rbind(r0, r0+c(1,0), r0-c(1,0), r0+c(0,1), r0-c(0,1))
##' azel.to.sphere.colatitude(r, r0)
##' @export
azel.to.sphere.colatitude <- function(r, r0) {
  ## Find Cartesian coordinates of azimuth and elevation on sphere
  rc <- cbind(cos(r[,"alpha"])*sin(r[,"theta"]),
              cos(r[,"alpha"])*cos(r[,"theta"]),
              sin(r[,"alpha"]))
  ## Find Cartesian coordinates of the projection axis on sphere
  r0c <- cbind(cos(r0[,"alpha"])*sin(r0[,"theta"]),
               cos(r0[,"alpha"])*cos(r0[,"theta"]),
               sin(r0[,"alpha"]))
  ## Colatitude: angle each point makes with the axis. Clamp the dot
  ## product to [-1, 1] to guard acos() against rounding error.
  cosang <- rc %*% t(r0c)
  cosang[cosang > 1] <- 1
  cosang[cosang < -1] <- -1
  psi <- acos(cosang)
  ## Longitude: project points onto the plane perpendicular to the
  ## axis and take the angle within that plane
  pc <- rbind(cbind(-cos(r0[,"theta"]),
                    sin(r0[,"theta"]),
                    0),
              cbind(-sin(r0[,"alpha"])*sin(r0[,"theta"]),
                    -sin(r0[,"alpha"])*cos(r0[,"theta"]),
                    cos(r0[,"alpha"]))) %*% t(rc)
  ## (Leftover debugging print() calls of r0c, rc and t(pc) removed)
  lambdap <- atan2(pc[2,], pc[1,])
  out <- cbind(psi, lambdap)
  colnames(out) <- c("psi", "lambda")
  return(out)
}
##' Rotate points on a sphere by specifying the direction that its
##' polar axis, i.e. the axis going through (90, 0), should point
##' after (a) a rotation about an axis through the points (0, 0) and
##' (0, 180) and (b) rotation about the original polar axis.
##' @title Rotate axis of sphere
##' @param r Coordinates of points in spherical coordinates
##' represented as 2 column matrix with column names \code{phi}
##' (latitude) and \code{lambda} (longitude).
##' @param r0 Direction of the polar axis of the sphere on which to project
##' represented as a 2 column matrix of with column names \code{phi}
##' (latitude) and \code{lambda} (longitude).
##' @return 2-column matrix of spherical coordinates of points with
##' column names \code{phi} (latitude) and \code{lambda} (longitude).
##' @author David Sterratt
##' @examples
##' r0 <- cbind(phi=0, lambda=-pi/2)
##' r <- rbind(r0, r0+c(1,0), r0-c(1,0), r0+c(0,1), r0-c(0,1))
##' r <- cbind(phi=pi/2, lambda=0)
##' rotate.axis(r, r0)
##' @export
rotate.axis <- function(r, r0) {
  ## Work in Cartesian coordinates on the sphere
  P <- sphere.spherical.to.sphere.cart(r)
  ## If the requested pole is already the north pole there is nothing
  ## to rotate; converting back to spherical coordinates below still
  ## normalises the angles into the range (-pi, pi].
  ## NOTE(review): exact floating-point comparison with pi/2 -- values
  ## within rounding error of pi/2 will take the rotation branch.
  if (r0[,"phi"] != pi/2) {
    ## Rotation about the x-axis (the equatorial axis through the
    ## 0 degrees meridian) lowering the pole by dp
    dp <- pi/2 - r0[,"phi"]
    Rx <- rbind(c(1,        0,       0),
                c(0,  cos(dp), sin(dp)),
                c(0, -sin(dp), cos(dp)))
    ## The x-rotation takes the north pole to longitude -90 degrees,
    ## so rotate about the z-axis by a further 90 degrees on top of
    ## the target longitude
    dl <- r0[,"lambda"] + pi/2
    Rz <- rbind(c( cos(dl), sin(dl), 0),
                c(-sin(dl), cos(dl), 0),
                c(       0,       0, 1))
    P <- P %*% Rx %*% Rz
    colnames(P) <- c("X", "Y", "Z")
  }
  return(sphere.cart.to.sphere.spherical(P))
}
##' Bring angle into range
##' @param theta Angle to bring into range \code{[-pi, pi]}
##' @return Normalised angle
##' @author David Sterratt
##' @export
normalise.angle <- function(theta) {
  ## Indices of non-missing angles lying outside (-pi, pi)
  out.of.range <- which(!is.na(theta) & (theta < -pi | theta > pi))
  if (length(out.of.range) > 0) {
    ## Shift by pi, wrap modulo 2*pi, and shift back
    theta[out.of.range] <- ((theta[out.of.range] + pi) %% (2*pi)) - pi
  }
  return(theta)
}
##' This in the inverse of \code{\link{sphere.cart.to.sphere.wedge}}
##'
##' @title Convert from 'wedge' to Cartesian coordinates
##' @param psi vector of slice angles of N points
##' @param f vector of fractional distances of N points
##' @param phi0 rim angle as colatitude
##' @param R radius of sphere
##' @return An N-by-3 matrix in which each row is the cartesian (X, Y,
##' Z) coordinates of each point
##' @export
##' @author David Sterratt
sphere.wedge.to.sphere.cart <- function(psi, f, phi0, R=1) {
  sp <- sin(psi)
  cp <- cos(psi)
  ## Radius of the circle in which the wedge plane cuts the unit sphere
  rad <- sqrt(sin(phi0)^2 + cos(phi0)^2*cp^2)
  ## Centre of that circle
  y0 <- -sp*cp*cos(phi0)
  z0 <- -sp*sp*cos(phi0)
  ## Angle around the circle at which the rim (f = 0) lies;
  ## f = 1 corresponds to the angle 2*pi - alpha0
  alpha0 <- asin(sin(phi0)/rad)
  alpha <- alpha0 + f*(2*pi - 2*alpha0)
  ## Scale back up to a sphere of radius R
  P <- cbind(X = R*rad*sin(alpha),
             Y = R*(y0 - rad*sp*cos(alpha)),
             Z = R*(z0 + rad*cp*cos(alpha)))
  return(P)
}
##' Convert points in 3D cartesian space to locations of points on
##' sphere in 'wedge' coordinates (\var{psi}, \var{f}). Wedges are
##' defined by planes inclined at an angle \var{psi} running through a
##' line between poles on the rim above the x axis. \var{f} is the
##' fractional distance along the circle defined by the intersection
##' of this plane and the curtailed sphere.
##'
##' @title Convert from Cartesian to 'wedge' coordinates
##' @param P locations of points on sphere as N-by-3 matrix with
##' labelled columns "X", "Y" and "Z"
##' @param phi0 rim angle as colatitude
##' @param R radius of sphere
##' @return 2-column Matrix of 'wedge' coordinates of points on
##' sphere. Column names are \code{psi} and \code{f}.
##' @export
##' @author David Sterratt
sphere.cart.to.sphere.wedge <- function(P, phi0, R=1) {
  ## Sanity check: points should lie close to a sphere of radius R.
  ## Coordinates produced by bary.to.sphere.cart lie on the triangles
  ## approximating the sphere rather than the sphere itself, hence the
  ## 10% tolerance.
  Rs <- sqrt(rowSums(P^2))
  if (any(abs(Rs - R)/R > 0.1)) {
    print(abs(Rs - R)/R)
    stop("Points do not lie approximately on unit sphere")
  }
  ## Work on the unit sphere from here on
  P <- P/Rs
  ## Wedge angle, folded into [-pi/2, pi/2] (with a small tolerance
  ## for rounding error)
  psi <- atan2(P[,"Y"], - cos(phi0) - P[,"Z"])
  too.high <- psi > pi/2 + 1E-6
  psi[too.high] <- psi[too.high] - pi
  too.low <- psi < -pi/2 - 1E-6
  psi[too.low] <- psi[too.low] + pi
  ## Radius and centre of the circle cut by the wedge plane
  rad <- sqrt(sin(phi0)^2 + cos(phi0)^2*cos(psi)^2)
  y0 <- -sin(psi)*cos(psi)*cos(phi0)
  z0 <- -sin(psi)*sin(psi)*cos(phi0)
  ## Component of the point within the wedge plane, perpendicular to x
  v <- -(P[,"Y"] - y0)*sin(psi) + (P[,"Z"] - z0)*cos(psi)
  ## Angle around the circle, mapped onto [0, 2*pi) so that f grows
  ## monotonically along the arc
  ## FIXME: we could avoid this by defining the angle \alpha differently
  alpha <- atan2(P[,"X"], v)
  neg <- alpha < 0
  alpha[neg] <- alpha[neg] + 2*pi
  ## Fractional distance between the rim crossings at alpha0 and
  ## 2*pi - alpha0
  alpha0 <- asin(sin(phi0)/rad)
  f <- (alpha - alpha0)/(2*pi - 2*alpha0)
  Pw <- cbind(psi=psi, f=f)
  rownames(Pw) <- NULL
  ## f should lie in [0, 1] by construction; fail loudly if it does not
  if (any(f > 1) || any(f < 0)) {
    print(Pw)
    stop("f is out of bounds")
  }
  return(Pw)
}
##' Convert points in 3D cartesian space to locations of points on
##' sphere in \sQuote{dual-wedge} coordinates (\var{fx}, \var{fy}). Wedges
##' are defined by planes inclined at angle running through a line
##' between poles on the rim above the x axis or the y-axis. \var{fx}
##' and \var{fy} are the fractional distances along the circle defined
##' by the intersection of this plane and the curtailed sphere.
##'
##' @title Convert from Cartesian to \sQuote{dual-wedge} coordinates
##' @param P locations of points on sphere as N-by-3 matrix with
##' labelled columns \code{X}, \code{Y} and \code{Z}
##' @param phi0 rim angle as colatitude
##' @param R radius of sphere
##' @return 2-column Matrix of \sQuote{dual-wedge} coordinates of points
##' on sphere. Column names are \code{fx} and \code{fy}.
##' @export
##' @author David Sterratt
sphere.cart.to.sphere.dualwedge <- function(P, phi0, R=1) {
  ## Fractional distance for wedges hinged on the rim line above the
  ## x-axis
  fx <- sphere.cart.to.sphere.wedge(P, phi0, R=R)[,"f"]
  ## Rotate the points 90 degrees about the z-axis so the same
  ## computation yields the fraction for wedges hinged above the y-axis
  Q <- cbind(X=P[,"Y"], Y=-P[,"X"], Z=P[,"Z"])
  fy <- sphere.cart.to.sphere.wedge(Q, phi0, R=R)[,"f"]
  Pw <- cbind(fx=fx, fy=fy)
  rownames(Pw) <- NULL
  return(Pw)
}
##' On a sphere the central angle between two points is defined as the
##' angle whose vertex is the centre of the sphere and that subtends
##' the arc formed by the great circle between the points. This
##' function computes the central angle for two points \eqn{(\phi_1,
##' \lambda_1)}{(phi1, lambda1)} and \eqn{(\phi_2,\lambda_2)}{(phi2,
##' lambda2)}.
##'
##' @title Central angle between two points on a sphere
##' @param phi1 Latitude of first point
##' @param lambda1 Longitude of first point
##' @param phi2 Latitude of second point
##' @param lambda2 Longitude of second point
##' @return Central angle
##' @source Wikipedia \url{http://en.wikipedia.org/wiki/Central_angle}
##' @author David Sterratt
##' @export
central.angle <- function(phi1, lambda1, phi2, lambda2) {
  ## Spherical law of cosines: cosine of the angle between the two
  ## position vectors
  cos.angle <- sin(phi1)*sin(phi2) +
    cos(phi1)*cos(phi2)*cos(lambda1 - lambda2)
  return(acos(cos.angle))
}
##' The Karcher mean of a set of points on a manifold is defined as
##' the point whose sum of squared Riemann distances to the points is
##' minimal. On a sphere using spherical coordinates this distance
##' can be computed using the formula for central angle.
##'
##' @title Karcher mean on the sphere
##' @param x Matrix of points on sphere as N-by-2 matrix with labelled
##' columns \code{phi} (latitude) and \code{lambda} (longitude)
##' @param na.rm logical value indicating whether \code{NA} values should
##' be stripped before the computation proceeds.
##' @param var logical value indicating whether variance should be
##' returned too.
##' @return Vector of means with components named \code{phi} and
##' \code{lambda}. If \code{var} is \code{TRUE}, a list containing
##' mean and variance in elements \code{mean} and \code{var}.
##' @references Heo, G. and Small, C. G. (2006). Form representations
##' and means for landmarks: A survey and comparative
##' study. \emph{Computer Vision and Image Understanding},
##' 102:188-203.
##' @seealso \code{\link{central.angle}}
##' @author David Sterratt
##' @export
karcher.mean.sphere <- function(x, na.rm=FALSE, var=FALSE) {
  if (na.rm) {
    x <- na.omit(x)
  }
  ## Default output values, used when x has zero rows
  mu <- c(phi=NA, lambda=NA)
  sigma2 <- c(phi=NA, lambda=NA)
  ## With exactly one point the mean is the point itself and the
  ## variance is undefined (this branch prevents a crash in the
  ## general code below)
  if (nrow(x) == 1) {
    mu <- c(x[1,])
    sigma2 <- c(phi=NA, lambda=NA)
  }
  ## General case: two or more points
  if (nrow(x) >= 2) {
    ## Initial estimate of the mean: centroid of the points in 3D
    ## Cartesian coordinates, converted back to latitude/longitude
    P <- cbind(cos(x[,"phi"])*cos(x[,"lambda"]),
               cos(x[,"phi"])*sin(x[,"lambda"]),
               sin(x[,"phi"]))
    N <- nrow(P)
    P.mean <- apply(P, 2, mean)
    phi.mean <- asin(P.mean[3])
    lambda.mean <- atan2(P.mean[2], P.mean[1])
    ## Refine the estimate by minimising the sum of squared
    ## great-circle (central-angle) distances to the points
    if (all(!is.nan(c(phi.mean, lambda.mean)))) {
      opt <- stats::optim(c(phi.mean, lambda.mean),
                          function(p) { sum((central.angle(x[,"phi"], x[,"lambda"], p[1], p[2]))^2) })
      mu <- opt$par
      names(mu) <- c("phi", "lambda")
      ## NOTE(review): sigma2 here is a scalar, whereas the zero-row
      ## and one-row branches leave it as a named length-2 vector --
      ## confirm callers tolerate both shapes
      sigma2 <- opt$value/N
    } else {
      ## The initial estimate was degenerate (NaN angles), so the mean
      ## is undefined.
      ## NOTE(review): these are 1x2 matrices, unlike the named
      ## vectors returned by the other branches -- confirm intended
      mu <- cbind(phi=NaN, lambda=NaN)
      sigma2 <- cbind(phi=NaN, lambda=NaN)
    }
  }
  ## Assemble output: mean alone, or mean plus variance
  if (var) {
    X <- list(mean=mu, var=sigma2)
  } else {
    X <- mu
  }
  return(X)
}
##' Create grid on projection of hemisphere onto plane
##' @param pa If \code{TRUE}, make this an area-preserving projection
##' @param res Resolution of grid
##' @param phi0 Value of \code{phi0} at edge of grid
##' @return List containing:
##' \item{\code{s}}{Grid locations in spherical coordinates}
##' \item{\code{c}}{Grid locations in Cartesian coordinates on plane}
##' \item{\code{xs}}{X grid line locations in Cartesian coordinates on plane}
##' \item{\code{ys}}{Y grid line locations in Cartesian coordinates on plane}
##' @author David Sterratt
##' @export
create.polar.cart.grid <- function(pa, res, phi0) {
  ## The hemisphere rim at colatitude phi0 projects to a circle of
  ## this radius in the plane
  lim <- sphere.spherical.to.polar.cart(cbind(phi=phi0, lambda=0), pa)[1,"x"]
  xs <- seq(-lim, lim, len=res)
  ys <- seq(-lim, lim, len=res)
  ## All res*res grid combinations as a res*res-by-2 matrix, flattened
  ## column-wise so that x varies fastest
  gc <- cbind(x=rep(xs, times=res), y=rep(ys, each=res))
  ## Spherical coordinates of every grid location
  gs <- polar.cart.to.sphere.spherical(gc, pa)
  return(list(s=gs, c=gc, xs=xs, ys=ys))
}
##' Arc length of a parabola y=x^2/4f
##' @param x1 x co-ordinate of start of arc
##' @param x2 x co-ordinate of end of arc
##' @param f focal length of parabola
##' @return length of parabola arc
##' @author David Sterratt
parabola.arclength <- function(x1, x2, f) {
  ## Closed form in terms of the half-widths h = x/2 and the distances
  ## q = sqrt(f^2 + h^2)
  h1 <- x1/2
  q1 <- sqrt(f^2 + h1^2)
  h2 <- x2/2
  q2 <- sqrt(f^2 + h2^2)
  return((h2*q2 - h1*q1)/f + f*log((h2 + q2)/(h1 + q1)))
}
##' Inverse arc length of a parabola y=x^2/4f
##' @param x1 co-ordinate of start of arc
##' @param s length of parabola arc to follow
##' @param f focal length of parabola
##' @return x co-ordinate of end of arc
##' @importFrom stats uniroot
##' @author David Sterratt
parabola.invarclength <- function(x1, s, f) {
  ## Invert parabola.arclength() numerically for each requested arc
  ## length. The root is bracketed by [x1, x1 + si + 1] because the
  ## arc length grows at least as fast as x.
  sapply(s, function(si) {
    residual <- function(x) parabola.arclength(x1, x, f) - si
    uniroot(residual, c(x1, x1 + si + 1))$root
  })
}
|
e055a729b881bdf96048de62e03641ce2c7e8203
|
e72d002fdcdaf378bda95be0d9993bea189b1276
|
/man/extract.Rd
|
3d0bc3559a6eaf4a6c050e3ca334a0bd7e8bf468
|
[] |
no_license
|
davan690/ctsem
|
1663f0f7340d81d1addd63adf5d80293f4d4a814
|
5f6fea38c7f66e3541b6870155ae4bd0828e9a48
|
refs/heads/master
| 2020-09-02T15:04:44.255112
| 2019-10-30T16:36:16
| 2019-10-30T16:36:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 535
|
rd
|
extract.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.ctStanFit.R
\name{extract}
\alias{extract}
\title{Extract samples from a ctStanFit object}
\usage{
extract(object, ...)
}
\arguments{
\item{object}{ctStanFit object, samples may be from Stan's HMC, or the importance sampling approach of ctsem.}
\item{...}{additional arguments to pass to \code{rstan::extract}.}
}
\value{
Array of posterior samples.
}
\description{
Extract samples from a ctStanFit object
}
\examples{
e = extract(ctstantestfit)
}
|
f222df207036f56e0d53095e60c38dd097c5b8a4
|
840944dacec0eb78b5989a2d2e4f69898ac17967
|
/man/adorn_everything.Rd
|
dd3e7cbaff2e6d13c650c6b39419834bffc88b70
|
[
"MIT"
] |
permissive
|
Sorenson-Impact/sorensonimpact
|
e5104516366aca205f9f5c7dccf8a23487006bca
|
78796d0a720037a866160ca62d8734d48a2aaff3
|
refs/heads/master
| 2021-11-13T16:06:01.147657
| 2021-11-04T16:40:13
| 2021-11-04T16:40:13
| 108,036,549
| 12
| 7
| null | 2020-01-28T18:02:53
| 2017-10-23T20:36:58
|
R
|
UTF-8
|
R
| false
| true
| 683
|
rd
|
adorn_everything.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/janitor_custom_functions.R
\name{adorn_everything}
\alias{adorn_everything}
\title{Adorn Everything}
\usage{
adorn_everything(dat, pct_direction = "col")
}
\arguments{
\item{dat}{A \code{janitor::tabyl()} to adorn.}

\item{pct_direction}{Should the cell percentages use the "col" or "row" totals as denominator. Defaults to "col".}
}
\value{
tabyl with percents, percent formatting, ns, and better total percents.
}
\description{
\lifecycle{experimental}
Adorn percentages, percent formatting, and ns to a \code{janitor::tabyl()} with better percents
}
\examples{
\dontrun{
mtcars \%>\% tabyl(cyl, gear) \%>\% adorn_everything(pct_direction = "col")
}
}
|
4df4e58308cbb6cba89ebe1895d3e944230be8dc
|
2c485b1c2f39fc3c269c6f578e21d698dcec63e6
|
/man/sampling.Rd
|
8ee44e072ea1d453fb57883e4c0e40c6a1b0a1a0
|
[] |
no_license
|
aalfons/simFrame
|
002f47cad078c93dec24c4c9fab4893e7bb56922
|
23314f0b1f6632560e0d95dc568f708f3c1286a9
|
refs/heads/master
| 2021-12-23T10:23:44.587577
| 2021-11-23T12:46:58
| 2021-11-23T12:46:58
| 6,717,992
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,894
|
rd
|
sampling.Rd
|
\encoding{utf8}
\name{sampling}
\Rdversion{1.1}
\alias{srs}
\alias{ups}
\alias{brewer}
\alias{midzuno}
\alias{tille}
\title{Random sampling}
\description{
Functions for random sampling.
}
\usage{
srs(N, size, replace = FALSE)
ups(N, size, prob, replace = FALSE)
brewer(prob, eps = 1e-06)
midzuno(prob, eps = 1e-06)
tille(prob, eps = 1e-06)
}
\arguments{
\item{N}{a non-negative integer giving the number of observations from
which to sample.}
\item{size}{a non-negative integer giving the number of observations to
sample.}
\item{prob}{for \code{ups}, a numeric vector giving the probability weights
(see \code{\link{sample}}). For \code{tille} and \code{midzuno}, a vector
of inclusion probabilities (see \code{\link{inclusionProb}}).}
\item{replace}{a logical indicating whether sampling should be performed
with or without replacement.}
\item{eps}{a numeric control value giving the desired accuracy.}
}
\details{
\code{srs} and \code{ups} are wrappers for simple random sampling and
unequal probability sampling, respectively. Both functions make use of
\code{\link{sample}}.
\code{brewer}, \code{midzuno} and \code{tille} perform Brewer's, Midzuno's and
\enc{Tillé}{Tille}'s method, respectively, for unequal probability sampling
without replacement and fixed sample size.
}
\value{
An integer vector giving the indices of the sampled observations.
}
\note{
\code{brewer}, \code{midzuno} and \code{tille} are faster C++ implementations
of \code{UPbrewer}, \code{UPmidzuno} and \code{UPtille}, respectively, from
package \code{sampling}.
}
\references{
Brewer, K. (1975), A simple procedure for sampling \eqn{\pi}{pi} pswor,
Australian Journal of Statistics, \bold{17}(3), 166-172.
Midzuno, H. (1952) On the sampling system with probability proportional to sum
of size. \emph{Annals of the Institute of Statistical Mathematics},
\bold{3}(2), 99--107.
\enc{Tillé}{Tille}, Y. (1996) An elimination procedure of unequal probability
sampling without replacement. \emph{Biometrika}, \bold{83}(1), 238--241.
Deville, J.-C. and \enc{Tillé}{Tille}, Y. (1998) Unequal probability sampling
without replacement through a splitting method. \emph{Biometrika},
\bold{85}(1), 89--101.
}
\author{Andreas Alfons}
\seealso{
\code{"\linkS4class{SampleControl}"}, \code{"\linkS4class{TwoStageControl}"},
\code{\link{setup}}, \code{\link{inclusionProb}}, \code{\link{sample}}
}
\examples{
## simple random sampling
# without replacement
srs(10, 5)
# with replacement
srs(5, 10, replace = TRUE)
## unequal probability sampling
# without replacement
ups(10, 5, prob = 1:10)
# with replacement
ups(5, 10, prob = 1:5, replace = TRUE)
## Brewer, Midzuno and Tille sampling
# define inclusion probabilities
prob <- c(0.2,0.7,0.8,0.5,0.4,0.4)
# Brewer sampling
brewer(prob)
# Midzuno sampling
midzuno(prob)
# Tille sampling
tille(prob)
}
\keyword{distribution}
|
be448bdb7b5c2f58c2a444ba1e4d51ee05aca635
|
90292cdc0c01652994313aa0e926b8cea5111841
|
/analysis/scripts/analysis.R
|
9adba4e6958dd55fcf63b3a3cac86dbd74d5d483
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mmibrahim/BEACONToolkit
|
252f404ba45650e2d037c6460e04086dfe1d0ebf
|
6f561860eaacc4c4730e752f910312e5e77a276a
|
refs/heads/master
| 2021-01-15T19:45:39.410052
| 2020-01-19T06:33:10
| 2020-01-19T06:33:10
| 13,341,177
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,531
|
r
|
analysis.R
|
# BEACONToolkit Chapter 2: Analysis
# By: Luis Zaman
# 8/6/12

######## Fake fish data for what p-values aren't ########
# Two large (n = 5000) simulated samples whose true means differ by only
# 0.5 kg: with this much data even a tiny effect is "significant"
cold_fish = rnorm(5000, mean=20.5, sd=5)
hot_fish = rnorm(5000, mean=20, sd=5)

data_frame <- data.frame(weight=c(hot_fish,cold_fish), environment=c(rep("hot", 5000), rep("cold", 5000)))
boxplot(weight ~ environment, data=data_frame, xlab="Environment", ylab="Weight (kg)", main="Fake Fish Data")
# Two-sample t-test between the two simulated groups
t.test(cold_fish,hot_fish)

######## Fake data for what p-values are ########
# A single small sample (n = 50) drawn with true mean 1.0
cold_effect = rnorm(50, mean=1.0, sd=5)
hist(cold_effect, main="Histogram of our data", xlab="Data")

######## Monte Carlo for P-Value ########
#now instead of just looking at them, lets see how many of them have means below 0
# Simulate the sampling distribution of the mean under the null
# hypothesis (true mean 0, sd estimated from the data)
num_samples <- 100000
monte_carlo_samples <- replicate(num_samples, mean(rnorm(length(cold_effect), mean=0, sd=sd(cold_effect))))
hist(monte_carlo_samples, main="Monte Carlo Simulated Means")
# One-tailed p-value: fraction of simulated null means at least as
# large as the observed mean
p_val <- length(monte_carlo_samples[monte_carlo_samples>= mean(cold_effect)])/length(monte_carlo_samples)
print(paste("p-value = ", p_val))
#this performs a one tail t-test, assuming we expect the effect to be above 0 (our null hypothesis)
t.test(cold_effect, alternative="greater")

######## Bootstrap for 95% confidence intervals ########
#first get the sampling distribution of means
sample_means <- replicate(num_samples, mean(sample(cold_effect, size=length(cold_effect), replace=T)))
hist(sample_means, xlab="Means of Samples", main="Histogram of Resampled Means")
#sample with replacement the same number of times as the length of our dataset
#plot a few of these to show that you get variation based on the empirical distribution
boxplot(sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T),sample(cold_effect, size=length(cold_effect), replace=T), xlab="Resamples", main="Multipe Resampling")
#use sd of sample mean distribution to calculate 95% confidence intervals
# (normal approximation: mean +/- 2 standard errors)
c(mean(cold_effect) - 2 * sd(sample_means), mean(cold_effect) + 2 * sd(sample_means))
#compare this to the t-test confidence intervals
t.test(cold_effect)

######## Other Analysis in R ########
# NOTE(review): setwd() to a user-specific path makes this script
# non-portable -- confirm the data directory before running
setwd("~/BEACONToolkit/analysis/data")
parasite_data <- read.csv("parasite_data.csv")
plot(ShannonDiversity ~ as.factor(Virulence), data=parasite_data)
# Runs with no parasites have Virulence recorded as NA
no_parasites <- parasite_data[is.na(parasite_data$Virulence), ]
# NOTE(review): exact floating-point comparison (== 0.8) on Virulence,
# with na.omit() dropping NAs from the logical index -- confirm this
# selects the intended rows
normal_parasites <- parasite_data[na.omit(parasite_data$Virulence == 0.8), ]
boxplot(no_parasites$ShannonDiversity, normal_parasites$ShannonDiversity, ylab="Shannon Diversity", xlab="W and W.O. Parasites", main="Normal Parasite Runs (0.8 Virulence)")
mean(no_parasites$ShannonDiversity)
mean(normal_parasites$ShannonDiversity)
# Standard error of the mean
sem <- function(values) {sd(values)/sqrt(length(values))}
sem(normal_parasites$ShannonDiversity)
# t critical values -- NOTE(review): df is set to n here; the usual
# choice for a CI of a mean is n - 1 -- confirm intended
qt(c(0.025, 0.975), df=length(normal_parasites$ShannonDiversity))
# 95% CI using the hard-coded critical value 2.008559
c( mean(normal_parasites$ShannonDiversity) -2.008559*sem(normal_parasites$ShannonDiversity),
mean(normal_parasites$ShannonDiversity) + 2.008559*sem(normal_parasites$ShannonDiversity))
t.test(no_parasites$ShannonDiversity, normal_parasites$ShannonDiversity, conf.int=T)
# Non-parametric alternative to the t-test
wilcox.test(no_parasites$ShannonDiversity, normal_parasites$ShannonDiversity, conf.int=T)
|
c186ec49ef2b0d620ced4aaeec67e77795f634dd
|
7b8da296768b3586d71ff5b9808d36fed89c98fc
|
/annotatedOrphans.R
|
ee690325e53d019b076ad942e6040ba88c4fc4f5
|
[] |
no_license
|
raonyguimaraes/exomeAnalysis
|
6e7e9a19dc46979f2f03e1594bba7b2e574ee4bb
|
b21deb76e8425fa009d137eb62dbbd08bbdf2264
|
refs/heads/master
| 2020-04-14T14:53:47.675052
| 2013-04-11T16:57:10
| 2013-04-11T16:57:10
| 10,248,781
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 118
|
r
|
annotatedOrphans.R
|
## Plot the distribution of annotated orphan categories as a pie chart.
## NOTE(review): the input path is user-specific; parameterise if this
## script is reused elsewhere.
## Fix: spell out FALSE instead of the reassignable shorthand F.
d <- read.table("/Users/singhal/thesisWork/introgression/metrics/annotatedOrphans.out",
                header = FALSE)
## V1 = category label, V2 = count (the input file has no header row)
pie(d$V2, labels = d$V1)
|
0b743d65e8f0bf01f85d9ebbf8d8fc3f10bf06eb
|
effe14a2cd10c729731f08b501fdb9ff0b065791
|
/cran/paws/man/sagemaker.Rd
|
1f4afbc793dcb6d98f628bc039895488a69e3e2b
|
[
"Apache-2.0"
] |
permissive
|
peoplecure/paws
|
8fccc08d40093bb25e2fdf66dd5e38820f6d335a
|
89f044704ef832a85a71249ce008f01821b1cf88
|
refs/heads/master
| 2020-06-02T16:00:40.294628
| 2019-06-08T23:00:39
| 2019-06-08T23:00:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 10,522
|
rd
|
sagemaker.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{sagemaker}
\alias{sagemaker}
\title{Amazon SageMaker Service}
\usage{
sagemaker()
}
\description{
Provides APIs for creating and managing Amazon SageMaker resources.
}
\section{Operations}{
\tabular{ll}{
\link[=sagemaker_add_tags]{add_tags} \tab Adds or overwrites one or more tags for the specified Amazon SageMaker resource \cr
\link[=sagemaker_create_algorithm]{create_algorithm} \tab Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace \cr
\link[=sagemaker_create_code_repository]{create_code_repository} \tab Creates a Git repository as a resource in your Amazon SageMaker account \cr
\link[=sagemaker_create_compilation_job]{create_compilation_job} \tab Starts a model compilation job \cr
\link[=sagemaker_create_endpoint]{create_endpoint} \tab Creates an endpoint using the endpoint configuration specified in the request \cr
\link[=sagemaker_create_endpoint_config]{create_endpoint_config} \tab Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models \cr
\link[=sagemaker_create_hyper_parameter_tuning_job]{create_hyper_parameter_tuning_job} \tab Starts a hyperparameter tuning job \cr
\link[=sagemaker_create_labeling_job]{create_labeling_job} \tab Creates a job that uses workers to label the data objects in your input dataset \cr
\link[=sagemaker_create_model]{create_model} \tab Creates a model in Amazon SageMaker \cr
\link[=sagemaker_create_model_package]{create_model_package} \tab Creates a model package that you can use to create Amazon SageMaker models or list on AWS Marketplace \cr
\link[=sagemaker_create_notebook_instance]{create_notebook_instance} \tab Creates an Amazon SageMaker notebook instance \cr
\link[=sagemaker_create_notebook_instance_lifecycle_config]{create_notebook_instance_lifecycle_config} \tab Creates a lifecycle configuration that you can associate with a notebook instance \cr
\link[=sagemaker_create_presigned_notebook_instance_url]{create_presigned_notebook_instance_url} \tab Returns a URL that you can use to connect to the Jupyter server from a notebook instance \cr
\link[=sagemaker_create_training_job]{create_training_job} \tab Starts a model training job \cr
\link[=sagemaker_create_transform_job]{create_transform_job} \tab Starts a transform job \cr
\link[=sagemaker_create_workteam]{create_workteam} \tab Creates a new work team for labeling your data \cr
\link[=sagemaker_delete_algorithm]{delete_algorithm} \tab Removes the specified algorithm from your account \cr
\link[=sagemaker_delete_code_repository]{delete_code_repository} \tab Deletes the specified Git repository from your account \cr
\link[=sagemaker_delete_endpoint]{delete_endpoint} \tab Deletes an endpoint \cr
\link[=sagemaker_delete_endpoint_config]{delete_endpoint_config} \tab Deletes an endpoint configuration \cr
\link[=sagemaker_delete_model]{delete_model} \tab Deletes a model \cr
\link[=sagemaker_delete_model_package]{delete_model_package} \tab Deletes a model package \cr
\link[=sagemaker_delete_notebook_instance]{delete_notebook_instance} \tab Deletes an Amazon SageMaker notebook instance \cr
\link[=sagemaker_delete_notebook_instance_lifecycle_config]{delete_notebook_instance_lifecycle_config} \tab Deletes a notebook instance lifecycle configuration \cr
\link[=sagemaker_delete_tags]{delete_tags} \tab Deletes the specified tags from an Amazon SageMaker resource \cr
\link[=sagemaker_delete_workteam]{delete_workteam} \tab Deletes an existing work team \cr
\link[=sagemaker_describe_algorithm]{describe_algorithm} \tab Returns a description of the specified algorithm that is in your account \cr
\link[=sagemaker_describe_code_repository]{describe_code_repository} \tab Gets details about the specified Git repository \cr
\link[=sagemaker_describe_compilation_job]{describe_compilation_job} \tab Returns information about a model compilation job \cr
\link[=sagemaker_describe_endpoint]{describe_endpoint} \tab Returns the description of an endpoint \cr
\link[=sagemaker_describe_endpoint_config]{describe_endpoint_config} \tab Returns the description of an endpoint configuration created using the CreateEndpointConfig API \cr
\link[=sagemaker_describe_hyper_parameter_tuning_job]{describe_hyper_parameter_tuning_job} \tab Gets a description of a hyperparameter tuning job \cr
\link[=sagemaker_describe_labeling_job]{describe_labeling_job} \tab Gets information about a labeling job \cr
\link[=sagemaker_describe_model]{describe_model} \tab Describes a model that you created using the CreateModel API \cr
\link[=sagemaker_describe_model_package]{describe_model_package} \tab Returns a description of the specified model package, which is used to create Amazon SageMaker models or list them on AWS Marketplace \cr
\link[=sagemaker_describe_notebook_instance]{describe_notebook_instance} \tab Returns information about a notebook instance \cr
\link[=sagemaker_describe_notebook_instance_lifecycle_config]{describe_notebook_instance_lifecycle_config} \tab Returns a description of a notebook instance lifecycle configuration \cr
\link[=sagemaker_describe_subscribed_workteam]{describe_subscribed_workteam} \tab Gets information about a work team provided by a vendor \cr
\link[=sagemaker_describe_training_job]{describe_training_job} \tab Returns information about a training job \cr
\link[=sagemaker_describe_transform_job]{describe_transform_job} \tab Returns information about a transform job \cr
\link[=sagemaker_describe_workteam]{describe_workteam} \tab Gets information about a specific work team \cr
\link[=sagemaker_get_search_suggestions]{get_search_suggestions} \tab An auto-complete API for the search functionality in the Amazon SageMaker console \cr
\link[=sagemaker_list_algorithms]{list_algorithms} \tab Lists the machine learning algorithms that have been created \cr
\link[=sagemaker_list_code_repositories]{list_code_repositories} \tab Gets a list of the Git repositories in your account \cr
\link[=sagemaker_list_compilation_jobs]{list_compilation_jobs} \tab Lists model compilation jobs that satisfy various filters \cr
\link[=sagemaker_list_endpoint_configs]{list_endpoint_configs} \tab Lists endpoint configurations \cr
\link[=sagemaker_list_endpoints]{list_endpoints} \tab Lists endpoints \cr
\link[=sagemaker_list_hyper_parameter_tuning_jobs]{list_hyper_parameter_tuning_jobs} \tab Gets a list of HyperParameterTuningJobSummary objects that describe the hyperparameter tuning jobs launched in your account \cr
\link[=sagemaker_list_labeling_jobs]{list_labeling_jobs} \tab Gets a list of labeling jobs \cr
\link[=sagemaker_list_labeling_jobs_for_workteam]{list_labeling_jobs_for_workteam} \tab Gets a list of labeling jobs assigned to a specified work team \cr
\link[=sagemaker_list_model_packages]{list_model_packages} \tab Lists the model packages that have been created \cr
\link[=sagemaker_list_models]{list_models} \tab Lists models created with the CreateModel API \cr
\link[=sagemaker_list_notebook_instance_lifecycle_configs]{list_notebook_instance_lifecycle_configs} \tab Lists notebook instance lifestyle configurations created with the CreateNotebookInstanceLifecycleConfig API \cr
\link[=sagemaker_list_notebook_instances]{list_notebook_instances} \tab Returns a list of the Amazon SageMaker notebook instances in the requester's account in an AWS Region \cr
\link[=sagemaker_list_subscribed_workteams]{list_subscribed_workteams} \tab Gets a list of the work teams that you are subscribed to in the AWS Marketplace \cr
\link[=sagemaker_list_tags]{list_tags} \tab Returns the tags for the specified Amazon SageMaker resource \cr
\link[=sagemaker_list_training_jobs]{list_training_jobs} \tab Lists training jobs \cr
\link[=sagemaker_list_training_jobs_for_hyper_parameter_tuning_job]{list_training_jobs_for_hyper_parameter_tuning_job} \tab Gets a list of TrainingJobSummary objects that describe the training jobs that a hyperparameter tuning job launched \cr
\link[=sagemaker_list_transform_jobs]{list_transform_jobs} \tab Lists transform jobs \cr
\link[=sagemaker_list_workteams]{list_workteams} \tab Gets a list of work teams that you have defined in a region \cr
\link[=sagemaker_render_ui_template]{render_ui_template} \tab Renders the UI template so that you can preview the worker's experience \cr
\link[=sagemaker_search]{search} \tab Finds Amazon SageMaker resources that match a search query \cr
\link[=sagemaker_start_notebook_instance]{start_notebook_instance} \tab Launches an ML compute instance with the latest version of the libraries and attaches your ML storage volume \cr
\link[=sagemaker_stop_compilation_job]{stop_compilation_job} \tab Stops a model compilation job \cr
\link[=sagemaker_stop_hyper_parameter_tuning_job]{stop_hyper_parameter_tuning_job} \tab Stops a running hyperparameter tuning job and all running training jobs that the tuning job launched \cr
\link[=sagemaker_stop_labeling_job]{stop_labeling_job} \tab Stops a running labeling job \cr
\link[=sagemaker_stop_notebook_instance]{stop_notebook_instance} \tab Terminates the ML compute instance \cr
\link[=sagemaker_stop_training_job]{stop_training_job} \tab Stops a training job \cr
\link[=sagemaker_stop_transform_job]{stop_transform_job} \tab Stops a transform job \cr
\link[=sagemaker_update_code_repository]{update_code_repository} \tab Updates the specified Git repository with the specified values \cr
\link[=sagemaker_update_endpoint]{update_endpoint} \tab Deploys the new EndpointConfig specified in the request, switches to using newly created endpoint, and then deletes resources provisioned for the endpoint using the previous EndpointConfig (there is no availability loss)\cr
\link[=sagemaker_update_endpoint_weights_and_capacities]{update_endpoint_weights_and_capacities} \tab Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint \cr
\link[=sagemaker_update_notebook_instance]{update_notebook_instance} \tab Updates a notebook instance \cr
\link[=sagemaker_update_notebook_instance_lifecycle_config]{update_notebook_instance_lifecycle_config} \tab Updates a notebook instance lifecycle configuration created with the CreateNotebookInstanceLifecycleConfig API \cr
\link[=sagemaker_update_workteam]{update_workteam} \tab Updates an existing work team with new member definitions or description
}
}
\examples{
\donttest{svc <- sagemaker()
svc$add_tags(
Foo = 123
)}
}
|
a05ceff440b38fd3a2f80ad461ce2126d9aa5812
|
d68f60777d9d738b7520fb36dcf7000442adeb60
|
/plot1.R
|
6f7fbf1354014830a41a966471ab5a80bdfe1c7d
|
[] |
no_license
|
utjimmyx/ExData_PeerAssessment2
|
e04a50a3014419fee80b38de9f1b0ecdddc39ad8
|
f33cb16182dfef7c6c594d4cf2b92534cd8f4f1b
|
refs/heads/master
| 2021-01-21T07:47:54.417737
| 2015-10-25T22:09:41
| 2015-10-25T22:09:41
| 44,928,933
| 0
| 0
| null | 2015-10-25T20:45:00
| 2015-10-25T20:45:00
| null |
UTF-8
|
R
| false
| false
| 461
|
r
|
plot1.R
|
## Plot total emissions per year from the National Emissions Inventory
## (NEI) PM2.5 data.
## NOTE(review): setwd() to a user-specific absolute path makes this
## script non-portable -- confirm/replace before reuse.
setwd("C:/Users/zxu/Documents/Nov 15 2014 R")
## plyr and ggplot2 are loaded but not used below -- TODO confirm needed
library(plyr)
library(ggplot2)
## NEI: per-record emissions; SCC: source classification codes (unused
## in this plot)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Reproducible 500-row subsample (not used in the plot below)
set.seed(12345)
NEI.reduced <- NEI[sample(nrow(NEI), 500), ]
## Sum Emissions over all records for each year
data <- with(NEI, aggregate(Emissions, by = list(year), sum))
plot(data, type = "o", ylab = "Total Emissions - million tons",
     xlab = "Year", main = "Total Emissions in the US.")
## Shade the area under the trend line
polygon(data, col = "purple", border = "green")
|
28f57e1a1b0bd6671eeb217f806232ce5120f3a4
|
67c0345439e95186c94519008834549dcb5444d2
|
/labwork1.R
|
ec22b4e554eaf38e176c32e156a5223301e4c562
|
[] |
no_license
|
tincorpai/labwork2
|
5f2f7054aecca76e9c4fa2b12111015ef6f69978
|
eb8b4eb3130ce5c136a01b7ac491f91b0d4df595
|
refs/heads/master
| 2020-07-15T22:22:03.863211
| 2019-09-08T03:14:57
| 2019-09-08T03:14:57
| 205,661,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,446
|
r
|
labwork1.R
|
### Lab1: Vector and List
### Author: Taiwo Famuyiwa
### Date: Feb. 4, 2019
#######################################
#######################################
#VECTOR QUESTIONS#
#QUESTION1
a <- c(1,5,4,3,6)
b <- c(3,5,2,1,9)
#Element-wise comparison; the values are: TRUE TRUE FALSE FALSE TRUE
a <= b
#QUESTION2
x <- c(12:4)
y <- c(0,1,2,0,1,2,0,1,2)
#Indices where x/y is NOT finite (division by zero gives Inf)
#The values are: 1, 4, 7
which(!is.finite(x/y))
#QUESTION3
x <- c(1,2,3,4)
#Keep the non-NA, positive elements of x, add two to each,
#and assign the result to k.
#k = 3, 4, 5, 6
(x+2)[(!is.na(x)) & x > 0] -> k
#QUESTION4
x <- c(2, 4, 6, 8)
y <- c(TRUE, TRUE, FALSE, TRUE)
#Sum the elements of x where y is TRUE: 2 + 4 + 8 = 14
sum(x[y])
#QUESTION5
x <- c(34, 56, 55, 87, NA, 4, 77, NA, 21, NA, 39)
#is.na flags all NAs in x and sum counts them
#The code will return 3
sum(is.na(x))
###########################################
###########################################
#LIST QUESTIONS#
#import library
library(dplyr)
#QUESTION 1
p <- c(2,7,8)
q <- c("A", "B", "C")
x <- list(p, q)
#x[2] returns a one-element sub-list containing q ("A" "B" "C");
#use x[[2]] to extract the bare vector.
x[2]
#QUESTION 2
w <- c(2, 7, 8)
v <- c("A", "B", "C")
x <- list(w, v)
#Change the first element of the SECOND list element (vector v) to "K"
#Answer:"K" "B" "C"
x[[2]][1] <- "K"
#QUESTION 3
a <- list ("x"=5, "y"=10, "z"=15)
#Unlist the list and sum the elements
#The output is 30
sum(unlist(a))
#QUESTION 4
Newlist <- list(a=1:10, b="Good morning", c="Hi")
#Use sapply with an anonymous function to add 1 to vector a
#element-wise, then store the result back in the list.
b <- sapply(Newlist[[1]], function(x)x+1)
Newlist[[1]] <- b
#QUESTION5
b <- list(a=1:10, c="Hello", d="AA")
#remove the second element from vector a of b
b[[1]][-2]
#QUESTION6
x <- list(a=5:10, c="Hello", d="AA")
#Add new element "NewItem" and name it z
x$z <- "NewItem"
#QUESTION7
y <- list("a", "b", "c")
#name each element of list y using the names() function
names(y) <- c("one", "two", "three")
#QUESTION8
x <- list(y=1:10, t="Hello", f="TT", r=5:20)
#Get the length of the 4th element of x (the vector 5:20)
#Answer: 16
length(x[[4]])
#QUESTION9
string <- "Grand Opening"
#Split the string on whitespace (regular expression "\\s+")
#and return the pieces as a list
as.list(strsplit(string, '\\s+')[[1]])
#QUESTION10
y <- list("a", "b", "c")
q <- list("A", "B", "C", "a", "b", "c")
#Use setdiff to return the elements of q that are not in y
setdiff(q, y)
|
b90cc377dcc8a8a804c3a8592323255c5a55016d
|
d353738e1163e485bf3e5c24646264523b8c6cd3
|
/R_Basics/Homework/Section6/Homework6.R
|
c54723152412a6c5f99998feb42f0e4ce33d5232
|
[] |
no_license
|
KiranKumar-A/R
|
157508851daeeccb61eb4f8d7d563db7abee15f7
|
12570e1e87664af4b645ce484943ab4f25742bd8
|
refs/heads/master
| 2021-09-07T01:06:22.462155
| 2018-02-14T16:56:57
| 2018-02-14T16:56:57
| 95,436,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
Homework6.R
|
## Section 6 homework: ggplot2 visualisation of movie gross data,
## filtered to selected genres and major studios.
#import the data
getwd()
setwd('G://UDEMY//R//Homework//Section6')  # NOTE(review): machine-specific path
getwd()
mov <-read.csv("Section6-Homework-Data.csv")
#data exploration
head(mov)
summary(mov)
str(mov)
#activate ggplot2
#install.packages("ggplot2")
library(ggplot2)
#cool insight: release counts per day of week
ggplot(data = mov, aes(x=Day.of.Week)) + geom_bar()
#filter #1 for the data frame: keep only the five genres of interest
filt <-(mov$Genre == "action")|(mov$Genre == "adventure")|(mov$Genre == "animation")|(mov$Genre == "comedy")|(mov$Genre == "drama")
mov2 <- mov[filt,]
mov2
#filter #2: keep only the six major studios
filt2 <- mov$Studio %in% c("Buena Vista Studios", "WB", "FOX", "Universal", "Sony", "Paramount Pictures")
filt
filt2
#rows matching both filters (overwrites the genre-only subset above)
mov2 <- mov[filt&filt2,]
mov2
#prepare the plot's data and aes layers:
p <- ggplot(data = mov2, aes(x=Genre, y=Gross...US))
p
#add geometries: jittered points sized by budget, boxplots layered on top
p + geom_point()
q <- p + geom_jitter(aes(size=Budget...mill., colour=Studio)) +
  geom_boxplot(alpha=0.7, outlier.colour = NA)
q
#non-data ink: axis labels and title
q<- q +
  xlab("Genre") +
  ylab("Gross % US") +
  ggtitle("Domestic Gross % by Genre")
q
#Theme: enlarge text elements and set the font family
q <- q +
  theme(
    axis.title.x = element_text(colour = "Blue", size = 30),
    axis.title.y = element_text(colour = "Blue", size = 30),
    axis.text.x = element_text(size = 20),
    axis.text.y = element_text(size = 20),
    plot.title = element_text(size = 40),
    legend.title = element_text(size = 20),
    legend.text = element_text(size = 20),
    text = element_text(family="Comic Sans MS")
  )
q
#final touch: rename the size legend
q$labels$size <- "Budget $M"
q
|
5e88d90c5440203e575f9a2742800479a10e9fb8
|
4129e81a0dd1904c598663d4202b5be897d07330
|
/plot1.R
|
9f745db8149f75bb0f14846d78ca949521545015
|
[] |
no_license
|
sjevans242/ExplAnal-Assig1
|
72af2cc394dc70cd6d5d2b87c1acac09dac442b1
|
4351cc10a32d3f68a118063b94f9d8bf41eeaadf
|
refs/heads/master
| 2021-01-10T03:24:07.596365
| 2016-02-21T12:55:25
| 2016-02-21T12:55:25
| 52,206,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
r
|
plot1.R
|
## delimited with ";"
## need data rows 66638 to 69517
## reader header only
header <- read.table("household_power_consumption.txt", nrows = 1, header = FALSE, sep =';', stringsAsFactors = FALSE)
## read needed lines and add header
data <- read.table("household_power_consumption.txt", skip = 66637, nrows = 2880, sep = ";",
col.names = header, na.strings = "?")
## convert Date and Time columns to class 'Date' and 'POSIXt'
data$Time <- paste(data$Date, data$Time, sep = " ")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- strptime(data$Time, "%d/%m/%Y %H:%M:%S")
## create png file
png(file = "plot1.png")
hist(data$Global_active_power,
col = "red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power")
## close connection to file
dev.off()
|
b6c9b60fe723a69d3ae492013982639573625549
|
dc2bdcb2cb677fc8068594ec8a9091d212039696
|
/src/FuncTools.R
|
06829e9fd2621fe72126004107123df2e68f7178
|
[] |
no_license
|
adrielidaltoe/Predicting-downloads-of-mobile-app-R
|
b6714592127395b14f538049b4a8a92a1d137e5b
|
d7a43d4fdadb0c7e858547df74dbdeb85318b11b
|
refs/heads/main
| 2023-04-22T21:27:26.007194
| 2021-05-05T12:19:44
| 2021-05-05T12:19:44
| 314,087,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,241
|
r
|
FuncTools.R
|
### Functions ####
# Pairwise association between categorical variables.
#
# For every ordered pair of columns in `var`, computes Theil's U
# (column-wise and symmetric), bias-corrected Cramer's V, and the
# chi-squared test p-value.
#
# Args:
#   var: character vector of column names in `df`.
#   df:  data frame holding the categorical columns.
# Returns:
#   A data.frame with one row per ordered pair and columns
#   var_x, var_y, theil_U, theil_sym, cramerV, chisq (p-value).
var_association <- function(var, df){
  # Namespace-qualified calls replace the original library() calls,
  # which altered the caller's search path as a side effect.
  n_pairs <- length(var)^2
  # Preallocate the full result instead of growing the data.frame one
  # row at a time (growing copies the whole object on every iteration).
  results <- data.frame(var_x = character(n_pairs),
                        var_y = character(n_pairs),
                        theil_U = numeric(n_pairs),
                        theil_sym = numeric(n_pairs),
                        cramerV = numeric(n_pairs),
                        chisq = numeric(n_pairs),
                        stringsAsFactors = FALSE)
  row <- 0L
  for (vary in var){
    # Values of the "y" variable for this batch of pairs
    y <- df[[vary]]
    for (varx in var){
      row <- row + 1L
      x <- df[[varx]]
      results[row, 'var_x'] <- varx
      results[row, 'var_y'] <- vary
      # Uncertainty (Theil's U) coefficients
      results[row, 'theil_U'] <- DescTools::UncertCoef(x, y, direction = 'column')
      results[row, 'theil_sym'] <- DescTools::UncertCoef(x, y, direction = 'symmetric')
      # Cramer's V, corrected for bias
      results[row, 'cramerV'] <- rcompanion::cramerV(x, y, bias.correct = TRUE)
      # Chi-squared test of independence (p-value only)
      results[row, 'chisq'] <- chisq.test(x, y)$p.value
    }
  }
  results
}
# Draw one heatmap per association measure for categorical variables.
# `correlation_dataframe` is the long-format output of var_association();
# each measure column is reshaped into a var_x-by-var_y matrix and plotted
# with ggcorrplot.
plot.correlations <- function(correlation_dataframe){
  # Import libraries
  library(ggcorrplot)
  # Association measures produced by var_association()
  measure_cols <- c('theil_U', 'theil_sym', 'cramerV', 'chisq')
  # One wide matrix and one printed heatmap per measure
  lapply(measure_cols, function(measure){
    wide <- as.matrix(cast(correlation_dataframe, var_x~var_y, value = measure))
    print(ggcorrplot(wide, type='full', lab = TRUE, title = measure, show.legend = FALSE))
  })
  invisible(NULL)
}
# Minutes elapsed since the earliest timestamp in a datetime vector.
#
# Args:
#   datetime_vector: a POSIXct/POSIXlt (or Date) vector.
# Returns:
#   A numeric vector, same length as the input, giving each element's
#   offset in minutes from the minimum timestamp (the "zero mark").
time_count <- function(datetime_vector){
  # min(datetime_vector) IS the value at the original's "index of the
  # first click", so the index lookup is unnecessary.
  origin <- min(datetime_vector)
  # difftime() is vectorized: one call replaces the original per-element
  # loop that grew the result vector with repeated assignment.
  as.numeric(difftime(datetime_vector, origin, units = 'mins'))
}
### Functions to group categorical variables ###
# Using download absolute frequency. This strategy worked well in Talking Data dataset, but
# has the disadvantage that it does not control the number of groups created.
# For each variable in `names`: levels with more than 10 rows where
# `var_target` equals 1 keep their own value; all remaining levels are
# lumped into one catch-all group (id = number of kept levels + 1).
# Adds one factor column '<var>_group' per variable.
# `dataframe` must be a data.table (the [, ..var] selection is data.table
# syntax).
group_categories <- function(dataframe, names, var_target){
  for(var in names){
    # Levels of `var` with more than 10 positive-target rows
    categories <- dataframe %>%
      filter(!!sym(var_target) == 1) %>%
      group_by((!!sym(var))) %>% # !!sym transform var (string) into a symbol
      summarise(n =n()) %>%
      filter(n > 10)
    # Catch-all group id is one past the number of kept levels
    group = nrow(categories)
    dataframe[,paste(var, 'group', sep = '_')] = as.factor(sapply(dataframe[,..var],
                                          function(x){ifelse(x %in% categories[[var]], x, group+1)}))
  }
  return(dataframe)
}
# Creating groups by relative frequency of downloads
# Here I considered (number of downloads in i) / (sum of downloads) in order to have significant
# percentages. Thus, it is the relative frequency of the positive responses, instead of variable
# relative frequency.
# For each variable in `variable`, bins its levels into 5 equal-width
# groups based on the level's share of positive responses, adding a factor
# column '<var>_resp_group' with labels 1..5. `dataframe` must be a
# data.table.
group_target_prop <- function(dataframe, variable, target){
  for(var in variable){
    # Cross-tabulate level x target and reshape to one row per level,
    # with one count column per target value (N.1 counts target == 1).
    test <- as.data.table(table(dataframe[[var]], dataframe[[target]]))
    test <- reshape(test, idvar = 'V1', v.names = 'N', timevar = 'V2', direction = 'wide')
    # Share (in %) of all positive responses carried by each level
    test <- test %>%
      mutate(response_rate = (N.1 / sum(N.1))*100)
    # Creating a new column in dataframe with values = 0
    name = paste(var, 'response', sep = '_')
    dataframe[,name] <- 0
    # Loop to match the response rate with the respective var values
    for(i in 1:length(test[['V1']])){
      index = which(dataframe[[var]] == test[['V1']][i])
      dataframe[[name]][index] <- test[['response_rate']][i]
    }
    # The number of categories will be fixed to 5:
    # equal-width bins spanning the observed response-rate range.
    min = min(dataframe[[name]])
    max = max(dataframe[[name]])
    increment = (max - min) /5
    intervals = c(min,min+increment, min+2*increment, min+3*increment, min+4*increment, min+5*increment)
    dataframe[, paste(var, 'resp_group', sep ='_')] = cut(dataframe[[name]],
                                                          breaks = intervals,
                                                          include.lowest = TRUE,
                                                          labels = c(1,2,3,4,5))
    # Drop the temporary '<var>_response' helper column.
    dataframe[[name]] <- NULL
  }
  return(dataframe)
}
# Functions for evaluating models
# Prints a caret confusion matrix (positive class 'yes', precision/recall
# mode) and plots ROC and precision-recall curves via PRROC, based on
# predicted CLASSES (not probabilities).
# Either pass a fitted `model` plus the `variables` to predict with, or
# pass precomputed `predictions` directly (then `model`/`variables` are
# ignored). `test_set` must be a data.table; the target column must use
# the labels 'yes'/'no'.
evaluate_model <- function(model, test_set, target_name, variables = NULL, predictions = NULL){
  # This function evaluates models from different algorithms based on predicted classes
  # Test_set must be a dataframe, not a sparce matrix
  # predictions: compute them here unless supplied by the caller
  if(is.null(predictions)){
    pred = predict(model, newdata=test_set[,..variables])
  }else{
    pred = as.factor(predictions)
  }
  # Confusion Matrix (caret)
  print(confusionMatrix(pred, test_set[[target_name]], positive = 'yes', mode = "prec_recall"))
  # Data for ROC and precision-recall curves
  scores = data.frame(pred, test_set[[target_name]])
  names(scores) <- c('pred','true')
  # scores.class0 = positive class, scores.class1 = negative class
  # AUC/ROC curve
  roccurve <- PRROC::roc.curve(scores.class0 = scores[scores$true == 'yes',]$pred,
                               scores.class1 = scores[scores$true == 'no',]$pred,
                               curve = TRUE)
  plot(roccurve)
  # AUCPR curve
  prcurve <- PRROC::pr.curve(scores.class0 = scores[scores$true == 'yes',]$pred,
                             scores.class1 = scores[scores$true == 'no',]$pred,
                             curve = TRUE)
  plot(prcurve)
}
# Compute ROC-AUC and PR-AUC from a model's predicted class probabilities.
# `model` must support predict(..., type = 'prob'); `labels` are the true
# classes for `test_data`. Uses ROCR's prediction()/performance().
# Returns list(roc_auc = ..., prc_auc = ...).
evaluate_model_prob <- function(model, test_data, labels){
  class_probs <- predict(model, newdata = test_data, type = 'prob')
  # Column 2 of the probability matrix is scored against the true labels.
  rocr_pred <- prediction(class_probs[, 2], labels = labels)
  list(
    'roc_auc' = performance(rocr_pred, measure = "auc")@y.values[[1]],
    'prc_auc' = performance(rocr_pred, measure = "aucpr")@y.values[[1]]
  )
}
# Change factor values to numeric.
#
# Factor columns whose levels are themselves numbers (e.g. "1.5") are
# converted to those numeric values; factors with non-numeric labels are
# replaced by the integer index of each level (1, 2, 3, ...).
#
# Args:
#   dataset: a data.frame whose listed columns may be factors.
#   names:   character vector of column names to convert.
# Returns:
#   `dataset` with the requested factor columns converted to numeric.
factor_to_numeric <- function(dataset, names){
  # Silence the "NAs introduced by coercion" warning raised while probing
  # whether the factor labels are numeric; on.exit restores the setting
  # even if an error occurs mid-loop (the original restored it manually).
  defaultW <- getOption("warn")
  options(warn = -1)
  on.exit(options(warn = defaultW), add = TRUE)
  for(name in names){
    if(is.factor(dataset[[name]])){
      lv <- as.numeric(levels(dataset[[name]]))
      if(sum(is.na(lv)) > 0){
        # Non-numeric labels: map each value to its level index. A
        # factor's underlying codes are exactly the level indices, so
        # this generalizes the original hard-coded 4-level case_when()
        # (which silently produced NA for factors with more levels) and
        # removes the dplyr dependency.
        dataset[,name] <- as.numeric(dataset[[name]])
      }else{
        # Numeric labels: recover the actual numeric values per row.
        dataset[,name] = lv[dataset[[name]]]
      }
    }
  }
  return(dataset)
}
# Train and predict results from xgboost
# Fits a binary-logistic xgboost model on a sparse model matrix built from
# `variables` (which must include `target`), then predicts 0/1 classes for
# `test_dataset` with a 0.5 probability threshold.
# Returns list(model = fitted booster, predictions = 0/1 vector,
# variable_names = column names of the test design matrix).
train_xgboost <- function(target, variables, train_dataset, test_dataset){
  require(Matrix)
  require(xgboost)
  # Coerce to data.table so the [, ..variables] column selection works.
  if(is.data.frame(train_dataset)){
    train_dataset = as.data.table(train_dataset)
  }
  if(is.data.frame(test_dataset)){
    test_dataset = as.data.table(test_dataset)
  }
  # formula to create sparce matrix: "<target> ~ ."
  f = paste(target, '~.', collapse="")
  # sparce matrix (one-hot design matrices for train and test)
  train_sparce <- sparse.model.matrix(as.formula(f), data = train_dataset[,..variables])
  test_sparce <- sparse.model.matrix(as.formula(f), data = test_dataset[,..variables])
  # Output vector: if the target is a factor, recover its numeric labels
  # from the level strings (factor codes themselves would be 1-based).
  if(is.factor(train_dataset[[target]])){
    output_vector_train = as.numeric(levels(train_dataset[[target]]))[train_dataset[[target]]]
  }else{
    output_vector_train = train_dataset[[target]]
  }
  # Train the model (fixed seed for reproducibility)
  set.seed(1045)
  model <- xgboost(data = train_sparce, label = output_vector_train, max.depth = 6, eta = 0.2, nrounds = 100,
                   nthread = 2, objective = "binary:logistic", params = list(eval_metric = "auc"), verbose = FALSE)
  # Predictions. xgboost does a regression, we have to transform the pred values to a binary classification.
  predictions = as.numeric(predict(model, test_sparce) > 0.5)
  return(list('model' = model, 'predictions' = predictions, variable_names = test_sparce@Dimnames[[2]]))
}
## Grid search over the perc.over parameter of DMwR::SMOTE.
## For each oversampling rate in `oversamples`, rebalances `train_data`
## with SMOTE, fits the chosen classifier, and prints its AUC and
## confusion table on `test_data`.
## Args:
##   train_data:  data.table with the training rows.
##   test_data:   data.table with the held-out evaluation rows.
##   target:      name of the target column.
##   features:    character vector of columns to use (including target).
##   oversamples: numeric vector of perc.over values to try.
##   model:       1 = C5.0, 2 = xgboost, anything else = radial SVM.
grid_search_smote <- function(train_data, test_data, target, features, oversamples, model = 1){
  require(DMwR)
  for(i in oversamples){
    f = paste(target, '~.', collapse="")
    # Bug fix: SMOTE() has no `set.seed` argument — the original passed
    # set.seed = 1045 through `...`, where it was silently ignored. Seed
    # the RNG explicitly before each SMOTE call instead.
    set.seed(1045)
    smoted_data <- SMOTE(as.formula(f), data = train_data[,..features], perc.over = i, k = 5,
                         perc.under = 400)
    var = features[features!=target]
    if(model == 1){
      print(paste(i, 'C50'))
      set.seed(1045)
      C50_smote <- C5.0(x = smoted_data[,..var] , y = smoted_data[[target]], trials = 10)
      pred = predict(C50_smote, newdata=test_data[,..var])
    }else if(model ==2){
      print(paste(i, 'xgboost'))
      # Bug fix: the original evaluated on a global object `test`
      # instead of the `test_data` parameter.
      xgb <- train_xgboost(target = target, variables = features,
                           train_dataset = smoted_data, test_dataset = test_data)
      pred = xgb$predictions
    }else{
      print(paste(i, 'SVM'))
      model_svm_v1 <- svm(as.formula(f),
                          data = smoted_data,
                          type = 'C-classification',
                          kernel = 'radial')
      pred = predict(model_svm_v1, newdata=test_data[,..var])
    }
    print(ROSE::roc.curve(predicted = pred, response = test_data[[target]], plotit = FALSE))
    # Bug fix: was table(pred, test[[target]]) — another global reference.
    print(table(pred, test_data[[target]]))
  }
}
|
d43242839ae3e4c48b746cfca9b5492bb1d956d0
|
5bfb41f4eb0d545b88d986b085d1e1bcd5da2585
|
/man/dna.identity.Rd
|
d055526ffd1d10a73d07f48010340b53126ac0de
|
[] |
no_license
|
cran/sublogo
|
a514a6e5ceaa650ca504f13c5a49c50b0144c298
|
c74f63170630a0ea61b93eafa0b2f255e02542b6
|
refs/heads/master
| 2016-09-06T01:25:06.488667
| 2009-06-19T00:00:00
| 2009-06-19T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 388
|
rd
|
dna.identity.Rd
|
\name{dna.identity}
\Rdversion{1.1}
\alias{dna.identity}
\docType{data}
\title{dna.identity}
\description{DNA identity substitution matrix.}
\usage{dna.identity}
\format{
  The format is:
 num [1:5, 1:5] 0 0 0 0 0 0 1 0 0 0 ...
 - attr(*, "dimnames")=List of 2
  ..$ : chr [1:5] "*" "A" "T" "G" ...
  ..$ : chr [1:5] "*" "A" "T" "G" ...
}
\examples{
str(dna.identity)
}
\keyword{datasets}
|
66572cd6dd053ac1a8738051dd8ae5046727a59d
|
7af6a1887bc68eed7c5edca34025b708334d13c5
|
/man/gap.mspct.Rd
|
063ec8b0231a6dbf34bccc7133693bebf145dbee
|
[] |
no_license
|
cran/photobiologySun
|
627badbe9d84c3923109ab01241adc0ffc995c58
|
a6aa6298287cbaae8cb36c292e25c6f82eb157a4
|
refs/heads/master
| 2021-01-24T13:02:19.223953
| 2019-03-27T21:20:03
| 2019-03-27T21:20:03
| 123,162,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,419
|
rd
|
gap.mspct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example.solar.spectra.data.r
\docType{data}
\name{gap.mspct}
\alias{gap.mspct}
\title{Solar spectral irradiance in a tree canopy gap (measured)}
\format{A \code{source_mspct} object containing a collection of 72
source_spct objects.}
\usage{
gap.mspct
}
\description{
A dataset containing a sequence of 72 spectra measured with an
Ocean Optics Maya2000 Pro spectrometer and a Bentham DH-7-SM cosine
diffuser. Values measured on 30 April 2014, in the late morning, under
clear sky conditions. The whole sequence was measured in 39 seconds in a
sunfleck under young silver birch trees. Place: University of Helsinki,
Viikki Campus, Finland. Coordinates: 60.227162 N, 25.019429 E. Calibration
and corrections done with package MayaCalc using bracketing and noise
reduction (with filter measurement) and method "sun". Algorithm and
calibration data by Lasse Ylianttila (STUK, Helsinki, Finland).
}
\details{
\itemize{ \item w.length (nm), range 293 to 800 nm. \item s.e.irrad
(W m-2 nm-1) }
}
\references{
Ylianttila, L.; Visuri, R.; Huurto, L. & Jokela, K. (2005)
Evaluation of a single-monochromator diode array spectroradiometer for
sunbed UV-radiation measurements. Photochem Photobiol, 81, 333-341
}
\author{
T. Matthew Robson and Saara Hartikainen (data)
}
\keyword{datasets}
|
965e66d25cb9d5b4441402b9e05c8a08fbf8d0ba
|
8717787cbe108b1021737cb42b24d6f3b31d04ea
|
/old/analyse_R/TraitementR.R
|
8befeb699826426c189927927055b7d0f8ab744d
|
[] |
no_license
|
JuanParker1/pr-noe
|
2928017b0a4e2443f8e9ea236c4d6630f5e3fa74
|
163120dc0769f3966e29a7b2ff0fc3752c733928
|
refs/heads/main
| 2023-08-16T04:13:32.886970
| 2021-09-29T06:47:58
| 2021-09-29T06:47:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
TraitementR.R
|
## Build a sorted tag-frequency table from an ndjson dump
## (entries appear to be cryptocurrency metadata — verify against data.ndjson).
library(tibble)
library(jsonlite)
library(tidyr)
library(dplyr)
setwd("~/Cours/4A/PR/pr-noe")  # NOTE(review): machine-specific path
# Stream the newline-delimited JSON into a data.frame, then wrap as tibble
brut <- stream_in(file("data.ndjson"))
tablecrypt <- tibble(brut)
# Number of tags in a (possibly nested) tag vector
longtab <- function(vector = c()){return (length(unlist(vector)))}
# Per-row tag count, stored as a new nbtag column
nbtab <- tablecrypt %>% select("tags") %>% apply(1,longtab) %>% data.frame()
tablecrypt$nbtag <- nbtab$.
# Keep only the entries that carry more than one tag
tablemots <- tablecrypt %>% select(name, short, tags, nbtag) %>% filter(nbtag > 1)
# One row per (entry, tag) pair; count each tag, most frequent first
cryptnotnest <- unnest(tablecrypt,tags)
listetag <- cryptnotnest %>% select(tags) %>% count(tags) %>% arrange(desc(n))
write.table(listetag, "./listetagtrie.csv", sep=';')
|
a8b8402897e2a78117324ece3dc66f42b76c9065
|
a3a90785fcf311da21ec1fb8a7b06e352909165d
|
/man/lipid_neg_fdata.Rd
|
c0d2da088ac867c044367e52754d38d3104d74bc
|
[] |
no_license
|
pmartR/pmartRdata
|
e77223c1a26128c0f4516c2e54f6e8dae1c63aac
|
bb4729346c264b45deb310f2d99199cd09776051
|
refs/heads/master
| 2023-01-09T18:03:49.387617
| 2023-01-03T20:08:24
| 2023-01-03T20:08:24
| 69,269,999
| 5
| 8
| null | 2023-01-03T20:08:26
| 2016-09-26T16:32:28
|
R
|
UTF-8
|
R
| false
| true
| 690
|
rd
|
lipid_neg_fdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{lipid_neg_fdata}
\alias{lipid_neg_fdata}
\title{Negative Ion Lipidomics Sample Metadata (f_data)}
\format{
A data.frame with 45 rows (samples) and 4 columns (sample identifier
and associated information):
\describe{
\item{SampleID}{Sample identifier (matches column headers in e_data)}
\item{Virus}{Strain of virus for each sample}
\item{Replicate}{Biological replicate number}
\item{Donor}{Donor from which the sample originated}
}
}
\description{
These data are part of Experiment 1 (isobaric labeled proteomics, lipidomics,
and RNAseq data). A dataset containing the sample metadata.
}
|
4b46bf1593e38d33678add42242fe3ce0a4957e5
|
3176bef3d05e202483010f8e3215d6bd1b5ef390
|
/man/eth_accounts.Rd
|
8972dce3c0fd4b372cda09b2ccd705d6aa202c23
|
[
"MIT"
] |
permissive
|
BlocklancerOrg/ethReum
|
dcd7af91cd7a572a8e7b376e69bff02e2d8d8ab3
|
561c2a3b2233af935fa9267ae84abee9c4ccc7f9
|
refs/heads/master
| 2021-09-20T18:27:14.543351
| 2018-08-13T23:07:14
| 2018-08-13T23:07:14
| 113,497,628
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 396
|
rd
|
eth_accounts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ethRPCFunctions.R
\name{eth_accounts}
\alias{eth_accounts}
\title{Eth accounts}
\usage{
eth_accounts(rpc_address = "http://localhost:8545")
}
\arguments{
\item{rpc_address}{The address of the RPC API.}
}
\value{
20 Bytes - addresses owned by the client.
}
\description{
Returns a list of addresses owned by client.
}
|
14335e08bdc61f7205b2f96421ecc156766e6d11
|
1a3756f0d41ff24ff50d797400c4befcab2f44aa
|
/pollutantmean.R
|
40afd968a66d31ca54db54c8482a98445cbb80fb
|
[] |
no_license
|
patrick-ball/r_programming
|
f2e6e20f8ae09884f2be0653808d9af8627fa8f7
|
763c53f324e7f86dea1c9acf92c7c434dd417213
|
refs/heads/master
| 2020-06-04T01:00:37.837592
| 2015-01-09T20:30:35
| 2015-01-09T20:30:35
| 28,894,686
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id=1:332) {
  ## 'directory' is a character vector of length 1 indicating the location of
  ## the CSV files, which are named with zero-padded monitor ids
  ## ("001.csv", ..., "332.csv")
  ## 'pollutant' is a character vector of length 1 indicating the name of the
  ## pollutant for which we will calculate the mean; either "sulfate" or
  ## "nitrate"
  ## 'id' is an integer vector indicating the monitor ID numbers to be used
  ## Returns the mean of the selected pollutant across all monitors indicated
  ## by the 'id' vector, ignoring NA values
  polsum = 0 ## will track summed pollutant values across all files
  count = 0 ## will track number of values added to sum
  for (i in id) {
    ## sprintf("%03d", i) zero-pads the id to three digits, replacing the
    ## original nchar()-based if/else chain; file.path builds the path
    ## portably.
    filename <- file.path(directory, sprintf("%03d.csv", i))
    data <- read.csv(filename)
    values <- data[[pollutant]]
    polsum <- polsum + sum(values, na.rm=TRUE)
    ## sum(!is.na()) counts the non-NA values directly, without building
    ## an intermediate filtered vector as the original did.
    count <- count + sum(!is.na(values))
  }
  ## NaN when no non-NA values were found (same as the original 0/0)
  polsum / count
}
|
af33a7e976f280544e80d0e79f3b473e5f8bd7e6
|
a08f634ae062d610235098f5a52ff389b4f7b64f
|
/R/helper_functions.R
|
004dd600f98fb144b37ed491c771ecdc960b0c62
|
[] |
no_license
|
oristides/Rmazon
|
a2cfb741cf1e3217abfd26a2de3699422ff44792
|
230bfa5a53c9c9f02b884d2af11f69fa04ed3f74
|
refs/heads/master
| 2021-01-13T14:30:45.313351
| 2016-11-03T21:26:40
| 2016-11-03T21:26:40
| 72,852,522
| 1
| 0
| null | 2016-11-04T13:57:57
| 2016-11-04T13:57:57
| null |
UTF-8
|
R
| false
| false
| 1,788
|
r
|
helper_functions.R
|
# Scrape one Amazon product-review page into a data frame.
# `scrapeurl` is the URL of a review-list page. Returns one row per review
# with columns: reviewRating (numeric stars), reviewDate (date-time),
# reviewFormat (purchase format or NA), Verified_Purchase (logical),
# reviewHeadline and reviewText (character).
reviews_from_page <- function(scrapeurl){
  html_review_data <- xml2::read_html(scrapeurl)
  # Star rating: first three characters of the rating text (e.g. "5.0")
  reviewRating <- rvest::html_nodes(html_review_data, "#cm_cr-review_list .review-rating")
  reviewRating <- rvest::html_text(reviewRating)
  reviewRating <- stringr::str_sub(reviewRating, 1,3)
  reviewRating <- as.numeric(reviewRating)
  # Review date: strip the leading "on ", parse as "Month day Year"
  reviewDate <- rvest::html_nodes(html_review_data, "#cm_cr-review_list .review-date")
  reviewDate <- rvest::html_text(reviewDate)
  reviewDate <- stringr::str_replace_all(reviewDate, "on ", "")
  reviewDate <- lubridate::parse_date_time(reviewDate, orders = "B d Y")
  # Purchase format per review; NA for reviews without a format node
  reviewFormat <- rvest::html_nodes(html_review_data, ".review")
  reviewFormat <- lapply(reviewFormat, function(x){
    x <- rvest::html_nodes(x, ".a-size-mini.a-color-secondary")
    x <- rvest::html_text(x)
    x <- stringr::str_replace_all(x, "Format: ", "")
  })
  reviewFormat <- lapply(reviewFormat, function(x) {
    if(length(x) == 0) x <- NA else x
  })
  reviewFormat <- unlist(reviewFormat)
  reviewHeadline <- rvest::html_nodes(html_review_data, "#cm_cr-review_list .a-color-base")
  reviewHeadline <- rvest::html_text(reviewHeadline)
  reviewText <- rvest::html_nodes(html_review_data, "#cm_cr-review_list .review-text")
  reviewText <- rvest::html_text(reviewText)
  Verified_Purchase <- rvest::html_nodes(html_review_data, ".review-data")
  Verified_Purchase <- rvest::html_text(Verified_Purchase)
  # NOTE(review): seq(1, 20, by = 2) hard-codes 10 reviews per page and
  # assumes two .review-data nodes per review — verify against the markup.
  Verified_Purchase <- Verified_Purchase[seq(1, 20, by = 2)]
  Verified_Purchase <- stringr::str_detect(Verified_Purchase, "Verified Purchase")
  Verified_Purchase <- na.omit(Verified_Purchase)
  # All column vectors must be the same length; data_frame() errors if the
  # page structure deviates from these assumptions.
  reviews <- dplyr::data_frame(reviewRating, reviewDate, reviewFormat,
                               Verified_Purchase, reviewHeadline, reviewText)
  return(reviews)
}
|
abdf8ff3f96a675869f9e1444c33861eb09e452a
|
c25894aa3c0cdb70b48d7bcf71510bc4f6267a39
|
/homeDepot/src/bagging.R
|
eb03b0fc7f37ca761be82ee9c2cb616959e2c908
|
[] |
no_license
|
panchamirudrakshi/MachineLearning
|
4d2f58e1f4ac06ecfe3f747c4747fac082f423e0
|
a0580b85b52bfc9833fe11dafa9048e92c774eda
|
refs/heads/master
| 2020-06-24T21:44:48.140521
| 2017-01-12T08:31:48
| 2017-01-12T08:31:48
| 74,620,377
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
bagging.R
|
## Bagged regression trees (randomForest with mtry = all features) for the
## Home Depot search-relevance task, plus a hold-out evaluation.
# NOTE(review): install.packages() at the top of a script reinstalls on
# every run; normally run once interactively instead.
install.packages("randomForest")
library("randomForest")
# NOTE(review): absolute Windows paths; breaks on other machines.
train <- read.csv("E:/Ranjani/Docs/sem5/PROJECT/OurProject/trainingFeatures.csv",header=FALSE)
test <- read.csv("E:/Ranjani/Docs/sem5/PROJECT/OurProject/testFeatures.csv",header=FALSE)
colnames(train)<-c("X1","X2","X3","X4","Relevance")
# Placeholder target so train and test share the same five columns
test$Relevance <- 0.0
colnames(test)<-c("X1","X2","X3","X4","Relevance")
summary(train)
summary(test)
# mtry=4 considers all four features at every split, i.e. bagging rather
# than a feature-subsampled random forest.
bag.hdepot = randomForest(Relevance~., data= train, mtry=4, importance=TRUE)
prediction.test = predict(bag.hdepot,test)
head(prediction.test,10)
write(prediction.test,file="Routput.txt", sep= "\n")
#cross validation: 80% hold-out split (train1 = training row indices)
train1=sample(1:nrow(train),nrow(train)*0.8)
bag.hdepot.cv = randomForest(Relevance~., data= train,subset=train1,mtry=4, importance=TRUE)
prediction.cv = predict(bag.hdepot.cv,newdata = train[-train1,])
relevance.cv = train[-train1,"Relevance"]
plot(prediction.cv,relevance.cv)
# Mean squared error on the hold-out rows
mean((prediction.cv-relevance.cv)^2)
# Reference line y = x: perfect predictions would fall on it
abline(0,1)
|
971f126dd89616d448fbb859e19cb3f8b32bf268
|
bd621bf35610f5c00d3e81b22656d0810d28dc86
|
/run_analysis.R
|
fde4a097bc5115f313fe0c58cb3b8242b26ab7dd
|
[] |
no_license
|
HUAF/getting_and_cleaning_data_week4
|
3db1666f474ffb7ef46d70b0c0870905e4912456
|
f479f4ac95d7d440ff20f0bfa48ca4c18b162a28
|
refs/heads/master
| 2020-09-10T15:55:27.009128
| 2016-09-25T19:06:29
| 2016-09-25T19:06:29
| 67,740,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,292
|
r
|
run_analysis.R
|
## run_analysis.R
## Getting & Cleaning Data course project: merge the UCI HAR train/test
## sets, keep the mean/std measurements, and write a tidy data set of the
## average of each measurement per subject and activity.
#load reshape2 library
library(reshape2)
# load needed data sets
## activity labels
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
## features
features <- read.table("UCI HAR Dataset/features.txt")
## subjects
test_subjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
train_subjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
## data files
test_labels <- read.table("UCI HAR Dataset/test/y_test.txt")
test_data <- read.table("UCI HAR Dataset/test/X_test.txt")
train_labels <- read.table("UCI HAR Dataset/train/y_train.txt")
train_data <- read.table("UCI HAR Dataset/train/X_train.txt")
### bind data by rows and delete unused variables
subjects <- rbind(test_subjects, train_subjects)
rm(test_subjects, train_subjects)
labels <-rbind(test_labels, train_labels)
rm(test_labels, train_labels)
## Replace numeric activity ids with their descriptive names.
# NOTE(review): merge() may reorder rows; row order is assumed preserved
# for the cbind with `subjects` and `data` below — verify.
named_labels <- merge(labels, activity_labels, by.x = "V1", by.y = "V1")
named_labels <- named_labels["V2"]
colnames(named_labels) <- c("activity_name")
data <- rbind(test_data, train_data)
rm(test_data, train_data)
## renaming col names before data final binding (AS ASKED)
colnames(subjects) <- c("subject_id")
colnames(labels) <- c("label_id")
colnames(data) <- features[, 2]
## bind the 3 data frames together
### full_data is the merge of the test set and the training set
full_data <- cbind(subjects, named_labels, data)
rm(subjects, labels, data)
# We will consider measurements on the mean to be the columns containing "mean" in their
# column name, measurements on the standard deviation the columns containing std in their
# column name.
### 2
# mean_std_measurements <- full_data[1, 2, grepl("(mean|std)", full_data)]
mean_std_vect <- grep("(mean|std)", colnames(full_data))
## mean_std_measurements keeps subject, activity, and every mean/std column
mean_std_measurements <- full_data[c(1, 2, mean_std_vect)] # get grep outside the vector
#melting the data
# NOTE(review): measure.vars hard-codes columns 3:81; confirm this matches
# length(mean_std_vect).
melted_dataset <- melt(mean_std_measurements, id = c("subject_id", "activity_name"), measure.vars = colnames(mean_std_measurements[3:81]))
# Average each measurement per (subject, activity) and write the result
final_dataset <- dcast(melted_dataset, subject_id + activity_name ~ variable, mean)
write.table(final_dataset, file = "./data_submission_file.txt", row.names = FALSE)
### removing unused variables
rm(activity_labels, features, melted_dataset, named_labels)
|
526c6f3df9af4111f5d4ef5382e77b9f527f85e1
|
4a50815a6c7d2425eaf6c6a29dfb81987d4084ee
|
/R/models_in.R
|
317d1b92a2e40747181c72bd30fd0391fd9260b9
|
[] |
no_license
|
SpyBal/psychNET
|
f818a943cb0e74aa1cc45199570e6b82e834bbd4
|
d4bebb47ab8fd2e5b748e848a19ac7599a0e48e0
|
refs/heads/master
| 2022-04-17T08:16:53.010058
| 2020-04-06T15:40:05
| 2020-04-06T15:40:05
| 255,610,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
r
|
models_in.R
|
# Registry of the model identifiers supported by the package, grouped by
# capability ("ind", "indne", "pop", "popne", "sparse", "contemporaneous").
# Returns a named list of character vectors.
models_in <- function() {
  individual_models <- c("VAR", "SVAR", "SMVAR", "GVAR", "SVECM", "SVARHL",
                         "SVARHLX", "SVARHLMA", "DFM", "GGVAR")
  sparse_models <- c("SVAR", "SMVAR", "GVAR", "SVECM", "SVARHL",
                     "SVARHLX", "SVARHLMA", "GGVAR")
  list(ind = individual_models,
       indne = c("SMVAR", "GVAR"),
       pop = c("MLVAR", "GGVAR"),
       popne = c("MLVAR"),
       sparse = sparse_models,
       contemporaneous = c("GVAR", "GGVAR", "DFM"))
}
|
b520fed5fe82bd3f17d4b04a12fc0d70ff4bb382
|
39024f0fb2d5b35c83a4ea3627abbe295d59941f
|
/R/13_RNA.quantification.R
|
76550c78dd2b9c30e60a007f8400c0e4d4a72fa8
|
[] |
no_license
|
jerryctnbio/SEQprocesstest
|
12e92818216cc8e7e301835f0ca31d7c3a2b90d2
|
1d3e212f4d1df57d617496fc091a333799b56791
|
refs/heads/master
| 2022-06-08T02:40:20.869114
| 2020-05-07T03:14:58
| 2020-05-07T03:14:58
| 261,937,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,235
|
r
|
13_RNA.quantification.R
|
#' @title cufflinks
#' @description A wrapper function to run Cufflinks for mRNA quantitation
#' @usage cufflinks(fns.bam, output.dir, sample.name, cufflinks.thread.number=4, cufflinks.gtf="G", ref.gtf, run.cmd=TRUE, mc.cores=1)
#' @param fns.bam Path to bam files
#' @param output.dir Output directory
#' @param sample.name A character vector for the sample names
#' @param cufflinks.thread.number A parameter value for -p in Cufflinks. A numeric value of the number of threads (default: 4)
#' @param cufflinks.gtf Either "G" (quantify against the reference annotation only; novel genes and isoforms are not assembled) or "g" (reference-guided assembly). Default: "G"
#' @param ref.gtf Path to reference gtf file
#' @param run.cmd Whether to execute the command line (default=TRUE)
#' @param mc.cores The number of cores to use. Must be at least one(default=1), and parallelization requires at least two cores.
#' @details Cufflinks algorithms for transcript assembly and expression quantification are much more accurate with paired-end reads.
#' @return Paths to the per-sample .fpkm_tracking quantification files
#' @import parallel
#' @references http://cole-trapnell-lab.github.io/cufflinks/papers/
#' @seealso \url{http://cole-trapnell-lab.github.io/cufflinks/}
#' @export
cufflinks=function(fns.bam,
                   output.dir,
                   sample.name,
                   cufflinks.thread.number=4,
                   cufflinks.gtf=c("G", "g"),
                   ref.gtf,
                   run.cmd=TRUE,
                   mc.cores=1){
  # Resolve the -G/-g choice. Without match.arg(), calling with the
  # default left the full c("G", "g") vector, so paste0() produced BOTH
  # flags on the command line.
  cufflinks.gtf=match.arg(cufflinks.gtf)
  # One output sub-directory per sample. The original created the
  # directories twice (lapply + a redundant sapply); once is enough.
  out.dirs=file.path(output.dir, sample.name)
  lapply(out.dirs, dir.create, recursive=TRUE, showWarnings=FALSE)
  cufflinks.gtf=paste0("-", cufflinks.gtf)
  # cufflinks.path is expected to be defined in the package environment.
  cmd=paste(cufflinks.path, "-p", cufflinks.thread.number, "-o", out.dirs, cufflinks.gtf, ref.gtf, "-multi-read-correct", fns.bam)
  message("[[",Sys.time(),"]] Run cufflinks -------")
  # Log the command once (the original printed it twice).
  print_message(cmd)
  if(run.cmd) mclapply(cmd, system, mc.cores=mc.cores)
  cat(cmd, file=file.path(output.dir, "run.cufflinks.log"), sep = "\n",append = FALSE)
  out.fns=list.files(out.dirs,".fpkm_tracking", full.names=TRUE)
  out.fns
}
#' @title htseq_count
#' @description A wrapper function to run htseq-count for mRNA or miRNA quantitation
#' @usage htseq_count(RNAtype="mRNA", fns.bam, sample.name, output.dir, Mode="intersection-nonempty", stranded="no", idattr="gene_id", htseq.r="pos", htseq.a=10, ref.gtf, mir.gff, run.cmd=TRUE, mc.cores=1)
#' @param RNAtype Either "mRNA" or "miRNA"; selects the counting command
#' @param fns.bam Path to input BAM or SAM files
#' @param sample.name A character vector for the sample names
#' @param output.dir Output directory
#' @param Mode A parameter value for -m in htseq-count. Mode to handle reads overlapping more than one feature (default:intersection-nonempty)
#' @param stranded A parameter value for -s in htseq-count. Whether the data is from a strand-specific assay (default:no)
#' @param idattr A parameter value for -i in htseq-count. GFF attribute to be used as feature ID (default:"gene_id")
#' @param htseq.r A parameter value for -r in htseq-count. Sorting order method (default:"pos")
#' @param htseq.a A parameter value for -a in htseq-count. Skip all reads with alignment quality lower than the given minimum value (default: 10)
#' @param ref.gtf Path to the reference gtf file (used for mRNA counting)
#' @param mir.gff Path to the micro-RNA reference gff file (used for miRNA counting)
#' @param run.cmd Whether to execute the command line (default=TRUE)
#' @param mc.cores The number of cores to use. Must be at least one(default=1), and parallelization requires at least two cores.
#' @details Counting reads in features. Given a file with aligned sequencing reads and a list of genomic features, a common task is to
#' count how many reads map to each feature.
#' @return Paths to the per-sample read-count text files
#' @import parallel
#' @references HTSeq—a Python framework to work with high-throughput sequencing data
#' @seealso \url {https://htseq.readthedocs.io/en/release_0.9.1/}
#' @export
htseq_count=function(RNAtype=c("mRNA", "miRNA"),
                     fns.bam,
                     sample.name,
                     output.dir,
                     #option
                     Mode=c("intersection-nonempty","union","intersection-strict"),
                     stranded=c("no","yes"),
                     idattr="gene_id",
                     htseq.r=c("pos","name"),
                     htseq.a=10,
                     #ref
                     ref.gtf,
                     mir.gff,
                     run.cmd = TRUE,
                     mc.cores = 1
                     ){
  # Resolve the enumerated arguments. Without match.arg(), calling with
  # the defaults left length-2/3 vectors, so if(RNAtype == "mRNA") below
  # compared a whole vector (an error in R >= 4.2).
  RNAtype=match.arg(RNAtype)
  Mode=match.arg(Mode)
  stranded=match.arg(stranded)
  htseq.r=match.arg(htseq.r)
  # One output sub-directory and count file per sample
  out.dirs=file.path(output.dir, sample.name)
  lapply(seq_along(out.dirs), function(a) dir.create(out.dirs[a], recursive = TRUE, showWarnings=FALSE))
  out.fns=file.path(out.dirs,paste0(sample.name, ".count.txt"))
  message("[[",Sys.time(),"]] Run htseq-count----")
  if(RNAtype=="mRNA") {
    # samtools view -F 4 drops unmapped reads before piping to htseq-count.
    # samtools.path / htseq.path / print_message come from the package env.
    cmd=paste(samtools.path, "view -F 4", fns.bam, "|", htseq.path, "-m", Mode, "-i", idattr, "-r", htseq.r, "-s", stranded, "-", ref.gtf, ">", out.fns)
    print_message(cmd)
    if(run.cmd) mclapply(cmd, system, mc.cores = mc.cores)
    cat(cmd, file=file.path(output.dir, "htseq_count.run.log"), sep="\n", append=FALSE)
  }
  if(RNAtype=="miRNA") {
    # Count features of type miRNA, using the gff "Name" attribute as ID
    cmd=paste(htseq.path, "-t miRNA -i Name -a", htseq.a, fns.bam, mir.gff, ">", out.fns)
    print_message(cmd)
    if(run.cmd) mclapply(cmd, system, mc.cores = mc.cores)
    cat(cmd, file=file.path(output.dir, "htseq_count.run.log"), sep="\n", append=FALSE)
  }
  out.fns
}
#' @title gtf2gr
#' @description Converts reference gtf file to GRanges form to execute FPKM estimation
#' @usage gtf2gr(ref.gtf, output.dir)
#' @param ref.gtf Path to the reference gtf file (e.g. gencode.v22.gtf)
#' @param output.dir Output directory; "gtf.gr.rda" is written here
#' @details To normalize the number of reads of each feature calculated in the previous step to the value of FPKM, convert the reference
#' gtf file to GRanges format.  Only gene-level records are kept; gene_id,
#' gene_type and gene_name are parsed out of the attribute column.
#' @return Path to the saved "gtf.gr.rda" file
#' @import limma
#' @import GenomicRanges
#' @export
gtf2gr=function(ref.gtf, output.dir){
  # Read the raw GTF (tab-separated, "#" comment lines skipped)
  gtf=read.delim(ref.gtf, comment.char="#", header=FALSE, sep="\t")
  colnames(gtf)=c("seqnames","source", "feature", "start", "end", "score", "strand", "frame", "annot")
  # Keep gene-level records only
  gtf.gene=gtf[gtf$feature == "gene",]
  # Split the attribute column on spaces/semicolons; field positions
  # (2 = gene_id, 5 = gene_type, 8 = gene_name) assume GENCODE-style attribute
  # ordering -- TODO confirm for other gtf sources.
  annot=strsplit2(as.character(gtf.gene$annot), split = " |;")
  gtf.gene$gene_id=annot[,2]
  gtf.gene$gene_type=annot[,5]
  gtf.gene$gene_name=annot[,8]
  gtf.gene=gtf.gene[,-9]   # drop the raw annot column
  cols=c("seqnames", "start", "end", "strand")
  message("[[",Sys.time(),"]] Convert GTF file to GRanges form---- ")
  # BUG FIX: the column is named "seqnames"; the previous code accessed
  # gtf.gene$seqname, which only worked through fragile '$' partial matching.
  gtf.gr=GRanges(seqnames = gtf.gene$seqnames,
                 ranges = IRanges(gtf.gene$start, gtf.gene$end),
                 strand = gtf.gene$strand,
                 gtf.gene[,setdiff(colnames(gtf.gene), cols)])
  save(gtf.gr, file = file.path(output.dir, "gtf.gr.rda"))
  message("[[",Sys.time(),"]] GRanges is stored in the R object directory---- ")
  file.path(output.dir, "gtf.gr.rda")
}
#' @title gff2gr
#' @description Converts reference gff file to GRanges form
#' @usage gff2gr(mir.gff, output.dir)
#' @param mir.gff Path to the micro-RNA reference gff file
#' @param output.dir Output directory; "gff.gr.rda" is written here
#' @details Only "miRNA" feature records are kept; ID, Alias, Name and
#' Derives_from are parsed out of the attribute column.
#' @return Path to the saved "gff.gr.rda" file
#' @import limma
#' @import GenomicRanges
#' @export
gff2gr=function(mir.gff, output.dir){
  # Read the raw GFF (tab-separated, "#" comment lines skipped)
  gff=read.delim(mir.gff, comment.char="#", header=FALSE, sep="\t")
  colnames(gff)=c("seqnames","source", "feature", "start", "end", "score", "strand", "frame", "annot")
  # Keep miRNA feature records only
  gff.gene=gff[gff$feature == "miRNA",]
  # GFF3 attributes are 'key=value;' pairs; field positions (2 = ID, 4 = Alias,
  # 6 = Name, 8 = Derives_from) assume miRBase-style attribute ordering --
  # TODO confirm for other gff sources.
  annot=strsplit2(as.character(gff.gene$annot), split = "=|;")
  gff.gene$ID=annot[,2]
  gff.gene$Alias=annot[,4]
  gff.gene$Name=annot[,6]
  gff.gene$Derives_from=annot[,8]
  gff.gene=gff.gene[,-9]   # drop the raw annot column
  cols=c("seqnames", "start", "end", "strand")
  message("[[",Sys.time(),"]] Convert GFF file to GRanges form---- ")
  # BUG FIX: the column is named "seqnames"; the previous code accessed
  # gff.gene$seqname, which only worked through fragile '$' partial matching.
  gff.gr=GRanges(seqnames = gff.gene$seqnames,
                 ranges = IRanges(gff.gene$start, gff.gene$end),
                 strand = gff.gene$strand,
                 gff.gene[,setdiff(colnames(gff.gene), cols)])
  save(gff.gr, file = file.path(output.dir, "gff.gr.rda"))
  message("[[",Sys.time(),"]] GRanges is stored in the R object directory---- ")
  file.path(output.dir, "gff.gr.rda")
}
#' @title htseq.add.info
#' @description Add information to the htseq output file
#' @usage htseq.add.info(RNAtype="mRNA", count.fns, output.dir, mc.cores=1)
#' @param RNAtype Either "mRNA" or "miRNA" (default="mRNA")
#' @param count.fns Paths to the htseq-count output files ("<sample>.count.txt")
#' @param output.dir Directory holding the saved "gtf.gr.rda"/"gff.gr.rda"
#' objects; annotated files are written into per-sample sub-directories here
#' @param mc.cores The number of cores to use. Must be at least one(default=1), and parallelization requires at least two cores.
#' @details Adds information to the output file of htseq. (Gene name, chromosome, start position, end position, gene size, FPKM value;
#' FPKM is only computed for mRNA.)
#' @return Paths to the annotated per-sample text files
#' @export
htseq.add.info=function(RNAtype=c("mRNA", "miRNA"),
                        count.fns,
                        output.dir,
                        mc.cores=1){
  # BUG FIX: resolve the choice-vector default to a single value; previously an
  # unset RNAtype made the '==' comparisons below operate on a length-2 vector.
  RNAtype=match.arg(RNAtype)
  sample.name=sub(".count.txt", "", basename(count.fns))
  out.dirs=file.path(output.dir, sample.name)
  lapply(1:length(out.dirs), function(a) dir.create(out.dirs[a], recursive = TRUE, showWarnings=FALSE))
  if(RNAtype=="mRNA"){
    # read counts; gtf.gr.rda must have been produced by gtf2gr()
    gtf.gr=get(load(file.path(output.dir, "gtf.gr.rda")))
    countList=mclapply(count.fns, read.delim, header=FALSE, mc.cores=mc.cores)
    info.fns=file.path(out.dirs, sub(".count.txt", ".CountAddInfo.mRNA.txt", basename(count.fns)))
    # adding information
    countList=mclapply(countList, function(count){
      # drop the 5 htseq-count summary rows (__no_feature etc.) at the tail
      dat=count[1:(nrow(count)-5),]
      colnames(dat) = c("id", "counts")
      dat$gene_name=gtf.gr[match(as.character(dat[,1]), gtf.gr$gene_id)]$gene_name
      dat$seqnames=as.character(seqnames(gtf.gr[match(as.character(dat[,1]), gtf.gr$gene_id)]))
      dat$start=start(gtf.gr[match(as.character(dat[,1]), gtf.gr$gene_id)])
      dat$end=end(gtf.gr[match(as.character(dat[,1]), gtf.gr$gene_id)])
      dat$size=width(gtf.gr[match(as.character(dat[,1]), gtf.gr$gene_id)])
      # FPKM: counts / gene length / library size * 1e9
      dat$FPKM=as.numeric(c(dat$counts/dat$size/sum(dat$counts)*10^9))
      dat
    }, mc.cores=mc.cores)
  }
  if(RNAtype=="miRNA"){
    # read counts; gff.gr.rda must have been produced by gff2gr()
    gff.gr=get(load(file.path(output.dir, "gff.gr.rda")))
    countList=mclapply(count.fns, read.delim, header=FALSE, mc.cores=mc.cores)
    info.fns=file.path(out.dirs, sub(".count.txt", ".CountAddInfo.miRNA.txt", basename(count.fns)))
    # adding information (no FPKM for miRNA)
    countList=mclapply(countList, function(count){
      dat=count[1:(nrow(count)-5),]
      colnames(dat) = c("id", "counts")
      dat$Name=gff.gr[match(as.character(dat[,1]), gff.gr$Name)]$Name
      dat$seqnames=as.character(seqnames(gff.gr[match(as.character(dat[,1]), gff.gr$Name)]))
      dat$start=start(gff.gr[match(as.character(dat[,1]), gff.gr$Name)])
      dat$end=end(gff.gr[match(as.character(dat[,1]), gff.gr$Name)])
      dat$size=width(gff.gr[match(as.character(dat[,1]), gff.gr$Name)])
      dat
    }, mc.cores=mc.cores)
  }
  # write output, one annotated file per sample
  for(i in 1:length(countList)) write.table(countList[[i]], file=info.fns[i], row.names=FALSE, sep="\t", quote=FALSE, append=FALSE)
  info.fns
}
|
d5b4b13b2c248790d3f0109b7d3265f8f1d2b4b5
|
7b53c993cdb9cd02229222d23934c37ecbae756e
|
/R Programming/4_BasketballTrends/1_MatrixIntro.R
|
a55e3938d1907e69b2141e23508e487f37a16114
|
[] |
no_license
|
jacobrozell/DataScience
|
12c1afea3d1ecec76328c9c7a900cec70d879bb1
|
42c9f0161840dec79d3a47fd643146d7bed86832
|
refs/heads/main
| 2023-04-30T01:50:52.461202
| 2021-05-20T16:17:14
| 2021-05-20T16:17:14
| 337,193,271
| 4
| 0
| null | 2021-03-18T02:27:42
| 2021-02-08T19:56:42
|
Python
|
UTF-8
|
R
| false
| false
| 422
|
r
|
1_MatrixIntro.R
|
### Ways to make a matrix

## matrix() --------------------------------------------------------
?matrix

# Build the integers 1..20 and fold them into a 4 x 5 matrix
# (column-major fill by default).
my.data <- seq_len(20)
my.data

A <- matrix(my.data, nrow = 4, ncol = 5)
A
# With column-major fill, the value 10 sits at row 2, column 3
A[2, 3]

# Same data, but filled row by row
B <- matrix(my.data, nrow = 4, ncol = 5, byrow = TRUE)
B
# With row-major fill, 10 moves to row 2, column 5
B[2, 5]

## rbind(): stack vectors as rows (numbers get coerced to character)
r1 <- c("I", "am", "happy")
r2 <- c("What", "a", "day")
r3 <- c(1, 2, 3)
R <- rbind(r1, r2, r3)
R

## cbind(): bind vectors as columns
c1 <- c("I", "am", "happy")
c2 <- c("What", "a", "day")
c3 <- c(1, 2, 3)
C <- cbind(c1, c2, c3)
C
|
9b61422c5d71d6df82b1508932c375a74d890111
|
66e42a0386ad9dea267dc0e445cfe7d8bc5a127e
|
/Rcode.R
|
1e13a3f00bd3c876d4c1caa8716af94f7225c609
|
[] |
no_license
|
Deepakrealm01/R-assignment-with-IMDB_data
|
041bcec75aa1d8ca825b6ee2589ba4ff25775536
|
a6f012885e469fefef97ce6cab72a768311aed6f
|
refs/heads/master
| 2021-01-15T11:19:59.359371
| 2016-09-19T05:02:55
| 2016-09-19T05:02:55
| 68,570,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 976
|
r
|
Rcode.R
|
# IMDB assignment: derive categorical and numeric variables from the IMDB data.
# NOTE(review): hard-coded, machine-specific working directory.
setwd("C:/deepak/study material/edwisor ,dataanlytics/ClassMaterial_2/Assignment")
getwd()
imdb_data <- read.csv("IMDB_data.csv", header = TRUE, stringsAsFactors = FALSE)
# Drop the second row, then inspect and sort by Genre
imdb_data1 <- imdb_data[-2, ]
genre <- unique(imdb_data1$Genre)
length(unique(imdb_data1$Genre))
data <- data.frame(unique(imdb_data1$Genre))
imdb_data1 <- imdb_data1[order(imdb_data1$Genre), ]
# Squared difference between votes and rating
# NOTE(review): assumes imdbVotes/imdbRating parse cleanly with as.numeric --
# values with thousands separators would become NA; verify against the data.
imdb_data1$newvariable <- with(imdb_data1, ((as.numeric(imdb_data1$imdbVotes) - as.numeric(imdb_data1$imdbRating))^2))
# Bucket vote counts into LOW / MEDIUM / HIGH
imdb_data1$imdbVotescat[imdb_data1$imdbVotes >= 0 & imdb_data1$imdbVotes < 500] <- "LOW"
imdb_data1$imdbVotescat[imdb_data1$imdbVotes >= 500 & imdb_data1$imdbVotes < 5000] <- "MEDIUM"
imdb_data1$imdbVotescat[imdb_data1$imdbVotes >= 5000] <- "HIGH"
# BUG FIX: the original indexed with head(Language,10)|tail(Language,10), i.e.
# '|' applied to two character vectors, which is an error in R.  The intent --
# blank out the Language of the first 10 and last 10 rows -- is done with
# explicit row indices instead.
n <- nrow(imdb_data1)
first_last <- c(head(seq_len(n), 10), tail(seq_len(n), 10))
imdb_data1$Language[first_last] <- NA
# Rows in Telugu, then relabel English as Hindi
newdata <- imdb_data1[which(imdb_data1$Language == "Telugu"), ]
imdb_data1$Language <- replace(imdb_data1$Language, imdb_data1$Language == "English", "Hindi")
# A 60-element vector: ten 4s, twenty 5s, thirty 6s
newvector <- c(rep(4, 10), rep(5, 20), rep(6, 30))
length(newvector)
|
80e97f245dfa720812809193991024ab44b4b7e3
|
db20029127f66c61aba2cb349ad37079d2e25daf
|
/HS1905topicmodel.R
|
c55921a4236082f1b5cc734426c8dce44624255a
|
[] |
no_license
|
iasaren/HS1905topicmodel
|
1ccb7d60d85071d406560a30797a82dee0947293
|
312d64936fb1ab2080f9167fc5d1a7a52a33f734
|
refs/heads/master
| 2021-01-10T09:42:42.468391
| 2016-01-10T18:58:07
| 2016-01-10T18:58:07
| 49,365,182
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
HS1905topicmodel.R
|
# Topic-model 1905 Finnish newspaper text: preprocess with tm, fit a 20-topic
# LDA, export the posterior matrices as CSV, and render an LDAvis view.
library(topicmodels)
library(tm)
library(LDAvis)
# Load corpus and preprocess
# Reads lemmatized 1905 issues from the server; pattern selects one ISSN/year.
corpus <- VCorpus(DirSource("/srv/data/fnewspapers/nlf_rawtext_fin/",pattern="fin_0355-2047_1905.*lemmatized"))
# Normalize: lower-case, strip digits/punctuation, drop Finnish stopwords plus
# corpus-specific noise tokens, collapse whitespace (order matters for tm_map).
corpus <- tm_map(corpus,content_transformer(tolower))
corpus <- tm_map(corpus,removeNumbers)
corpus <- tm_map(corpus,removePunctuation)
corpus <- tm_map(corpus,removeWords,c(stopwords("fi"),"omorfiversiongnugplv", "klo", "pnä", "tel", "trl"))
corpus <- tm_map(corpus,stripWhitespace)
doc_term <- DocumentTermMatrix(corpus)
#had to do this to remove error that had something to do with empty lines/documents in the document term matrix
doc_term <- removeSparseTerms(doc_term, 0.99)
rowTotals <- apply(doc_term , 1, sum) #Find the sum of words in each Document
doc_term <- doc_term[rowTotals> 0, ] #Remove all docs without words
# Do LDA
set.seed(43523) # for reproducibility
numtopics <- 20
lda <- LDA(doc_term, numtopics)
# Write out LDA results: document-topic posterior, per-document top topic,
# topic-term posterior, and the 50 highest-probability terms per topic.
write.csv(posterior(lda)$topics,"lda-documents-topics.csv")
write.csv(topics(lda),"document-main-topic.csv")
write.csv(posterior(lda)$terms,"lda-terms-topics.csv")
write.csv(terms(lda,50),"topic-50-main-terms.csv")
# Interactive visualisation: LDAvis wants phi (term probs, so exp of log-beta),
# theta (doc-topic probs), document lengths, vocabulary, and term frequencies.
json <- createJSON(phi = exp(lda@beta), theta = lda@gamma,
doc.length = rowSums(as.matrix(doc_term)), vocab = lda@terms,
term.frequency = colSums(as.matrix(doc_term)))
serVis(json)
|
a86c62cee32a7dcd79308b5e320d9e6564da9dd4
|
8c06418496087d7903d9dd75246db4e620ae4abf
|
/man/survexp.Rd
|
cb524ad06e56452ab2ca84792d7ac1018de96a9c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
cran/survival5
|
ec014f172313c6e9c28939602b3a783a2de8573b
|
983de35460104d135d1027db25e99f46671cbc1d
|
refs/heads/master
| 2021-01-01T20:34:43.889627
| 1977-08-08T00:00:00
| 1977-08-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,040
|
rd
|
survexp.Rd
|
\name{survexp}
\alias{survexp}
\title{
Compute Expected Survival
}
\description{
Returns either the expected survival of a cohort of subjects, or the
individual expected survival for each subject.
}
\usage{
survexp(formula, data, weights, subset, na.action, times, cohort=T,
conditional=F, ratetable=survexp.us, scale=1, npoints,
se.fit=<<see below>>, model=F, x=F, y=F)
}
\arguments{
\item{formula}{
formula object. The response variable is a vector of follow-up times
and is optional. The predictors consist of optional grouping variables
separated by the \code{+} operator (as in \code{survfit}), along with a \code{ratetable}
term. The \code{ratetable} term matches each subject to his/her expected cohort.
}
\item{data}{
data frame in which to interpret the variables named in
the \code{formula}, \code{subset} and \code{weights} arguments.
}
\item{weights}{
case weights.
}
\item{subset}{
expression indicating a subset of the rows of \code{data} to be used in the fit.
}
\item{na.action}{
function to filter missing data. This is applied to the model frame after
\code{subset} has been applied. Default is \code{options()$na.action}. A possible
value for \code{na.action} is \code{na.omit}, which deletes observations that contain
one or more missing values.
}
\item{times}{
vector of follow-up times at which the resulting survival curve is
evaluated. If absent, the result will be reported for each unique
value of the vector of follow-up times supplied in \code{formula}.
}
\item{cohort}{
logical value: if \code{FALSE}, each subject is treated as a subgroup of size 1.
The default is \code{TRUE}.
}
\item{conditional}{
logical value: if \code{TRUE}, the follow-up times supplied in \code{formula}
are death times and conditional expected survival is computed.
If \code{FALSE}, the follow-up times are potential censoring times.
If follow-up times are missing in \code{formula}, this argument is ignored.
}
\item{ratetable}{
a table of event rates, such as \code{survexp.uswhite}, or a fitted Cox model.
}
\item{scale}{
numeric value to scale the results. If \code{ratetable} is in units/day,
\code{scale = 365.25} causes the output to be reported in years.
}
\item{npoints}{
number of points at which to calculate intermediate results, evenly spaced
over the range of the follow-up times. The usual (exact) calculation is done
at each unique follow-up time. For very large data sets specifying \code{npoints}
can reduce the amount of memory and computation required.
For a prediction from a Cox model \code{npoints} is ignored.
}
\item{se.fit}{
compute the standard error of the predicted survival.
The default is to compute this whenever the routine can, which at this time
is only for the Ederer method and a Cox model as the rate table.
}
\item{model, x, y}{
flags to control what is returned. If any of these is true, then the
model frame, the model matrix, and/or the vector of response times will be
returned as components of the final result, with the same names as the
flag arguments.
}}
\value{
if \code{cohort=T} an object of class \code{survexp}, otherwise a vector of per-subject
expected survival values. The former contains the number of subjects at
risk and the expected survival for the cohort at each requested time.
}
\details{
Individual expected survival is usually used in models or testing, to
'correct' for the age and sex composition of a group of subjects. For
instance, assume that birth date, entry date into the study, sex and
actual survival time are all known for a group of subjects.
The \code{survexp.uswhite} population tables contain expected death rates
based on calendar year, sex and age. Then
haz <- -log(survexp(death.time ~ ratetable(sex=sex, year=entry.dt, age=(birth.dt-entry.dt)), cohort=F))
gives for each subject the total hazard experienced up to their observed
death time or censoring time.
This probability can be used as a rescaled time value in models:
glm(status ~ 1 + offset(log(haz)), family=poisson)
glm(status ~ x + offset(log(haz)), family=poisson)
In the first model, a test for intercept=0 is the one sample log-rank
test of whether the observed group of subjects has equivalent survival to
the baseline population. The second model tests for an effect of variable
\code{x} after adjustment for age and sex.
Cohort survival is used to produce an overall survival curve. This is then
added to the Kaplan-Meier plot of the study group for visual comparison
between these subjects and the population at large. There are three common
methods of computing cohort survival.
In the "exact method" of Ederer the cohort is not censored; this corresponds
to having no response variable in the formula. Hakulinen recommends censoring
the cohort at the anticipated censoring time of each patient, and Verheul
recommends censoring the cohort at the actual observation time of each
patient.
The last of these is the conditional method.
These are obtained by using the respective time values as the
follow-up time or response in the formula.
}
\references{
G. Berry. The analysis of mortality by the subject-years method.
\emph{Biometrics} 1983, 39:173-84.
F Ederer, L Axtell, and S Cutler. The relative survival rate: a statistical
methodology. \emph{Natl Cancer Inst Monogr} 1961, 6:101-21.
T. Hakulinen. Cancer survival corrected for heterogeneity in patient
withdrawal. \emph{Biometrics} 1982, 38:933.
H. Verheul, E. Dekker, P. Bossuyt, A. Moulijn, and A. Dunning. Background
mortality in clinical survival studies. \emph{Lancet} 1993, 341:872-5.
}
\seealso{
\code{\link{survfit}}, \code{\link{survexp.us}}, \code{\link{survexp.fit}}, \code{\link{pyears}}, \code{\link{date}}
}
\examples{
if (require(date)){
data(ratetables)
data(cancer)
## compare survival to US population
cancer$year<-rep(as.date("1/1/1980"),nrow(cancer))
efit <- survexp( ~ ratetable(sex=sex, year=year, age=age*365), times=(1:4)*365,data=cancer)
plot(survfit(Surv(time, status) ~1,data=cancer))
lines(efit)
}
}
\keyword{survival}
% Converted by Sd2Rd version 0.3-2.
|
e7fc8a12b16cafe6579f63aaacdde4a5c76574b0
|
0ffafa520c0030fd858ce6efcff2dc52b2972b64
|
/man/get_signals_between_clusters.Rd
|
55cac7a8415cb625a50d0a76c165519d808bb8ee
|
[] |
no_license
|
AlexanderKononov/cytofBrowser
|
d0d7b4b70af7d1d37c6bde9eb6aac891d7789af7
|
12f3c7290493f45e504eb7089169eef3b95dbc73
|
refs/heads/master
| 2022-12-07T14:28:00.372608
| 2020-08-25T17:35:08
| 2020-08-25T17:35:08
| 230,823,004
| 5
| 1
| null | 2020-03-18T15:37:56
| 2019-12-30T00:59:34
|
R
|
UTF-8
|
R
| false
| true
| 465
|
rd
|
get_signals_between_clusters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Correlation.R
\name{get_signals_between_clusters}
\alias{get_signals_between_clusters}
\title{Convert from signal dataframe to tidy dataframe with correlations between different clusters}
\usage{
get_signals_between_clusters(signals)
}
\arguments{
\item{signals}{A signal dataframe whose values are compared across clusters.}
}
\value{
A tidy dataframe of pairwise correlations between clusters.
}
\description{
Convert from signal dataframe to tidy dataframe with correlations between different clusters
}
|
fc9420d3ddd85a7383c5c1fc9cfccb6a888879c6
|
7977c6907bd07e74a675e746867934f2b380ddb9
|
/man/tpsPower.Rd
|
ea94d38a215fd2d15d8522658a3875bc442794dc
|
[] |
no_license
|
cran/osDesign
|
92ff10848e342005905a12a8bb96991df1ebc73b
|
0b6ba782931d2912876480aaa38b3a528d9c251f
|
refs/heads/master
| 2021-01-22T09:47:31.315727
| 2020-11-15T22:40:03
| 2020-11-15T22:40:03
| 17,698,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,589
|
rd
|
tpsPower.Rd
|
\name{tpsPower}
\Rdversion{1.7}
\alias{tpsPower}
\title{
Simulation-based estimation of power for the two-phase study design
}
\description{
Monte Carlo based estimation of statistical power for estimators of the components of a logistic regression model, based on balanced two-phase and case-control study designs (Breslow and Chatterjee, 1999; Prentice and Pyke, 1979).
}
\usage{
tpsPower(B=1000, betaTruth, X, N, strata, expandX="all", etaTerms=NULL,
nII, alpha=0.05, digits=1, betaNames=NULL,
monitor=NULL, cohort=TRUE, NI=NULL)
}
\arguments{
\item{B}{
The number of datasets generated by the simulation.
}
\item{betaTruth}{
Regression coefficients from the logistic regression model.
}
\item{X}{
Design matrix for the logistic regression model. The first column should correspond to intercept. For each exposure, the baseline group should be coded as 0, the first level as 1, and so on.
}
\item{N}{
A numeric vector providing the sample size for each row of the design matrix, \code{X}.
}
\item{strata}{
A numeric vector indicating which columns of the design matrix, \code{X}, are used to form the phase I stratification variable. \code{strata=1} specifies the intercept and is, therefore, equivalent to a case-control study. \code{strata=0} is not permitted in tpsPower(), although multiple two-phase stratifications can be investigated with tpsSim().
}
\item{expandX}{
Character vector indicating which columns of \code{X} to expand as a series of dummy variables. Useful when at least one exposure is continuous (and should not be expanded). Default is `all'; other options include `none' or character vector of column names. See Details, below.
}
\item{etaTerms}{
Character vector indicating which columns of \code{X} are to be included in the model. See Details, below.
}
\item{nII}{
A numeric value indicating the phase II sample size. If a vector is provided, separate simulations are run for each element.
}
\item{alpha}{
Type I error rate assumed for the evaluation of coverage probabilities and power.
}
\item{digits}{
Integer indicating the precision to be used for the output.
}
\item{betaNames}{
An optional character vector of names for the regression coefficients,
\code{betaTruth}.
}
\item{monitor}{
Numeric value indicating how often \code{tpsPower()} reports real-time progress on the simulation, as the \code{B} datasets are generated and evaluated. The default of \code{NULL} indicates no output.
}
\item{cohort}{
Logical flag. TRUE indicates phase I is drawn as a cohort; FALSE indicates phase I is drawn as a case-control sample.
}
\item{NI}{
A pair of integers providing the outcome-specific phase I sample
sizes when the phase I data are drawn as a case-control sample. The
first element corresponds to the controls and the second to the
cases.
}
}
\details{
A simulation study is performed to estimate power for various estimators
of \code{beta}:
\itemize{
\item{}{(a) complete data maximum likelihood (CD)}
\item{}{(b) case-control maximum likelihood (CC)}
\item{}{(c) two-phase weighted likelihood (WL)}
\item{}{(d) two-phase pseudo- or profile likelihood (PL)}
\item{}{(e) two-phase maximum likelihood (ML)}
}
The overall simulation approach is the same as that described in \code{\link{tpsSim}}.
In each case, power is estimated as the proportion of simulated datasets for which a hypothesis test of no effect is rejected.
The correspondence between \code{betaTruth} and \code{X}, specifically the ordering of elements, is based on successive use of \code{\link{factor}} to each column of \code{X} which is expanded via the \code{expandX} argument. Each exposure that is expanded must conform to a {0, 1, 2, ...} integer-based coding convention.
The \code{etaTerms} argument is useful when only certain columns in \code{X} are to be included in the model. In the context of the two-phase design, this might be the case if phase I stratifies on some surrogate exposure and a more detailed/accurate measure is to be included in the main model.
Only balanced designs are considered by \code{tpsPower()}. For unbalanced designs, power estimates can be obtained from \code{\link{tpsSim}}.
NOTE: In some settings, the current implementation of the ML estimator returns point estimates that do not satisfy the phase I and/or phase II constraints. If this is the case a warning is printed and the "fail" elements of the returned list is set to TRUE. An example of this is phenomenon is given the help file for \code{\link{tps}}. When this occurs, \code{tpsPower()} considers ML estimation for the particular dataset to have failed.
}
\value{
\code{tpsPower()} returns an object of class 'tpsPower', a list containing all the input arguments, as well as the following components:
\item{betaPower}{
Power against the null hypothesis that the regression coefficient is zero for a Wald-based test with an \code{alpha} type I error rate.
}
\item{failed}{
A vector consisting of the number of datasets excluded from the power calculations (i.e. set to \code{NA}), for each simulation performed. For power calculations, the three reasons are: (1) lack of convergence indicated by \code{NA} point estimates returned by \code{\link{glm}} or \code{\link{tps}}; (2) lack of convergence indicated by \code{NA} standard error point estimates returned by \code{\link{glm}} or \code{\link{tps}}; and (3) for the ML estimator only, the phase I and/or phase II constraints are not satisfied.
}
}
\note{
A generic print method provides formatted output of the results.
A generic plot function \code{\link{plotPower}} provides plots of powers against different sample sizes for each estimate of a regression coefficient.
}
\references{
Prentice, R. and Pyke, R. (1979) "Logistic disease incidence models and case-control studies." Biometrika 66:403-411.
Breslow, N. and Chatterjee, N. (1999) "Design and analysis of two phase studies with binary outcome applied to Wilms tumour prognosis." Applied Statistics 48:457-468.
Haneuse, S. and Saegusa, T. and Lumley, T. (2011) "osDesign: An R Package for the Analysis, Evaluation, and Design of Two-Phase and Case-Control Studies." Journal of Statistical Software, 43(11), 1-29.
}
\author{
Sebastien Haneuse, Takumi Saegusa
}
\seealso{
\code{\link{plotPower}}.
}
\examples{
##
data(Ohio)
##
XM <- cbind(Int=1, Ohio[,1:3])
fitM <- glm(cbind(Death, N-Death) ~ factor(Age) + Sex + Race, data=Ohio,
family=binomial)
betaNamesM <- c("Int", "Age1", "Age2", "Sex", "Race")
## Power for the TPS design where phase I stratification is based on Race.
##
\donttest{
tpsResult1 <- tpsPower(B=1000, beta=fitM$coef, X=XM, N=Ohio$N, strata=4,
nII=seq(from=100, to=1000, by=100),
betaNames=betaNamesM, monitor=100)
tpsResult1}
## Power for the TPS design where phase I stratification is based on Age
## * consider the setting where the age coefficients are halved from
## their observed true values
## * the intercept is modified, accordingly, using the beta0() function
##
newBetaM <- fitM$coef
newBetaM[2:3] <- newBetaM[2:3] / 2
newBetaM[1] <- beta0(betaX=newBetaM[-1], X=XM, N=Ohio$N,
rhoY=sum(Ohio$Death)/sum(Ohio$N))
##
\donttest{
tpsResult2 <- tpsPower(B=1000, beta=fitM$coef, X=XM, N=Ohio$N, strata=2,
nII=seq(from=100, to=500, by=50),
betaNames=betaNamesM, monitor=100)
tpsResult2}
}
|
993a26f9284d3c08e6045ffd9f5c94a85ba30bb7
|
d8c53a6820a15b4ca86b8b2af757cb096e0aad90
|
/run_analysis.R
|
dfa3ea1419524d2ad503b1296f6d8abfdae249b1
|
[] |
no_license
|
ashwinkamath/datacleaning
|
25ffadc7d716fef66d4162e31ee7284040ff2a0b
|
412591c23bb6f1c5b92c4f040ed141be0cbf8359
|
refs/heads/master
| 2020-04-08T08:27:11.281090
| 2014-06-22T23:05:18
| 2014-06-22T23:05:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,392
|
r
|
run_analysis.R
|
#####################################################################
# run_analysis.R — builds a tidy summary of the UCI HAR data set.   #
# Expects the "UCI HAR Dataset" folder in the working directory and #
# writes the result to tidy_data.csv.                               #
#####################################################################

# Helper: read one whitespace-delimited UCI HAR file.
read.har <- function(path) {
  read.table(path, sep = "", fill = FALSE, strip.white = TRUE)
}

# Helper: read a measurement set ("train" or "test"), name its columns from
# features.txt, and append the matching subject and activity columns.
read.har.set <- function(set) {
  base <- file.path("UCI HAR Dataset", set)
  measures <- read.har(file.path(base, paste0("X_", set, ".txt")))
  # Feature names come from the 2nd column of features.txt.
  features <- read.har("UCI HAR Dataset/features.txt")
  colnames(measures) <- features$V2
  # Append subject and activity as the last two columns.  Using ncol()-free
  # column assignment replaces the fragile hard-coded indices 562/563.
  measures$subject <- read.har(file.path(base, paste0("subject_", set, ".txt")))$V1
  measures$activity <- read.har(file.path(base, paste0("y_", set, ".txt")))$V1
  measures
}

#####################################################################
# 1) Merge the training and the test sets to create one data set.   #
#####################################################################
train.set <- read.har.set("train")
test.set <- read.har.set("test")
# Fail loudly on a column mismatch; the original silently skipped the merge,
# leaving all.set undefined and producing a confusing error later.
stopifnot(ncol(train.set) == ncol(test.set))
all.set <- rbind(train.set, test.set)

################################################################################################
# 2) Extract only the measurements on the mean and standard deviation for each measurement.    #
################################################################################################
# All columns whose feature name mentions "mean" or "std", plus the two
# dimension columns (subject, activity).
mean.cols <- grep("mean", colnames(all.set))
std.cols <- grep("std", colnames(all.set))
measure.cols <- union(mean.cols, std.cols)
dim.cols <- which(colnames(all.set) %in% c("subject", "activity"))
work.set <- all.set[, union(measure.cols, dim.cols)]

################################################################################################
# 3) Use descriptive activity names to name the activities in the data set.                    #
################################################################################################
activity.labels <- read.har("UCI HAR Dataset/activity_labels.txt")
colnames(activity.labels) <- c("activity", "activity.label")
# Look up the label for each row's activity code.
work.set <- merge(work.set, activity.labels, by = "activity")

################################################################################################
# 4) Variable names are already descriptive: they were taken from features.txt in step 1.      #
################################################################################################

########################################################################################################################
# 5) Create a second, independent tidy data set with the average of each variable for each activity and each subject. #
########################################################################################################################
# Name-based selection replaces the fragile hard-coded 2:80 column range.
measure.names <- setdiff(names(work.set), c("subject", "activity", "activity.label"))
final.data <- aggregate(work.set[, measure.names],
                        by = work.set[c("subject", "activity.label", "activity")],
                        FUN = mean, na.rm = TRUE)
head(final.data)
write.csv(final.data, file = "tidy_data.csv")
|
8d039f6432b0a4dd7798dc70b4d1906ca1d1db33
|
232c5dc13900bab11389499aeafdff5277a7b342
|
/profiling/1.data_description/1.D.1_LD_chr10.R
|
db015f5938a56d394648e977d3ac1f5818e74141
|
[] |
no_license
|
RILAB/methylation
|
d3fed7e0bf5fbb56cb85829b2be0534845aad16d
|
ba754f92d4f10b9b8e2b9b4e48eecbb2a5b72797
|
refs/heads/master
| 2020-12-25T22:32:14.684789
| 2016-07-14T20:37:25
| 2016-07-14T20:37:25
| 29,665,776
| 0
| 3
| null | 2016-07-14T20:37:27
| 2015-01-22T07:04:09
|
HTML
|
UTF-8
|
R
| false
| false
| 2,642
|
r
|
1.D.1_LD_chr10.R
|
### Jinliang Yang
### April 28th, 2016
### Happy B-day JJ!
library("data.table")
chr10 <- fread("largedata/vcf_files/teo20_RA_chr10.txt") #65202272
chr10 <- as.data.frame(chr10)
idx <- sort(sample(1:nrow(chr10), 100000))
sub <- chr10[idx, ]
cmd1 <- "bcftools index teo20_cg_methratio.vcf.gz"
cmd2 <- "bcftools filter teo20_cg_methratio.vcf.gz - r 10 -o teo20_cg_mr_chr10.vcf.gz -O z"
library("farmeR")
sh1 <- "sh data/iget.sh"
set_farm_job(slurmsh = "slurm-script/run_fq.sh",
shcode = sh1, wd = NULL, jobid = "fastq",
email = "yangjl0930@gmail.com", runinfo = c(TRUE, "bigmemh", "1"))
library(farmeR)
cmd1 <- "#module load plink/1.90"
cmd2 <- "cd largedata/vcf_files"
cmd3 <- paste0("plink -vcf teo20_cg_mr_chr10.vcf.gz --thin-count 1000000 --r2 --threads 8 --memory 64000",
" --out teo20_cg_chr10 --ld-window 9999999 --ld-window-kb 100 --ld-window-r2 0")
set_farm_job(slurmsh = "slurm-script/plink_cl.sh",
shcode = c(cmd1, cmd2, cmd3), wd = NULL, jobid = "plink-ld",
email = "yangjl0930@gmail.com", runinfo = c(TRUE, "bigmemh", "8"))
#### CG ###########################
library(farmeR)
cmd1 <- "cd largedata/vcf_files"
cmd2 <- paste0("plink -vcf teo20_cg_methratio.vcf.gz --thin-count 10000000 --r2 --threads 8 --memory 64000",
" --out teo20_cg_10m --ld-window 9999999 --ld-window-kb 10 --ld-window-r2 0 --allow-extra-chr")
set_farm_job(slurmsh = "slurm-script/plink_cg_ld.sh",
shcode = c(cmd1, cmd2), wd = NULL, jobid = "cg-ld",
email = "yangjl0930@gmail.com", runinfo = c(TRUE, "bigmemh", "8"))
#### CHG ###########################
library(farmeR)
cmd1 <- "cd largedata/vcf_files"
cmd2 <- paste0("plink -vcf teo20_chg_methratio.vcf.gz --thin-count 10000000 --r2 --threads 8 --memory 64000",
" --out teo20_chg_10m --ld-window 9999999 --ld-window-kb 10 --ld-window-r2 0 --allow-extra-chr")
set_farm_job(slurmsh = "slurm-script/plink_chg_ld.sh",
shcode = c(cmd1, cmd2), wd = NULL, jobid = "chg-ld",
email = "yangjl0930@gmail.com", runinfo = c(TRUE, "bigmemh", "8"))
#### CHH ###########################
library(farmeR)
cmd1 <- "cd largedata/vcf_files"
cmd2 <- paste0("plink -vcf teo20_chh_methratio.vcf.gz --thin-count 10000000 --r2 --threads 8 --memory 64000",
" --out teo20_chh_10m --ld-window 9999999 --ld-window-kb 10 --ld-window-r2 0 --allow-extra-chr")
set_farm_job(slurmsh = "slurm-script/plink_chh_ld.sh",
shcode = c(cmd1, cmd2), wd = NULL, jobid = "chh-ld",
email = "yangjl0930@gmail.com", runinfo = c(TRUE, "bigmemh", "8"))
|
b96c5a0b2c20a55b2c3e78bb20ba6c32a997e2d5
|
1aa322287e0e2d01f785a96fbcb72b4347981144
|
/R for Economists Code/Moderate 7 Limited Dependent Variables.R
|
bcf04ed6d476459a912cd7918f3f1e2efc01a233
|
[] |
no_license
|
jonduan/RIntroEconometrics
|
085bfaeadf673ac54205921b9127c05510a67e75
|
e9d4cbbb106447d6b817a50954f275396959174c
|
refs/heads/master
| 2020-04-16T17:32:37.093826
| 2019-07-22T04:55:54
| 2019-07-22T04:55:54
| 165,779,197
| 1
| 0
| null | 2019-01-15T03:30:36
| 2019-01-15T03:30:36
| null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
Moderate 7 Limited Dependent Variables.R
|
# Limited dependent variable models on the Wooldridge wage1 data:
# probit/logit for P(married), marginal effects, and an ordered probit
# for education.

# Load packages
library(foreign)
library(stargazer)
library(mfx)
library(MASS)
library(erer)

# Read in the Wooldridge wage1 dataset (Stata format)
wage1 <- read.dta("http://fmwww.bc.edu/ec-p/data/wooldridge/wage1.dta")

# Probit model of marriage on sex, education, and region dummies
probitmodel <- glm(married ~ female + educ + northcen + south + west,
                   data = wage1, family = binomial(link = probit))
stargazer(probitmodel, type = "text")

# Logit model with the same specification
logitmodel <- glm(married ~ female + educ + northcen + south + west,
                  data = wage1, family = binomial(link = logit))
stargazer(logitmodel, type = "text")

# Marginal effects for the probit model.
# atmean = TRUE  -> evaluate at the sample means ("marginal effects at the mean").
# atmean = FALSE -> compute per observation and average ("average marginal effects").
probitmfx(probitmodel, atmean = TRUE, data = wage1)
probitmfx(probitmodel, atmean = FALSE, data = wage1)

# Marginal effects for the logit model (same atmean convention)
logitmfx(logitmodel, atmean = TRUE, data = wage1)
logitmfx(logitmodel, atmean = FALSE, data = wage1)

# Ordered probit of education level; Hess = TRUE keeps the Hessian for
# standard errors, which ocME() needs below.
orderedprobit <- polr(factor(educ) ~ female + northcen + south + west,
                      data = wage1, Hess = TRUE, method = "probit")

# Marginal effects for the ordered probit
ocME(orderedprobit)
|
8080f017272e2d2a4cd7fd38fa6d4673f2b56eb1
|
b6079badbc8e837e5db79497d52e46a8ff648a52
|
/log_reg_adatis (1).R
|
e4e5001c8ecfeccf14a39aab1c375218e50f3427
|
[] |
no_license
|
kalinai/OA.POC
|
4b4767bd99efa8dd6ef263a7cc55491642cb3235
|
9fcb60cd2893e8de9837e932271909c708079a6a
|
refs/heads/master
| 2020-04-14T21:27:54.431967
| 2019-02-07T22:24:05
| 2019-02-07T22:24:05
| 164,129,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,371
|
r
|
log_reg_adatis (1).R
|
# Monthly transaction forecasting: clean raw transactions, fit a linear
# trend model, and explore an ARIMA model on the differenced series.
# Fixes vs. original: removed rm(list = ls()) (destructive global side
# effect); the trend model is no longer named `ts` (which masked stats::ts);
# attach() replaced with an explicit data argument; dead first assignment
# of diff_TAmount removed.
# NOTE(review): the input path is machine-specific; parameterize before reuse.

# Import data ----
dd <- read.csv("C:\\Users\\kivanova4\\Documents\\Business Analytics\\Business Analytics\\2 semestur\\BI\\Adatis\\DataNetov\\Book11.csv", stringsAsFactors = FALSE, sep = ",", header = TRUE)
library(dplyr)
data <- select(dd, TransactionDate, TransactionAmount)

# Check variable classes (both columns arrive as character)
sapply(data, class)

# Parse the date column and derive grouping variables
library(lubridate)
data$TransactionDate <- dmy(data$TransactionDate)
# Year-month key, e.g. 201801, used for monthly aggregation
data$Month_Yr <- format(as.Date(data$TransactionDate), "%Y%m")
data$Month_Yr <- as.numeric(data$Month_Yr)
# Calendar month (1-12)
data$month <- month(data$TransactionDate)
sapply(data, class)

# Clean TransactionAmount: drop the leading currency symbol and thousands
# separators, then convert to numeric so it can be aggregated
library(stringr)
data$Currency <- as.factor("GBP")
data$TransactionAmount <- str_sub(data$TransactionAmount, 2)
data$TransactionAmount <- gsub(",", "", data$TransactionAmount)
data$TransactionAmount <- as.numeric(data$TransactionAmount)
sapply(data, class)
sum(is.na(data))  # count of missing values after parsing

# Aggregate total TransactionAmount per month ----
total_month_group <- aggregate(data$TransactionAmount, by = list(data$Month_Yr), sum)
names(total_month_group) <- c("Month_Yr", "TransactionAmount")
total_month_group$month <- as.numeric(str_sub(total_month_group$Month_Yr, -2, -1))

# Linear trend model (explicit data argument instead of attach())
trend_model <- lm(TransactionAmount ~ month + Month_Yr, data = total_month_group)
summary(trend_model)
total_month_group$fcast <- trend_model$fitted.values
total_month_group$accuracy <- (total_month_group$fcast / total_month_group$TransactionAmount) * 100
total_month_group <- total_month_group[c(1, 3, 2, 4, 5)]

# Build ARIMA model ----
library(tseries)
ts.plot(total_month_group$TransactionAmount / 10000)
# Augmented Dickey-Fuller test for stationarity
adf.test(total_month_group$TransactionAmount)
# First-difference the series
diff_TAmount <- data.frame(diff(total_month_group$TransactionAmount))
plot(diff_TAmount[, 1])
layout(matrix(c(1:4), nrow = 2, ncol = 2, byrow = TRUE))
acf(diff_TAmount, lag.max = 50)
pacf(diff_TAmount, lag.max = 50)

# Fit ARIMA on a train/test split of the differenced series
library(forecast)
train <- diff_TAmount[1:30, ]
test <- diff_TAmount[30:35, ]
auto.arima(train)
mm <- list()
mm[[1]] <- arima(train, order = c(0, 0, 0))
mm[[1]]$aic
layout(matrix(c(1:3), nrow = 1, ncol = 3))
acf(mm[[1]]$residuals, lag.max = 30)
# Ljung-Box test: are the ARIMA residuals white noise?
Box.test(mm[[1]]$residuals, type = "Ljung-Box", lag = 20)
fit <- auto.arima(train)
pred <- predict(fit, n.ahead = 6)

# Export the linear-model coefficients as a one-row data frame
total_month_group_coeff <- as.data.frame(trend_model$coefficients)
colnames(total_month_group_coeff) <- "model"
total_month_group_coeff <- as.data.frame(t(total_month_group_coeff))
|
b6ee9a516748a3eda8f74c403869301c48ae854d
|
fce6b0f1e5cdc9b2c81600074ad2e7eacd6a192a
|
/R_SCRIPTS/2_Beta_diversity_Predictors.R.R
|
934ce6a0539a9955d008cdc63a9e135312ebd523
|
[] |
no_license
|
microbiotaTorVergata/pd_microbiome
|
209942a7478f4642bb19029b2a59b449df484caf
|
4ac25cb621bdcf8266bc4c3f7eff120dedf92f5e
|
refs/heads/master
| 2020-04-12T02:58:11.695935
| 2018-12-18T11:00:59
| 2018-12-18T11:00:59
| 162,259,333
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,109
|
r
|
2_Beta_diversity_Predictors.R.R
|
library("plyr")
library("phyloseq")
library("ggplot2")
library("vegan")
# Save a plot object as a JPEG named "<Name_prefix>_<Name>.jpeg" inside OutDir.
#
# Graph       : any printable plot object (e.g. a ggplot); print(Graph) renders it.
# OutDir      : output directory path.
# Name        : base file name, without extension.
# Name_prefix : optional prefix; the "_" separator is always inserted, so an
#               empty prefix yields a leading underscore in the file name
#               (kept for backward compatibility with existing outputs).
#
# Returns NULL invisibly. Side effects: writes a 15x25 cm, 300 dpi JPEG and
# prints the output path to the console.
#
# Fix: the original built the file name with paste(out_path_name, ".jpeg"),
# whose default sep = " " produced names containing a space ("name .jpeg");
# paste0() removes it. dev.off() now runs via on.exit() so the graphics
# device is closed even if print(Graph) fails.
Save.graph <- function(Graph, OutDir, Name, Name_prefix = "") {
  out_file_name <- paste(Name_prefix, Name, sep = "_")
  out_path_name <- paste(OutDir, out_file_name, sep = "//")
  print(out_path_name)
  jpeg(paste0(out_path_name, ".jpeg"), height = 15, width = 25, units = "cm", res = 300)
  on.exit(dev.off(), add = TRUE)
  print(Graph)
  invisible(NULL)
}
# ---- Beta diversity: load QIIME outputs into phyloseq, rarefy, compute
# distance matrices, and test predictors with PERMANOVA (vegan::adonis2).
# NOTE(review): setwd() with a relative path ties the script to the caller's
# working directory; an absolute project root would be more robust.
# work dir
setwd("..")
# phyloseq data
otu <- "otu_table_filtered_OTU.txt"
map <- "mapping_ad.txt"
tre <- "rep_set.tre"
# phyloseq object
qiimedata = import_qiime(otu,map,tre)
# rarefaction
# Fixed seed makes the random subsampling to 50000 reads/sample reproducible.
set.seed(1); qiimedata = rarefy_even_depth(qiimedata,50000)
# metrics
# Three complementary beta-diversity distances: Bray-Curtis (abundance),
# unweighted UniFrac (presence/absence + phylogeny), weighted UniFrac
# (abundance + phylogeny).
D_bray <- distance(qiimedata,method="bray")
D_Uuni <- distance(qiimedata,method="unifrac")
D_Wuni <- distance(qiimedata,method="wunifrac")
#### PERMANOVA TEST
# Recode every categorical metadata column as a factor before modelling.
M<-sample_data(qiimedata)
M$Status <- factor(M$Status)
M$Provincia <- factor(M$Provincia)
M$Gain_5KG <- factor(M$Gain_5KG)
M$Loss_5KG <- factor(M$Loss_5KG)
M$Yogurt <- factor(M$Yogurt)
M$Pasta <- factor(M$Pasta)
M$Pane <- factor(M$Pane)
M$Latticini <- factor(M$Latticini)
M$Frutta_Verdura <- factor(M$Frutta_Verdura)
M$Carne <- factor(M$Carne)
M$Pesce <- factor(M$Pesce)
M$Cereali <- factor(M$Cereali)
M$Legumi <- factor(M$Legumi)
M$Caffe <- factor(M$Caffe)
M$Alcol <- factor(M$Alcol)
M$Pizza <- factor(M$Pizza)
M$Fuma <- factor(M$Fuma)
M$Esercizio_Fisico <- factor(M$Esercizio_Fisico)
M$Parto_Cesareo <- factor(M$Parto_Cesareo)
M$Phenotype <- factor(M$Phenotype)
M$L_Dopa <- factor(M$L_Dopa)
M$DA <- factor(M$DA)
M$iMAo <- factor(M$iMAo)
M$iCOMT <- factor(M$iCOMT)
M$AntiCH <- factor(M$AntiCH)
M$Amantadina <- factor(M$Amantadina)
M$Antiacidi <- factor(M$Antiacidi)
sample_data(qiimedata) <- M
metadata <- as(sample_data(qiimedata), "data.frame")
# only PD-Status
# Marginal PERMANOVA (9999 permutations) of disease status alone against
# each distance matrix; the seed is reset before every adonis2 call so
# permutation p-values are reproducible.
set.seed(1); ad1=adonis2( D_bray ~ Status, data = metadata , permutations = 9999, by = "margin")
print(ad1)
set.seed(1); ad1=adonis2( D_Uuni ~ Status, data = metadata , permutations = 9999, by = "margin")
print(ad1)
set.seed(1); ad1=adonis2( D_Wuni ~ Status, data = metadata , permutations = 9999, by = "margin")
print(ad1)
# Full outer join of two data frames on their row names.
# Returns a data frame containing every row name from either input, with
# the merged columns and NA where a row is absent from one side; the
# temporary Row.names column created by merge() is promoted back to the
# result's row names.
mergia.dataframe <- function(dataframe1, dataframe2) {
  combined <- merge(dataframe1, dataframe2, by = "row.names", all = TRUE)
  rownames(combined) <- combined$Row.names
  combined$Row.names <- NULL
  combined
}
# STEPWISE BACKWARD BC
# Manual backward elimination on the Bray-Curtis distance: start from the
# full covariate model (ad0) and drop what appears to be one term per step
# down to ad19 (Status + Age + BMI). Marginal (type-III-like) tests,
# 9999 permutations, seed reset before each fit for reproducibility.
set.seed(1); ad0=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad1=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad2=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad3=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Legumi + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad4=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad5=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad6=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad7=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad8=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad9=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad10=adonis2( D_bray ~ Status + Age + Sex + BMI + Yogurt + Pane + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad11=adonis2( D_bray ~ Status + Age + BMI + Yogurt + Pane + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad12=adonis2( D_bray ~ Status + Age + BMI + Yogurt + Pane + Cereali + Caffe + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad13=adonis2( D_bray ~ Status + Age + BMI + Pane + Cereali + Caffe + Fuma + Gain_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad14=adonis2( D_bray ~ Status + Age + BMI + Pane + Cereali + Caffe + Fuma + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad15=adonis2( D_bray ~ Status + Age + BMI + Cereali + Caffe + Fuma + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad16=adonis2( D_bray ~ Status + Age + BMI + Cereali + Caffe + Fuma, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad17=adonis2( D_bray ~ Status + Age + BMI + Cereali + Fuma, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad18=adonis2( D_bray ~ Status + Age + BMI + Fuma, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad19=adonis2( D_bray ~ Status + Age + BMI, data = metadata , permutations = 9999, by = "margin")
# STEPWISE BACKWARD UU
# Same backward elimination, now on the unweighted UniFrac distance
# (ad0 full model down to ad16). Seed reset before each fit.
set.seed(1); ad0=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad1=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad2=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad3=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad4=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad5=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad6=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad7=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad8=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Carne + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad9=adonis2( D_Uuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad10=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Pane + Pasta + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad11=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Pasta + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad12=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad13=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Cereali + Alcol + Fuma + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad14=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Cereali + Alcol + Esercizio_Fisico + Gain_5KG, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad15=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Cereali + Alcol + Esercizio_Fisico, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad16=adonis2( D_Uuni ~ Status + Age + BMI + Yogurt + Cereali + Esercizio_Fisico, data = metadata , permutations = 9999, by = "margin")
# STEPWISE BACKWARD WU
# Same backward elimination, now on the weighted UniFrac distance
# (ad0 full model down to ad19: Status + Age + BMI). Seed reset before each fit.
set.seed(1); ad0=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Pizza + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad1=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Latticini + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad2=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Pesce + Cereali + Legumi + Caffe + Alcol + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad3=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Pesce + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Provincia + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad4=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Pesce + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad5=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Esercizio_Fisico + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad6=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Pasta + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad7=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Gain_5KG + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad8=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Pane + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad9=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Fuma + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad10=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Frutta_Verdura + Carne + Cereali + Caffe + Alcol + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad11=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Frutta_Verdura + Cereali + Caffe + Alcol + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad12=adonis2( D_Wuni ~ Status + Age + Sex + BMI + Yogurt + Cereali + Caffe + Alcol + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad13=adonis2( D_Wuni ~ Status + Age + BMI + Yogurt + Cereali + Caffe + Alcol + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad14=adonis2( D_Wuni ~ Status + Age + BMI + Yogurt + Cereali + Caffe + Loss_5KG + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad15=adonis2( D_Wuni ~ Status + Age + BMI + Yogurt + Cereali + Caffe + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad16=adonis2( D_Wuni ~ Status + Age + BMI + Yogurt + Caffe + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad17=adonis2( D_Wuni ~ Status + Age + BMI + Caffe + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad18=adonis2( D_Wuni ~ Status + Age + BMI + Parto_Cesareo, data = metadata , permutations = 9999, by = "margin")
set.seed(1); ad19=adonis2( D_Wuni ~ Status + Age + BMI, data = metadata , permutations = 9999, by = "margin")
#### Only patients
# Reload the data and restrict the analysis to PD patients, then rebuild
# the distance matrices, PCoA ordinations, and the factor-coded metadata.
otu <- "Otu_Tables_Closed_NoRep/otu_table_filtered_OTU.txt"
map <- "Otu_Tables_Closed_NoRep/new_mapping.txt"
tre <- "Phylogeny/rep_set.tre"
qiimedata = import_qiime(otu,map,tre) # creates a phyloseq object
# Same fixed seed and rarefaction depth (50000 reads) as the full-cohort run.
set.seed(1); qiimedata = rarefy_even_depth(qiimedata,50000)
qiimedata = subset_samples(qiimedata, Status == "PD")
D_bray <- distance(qiimedata,method="bray")
D_Uuni <- distance(qiimedata,method="unifrac")
D_Wuni <- distance(qiimedata,method="wunifrac")
# PCoA ordinations for each distance (for visualization)
ordPCoA_bray <- ordinate(qiimedata, method = "PCoA", distance = D_bray)
ordPCoA_Uuni <- ordinate(qiimedata, method = "PCoA", distance = D_Uuni )
ordPCoA_Wuni <- ordinate(qiimedata, method = "PCoA", distance = D_Wuni )
# Recode categorical metadata (lifestyle, diet, and PD medication) as factors.
M<-sample_data(qiimedata)
M$Status <- factor(M$Status)
M$Provincia <- factor(M$Provincia)
M$Gain_5KG <- factor(M$Gain_5KG)
M$Loss_5KG <- factor(M$Loss_5KG)
M$Yogurt <- factor(M$Yogurt)
M$Pasta <- factor(M$Pasta)
M$Pane <- factor(M$Pane)
M$Latticini <- factor(M$Latticini)
M$Frutta_Verdura <- factor(M$Frutta_Verdura)
M$Carne <- factor(M$Carne)
M$Pesce <- factor(M$Pesce)
M$Cereali <- factor(M$Cereali)
M$Legumi <- factor(M$Legumi)
M$Caffe <- factor(M$Caffe)
M$Alcol <- factor(M$Alcol)
M$Pizza <- factor(M$Pizza)
M$Fuma <- factor(M$Fuma)
M$Esercizio_Fisico <- factor(M$Esercizio_Fisico)
M$Phenotype <- factor(M$Phenotype)
M$L_Dopa <- factor(M$L_Dopa)
M$DA <- factor(M$DA)
M$iMAo <- factor(M$iMAo)
M$iCOMT <- factor(M$iCOMT)
M$AntiCH <- factor(M$AntiCH)
M$Amantadina <- factor(M$Amantadina)
sample_data(qiimedata) <- M
metadata <- as(sample_data(qiimedata), "data.frame")
# STEPWISE BACKWARD Bray-Curtis
# Backward elimination on PD patients only: clinical/medication covariates
# against the Bray-Curtis distance, dropping one term per step
# (sequential adonis2 tests here; no by = "margin", unlike the full-cohort run).
# NOTE(review): "Disase_Duration" looks like a typo, but it must match the
# column name in the mapping file -- confirm before renaming.
set.seed(1); ad1 <- adonis2(formula = D_bray ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iMAo + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad2 <- adonis2(formula = D_bray ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad3 <- adonis2(formula = D_bray ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad4 <- adonis2(formula = D_bray ~ Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad5 <- adonis2(formula = D_bray ~ Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + iCOMT, data = metadata, permutations = 9999)
set.seed(1); ad6 <- adonis2(formula = D_bray ~ Disase_Duration + Staging + Tot_L_Dopa_die + iCOMT, data = metadata, permutations = 9999)
# STEPWISE BACKWARD UU
# Backward elimination on PD patients against the unweighted UniFrac distance.
# Fix: the first three steps had been collapsed onto a single physical line
# with no statement separators, which is a parse error in R; they are split
# back onto their own lines below. (The sequence has no ad4 step in the
# original; numbering jumps from ad3 to ad5 and is kept as-is so later
# references, if any, still resolve.)
set.seed(1); ad1 <- adonis2(formula = D_Uuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iMAo + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad2 <- adonis2(formula = D_Uuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iMAo + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad3 <- adonis2(formula = D_Uuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad5 <- adonis2(formula = D_Uuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + DA + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad6 <- adonis2(formula = D_Uuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad7 <- adonis2(formula = D_Uuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + iCOMT, data = metadata, permutations = 9999)
# STEPWISE BACKWARD WU
# Backward elimination on PD patients against the weighted UniFrac distance,
# dropping one covariate per step down to ad6.
set.seed(1); ad1 <- adonis2(formula = D_Wuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iMAo + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad2 <- adonis2(formula = D_Wuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iCOMT + Amantadina, data = metadata, permutations = 9999)
set.seed(1); ad3 <- adonis2(formula = D_Wuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + DA + iCOMT, data = metadata, permutations = 9999)
set.seed(1); ad4 <- adonis2(formula = D_Wuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + L_Dopa + iCOMT, data = metadata, permutations = 9999)
set.seed(1); ad5 <- adonis2(formula = D_Wuni ~ Phenotype + Disase_Duration + Staging + Tot_L_Dopa_die + iCOMT, data = metadata, permutations = 9999)
set.seed(1); ad6 <- adonis2(formula = D_Wuni ~ Disase_Duration + Staging + Tot_L_Dopa_die + iCOMT, data = metadata, permutations = 9999)
|
ab6c14b02d23a29a7a946bb417d87109b1f77f15
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query25_query04_1344n/query25_query04_1344n.R
|
f63b8cc01c307c76f1da3a83d0adbbcc36a58c47
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query25_query04_1344n.R
|
e98e81bddbebcb930c14d0fad6a3fbd3 query25_query04_1344n.qdimacs 211 319
|
d01cfb229bbc81abb8fce095779025e6b86b6cc4
|
8f265bc6e5cd086a77204dff10c55adc4447de5c
|
/Forest Fire.R
|
a817cb86d7cd82b30e6d201a2dda575b0b39ada3
|
[] |
no_license
|
mkaur-1/Forest-Fire-Regression
|
2d6dba9f48c24b87a569a40373f99d43bc85a6c4
|
9d935462f1a5674c415c5af3b6cf38c3d0423b96
|
refs/heads/master
| 2021-06-16T01:22:36.225781
| 2017-05-02T17:43:23
| 2017-05-02T17:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,713
|
r
|
Forest Fire.R
|
##Loading libraries
library(corrplot)
library(car)
library(perturb)
library(ggplot2)
library(MASS)
# Exploratory analysis of the UCI forest-fires dataset: load, train/test
# split, correlation structure, and distribution checks on each variable.
#Reading Data file
forest <- read.csv("forestfires.csv")
# Dimensions and variables of dataset
dim(forest)
#517 rows and 13 columns
colnames(forest)
#"X" "Y" "month" "day" "FFMC" "DMC" "DC" "ISI" "temp" "RH" "wind" "rain" "area"
#checking summary of variables and if any missing values
sum(is.na(forest))
summary(forest)
# No missing values. Month and day are factors. From the summary FFMC, DMC and DC seem left skewed.
# ISI and rain may be right skewed. Area is heavily right skewed.
# Splitting data into training and test set (fixed seed, 80/20 split)
set.seed(30032017)
row.number<- sample(1:nrow(forest), size=0.2*nrow(forest))
forest_test<- forest[row.number,]
dim(forest_test) ## Size of the testing set
forest_train<- forest[-row.number,]
dim(forest_train) ## Size of training set
summary(forest_train)
# Correlation matrix of the numeric columns (month/day excluded)
M <- cor(forest_train[,-c(3,4)])
M
# And the correlation plot to visualize the correlation between variables in training data
corrplot(M,method='number')
## evident positive corr between DC & DMC, ISI & FFMC, X & Y, temp & DC;
## negative corr between RH & temp.
# Let's see it visually via scatter plots
pairs(forest_train[,-c(3,4)])
# The visual scatter plots rule out some correlations; shortlist:
# positive DC & DMC - this makes sense per their definitions
# positive temp & DMC - somewhat counterintuitive, as moisture should decrease with temp
# negative RH & temp - natural: as temperature increases, humidity decreases
# We then inspect the distribution of each variable in boxplots
boxplot(forest_train$X,main="X")
boxplot(forest_train$Y,main ='Y')
boxplot(forest_train$FFMC, main='FFMC') #outliers
boxplot(forest_train$DMC, main ='DMC') # outliers
boxplot(forest_train$DC, main='DC') # some outliers
boxplot(forest_train$ISI,main='ISI') # outliers
boxplot(forest_train$temp, main='temp')
boxplot(forest_train$RH,main="RH") # outliers
boxplot(forest_train$wind, main='wind') #
boxplot(forest_train$rain, main='rain') # heavy outliers; high variability in data
boxplot(forest_train$area, main='area') # heavy outliers; high variability in data
# asymmetry also observed in variables like X, Y, DC, FFMC
# Probability density curve of the response variable area
dar <- data.frame(x=forest_train$area)
ggplot(dar,aes(x=forest_train$area))+geom_density(fill='red')
# And density curves for the other variables
plot(density(forest_train$FFMC))
plot(density(forest_train$DMC))
plot(density(forest_train$DC))
plot(density(forest_train$ISI))
plot(density(forest_train$temp))
plot(density(forest_train$RH))
plot(density(forest_train$wind))
plot(density(forest_train$rain))
plot(density(forest_train$area))
# NOTE(review): rain contains zeros, so log(rain) yields -Inf and density()
# will error here; log(forest_train$rain + 1) would be safer -- confirm intent.
plot(density(log(forest_train$rain))) # log
plot(density(log(forest_train$area))) # log
## The boxplots and densities suggest a reflected log transform of FFMC and a
## log transform of rain and area (the response), since area is highly
## concentrated near zero and asymmetrical
summary(forest_train$area)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#0.00 0.00 0.19 14.04 6.25 1091.00
var(forest_train$area)
#4994.227
sd(forest_train$area)
#70.66984
# The variable's distribution is very concentrated between 1 and 10, but we
# can see some extreme outliers, even above 1000!
# Model 1: baseline linear model of burned area on all raw predictors.
mod1 <- lm(area~X+Y+month+day+FFMC+DMC+DC+ISI+temp+RH+wind+rain,data=forest_train)
summary(mod1)
# The R sq is very low at 4.6% and only DMC and DC seem significant regressors
par(mfrow=c(2, 2))
plot(mod1)
# Fix: stats::qqplot() requires two samples and errors when handed an lm
# object; the intended call is car::qqPlot(), which draws a QQ plot of the
# model's studentized residuals (car is loaded at the top of the script).
qqPlot(mod1)
# There is a negative linear relation between residuals and fitted values,
# and the QQ plot of residuals is not linear either.
# This indicates there can be collinearity problems.
# Residual plots of the model against each regressor (car::residualPlots):
residualPlots(mod1)
# The residual plots suggest a very significant pattern between fitted
# values and residuals; square transformations of wind, temp, rain, RH
# are suggested.
## But ffirst we observe that there are many zero values in area which is giving very irregular
## results. Hence , we decided to remove the zero value rows and reduce the dataset and actually run only on
## data where there is a burn area.
forest_train <- forest_train[forest_train$area>0,]
forest_test <- forest_test[forest_test$area>0,]
# Model 2
## Now we run model 2 on reduced subset of data
mod2 <- lm(area~X+Y+month+day+FFMC+DMC+DC+ISI+temp+RH+wind+rain,data=forest_train)
summary(mod2)
# R sq has sugnificantly improved
# Lets see plots
plot(mod2)
residualPlots(mod2)
# the fitted values plot and qq plot has improved, now we proceed with other transformations
# Model 3
## As discussed earlier we transform FFMC and Rain
FFMC_ref<- (log(max(forest_train$FFMC)+1-forest_train$FFMC))
Rain_log <- log(forest_train$rain+1)
mod3 <- lm(area~X+Y+month+day+FFMC_ref+DMC+DC+ISI+temp+RH+wind+Rain_log,data=forest_train)
summary(mod3)
residualPlots(mod3)
## the model is improved on R sq and residuals also.DMC and DC have emerged as significant.
## We still need to improve patterns in fitted values and residuals.
# Model 4
## we check the box cox for response variable transform
bc<- boxcox(mod3)
bc_df = as.data.frame(bc)
optimal_lambda = bc_df[which.max(bc$y),1]
optimal_lambda
## the optimal lambda is very near to zero. Hence log transform is suitable here (with area+1 to counter the zero values)
mod4 <- lm(log(area+1)~X+Y+month+day+FFMC_ref+DMC+DC+ISI+temp+RH+wind+Rain_log,data=forest_train)
summary(mod4)
residualPlots(mod4)
## The fitted values vs residual is random now and model prediction is also improved.
## Lets transform other variables.
#MOdel 5
## Here we check the interaction of the the various forest Fire Weather Index (FWI) as they
## are closely related and may have larger inetraction impact.
FFMC.DMC <- forest_train$FFMC*forest_train$DMC
FFMC.DC <-forest_train$FFMC*forest_train$DC
FFMC.ISI <-forest_train$FFMC*forest_train$ISI
DMC.DC<-forest_train$DMC*forest_train$DC
DMC.ISI<-forest_train$DMC*forest_train$ISI
DC.ISI<-forest_train$DC*forest_train$ISI
mod5 <- lm(log(area+1)~X+Y+month+day+FFMC+DMC+DC+ISI+FFMC.DMC+FFMC.DC+FFMC.ISI+DMC.DC+DMC.ISI+DC.ISI+
temp+RH+wind+Rain_log,data=forest_train)
summary(mod5)
plot(mod5)
residualPlots(mod5)
## Model 5 improved again; the normal QQ plot of residuals is much closer
## to normal.

# Model 6: add quadratic terms for the densely distributed weather variables
# that show a quadratic pattern, plus weather-variable interactions
# (wind and temperature are expected to drive fire spread and burned area).
wind_sq<-(forest_train$wind)^2
temp_sq<-(forest_train$temp)^2
rain_sq<-(forest_train$rain)^2
RH_sq<-(forest_train$RH)^2
# Interactions among the weather variables.
temp.RH<-(forest_train$temp)*(forest_train$RH)
wind.rain<-(forest_train$wind)*(forest_train$rain)
wind.temp<-(forest_train$wind)*(forest_train$temp)
mod6 <- lm(log(area+1)~X+Y+month+day+FFMC+DMC+DC+ISI+FFMC.DMC+FFMC.DC+FFMC.ISI+DMC.DC+DMC.ISI+DC.ISI+
             temp+temp_sq+RH+RH_sq+wind+wind_sq+Rain_log+rain_sq+temp.RH+wind.rain+wind.temp,data=forest_train)
summary(mod6)
plot(mod6)
residualPlots(mod6)
## R-squared improves and residuals look randomly distributed.
## We are close to the optimal model.

## Formal checks: collinearity and influential observations
## (the dataset contained outliers).
# Collinearity diagnostics on the numeric predictors
# (columns 3, 4, 13 -- presumably month/day/area -- are excluded).
colldiag(forest_train[,-c(3,4,13)])
## No collinearity found among the variables.

# Influential-observation diagnostics for mod6.
influence.measures(mod6)
influenceIndexPlot(mod6,id.n=5)
# Inspect the raw rows flagged as influential.
forest_train[which(row.names(forest_train) %in% c(200,363,416,479,480)),]
## Only 416 and 480 are extreme outliers (possibly burned for unrelated,
## perhaps intentional, reasons), so only those two rows are dropped.
forest_train_new <- forest_train[which(!row.names(forest_train) %in% c(416,480)),]
# Model 7: refit on the training set with the two influential observations
# removed. All derived variables must be rebuilt from forest_train_new so
# their lengths match the new data frame.
wind_sq<-(forest_train_new$wind)^2
temp_sq<-(forest_train_new$temp)^2
rain_sq<-(forest_train_new$rain)^2
RH_sq<-(forest_train_new$RH)^2
Rain_log <- log(forest_train_new$rain+1)
FFMC_ref<- (log(max(forest_train_new$FFMC)+1-forest_train_new$FFMC))
temp.RH<-(forest_train_new$temp)*(forest_train_new$RH)
wind.rain<-(forest_train_new$wind)*(forest_train_new$rain)
wind.temp<-(forest_train_new$wind)*(forest_train_new$temp)
FFMC.DMC <- forest_train_new$FFMC*forest_train_new$DMC
FFMC.DC <-forest_train_new$FFMC*forest_train_new$DC
FFMC.ISI <-forest_train_new$FFMC*forest_train_new$ISI
DMC.DC<-forest_train_new$DMC*forest_train_new$DC
DMC.ISI<-forest_train_new$DMC*forest_train_new$ISI
DC.ISI<-forest_train_new$DC*forest_train_new$ISI
# Note: mod7 uses FFMC_ref (unlike mod5/mod6 which used raw FFMC).
mod7 <- lm(log(area+1)~X+Y+month+day+FFMC_ref+DMC+DC+ISI+FFMC.DMC+FFMC.DC+FFMC.ISI+DMC.DC+DMC.ISI+DC.ISI+
             temp+temp_sq+RH+RH_sq+wind+wind_sq+Rain_log+rain_sq+temp.RH+wind.rain+wind.temp,data=forest_train_new)
summary(mod7)
residualPlots(mod7)
plot(mod7)
## No clear deviations, but the model carries many insignificant regressors
## and R-squared vs adjusted R-squared are far apart (overfitting).
## Use stepwise AIC to prune insignificant variables.

# Model 8: the stepAIC-selected model.
stepAIC(mod7)
## Fit the model suggested by stepAIC.
mod8 <- lm(formula = log(area + 1) ~ X + Y + month + DMC + DC + FFMC.DMC +
             FFMC.DC + FFMC.ISI + DC.ISI + RH + RH_sq, data = forest_train_new)
# Alternative candidate model kept for reference:
#mod8 = lm(formula = log(area + 1) ~ DC + temp + temp_sq + RH_sq + wind +
#           Rain_log + temp.RH + wind.temp, data = forest_train_new)
summary(mod8)
plot(mod8)
residualPlots(mod8)
## R-squared remains low, which likely reflects predictors missing from the
## data; the diagnostics indicate this is the best model available here.
## Evaluate the selected model on the held-out test set.
# Rebuild the derived predictors from forest_test and attach them.
FFMC.DMC <- forest_test$FFMC*forest_test$DMC
FFMC.DC <-forest_test$FFMC*forest_test$DC
FFMC.ISI <-forest_test$FFMC*forest_test$ISI
DC.ISI<-forest_test$DC*forest_test$ISI
RH_sq<-(forest_test$RH)^2
testData<-cbind(forest_test,FFMC.DMC,FFMC.DC,FFMC.ISI,DC.ISI,RH_sq)
# NOTE(review): this REFITS the mod8 formula on the test data and then
# predicts on the same rows, so "Predicted.Rsq" below measures in-sample fit
# on the test set, not true out-of-sample predictive R-squared. For a genuine
# hold-out evaluation use predict(mod8, newdata = testData) -- TODO confirm
# which was intended.
model <-lm(formula = log(area + 1) ~ X + Y + month + DMC + DC + FFMC.DMC +
             FFMC.DC + FFMC.ISI + DC.ISI + RH + RH_sq, data = testData)
y_hat<-predict.lm(model,newdata=testData, se.fit=TRUE)$fit
y_hat<-as.vector(y_hat)
# Residual sum of squares on the log scale.
dev<-log(testData$area+1)-(y_hat)
num<-sum(dev^2)
# Total sum of squares on the log scale.
dev1<-log(testData$area+1)-mean(log(testData$area+1))
den<-sum(dev1^2)
# R-squared = 1 - RSS/TSS.
Predicted.Rsq<-1-(num/den)
Predicted.Rsq
## The predicted R-squared is 28.59% -- a reasonable fit for this dataset.

## Finally, fit the same formula on the original full data
## (restricted to rows with a positive burned area).
FFMC.DMC <- forest$FFMC*forest$DMC
FFMC.DC <-forest$FFMC*forest$DC
FFMC.ISI <-forest$FFMC*forest$ISI
DC.ISI<-forest$DC*forest$ISI
RH_sq<-(forest$RH)^2
forest_new<-cbind(forest,FFMC.DMC,FFMC.DC,FFMC.ISI,DC.ISI,RH_sq)
forest_new <- forest_new[forest_new$area>0,]
model_full <-lm(formula = log(area + 1) ~ X + Y + month + DMC + DC + FFMC.DMC +
                  FFMC.DC + FFMC.ISI + DC.ISI + RH + RH_sq, data = forest_new)
summary(model_full)
plot(model_full)
residualPlots(model_full)
|
378b81b4202eae83ee7f3a0d2bfd63231ffdce74
|
5e6f7050eac64d051b96c8c906c91419d28ef4b3
|
/code/problem1.R
|
f836f580c03dfa01b6326577412c234616dd7028
|
[] |
no_license
|
berild/spatial-statistics-ex1
|
8dfb244a46b89f8930662489fe9eb7fa63f1d570
|
d28d55b6a52a4a816f1883333eedbaf6b76a85de
|
refs/heads/master
| 2020-04-27T11:08:17.470821
| 2019-02-19T08:16:11
| 2019-02-19T08:16:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,136
|
r
|
problem1.R
|
#Problem 1 - code
library(fields)#Matern is here
library(geoR) #Need to install XQuartz. cov.spatial() belongs here
library(MASS)
library(akima)
library(ggplot2)
library(reshape2)#melt
library(gridExtra)#grid.arrange
source('problem1_functions.R')
set.seed(42)
###################################################################################
###################################################################################
# a) Correlation function and variograms
###################################################################################
# Plot the Matern correlation function and the powered exponential correlation
# function, and (in the next section) the corresponding variograms.
# This code is inspired by Henning Omre's "Matern1.R".

# Variances used later for the variogram plots.
sigma2 <- c(1, 5)

# Distances at which the correlation functions are evaluated.
# BUG FIX: the original `0:0.01:49` is MATLAB syntax; in R it parses as
# (0:0.01):49 == 0:49, i.e. integer unit steps. seq() gives the intended
# fine grid with step 0.01.
distance <- seq(0, 49, by = 0.01)

# Matern correlation for two smoothness values nu (range fixed at 10).
nuMat <- c(1, 3)
CorrMatern1 <- Matern(distance, range = 10, nu = nuMat[1]) # What should range be?
CorrMatern2 <- Matern(distance, range = 10, nu = nuMat[2])

# Powered exponential correlation exp(-(h/10)^nu) for two power values.
nuExp <- c(1, 1.9)
CorrExp1 <- exp(-(distance / 10)^nuExp[1])
CorrExp2 <- exp(-(distance / 10)^nuExp[2])

# Collect the four correlation curves and reshape to long format for ggplot.
CorrFuncDF <- data.frame(distance, CorrMatern1, CorrMatern2,
                         CorrExp1, CorrExp2)
longDF <- melt(CorrFuncDF, id = 'distance')
ggplot(longDF) +
  geom_line(aes(x = distance, y = value, colour = variable)) +
  scale_colour_discrete(labels = c("Matern\n nu = 1\n", "Matern\n nu = 3\n",
                                   "Powered exp.\n nu = 1\n", "Powered exp.\n nu = 1.9\n")) +
  ggtitle("Correlation function") +
  theme(plot.title = element_text(hjust = 0.5))

# Save figure:
#ggsave("../figures/corrfunc.pdf", plot = last_plot(), device = NULL, path = NULL,
#       scale = 1, width = 5.5, height = 4, units = "in",
#       dpi = 300, limitsize = TRUE)
#####################################
# Plot the variogram functions gamma(h) = sigma^2 * (1 - rho(h)), reusing the
# correlation values in longDF, once for each variance in sigma2.
plots = list()
# Legend titles identifying the variance used in each panel.
spec = paste("Sigma^2 =", sigma2)
#####################################
# Panel 1: variograms for sigma^2 = sigma2[1].
plots[[1]] = ggplot(longDF) +
  geom_line(aes(x = distance, y = sigma2[1]*(1-value), colour = variable)) +
  scale_colour_discrete(name = spec[1], labels =
                          c("Matern\n nu = 1\n", "Matern\n nu = 3\n",
                            "Powered exp.\n nu = 1\n", "Powered exp.\n nu = 1.9\n")) +
  ggtitle("Variogram functions") +
  ylab('value') +
  theme(plot.title = element_text(hjust = 0.5))
# Panel 2: variograms for sigma^2 = sigma2[2] (no title; shares the figure).
plots[[2]] = ggplot(longDF) +
  geom_line(aes(x = distance, y = sigma2[2]*(1-value), colour = variable)) +
  scale_colour_discrete(name = spec[2], labels =
                          c("Matern\n nu = 1\n", "Matern\n nu = 3\n",
                            "Powered exp.\n nu = 1\n", "Powered exp.\n nu = 1.9\n")) +
  #ggtitle("Variogram functions") +
  ylab('value') +
  theme(plot.title = element_text(hjust = 0.5))
#######################################
# Stack both panels into one figure.
plotGrid = grid.arrange(grobs = plots, ncol = 1)
#ggsave("../figures/variograms.pdf", plot = plotGrid, device = NULL, path = NULL,
#       scale = 1, width = 5.5, height = 2*4, units = "in",
#       dpi = 300, limitsize = TRUE)
###################################################################################
###################################################################################
# b) Prior distribution: sample realizations of a 1D Gaussian random field on a
#    regular grid for each of 8 parameter configurations, and save one
#    realization of the "true system" configuration for later sections.
###################################################################################
# Grid of n = 50 locations.
n = 50
L = 1:50
# Constant prior mean vector.
mu = 0
E = matrix(rep(mu,50), ncol = 1)
# Candidate model parameters: variances, Matern smoothness, exp. powers.
sigma2 = c(1,5)
nu_m=c(1,3)
nu_e = c(1,1.9)
# Pairwise distance matrix, scaled by the range parameter 10.
tau = rdist(L,L)/10
############################
# Expand the parameter grid: 8 configurations =
# {exp, Matern} x {two nu values} x {two variances}.
sigmaLong = rep(sigma2, times = 4)
nuLong = c(rep(nu_e, each = 2), rep(nu_m, each = 2))
funcLong = c(rep("e", 4), rep("m", 4))
# Human-readable labels for the plot annotations.
f = ifelse(funcLong=="e", "Pow Exp: ", "Matern: ")
specifications = paste(f,"nu=",nuLong,", sigma=",sigmaLong, sep = "")
#############################
# Number of realizations drawn per configuration.
nsamps = 10
##############################
# Parameters of the configuration treated as the "true system"; one of its
# realizations is saved to disk for sections c)-f).
sigma_true = 5
func_true = "e"
nu_true = nu_e[2]
which_save = nsamps
###############################
#plots = list()
for(i in 1:8){
  # covMatPrior() is defined in problem1_functions.R (sourced at the top).
  Cov = covMatPrior(tau,sigmaLong[i], nuLong[i], funcLong[i])
  realizations = mvrnorm(nsamps, E, Cov)
  ############################
  # Persist one realization of the "true system" configuration.
  if(sigmaLong[i]==sigma_true & nuLong[i] == nu_true & funcLong[i]==func_true){
    savedRealization = realizations[which_save, ]
    save(savedRealization, file = "savedRealization.RData", ascii=TRUE)
  }
  ############################
  # Display: transpose so each column is one realization, then melt to long
  # format so all realizations can be drawn as separate lines.
  sampleDF = as.data.frame(t(realizations))
  sampleDF$L = L
  long_realis = melt(sampleDF, id = "L")
  plot= ggplot(long_realis,
               aes(x=L, y=value, colour=variable)) +
    geom_line()+
    ggtitle("Realizations of 1D Gaussian RF") +
    xlab("x")+
    ylab("Realizations") +
    annotate("text", x = 10, y = max(long_realis$value)*1.1, label = specifications[i])+
    theme(plot.title = element_text(hjust = 0.5), legend.position = "none")
  ########################
  # Optionally save each configuration's plot to its own PDF.
  name = paste("../figures/sample1conf",i,".pdf", sep = "")
  #ggsave(name, plot = plot, device = NULL, path = NULL,
  #       scale = 1, width = 5.5, height = 4, units = "in",
  #       dpi = 300, limitsize = TRUE)
  print(plot)
  #plots[[i]] = plot
}
###################################################################################
###################################################################################
# c) Likelihood setup
###################################################################################
# Load the "true" realization saved in section b); it is observed at the
# locations in x with measurement-error levels sigmad.
load('savedRealization.RData')
sigmad = c(0,0.25)
x = c(10,25,30)
###################################################################################
###################################################################################
# d) Posterior distribution and prediction: condition the Gaussian prior on the
#    (possibly noisy) observations and plot predictions with 90% intervals.
###################################################################################
# Prior: powered exponential, nu = 1.9, sigma^2 = 5 (the "true" configuration).
sigmar = sigma2[2]
nu = nu_e[2]
func = "e"
meanPrior = E
covPrior = covMatPrior(tau, sigmar, nu, "e")
# Likelihood: H picks out the observed locations x from the grid of n points
# (obsMatrix is defined in problem1_functions.R).
H = obsMatrix(x, n)
# NOTE(review): sigmad is placed directly on the covariance diagonal, i.e.
# treated as an error VARIANCE; if sigmad holds standard deviations this
# should be sigmad^2 -- confirm against simulateMeasurements().
covLik1 = diag(sigmad[1], ncol = length(x), nrow=length(x))
covLik2 = diag(sigmad[2], ncol = length(x), nrow=length(x))
# Simulate measurements of the saved realization at x, exact and noisy.
set.seed(1)
d1 = simulateMeasurements(savedRealization, H, sigmad[1])
d2 = simulateMeasurements(savedRealization, H, sigmad[2])
# Posterior mean/covariance (postModel from problem1_functions.R).
posterior1 = postModel(meanPrior, covPrior, H, covLik1, d1)
posterior2 = postModel(meanPrior, covPrior, H, covLik2, d2)
################################
# Point predictions = posterior means.
predicted1 = posterior1$postMean
predicted2 = posterior2$postMean
# 90% pointwise prediction intervals (alpha = 0.1).
alpha = 0.1
z = qnorm(alpha/2, lower.tail = FALSE)
# abs() guards against tiny negative diagonal entries from numerical
# round-off in the exact-observation case.
# NOTE(review): stdDev2 below has no such guard -- inconsistent; if
# posterior2$postCov can also have negative round-off on the diagonal,
# sqrt() will produce NaN there.
stdDev1 = sqrt(diag(abs(posterior1$postCov)))
stdDev2 = sqrt(diag(posterior2$postCov))
intLower1 = predicted1-z*stdDev1
intUpper1 = predicted1+z*stdDev1
intLower2 = predicted2-z*stdDev2
intUpper2 = predicted2+z*stdDev2
prediction1 = data.frame(predicted1, intLower1, intUpper1)
prediction2 = data.frame(predicted2, intLower2, intUpper2)
# Display: one panel per measurement-error level; observed points overlaid.
specifications = paste("Measurement error = ", sigmad, sep = "")
plots = list()
plots[[1]] = ggplot(prediction1, aes(x = L)) +
  geom_errorbar(aes(ymin = prediction1$intLower1, ymax = prediction1$intUpper1), col = "red")+
  geom_line(aes(y = prediction1$predicted1)) +
  geom_point(data = as.data.frame(x), aes(x = x, y = d1)) +
  ggtitle("Posterior predictions with intervals") +
  xlab("x")+
  ylab("Predicted values") +
  annotate("text", x = 9, y = max(prediction1$intUpper1)*1.1, label = specifications[1])+
  theme(plot.title = element_text(hjust = 0.5))
plots[[2]] = ggplot(prediction2, aes(x = L)) +
  geom_errorbar(aes(ymin = prediction2$intLower2, ymax = prediction2$intUpper2), col = "red")+
  geom_line(aes(y = prediction2$predicted2)) +
  geom_point(data = as.data.frame(x), aes(x = x, y = d2)) +
  xlab("x")+
  ylab("Predicted values") +
  annotate("text", x = 10, y = max(prediction2$intUpper2)*1.1, label = specifications[2])
# Stack the two panels into one figure.
plotGrid = grid.arrange(grobs = plots, nrow = 2)
#ggsave("../figures/predictions.pdf", plot = plotGrid, device = NULL, path = NULL,
#       scale = 1, width = 5.5, height = 2*4, units = "in",
#       dpi = 300, limitsize = TRUE)
###################################################################################
###################################################################################
# e) Simulations: approximate the posterior empirically by sampling from it and
#    compare empirical predictions/intervals with the analytic ones.
###################################################################################
# Monte Carlo approximation with 100 posterior samples per case.
nsamps = 100
postSamps1 = mvrnorm(nsamps, posterior1$postMean, posterior1$postCov)
postSamps2 = mvrnorm(nsamps, posterior2$postMean, posterior2$postCov)
# Empirical predictions = pointwise sample means.
predictedNum1 = colMeans(postSamps1)
predictedNum2 = colMeans(postSamps2)
# Pointwise sample standard deviations (unbiased, divisor nsamps - 1).
stdDevNum1 = sqrt((1/(nsamps -1))*colSums((sweep(postSamps1,2,predictedNum1))^2))
stdDevNum2 = sqrt((1/(nsamps -1))*colSums((sweep(postSamps2,2,predictedNum2))^2))
# With estimated mean and sd, the interval is based on the t distribution;
# the sqrt(1 + 1/nsamps) factor is the prediction-interval form.
t = qt(alpha/2, df = nsamps-1, lower.tail = FALSE)
predictionNum1 = data.frame(pred = predictedNum1,
                            lower = predictedNum1 - t*stdDevNum1*sqrt(1+1/nsamps),
                            upper = predictedNum1 + t*stdDevNum1*sqrt(1+1/nsamps))
predictionNum2 = data.frame(pred = predictedNum2,
                            lower = predictedNum2 - t*stdDevNum2*sqrt(1+1/nsamps),
                            upper = predictedNum2 + t*stdDevNum2*sqrt(1+1/nsamps))
# Display: transpose so each column is one posterior sample, melt to long
# format so all samples can be drawn as separate lines.
sampleDF1 = as.data.frame(t(postSamps1))
sampleDF1$L = L
long_realis1 = melt(sampleDF1, id = "L")
sampleDF2 = as.data.frame(t(postSamps2))
sampleDF2$L = L
long_realis2 = melt(sampleDF2, id = "L")
plots= list()
# Panel 1: exact observations (sigmad[1] = 0).
plots[[1]] =
  ggplot(long_realis1,aes(x=L, y=value, colour = variable)) +
  geom_line()+
  geom_errorbar(data = predictionNum1,aes(x = L, ymin=predictionNum1$lower,
                                          ymax = predictionNum1$upper),inherit.aes = FALSE, col = "grey30")+
  geom_line(data = predictionNum1, aes(x = L, y = predictionNum1$pred), inherit.aes = FALSE)+
  ggtitle("Realizations and empirical estimated predictions")+
  xlab("x")+
  ylab("Posterior values") +
  annotate("text", x = 10, y = max(long_realis1$value)*1.1, label = specifications[1])+
  theme(plot.title = element_text(hjust = 0.5), legend.position = "none")
# Panel 2: noisy observations (sigmad[2] = 0.25).
plots[[2]]=
  ggplot(long_realis2,aes(x=L, y=value, colour = variable)) +
  geom_line()+
  geom_errorbar(data = predictionNum2,aes(x = L, ymin=predictionNum2$lower,
                                          ymax = predictionNum2$upper),inherit.aes = FALSE, col = "grey30")+
  geom_line(data = predictionNum2, aes(x = L, y = predictionNum2$pred), inherit.aes = FALSE)+
  xlab("x")+
  ylab("Posterior values") +
  annotate("text", x = 10, y = max(long_realis2$value)*1.1, label = specifications[2])+
  theme(plot.title = element_text(hjust = 0.5), legend.position = "none")
# Stack the two panels into one figure.
plotGrid = grid.arrange(grobs = plots, nrow = 2)
#ggsave("../figures/posteriorSamps.pdf", plot = plotGrid, device = NULL, path = NULL,
#       scale = 1, width = 5.5, height = 2*4, units = "in",
#       dpi = 300, limitsize = TRUE)
###################################################################################
###################################################################################
# f) Function predictors: predict A = number of grid points where the field
#    exceeds 2, using the posterior samples from the error-free case.
###################################################################################
# Posterior realizations drawn without measurement errors (section e).
realizations = postSamps1
N = nrow(realizations)
# Exceedance count per realization; computed once and reused below
# (the original recomputed rowSums(realizations > 2) three times).
exceedances = rowSums(realizations > 2)
# Monte Carlo estimate of A and the sample variance of the counts.
Ahat = mean(exceedances)
varA = (1/(N-1))*sum((exceedances-Ahat)^2)
# var(exceedances) gives the same value.
# Variance of the Monte Carlo mean is varA / N. The original hard-coded /100,
# which silently breaks if nsamps is ever changed; N generalizes it while
# giving the identical value for the current N = 100.
varAhat = varA/N
# Plug-in predictor: threshold the posterior mean instead of the samples.
Atilde = sum(posterior1$postMean>2)
|
bcbec9c458cbde79a7895e4ae572a69955c38ded
|
7ff45192ef0be890964fcb3c17766cdf7c4f76c9
|
/visualization/ggplot2_intro.R
|
4a268a400d7402ab69e15d46997e0fa9e3cae060
|
[] |
no_license
|
depocen/PSY317L
|
75036b771e231defa183501345c5a750146e38be
|
661ba27837ccb506d216aaedf6cd9b37860ca27b
|
refs/heads/master
| 2023-07-10T14:53:26.444765
| 2021-08-16T15:50:09
| 2021-08-16T15:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
ggplot2_intro.R
|
### Introduction to ggplot2
# Teaching script: builds up a ggplot layer by layer, then leaves fill-in
# exercises. The `______` blanks below are INTENTIONAL placeholders for
# students -- the file will not run as-is until they are completed.
## Load libraries and data
library(tidyverse)
df <- read_csv("data/BlueJays.csv")
head(df)
## Making a ggplot2 graph
ggplot() #blank canvas
ggplot(df) #just adding the dataset to the blank canvas, but nothing plotted yet
ggplot(df, aes() ) #inside aes() we'll put what our x and y axis will be
ggplot(df, aes(x=Mass, y=Head) ) #we say what the x and y are, and it creates scales on each axis, but we didn't tell it what to plot yet
ggplot(df, aes(x=Mass, y=Head) ) + geom_point()
#What if we want to change the color of the points?
ggplot(df, aes(x=Mass, y=Head) ) + geom_point(color='red')
#What if we want to color the points based on another variable?
#(mapping color inside aes() gives one color per level of KnownSex)
ggplot(df, aes(x=Mass, y=Head, color = KnownSex) ) + geom_point()
##### Let's build up another example......
films <- read_csv("data/films.csv")
ggplot()
ggplot(films)
ggplot(films, aes() )
ggplot(films, aes(x=imdb, y=metacritic) )
ggplot(films, aes(x=imdb, y=metacritic) ) + geom_point()
ggplot(films, aes(x=imdb, y=metacritic) ) + geom_point(color='brown')
####################----------------------------##########################
### Try for yourself examples....
# Remove the blanks and replace them with the appropriate word.
# 1. For the BlueJays (df) data, plot Head against Skull
ggplot(df, aes(x = ______, y= ______)) + geom_point()
# 2. For the BlueJays (df) data, plot Mass against BillLength and color by KnownSex
ggplot(____, aes(x = _____, y= ______, color = ______)) + geom_point()
# 3. From the films data, plot imdb against rottentomatoes and make the points red.
ggplot(films, aes(x = _______, y= _______)) + geom_point(color = _______)
# 5. Load in the personality dataset, and plot extraversion against agreeableness.
personality <- read_csv("data/personality.csv")
head(personality)
ggplot(_______, aes(x=___________, y=_________)) + ______________
##############################
### Some Other Things We Can Do, just for fun...
ggplot(df, aes(x=Mass, y=Head) ) + geom_point()
#What if we want to change the size of the points ?
ggplot(df, aes(x=Mass, y=Head) ) + geom_point(size=2)
# We can also make points transparent (alpha: 0 = invisible, 1 = opaque)
ggplot(df, aes(x=Mass, y=Head) ) + geom_point(size=2, alpha=.5)
#you can combine this with other modifications, e.g. color
ggplot(df, aes(x=Mass, y=Head, color = KnownSex) ) + geom_point(size=2)
ggplot(df, aes(x=Mass, y=Head, color = KnownSex) ) + geom_point(size=2,alpha=.5)
#this is the same, I just made the code more readable
ggplot(df, aes(x=Mass, y=Head, color = KnownSex) ) +
  geom_point(size=2,alpha=.5)
# you can also change the shape of the points (pch = plotting character code)
ggplot(df, aes(x=Mass, y=Head) ) + geom_point()
ggplot(df, aes(x=Mass, y=Head) ) + geom_point(pch=3)
ggplot(df, aes(x=Mass, y=Head, color=KnownSex) ) + geom_point(pch=3)
|
01696abe3d078dc77bf30c4aca77b846078ba0d0
|
fb37f1bbc59247c9b52e1feb88deb06b184bc7e6
|
/R/barplot_bun_comp_stack_med.R
|
28fc253bd542b30e16f07e20cacb2293a6c5d304
|
[] |
no_license
|
utah-osa/hcctools
|
a583d4767763e7c4b733139a51f7dc04f4bcc792
|
2b7b2f3946d21e334c42066ce31c8e57810caa43
|
refs/heads/master
| 2022-11-06T22:52:00.544549
| 2020-07-08T19:54:40
| 2020-07-08T19:54:40
| 274,969,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,806
|
r
|
barplot_bun_comp_stack_med.R
|
#' Stacked barplot of median bundle costs, broken down by service component.
#'
#' For each provider/CPT bundle (one row of `df`), draws a horizontal stacked
#' bar of the six component medians: surgery, medicine procedures, pathology,
#' radiology, anesthesia, and facility.
#'
#' @param df One row per bundle, with columns `pr_list` ("_"-separated
#'   provider names), `pc_list` (CPT codes), and the component median columns
#'   `surg_bun_med`, `medi_bun_med`, `radi_bun_med`, `path_bun_med`,
#'   `anes_bun_med`, `fac_bun_med`.
#' @return A ggplot object.
#'
#' NOTE(review): relies on the package-level palette `cust_color_pal1` and on
#' the magrittr pipe `%>%` being imported via the package NAMESPACE.
barplot_bun_comp_stack_med <- function(df) {
  # De-duplicate each row's "_"-separated provider list into a single
  # comma-separated label.
  df <- df %>%
    dplyr::rowwise() %>%
    dplyr::mutate(pr_list = pr_list %>%
                    stringr::str_split("_") %>%
                    unlist() %>%
                    unique() %>%
                    paste0(collapse=","))
  providers <- df %>% dplyr::pull(pr_list)
  # Include the CPT list in the axis label so different provider/CPT bundles
  # are demarcated even when the provider set is identical.
  pc_lists <- df %>% dplyr::pull(pc_list)
  pr_pc_list <- paste0("Provider(s):",providers,"\n CPT:", pc_lists)
  # One vector of medians per cost component.
  surg_med <- df$surg_bun_med
  medi_med <- df$medi_bun_med
  radi_med <- df$radi_bun_med
  path_med <- df$path_bun_med
  anes_med <- df$anes_bun_med
  fac_med <- df$fac_bun_med
  # Rows = components, columns = bundles; missing medians count as zero cost.
  # NOTE(review): rbind() yields a matrix here and tidyr::replace_na() is
  # documented for vectors/data frames -- confirm it behaves as intended on
  # a matrix.
  component_meds <-
    rbind(surg_med, medi_med, path_med, radi_med, anes_med, fac_med) %>%
    tidyr::replace_na(0) %>%
    as.data.frame()
  component_meds$ID <- as.factor(1:nrow(component_meds))
  # Long format: one row per (component, bundle). The ID -> name mapping must
  # match the rbind() row order above.
  melted <- reshape2::melt(component_meds, id = "ID") %>%
    dplyr::mutate(
      ID = dplyr::case_when(
        ID == 1 ~ "Surgery",
        ID == 2 ~ "Medicine Proc",
        ID == 3 ~ "Pathology",
        ID == 4 ~ "Radiology",
        ID == 5 ~ "Anesthesia",
        ID == 6 ~ "Facility"
      )
    )
  # melt() names bundle columns V1, V2, ...; recover the bundle index from
  # the digits in the column name and swap in the provider/CPT label.
  # (str_extract is now namespaced for consistency with the rest of this
  # function, which calls stringr/dplyr/purrr via `::` throughout.)
  melted <- melted %>%
    dplyr::rowwise()%>%
    dplyr::mutate(variable = pr_pc_list %>%
                    purrr::pluck(
                      as.numeric(
                        stringr::str_extract(
                          as.character(variable),
                          "[[:digit:]]+")
                      )
                    )
    )
  # Horizontal stacked bars: one bar per bundle, stacked by component.
  ggplot2::ggplot(melted, ggplot2::aes(variable, value, fill = ID, group = ID)) +
    ggplot2::geom_bar(stat = "identity", position = "stack") +
    ggplot2::scale_fill_manual(values = cust_color_pal1) +
    ggplot2::ggtitle("Bundles (with component) by Provider ") +
    ggplot2::xlab("Component Bundle") +
    ggplot2::ylab("Price ($)") +
    ggplot2::scale_y_continuous(labels = scales::dollar_format(prefix = "$")) +
    ggplot2::coord_flip()+
    ggplot2::theme(
      axis.text.x = ggplot2::element_text(angle = -45, hjust = 1, vjust=1)
    )
}
|
034b275525532aa1c64c501ef1ea799a55bd2b06
|
9b000d7c7ddcda6a1cc0dbf58d3897d531bbf944
|
/rankhospital.R
|
95ba90838ec6275f45319a84761f3bcb13ea26c8
|
[] |
no_license
|
weiweiweizhu/CourseraProgrammingAssignment4
|
77bc2c9acb8bb86002ea0443e666e4331bba370b
|
1db55baca82a8dade58d8a6f671c40dc9d721f62
|
refs/heads/master
| 2022-01-06T05:26:35.368187
| 2019-05-18T21:03:56
| 2019-05-18T21:03:56
| 187,380,843
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,822
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
  # Rank hospitals within a state by 30-day mortality rate for a given
  # outcome and return the name of the hospital at the requested rank.
  #
  # state:   two-letter state abbreviation, e.g. "TX"
  # outcome: one of "heart attack", "heart failure", "pneumonia"
  # num:     rank to return -- "best", "worst", or a positive integer.
  # Returns NA if num exceeds the number of ranked hospitals.
  data <- read.csv("rprog-data-ProgAssignment3-data/outcome-of-care-measures.csv",
                   colClasses = "character")

  # Map the outcome to its 30-day mortality column in the raw data.
  if (outcome == "heart attack")          # heart attack
    colnum <- 11
  else if (outcome == "heart failure")    # heart failure
    colnum <- 17
  else if (outcome == "pneumonia")        # pneumonia
    colnum <- 23
  else
    stop("invalid outcome")

  # Validate the rank argument.
  if (!is.numeric(num) && !(num %in% c("best", "worst")))
    stop("invalid num")
  # Validate the state against the states present in the data.
  if (!(state %in% data$State))
    stop("invalid state")

  # Keep only this state's rows and the three useful columns; drop
  # hospitals whose mortality rate is missing or non-numeric.
  data <- subset(data, State == state)
  fd <- as.data.frame(cbind(data[, 2],        # hospital
                            data[, 7],        # state
                            data[, colnum]),  # outcome
                      stringsAsFactors = FALSE)
  colnames(fd) <- c("hospital", "state", outcome)
  fd[[outcome]] <- suppressWarnings(as.numeric(fd[[outcome]]))
  fd <- na.omit(fd)

  if (num == "best")
    num <- 1

  if (num == "worst") {
    # Highest mortality first; ties still broken alphabetically (ascending)
    # by hospital name. BUG FIX: the original reversed the sort on BOTH
    # columns, which returned the alphabetically LAST hospital among ties
    # at the worst rate.
    r <- fd[order(-fd[[outcome]], fd[["hospital"]]), ]
    num <- 1
  } else {
    # Lowest mortality first; ties broken alphabetically by hospital name.
    r <- fd[order(fd[[outcome]], fd[["hospital"]]), ]
  }
  # Out-of-range ranks index past the end of the frame and yield NA.
  r[num, "hospital"]
}
|
8b1f50c2b2295eed14eba42b5c46b5e852999dc2
|
f697cbbb0da988fd43c07652d5955fbb82e25e38
|
/GoViewer/man/guard.Rd
|
e5742cac5bef479d5b32300da8d9fba4f5a74a9f
|
[] |
no_license
|
aidanmacnamara/epiView
|
eec75c81b8c7d6b38c8b41aece3e67ae3053fd1c
|
b3356f6361fcda6d43bf3acce16b2436840d1047
|
refs/heads/master
| 2021-05-07T21:22:27.885143
| 2020-06-22T13:08:51
| 2020-06-22T13:08:51
| 109,008,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,112
|
rd
|
guard.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guard.r
\name{guard}
\alias{guard}
\title{guard function for unresolved Shiny input fields}
\usage{
guard(input, ns, names)
}
\arguments{
\item{input}{a shiny input object}
\item{ns}{a function generated by Shiny's \code{\link[shiny]{NS}}}

\item{names}{a space-delimited string containing the required fields}
}
\value{
a logical scalar
}
\description{
Calling reactive functions before all required input fields have been resolved can cause warning messages
in the log and on the screen. This function checks whether all the required inputs are defined. If so,
it returns \code{TRUE}; otherwise it returns \code{FALSE}. The names of the required inputs are given as a
space-delimited string and converted to Shiny input keys by a user-supplied function \code{ns} generated by
Shiny's \code{\link[shiny]{NS}}. This function is inspired by \code{guard} in Apple's Swift language.
}
\examples{
# simulate a Shiny function scenario
ns=NS("demo")
input=list("demo-a"=1:3,
"demo-b"=NULL,
"demo-c"=3:5)
guard(input,ns,"a b c")
guard(input,ns,"a c")
}
|
824e25a819d75421169a9a748f64834fbc2ff2b8
|
ef7879462cbc8f9d2655055dfad1c0476e0720ce
|
/helpers/libs_mlr.R
|
d997cda3614805537fcab51f24c3c9be7bf2a60a
|
[] |
no_license
|
dandls/moc
|
bef5b63b211f1a4c1ae1c42c99cb367d39895760
|
d2fa9e0918d157c5d46a822b4ef110e641b45b76
|
refs/heads/master
| 2023-05-24T00:57:43.105607
| 2021-05-17T07:56:13
| 2021-05-17T07:56:13
| 257,011,495
| 20
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
libs_mlr.R
|
# --- Required R packages ----
packages <- c("checkmate", "devtools", "Rcpp", "ParamHelpers", "mlr", "ecr",
  "OpenML", "magrittr", "data.table", "farff", "ranger", "mlrCPO", "parallelMap",
  "xgboost", "BBmisc", "rjson", "Metrics", "foreach", "prediction", "rgl",
  "randomForest", "pracma", "parallelMap", "keras", "irace", "ggplot2",
  "plot3Drgl", "latex2exp", "scatterplot3d", "ggrepel", "reticulate",
  "datarium", "dplyr", "roxygen2", "gridExtra", "Formula", "StatMatch",
  "keras", "purrr", "e1071", "stringr", "mosmafs",
  "xtable", "ggpubr", "tidyr", # Packages for evaluation of benchmark results
  "GGally", "fmsb", "ggExtra", "metR", "mvtnorm") # Packages for study and applications

# Install any package from the list that is not yet present in the library.
# (rownames of installed.packages() are the installed package names.)
new.packages <- packages[!(packages %in% rownames(installed.packages()))]
if (length(new.packages) > 0) {
  install.packages(new.packages)
}

# --- Load local repositories ----
# `iml` and `counterfactuals` are loaded from source like "normal" packages;
# in the future this would simply be library("counterfactuals").
devtools::load_all("../iml", export_all = FALSE)
devtools::load_all("../counterfactuals", export_all = FALSE)

library("mlr")
library("mlrCPO")

# Custom keras learner for mlr.
source("../helpers/keras_mlr_learner.R")
6acdaf3e09ecaa57602543d57ab3fac6a9b3a12a
|
46a2d23301501e30a34a2585c30235ba2a098a69
|
/Multiple Plots/complex_layouts.R
|
1626ba3a24b9dce5a2cea0eb33b99058aea966ec
|
[] |
no_license
|
kanerisushmita/R-Graphics
|
160616c0e0773808f288c14fcb82b6c9c54eda25
|
1b0ed71979f0c1ba4668e6a90063e9459f35c19a
|
refs/heads/master
| 2021-09-21T22:13:38.917366
| 2018-09-01T14:49:20
| 2018-09-01T14:49:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
complex_layouts.R
|
# Arrange three plots of the `movies` data frame (pre-loaded in the
# workspace) in a custom layout:
#   panel 1: rating (x) vs runtime (y)  -- top left
#   panel 2: votes (x) vs runtime (y)   -- bottom left
#   panel 3: boxplot of runtime         -- spanning the right column

# Column-wise fill: first column holds panels 1 and 2 stacked, second column
# repeats panel 3 so it occupies the full height on the right.
panel_grid <- matrix(c(1, 2, 3, 3), ncol = 2)

# Register the layout before drawing the panels.
layout(panel_grid)

# Panel 1: rating vs runtime.
plot(movies$rating, movies$runtime)
# Panel 2: votes vs runtime.
plot(movies$votes, movies$runtime)
# Panel 3: distribution of runtime.
boxplot(movies$runtime)
|
62e06bbe025da76184b8100c7b942b38f5fa7ce7
|
a04bac5d86c80c31608e3c13c87dcc626a0a17a2
|
/R/Results/1. Presence of competitive exclusion/Code1c.R
|
38249b59c3d43f36d702c8032b136dc094ad5522
|
[] |
no_license
|
lucymli/predator-prey-dynamics
|
cc5299ffb2f50fc30feff046a77814aa3c1a0755
|
4521f88ffbb15f418d2a68b4dbf8ad3c2dd03340
|
refs/heads/master
| 2021-01-15T20:09:19.417939
| 2017-08-09T23:44:42
| 2017-08-09T23:44:42
| 99,843,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,370
|
r
|
Code1c.R
|
###Deterministic 2-consumer Model####
####Ensure deSolve package is installed####
#Specify values below
t <- seq(0,#initial time
10000, #end time
by = 0.1 #time interval for solving the ODE
)
#Initial sizes of prey and predator populations
state <- c(N=10, #initial prey population size
P1=5, #initial Predator 1 population size
P2=5) #initial Predator 2 population size
#Parameter values
par <- c(r = 1, # intrinsic growth rate of prey
a1 = 0.2, # rate of predation of Predator 1
c1 = 0.1, # assimilation efficiency of Predator 1
d1 = 0.1, # death rate of Predator 1
a2 = 0.2, # rate of predation of Predator 2
c2 = 0.1, # assimilation efficiency of Predator 2
d2 = 0.1, # death rate of Predator 2
K = 10) # carrying capacity of prey
#This function describes the ODEs of the system with 1 prey and 2 predators
LVDet <- function(Time, State, Param){
with(as.list(c(State, Param)), {
dNdt <- N*(r*(1-N/K) - a1*P1 - a2*P2) #Change in prey population is dependent on prey birth and prey death due to predation
dP1dt <- P1*(c1*a1*N - d1) #Change in Predator 1 population is dependent on predator birth (from assimilation of prey) and predator death
dP2dt <- P2*(c2*a2*N - d2)
return(list(c(dNdt,dP1dt,dP2dt)))
})
}
#When just prey and predator 1 are present
LVDet2 <- function(Time, State, Param){
with(as.list(c(State, Param)), {
dNdt <- N*(r*(1-N/K) - a1*P1) #Change in prey population is dependent on prey birth and prey death due to predation
dP1dt <- P1*(c1*a1*N - d1) #Change in Predator 1 population is dependent on predator birth (from assimilation of prey) and predator death
return(list(c(dNdt,dP1dt)))
})
}
#When just prey and predator 2 are present
LVDet3 <- function(Time, State, Param){
with(as.list(c(State, Param)), {
dNdt <- N*(r*(1-N/K) - a2*P2) #Change in prey population is dependent on prey birth and prey death due to predation
dP2dt <- P2*(c2*a2*N - d2) #Change in Predator 1 population is dependent on predator birth (from assimilation of prey) and predator death
return(list(c(dNdt,dP2dt)))
})
}
# Smoke-check: evaluate each derivative function once at the initial state so
# any coding error surfaces before the ODE solves (return values print to console).
# NOTE(review): t, state and par are defined earlier in the file (outside this
# excerpt). Verify that the index subsets below (par[c(1:4,8)], par[c(1,5:8)])
# really select the named parameters each reduced model needs (r, K, a1/c1/d1
# and r, K, a2/c2/d2 respectively). Because the models look parameters up by
# NAME inside with(), a wrong subset would NOT error — it would silently fall
# back to same-named variables in the global environment.
LVDet(t,state,par)
LVDet2(t,state[1:2],par[c(1:4,8)])
LVDet3(t,state[c(1,3)],par[c(1,5:8)])
#Numerically integrate the three systems with the deSolve package
out <- ode(state, t, LVDet, par)                  # prey + both predators
out2 <- ode(state[1:2], t, LVDet2, par[c(1:4,8)]) # prey + predator 1 only
out3 <- ode(state[c(1,3)],t,LVDet3, par[c(1,5:8)]) # prey + predator 2 only
# 2x2 panel layout. Calling graphics::par() here is safe even though a data
# vector named `par` exists: R resolves names in call position to functions.
par(mfrow=c(2,2),mar=c(4,5,4,4))
#scaling factor applied to all plot text (titles, labels, axes, legends)
scaleText <- 1.5
#Panel a: phase portrait — prey (x) against each predator (y)
matplot(out[ , 2],out[ , 3:4],type="l",xlab = "Prey", ylab="Predator",main="a",lty=1,lwd=2,col=c("Red","Purple"),cex.main=scaleText,cex.lab=scaleText,cex.axis=scaleText)
legend("topright",c("Predator 1","Predator 2"),col=c("Red","Purple"),lty = 1,cex=scaleText)
#Panel b: time evolution of prey, predator 1 and predator 2 together
matplot(out[ , 1], out[ , 2:4],type = "l", xlab = "Time", ylab = "Population size", main = "b", lty=1,lwd = 2,col = c("Blue","Red","Purple"),cex.main=scaleText,cex.lab=scaleText,cex.axis=scaleText)
legend("topright", c("Prey", "Predator 1","Predator 2"), col = c("Blue","Red","Purple"), lty = 1,cex=scaleText)
#Panel c: time evolution of prey and predator 1 in the absence of predator 2
matplot(out2[ , 1], out2[ , 2:3],type = "l", xlab = "Time", ylab = "Population size", main = "c", lty=1,lwd = 2,col = c("Blue","Red"),cex.main=scaleText,cex.lab=scaleText,cex.axis=scaleText)
legend("topright", c("Prey", "Predator 1"), col = c("Blue","Red"), lty = 1,cex=scaleText)
#Panel d: time evolution of prey and predator 2 in the absence of predator 1
matplot(out3[ , 1], out3[ , 2:3],type = "l", xlab = "Time", ylab = "Population size", main = "d", lty=1,lwd = 2,col = c("Blue","Purple"),cex.main=scaleText,cex.lab=scaleText,cex.axis=scaleText)
legend("topright", c("Prey","Predator 2"), col = c("Blue","Purple"), lty = 1,cex=scaleText)
#Population extremes over the run (out column 2 = prey, 3 = predator 1, 4 = predator 2)
max(out[,2])
min(out[,2])
max(out[,3])
min(out[,3])
max(out[,4])
min(out[,4])
#Final population sizes at the last time step
out[length(t),]
#Optional CSV export of the full-system trajectory (absolute, machine-specific path)
#write.csv(out,"/Users/lmq/Documents/IC/Final Year Project/Reports/Results/1. Presence of competitive exclusion/Data1b.csv")
|
0241ed30f5f0076e2656c982e9e9a3843e72eafd
|
7f08aa73752aac80ec2f7c32b582a4857e00acf9
|
/day1updated.r
|
be1a4312f1b1f93197c5b4fe5e8b9e47794c80df
|
[] |
no_license
|
melindahiggins2000/CDCRworkshopCode
|
24a0fa29e497ce25d7a227365bf244d19fb9bf95
|
72075dc9cad590180e7b00ae942c4d40ee675663
|
refs/heads/master
| 2021-01-10T07:33:47.705312
| 2016-03-19T18:18:11
| 2016-03-19T18:18:11
| 53,200,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,201
|
r
|
day1updated.r
|
# ========================================
# Day 1, March 10, 2016
# code and comments updated
# as of March 15, 2016
#
# by Melinda Higgins, PhD.
# ========================================
# DAY 1: Session 1 =======================
# ======================================
# see what directory R is currently set to
# ======================================
getwd()
# ======================================
# change your "working directory"
# to a location of your choice
# ======================================
setwd('C:/MyGithub/CDCRworkshopCode')
# ======================================
# check location again
# ======================================
getwd()
# ======================================
# Typing in commands - do simple math
# ======================================
2 + 2
3 + (4*8)
3+4*8
6 + (8**2) # exponents can be done using two
# asterix's ** or you can use
# the caret ^ symbol
12^2
# ======================================
pi # pi is a built in Constant
4 * pi
# ======================================
# use help() to see what other
# built in constants there are in R
# Notice that Constants is spelled
# with a capital C. R is CaSe sensitive
# so Constants and constants are 2
# different things
# ======================================
help(Constants)
LETTERS
letters
month.abb
month.name
# ====================================
# create x to have 1 numeric value
# ======================================
x = 3 * 5
x <- 3 * 5 # notice nothing is shown in the console
# you have to type the name of the object
# to "see" it
x
# ======================================
# create y as a numeric (integer) vector
# with 12 elements
# ======================================
y <- 1:12
y
# you'll notice that y is a vector of integers
# you can convert between numeric and integer type
# data classes using the as.numeric()
# and as.integer() functions.
ynum <- as.numeric(y)
yint <- as.integer(ynum)
# ======================================
# create z a numeric vector
# containing a sequence of numbers from
# 0 to 10 in units of 0.5
# ======================================
z <- seq(0,10,.5)
z
# ======================================
# the above code can also be done with the
# explict function arguments defined
# see the examples in help(seq)
# ======================================
help(seq)
z <- seq(from=0, to=10, by=0.5)
z
# ======================================
# create new object sinz which is a numeric
# vector now containing the sin (sine function)
# of the z values
# ======================================
sinz <- sin(z)
# ======================================
# look at objects created get simple listing
# and get listing with structure information
# ls() lists the objects in the
# Global Environment (top right window)
# ls.str() lists the objects with their
# structure
# ======================================
ls()
ls.str()
# ========================================
# while you can see information about each of these
# objects in the Global Environment (top right)
# of the RStudio window, the function length()
# is useful for determining how many elements are
# in a given vector/data object.
# ========================================
length(x)
length(y)
length(z)
length(sinz)
# ====================================
# NOTE ON ADDING COMMENTS
# ====================================
# highlight all comment lines
# then hit CTRL+SHIFT+C to toggle back and forth
# between code and comments.
#' you can continuously add comments after
#' the hastag (#) followed by a simgle quote '
#' and each time you hit return the next line begins
#' with a comment.
#
#' so, this is
#' a new line of commenting that begins with
#' a hashtag or number # sign followed by a single quote
#' then when you hit return the comment delimiter #'
#' is automatically added until you type in a
#' single hashtag without the single quote.
# then remove single quote and begin new line for r code
# ========================================
# other functions that are helpful for finding
# out about data objects is the str() "structure"
# function and the class() function.
# ========================================
str(y)
class(y)
str(z)
class(z)
# ======================================
# create some vectors made up
# of different elements: numbers,
# characters, and/or logic elements
# like TRUE or FALSE
# ======================================
a1 <- c(1,2,3,4,10,11)
a1
a2 <- c('a','g','f','r','t','s')
a2
a3 <- c(TRUE,FALSE,TRUE,TRUE,FALSE,TRUE)
a3
a4 <- c(1,2,'a','b',TRUE,FALSE)
a4
a5 <- c(1,2,3,4,TRUE,FALSE)
a5
a6 <- c(5,10,x)
# ======================================
# use the class() function to investigate
# these vectors further - notice how
# R tries to combine elements of different
# types - combining characters, numbers
# and logic elements
# ======================================
class(a1)
class(a2)
class(a3)
class(a4)
class(a5)
# ======================================
# using cbind() - this makes a matrix
# where every element has to be the same
# type - these are now all characters
# ======================================
a1to5cbind <- cbind(a1,a2,a3,a4,a5)
a1to5cbind
class(a1to5cbind)
str(a1to5cbind)
dim(a1to5cbind)
# ======================================
# using rbind() - like cbind()
# but now the vectors come in as rows
# ======================================
a1to5rbind <- rbind(a1,a2,a3,a4,a5)
a1to5rbind
class(a1to5rbind)
str(a1to5rbind)
dim(a1to5rbind)
# ======================================
# using list() - notice the dim()
# function doesn't return anything
# dim() does not work for lists
# but a list retains all of the original
# element types
# ======================================
a1to5list <- list(a1,a2,a3,a4,a5)
a1to5list
class(a1to5list)
str(a1to5list)
dim(a1to5list)
# ======================================
# create some lists made up of other lists
# this is how many R functions return
# their output. For example, the output
# from the linear regression model, lm()
# is a list of the various components
# and output statistics from a linear
# regression.
# ======================================
alist1 <- list(x, z, sinz, a1to5list)
alist1
str(alist1)
class(alist1)
# ======================================
# using data.frame() - this is a special
# kind of list - so, this time dim() works
# data.frames are the MOST COMMON way
# to handle and manage data. Each column
# or each variable in a data frame can
# be different types as long as the elements
# within each column variable are the same
# i.e. all numbers or all characters, etc.
# ======================================
a1to5df <- data.frame(a1,a2,a3,a4,a5)
a1to5df
class(a1to5df)
str(a1to5df)
dim(a1to5df)
# ======================================
# this time with stringsAsFactors
# set to FALSE
# NOTE: data.frame() by DEFAULT wants to
# set character variables to "factors"
# you can set this option to FALSE.
# ======================================
a1to5dfnf <- data.frame(a1,a2,a3,a4,a5,
stringsAsFactors = FALSE)
a1to5dfnf
class(a1to5dfnf)
str(a1to5dfnf)
dim(a1to5dfnf)
# ======================================
# EXERCISE 1
# Create a new data frame object called `df1`
# using the # `data.frame()` command. Combine
# the object `y` with the built in constant
# for `month.name` and `month.abb`.
# ======================================
# look at the vectors needed
y
month.name
month.abb
# combine these using data.frame()
mydf <- data.frame(y, month.name, month.abb)
# question was asked why can't we
# use c() - the c() or combine function
# simply appends these together into 1 long
# vector and we want them in sepaarate columns.
# so, this won't work
c(y,month.name,month.abb)
# ======================================
# exercise 1 - key
# ======================================
# Create the data frame again and call it
# `df2` using the same 3 objects
# (`y`, `month.name` and `month.abb`) and
# set `stringsAsFactors` to FALSE.
# ======================================
df1 <- data.frame(y, month.name, month.abb)
df1
str(df1)
df2 <- data.frame(y, month.name, month.abb,
stringsAsFactors = FALSE)
df2
str(df2)
# DAY 1: Session 2 =====================
# ======================================
# look at data objects - you can use
# the fix() command to VIEW and EDIT your data
# BUT this is a point-and-click approach
# so BE CAREFUL using fix() to edit your data
# ======================================
fix(z)
fix(alist1)
fix(df2)
# ======================================
# look at only part of your data
# use head() and tail() to only look
# at a few rows at a time
# ======================================
head(df1)
tail(df1)
# ======================================
# you can also invoke the viewer
# with View()
# you CANNOT edit your data using View()
# ======================================
View(df1)
view(df1) # lower case view() won't work
# notice the error that R gives you
# ======================================
# let's make a simple plot
# ======================================
plot(z,sinz)
# ======================================
# add some customization
# see help(plot) for using these options
# for xlabel, ylabel, and titles
# ======================================
plot(z, sinz,
xlab='Z = Sequence 0 to 10 by 0.5',
ylab='Sin(Z)',
main='Main title',
sub='example subtitle')
# ======================================
# add a BLUE line using lines()
# see help(par) for details on linetype
# options
# ======================================
lines(z,sinz,col='blue')
# ======================================
# customize the points using points()
# plotting character pch 23 is a filled diamond
# col defines the color
# bg defines the filled or background color
# see details using help(points)
# ======================================
points(z,sinz,pch=23,col='red',bg='black')
# ======================================
# select code above, right click and "run selection"
# or highlight code and click CTRL-R
# specifically run the following code all together
# the spaces and line returns added for clarity
# Note: RStudio helps with good formatting practices
# ======================================
# ======================================
# all together in one block
# easier to select and click run
# ======================================
plot(z, sinz,
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',main='Main title',
sub = 'example subtitle')
lines(z, sinz, col = 'blue')
points(z, sinz, pch = 23, col = 'red', bg = 'black')
# ======================================
# use graphics device to make a PDF
# of the plot
# see help(Devices) for links and help
# on the various graphical devices
#
# ** NOTE ** ALWAYS REMEMBER
# to turn the device off using dev.off()
# otherwise R will keep trying to send
# every plot to the output device
# in this case you'll keep adding pages
# to the PDF file.
# ======================================
pdf(file = "plot1.pdf")
plot(z, sinz,
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',main='Main title as of today my name is Melinda',
sub = 'example subtitle')
lines(z, sinz, col = 'blue')
points(z, sinz, pch = 23, col = 'red', bg = 'black')
dev.off()
# ======================================
# EXERCISE 2
# Look up which "device" will create and
# save the plot as a JPEG. Use the commands
# above as your guide and create and save the
# figure as a JPEG formatted file.
#
# Create a second JPEG where the width is
# 750 pixels and the height is 500 pixels
# and set the background color to yellow
# and the quality to 50.
# ======================================
# exercise 2 - key
# ======================================
jpeg(file = "plot1.jpg")
plot(z, sinz,
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',main='Main title',
sub = 'example subtitle')
lines(z, sinz, col = 'blue')
points(z, sinz, pch = 23, col = 'red', bg = 'black')
dev.off()
jpeg(file = "plot1yellow.jpg",
width=750, height=500,
bg = "yellow",
quality = 50)
plot(z, sinz,
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',
main='Main title',
sub = 'example subtitle')
lines(z, sinz, col = 'blue')
points(z, sinz, pch = 23, col = 'red', bg = 'black')
dev.off()
# ======================================
# let's install a package
# we'll install the "ggplot2" package
# INSTALLING a package puts it on to
# your local hard drive.
#
# You can also install a package
# from the menu Tools/Install Packages
# in the RStudio interface
#
# However, before
# R can use the functions in the package
# you have to LOAD it into R using the
# library() command below.
# ======================================
install.packages("ggplot2")
# ======================================
# LOAD the package using library()
#
# You can also click on the "Packages" tab
# in the lower right window and then scroll
# to find the package and click the checkbox
# to the left of the package name and this will
# also LOAD the package in the RStudio interface.
# ======================================
library(ggplot2)
# ======================================
# once the ggplot2 package is loaded
# we can get info
# and help on the package - see
# more in the "Packages" window
# ======================================
help(package = "ggplot2")
# ======================================
# now that ggplot2 is loaded we can
# use the qplot() function
# ======================================
qplot(z, sinz,
geom = c("point", "line"),
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',
main = 'Main title')
# ======================================
# we can also call variables inside a data frame
# put z and sinz into a data.frame called df2
# ======================================
df2 <- data.frame(z,sinz)
ggplot(df2, aes(x=z, y=sinz)) +
geom_line(colour = "red", linetype = "dashed") +
geom_point(shape = 23,
colour = "red",
fill = "black") +
xlab("z is sequence from 0 to 10 in units of 0.5") +
ylab("Sin(z)") +
ggtitle("Here is a Main Title") +
theme_light()
# ======================================
# here is the plot using basic R
# graphics
# ======================================
plot(z, sinz,
xlab = 'Z = Sequence 0 to 10 by 0.5',
ylab = 'Sin(Z)',main='Main title',
sub = 'example subtitle')
lines(z, sinz, col = 'blue')
points(z, sinz, pch = 23, col = 'red', bg = 'black')
# ======================================
# using ggplot2 to build the plot layer by layer
# ======================================
# ======================================
# make the basic plot window
# the object p now contains the plot
# and we keep adding layer using +
# ======================================
p <- ggplot(df2, aes(x=z, y=sinz))
p
# ======================================
# add a line geom (geometric object)
# ======================================
p <- p + geom_line(colour = "red", linetype = "dashed")
p
# ======================================
# add the points geom
# ======================================
p <- p + geom_point(shape = 23,
colour = "red",
fill = "black")
p
# ======================================
# add some labels and a title
# ======================================
p <- p + xlab("z is sequence from 0 to 10 in units of 0.5") +
ylab("Sin(z)") +
ggtitle("Here is a Main Title")
p
# ======================================
# we can apply a basic "theme" for the overall
# look and style of the plot
# ======================================
p <- p + theme_light()
p
# ======================================
# Let's also add the ggthemes package
# and really try out some cool
# plot styles
# ======================================
library(ggthemes)
p <- p + theme_economist() + ggtitle("The Economist Theme")
p
p <- p + theme_fivethirtyeight() + ggtitle("The 538 Theme")
p
p <- p + theme_tufte() + ggtitle("The Edward Tufte Theme")
p
p <- p + theme_wsj() + ggtitle("The WSJ Theme")
p
# ======================================
# it is always a good idea to make sure
# you cite the packages you used and give
# the creators proper credit
# it also helps document the version
# you are using
# ======================================
citation(package = "base")
citation(package = "ggplot2")
citation(package = "ggthemes")
# ======================================
# also document all of current
# session settings, add-ons, versions,
# and computer system info.
# ======================================
sessionInfo()
# DAY 1: session 3 =====================
# ======================================
# let's make some more data objects
# and update a few we already had
# ======================================
x <- 3 * 8 + 6
y <- 1:12
y2 <- y**2
# ======================================
# create a matrix
# ======================================
df1 <- cbind(y, y2, month.abb, month.name)
class(df1)
str(df1)
# ======================================
# create a data frame
# ======================================
df1a <- data.frame(y,y2,month.abb,month.name)
class(df1a)
str(df1a)
# ======================================
# create a list
# ======================================
list1 <- list(x,y,df1a)
class(list1)
str(list1)
# ======================================
# already saw previously
# you can use the fix() function
# to edit data and save the changes
# ======================================
fix(df1a)
# ======================================
# select the element on the 3rd row
# and 3rd column
# ======================================
df1[3,3]
# ======================================
# select the whole 3rd column
# ======================================
df1[,3]
# ======================================
# select the 5th row and every column
# in that row
# ======================================
df1[5,]
# ======================================
# compare the 1st column in the
# data matrix df1 and 1st column in
# the data frame df1a
# ======================================
df1[,1]
class(df1[,1])
str(df1[,1])
df1a[,1]
class(df1a[,1])
str(df1a[,1])
# ======================================
# create a sequence of numbers
# put them into a vector z
# perform math on that vector and
# save it. then combine both
# vectors into a data frame
# ======================================
z <- seq(0,10,.5)
sinz <- sin(z)
df2 <- data.frame(z, sinz)
# ======================================
# now let's clean up some of our objects
# using the rm() command to selectively
# remove objects no longer needed
# ======================================
ls()
rm(y, y2, z, sinz)
ls()
# ======================================
# other ways to select a column of data
# i.e. select a variable in a dataset.
# use the $ symbol to select a variable or
# column by its name
# ======================================
df2[,1] # use the column number
df2$z # select by column name
# ======================================
# after selecting a variable
# by the column name, then select the
# 3rd element (3rd row of the month.name
# variable) which is "March"
# ======================================
df1a$month.name[3]
# ======================================
# selecting data when some
# condition is TRUE
#
# find the row for which y equals 5
# in df1a
# ======================================
pickrow <- df1a$y == 5
df1a[pickrow, ]
# these 2 statements can be nested
df1a[df1a$y == 5, ]
# find rows where y > 6
pickrow <- df1a$y > 6
df1a[pickrow, ]
# ======================================
# use save.image() or save() to save all
# or selected objects - these are saved
# as *.RData files
# ======================================
save.image("allobjects.RData") # save ALL objects in workspace
save(df2, file="df2.RData") # save ONLY the df2 object
# ======================================
# remove all of the objects
# and then load them back - either one
# at a time or all of them together
# ======================================
rm(list = ls()) # remove ALL objects in workspace
load(file="df2.RData") # load ONLY the df2 object
load("allobjects.RData") # load all objects saved in "allobjects.RData"
ls()
# ======================================
# get data from dropbox at
# https://www.dropbox.com/sh/vlo5bzrl5ayo1bk/AADD0WieyuEdyGwiveuCoRr-a?dl=0
# download these files and put them
# into your working directory
# for this next exercise
#
# these are also available at the GITHUB repository
# https://github.com/melindahiggins2000/CDCRworkshopCode
# ======================================
# ======================================
# read in data as comma delimited
# ======================================
data.rt <- read.table(file="Dataset_01_comma.csv",header=TRUE,sep=",")
data.rt
# ======================================
# read in a CSV formatted file
# ======================================
data.csv <- read.csv(file="Dataset_01_comma.csv")
data.csv
# ======================================
# read in a TAB delimited TXT file
# ======================================
data.tab <- read.delim(file="Dataset_01_tab.txt")
data.tab
# ======================================
# read in a XLS Excel file
# functions from readxl package
library(readxl)
# ======================================
data.xls <- read_excel("Dataset_01.xls", sheet=1)
data.xls
# ======================================
# read in a XLSX Excel file
# use default to read in sheet 1
# ======================================
data.xlsx <- read_excel("Dataset_01.xlsx", sheet=1)
data.xlsx
# suppose I have a 2nd sheet named "new"
datanew.xlsx <- read_excel("Dataset_01addsheet.xlsx", sheet="new")
datanew.xlsx
# ======================================
# read in a SPSS SAV file
# ======================================
library(foreign)
# ======================================
data.spss <- read.spss(file = "Dataset_01.sav",
to.data.frame=TRUE)
data.spss
# ======================================
# read in a SAS Export XPT file
# ======================================
data.xpt <- read.xport(file = "Dataset_01.xpt")
data.xpt
# Suggestion in class
# try HAVEN package
install.packages("haven")
library(haven)
test1.sas <- read_sas("http://crn.cancer.gov/resources/ctcodes-procedures.sas7bdat")
test2.sas <- read_sas("ctcodes-procedures.sas7bdat")
test3.sas <- read_sas("data11.sas7bdat")
# DAY 1: session 4 =====================
# ======================================
# create new variables BMI from
# weight and height
# ======================================
# Body Mass Index = 703 * weight(lb) / height(in)^2; Height is recorded in
# feet, so multiply by 12 to get inches.
# Idiom fix: use the documented `^` exponent operator instead of the
# undocumented legacy `**` (both parse to the same call, `^` is standard R).
data.csv$bmiPRE  <- (data.csv$WeightPRE  * 703) / ((data.csv$Height * 12)^2)
data.csv$bmiPOST <- (data.csv$WeightPOST * 703) / ((data.csv$Height * 12)^2)
# ======================================
# you can also use attach and detach
# datasets and then call the variables
# without using the $ selector
# ======================================
attach(data.csv)
diff <- bmiPOST - bmiPRE # creates diff variables
# stands alone not attached
mean(diff)
# ======================================
# create using the variables inside the dataset
# but attach them back to the dataset
#
# ALWAYS remember to detach your dataset
# ======================================
data.csv$diff2 <- bmiPOST - bmiPRE
detach(data.csv)
# ======================================
# see more excellent examples of why to NOT use attach
# and how to approach data changes using with()
# or within() or transform() or mutate()
#
# see http://www.r-bloggers.com/friday-function-triple-bill-with-vs-within-vs-transform/
# and
# see http://www.r-bloggers.com/comparing-transformation-styles-attach-transform-mutate-and-within/
# ======================================
# ======================================
# once the dataset is detached
# go back to using the $ selector
# ======================================
data.csv$diff <- diff
# after assigning the new difference
# score diff to the dataset, remove it
# from the global environment.
rm(diff)
# ======================================
# WRITING or SAVING data
# you can save the file out to *.RData
# ======================================
save(data.csv, file="datacsv.RData")
# ======================================
# you can write datasets out as CSV
# ======================================
write.csv(data.csv,
file="datacsv.csv")
# ======================================
# write a dataset out as a TAB delimited
# TXT file
# ======================================
write.table(data.csv,
file="datacsv.txt",
sep="\t")
# ======================================
# here is an example of writing the dataset
# back out as a SPSS file - write.foreign()
# creates a basic text datafile and SPSS
# SYNTAX code to read in this data text file
# ======================================
# NOTE: When writing to SPSS variables
# that are factors will have "codes" created for them
# ======================================
datafile<-tempfile()
codefile<-tempfile()
write.foreign(data.csv,datafile="dataspss.txt",
codefile="codespss.sps",package="SPSS")
unlink(datafile)
unlink(codefile)
# ======================================
# Here is an example for writing out
# to a SAS file
# ======================================
datafile<-tempfile()
codefile<-tempfile()
write.foreign(data.csv,datafile="datasas.txt",
codefile="codesas.sas",package="SAS")
unlink(datafile)
unlink(codefile)
# ======================================
# quick check
# we'll recreate the BMI variables
# and make a histogram
# ======================================
data.csv$bmiPRE <- (data.csv$WeightPRE*703)/((data.csv$Height*12)**2)
data.csv$bmiPOST <- (data.csv$WeightPOST*703)/((data.csv$Height*12)**2)
hist(data.csv$bmiPRE)
# ======================================
# find the typo and fix it
# this seems trivial but it is important
# to document and track data corrections
# like these
# recalculate and redo the histogram
# this time will use probabilities
# instead of frequencies for the histogram
#
# notice here I still use the $ selector
# on the left side of the assignment
# commands below. you MUST use the $ sign
# here or the changes to the dataset will NOT
# be saved.
# ======================================
# Correct the data-entry typo in Height (row 18), then recompute both BMI
# columns from the corrected data and plot a density-scaled histogram.
#
# BUG FIX: the original wrapped this in attach(data.csv)/detach(data.csv).
# attach() places a *copy* of the data frame on the search path, so the
# Height correction made afterwards to `data.csv` never reached the attached
# copy — bmiPRE and bmiPOST were silently computed from the UNCORRECTED
# Height. Using explicit `$` selectors (as the script itself recommends a few
# lines below) removes both the bug and the attach() side effect.
data.csv[18, "Height"] <- 5.6
data.csv$bmiPRE  <- (data.csv$WeightPRE  * 703) / ((data.csv$Height * 12)^2)
data.csv$bmiPOST <- (data.csv$WeightPOST * 703) / ((data.csv$Height * 12)^2)
# Probability (density) histogram of corrected pre-intervention BMI,
# overlaid with a kernel density estimate.
hist(data.csv$bmiPRE, freq = FALSE)
lines(density(data.csv$bmiPRE))
# ======================================
# However, I advise again using attach()
# and detach(), see more at
# http://www.r-bloggers.com/to-attach-or-not-attach-that-is-the-question/
# ======================================
# ======================================
# make a plot of the PRE vs POST BMI
# add a linear fit line and a
# lowess smoothed fit line
# ======================================
plot(data.csv$bmiPRE, data.csv$bmiPOST, "p")
abline(lm(data.csv$bmiPOST ~ data.csv$bmiPRE), col="red")
lines(lowess(data.csv$bmiPRE, data.csv$bmiPOST), col="blue")
# ======================================
# do the plot again using ggplot options
# ======================================
p <- ggplot(data.csv, aes(bmiPRE, bmiPOST))
p
p <- p + geom_point()
p
p + geom_smooth(method="lm") +
facet_wrap(~GenderCoded) +
ggtitle("Panels for Gender")
# ======================================
# let's create Gender as a Factor
# instead of as just number codes
# Factors are useful in plots
# and tables providing labels for the
# labels. Factors are also helpful
# in various models.
# ======================================
data.csv$GenderFactor <- factor(data.csv$GenderCoded,
levels = c(1,2),
labels = c("Male","Female"))
data.csv$GenderFactor
str(data.csv$GenderFactor)
class(data.csv$GenderFactor)
table(data.csv$GenderFactor)
# ======================================
# we'll use the GenderFactor to split
# the plots into different panels by gender
# ======================================
p <- ggplot(data.csv, aes(bmiPRE, bmiPOST)) +
geom_point() +
geom_smooth(method = "loess", colour = "red", se = FALSE) +
geom_smooth(method = "lm", colour = "blue") +
facet_wrap(~GenderFactor) +
ggtitle("Panels for Gender, RED smoothed line, BLUE linear fit line")
p
# ======================================
# we can also use this factor to color
# code the points and associated model fits
# ======================================
p <- ggplot(data.csv, aes(bmiPRE, bmiPOST)) +
geom_point(aes(colour = GenderFactor)) +
geom_smooth(method = "lm", aes(colour = GenderFactor)) +
ggtitle("Colored by Gender")
p
# ======================================
# let's take a quick look at the linear
# model object that has the output
# from fitting a linear model to the
# PRE and POST BMI data.
# we'll cover the lm() object more
# in DAY 2.
# ======================================
fit1 <- lm(bmiPOST ~ bmiPRE, data=data.csv)
fit1
summary(fit1)
coef(fit1)
anova(fit1)
|
b72067f479293d44f4e8e5fc6ec7cf0f392f76a0
|
f28f6b753066c66bea84a438c277c411db713fa9
|
/man/personality.Rd
|
2f634a618e1599ae1540dbc6a9521bd150a2d851
|
[] |
no_license
|
bgautijonsson/neuropsychology.R
|
8933e50e03f7338d74726d82cdcc29121225420a
|
f4f70c4a2bdf1c8937d89d787f9ba2afce1f2bf4
|
refs/heads/master
| 2020-03-13T15:23:10.944097
| 2017-10-06T13:46:37
| 2017-10-06T13:46:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
rd
|
personality.Rd
|
\name{personality}
\alias{personality}
\title{A dataframe with personality data}
\author{Dominique Makowski}
\description{
A dataset containing normal and pathological personality traits data from an online questionnaire.
If you use this dataset for a publication, please refer to it as:
"personality-1.0".
Demographic variables:
-- Study_Level
The level of education. Should be treated as a factor. 0: Absence of Degree, 1: Secondary Education Degree, 2: Youth Training, 3: High-school Degree, 4: Higher National Diploma (2 years of higher education), 5: Bachelor Degree (3 years of higher education), 6: Master Degree (5 years of higher education), 7: Doctorate Degree (8 years of higher education)
}
\format{
1327 observations (rows) and 20 variables (columns)
}
\examples{
require(neuropsychology)
df <- personality
describe(df)
}
|
c790d4fa5e9e1e86e4b819a2cf437b42d236a200
|
a266bb66eff94641d1ff100daf31e93dcd4e0105
|
/tests/testthat/test.sail.R
|
b382cfcf9d88d71ab6dbc0db30b8e91f5e19b88a
|
[
"MIT"
] |
permissive
|
ashiklom/rrtm
|
e879f37471dff49930007b2d6b7a8352db1df4b2
|
504c3c7655fe30c5b713e9f0f606960f8a46466a
|
refs/heads/master
| 2022-09-09T02:43:26.722764
| 2022-08-04T18:04:25
| 2022-08-04T18:04:25
| 196,279,459
| 5
| 3
|
NOASSERTION
| 2022-01-13T16:01:27
| 2019-07-10T21:50:33
|
R
|
UTF-8
|
R
| false
| false
| 1,466
|
r
|
test.sail.R
|
# Cross-validates the vectorized volscatt() against the scalar reference
# implementation volscatt_scalar_v() over a full grid of solar / observer /
# azimuth angles. The two implementations must agree exactly
# (expect_identical) on all four returned quantities.
test_that("Volume-scattering function works across angles", {
  angles <- seq(-60, 60, 20)
  # Fourth argument reused from the same grid — assumed to be the leaf
  # inclination angles; TODO confirm against volscatt()'s signature.
  l <- angles
  for (sa in angles) {      # solar zenith angle
    for (oa in angles) {    # observer (instrument) zenith angle
      for (aa in angles) {  # relative azimuth angle
        # Context string attached to every expectation so a failure reports
        # exactly which angle combination broke.
        angle_info <- sprintf(paste(
          "solar angle: %f",
          "instrument angle: %f",
          "azimuth angle: %f",
          sep = "\n"
        ), sa, oa, aa)
        s <- volscatt_scalar_v(sa, oa, aa, l)  # scalar reference (row-indexed result)
        v <- volscatt(sa, oa, aa, l)           # vectorized version (list result)
        expect_identical(unlist(s["chi_s",]), v$chi_s, info = angle_info)
        expect_identical(unlist(s["chi_o",]), v$chi_o, info = angle_info)
        expect_identical(unlist(s["frho",]), v$frho, info = angle_info)
        expect_identical(unlist(s["ftau",]), v$ftau, info = angle_info)
      }
    }
  }
})
# Smoke test of the manually-composed PRO4SAIL pipeline: couple PROSPECT-4
# leaf optics with a Hapke soil reflectance, run foursail() at LAI = 3, and
# check every returned radiative stream is a physically valid fraction in [0, 1].
test_that("Manual PRO4SAIL works", {
  lrt <- prospect4(1.4, 40, 0.01, 0.01)  # positional args — presumably N, Cab, Cw, Cm; TODO confirm
  rsoil <- hapke_soil(0.5)               # soil reflectance at soil moisture 0.5
  sail <- foursail(lrt$reflectance, lrt$transmittance, rsoil, 3)
  for (stream in sail) {
    expect_true(all(stream >= 0))
    expect_true(all(stream <= 1))
  }
})
# Each pro4sail_* shortcut must produce radiative streams bounded in [0, 1].
#
# BUG FIX: the original body reassigned `fun <- pro4sail_5` as the first
# statement inside the loop, clobbering the loop variable — so pro4sail_4 and
# pro4sail_d were never actually exercised. That line is removed.
test_that("PRO4SAIL shortcuts work", {
  args <- list(
    N = 1.4, Cab = 40, Car = 8, Canth = 10, Cbrown = 0,
    Cw = 0.01, Cm = 0.01,
    LAI = 3, soil_moisture = 0.5
  )
  for (fun in list(pro4sail_4, pro4sail_5, pro4sail_d)) {
    # Keep only the arguments this shortcut accepts; head(..., -1) drops the
    # last formal (presumably a `...`/extra argument — TODO confirm).
    cargs <- args[head(names(formals(fun)), -1)]
    sail <- do.call(fun, cargs)
    for (stream in sail) {
      expect_true(all(stream >= 0))
      expect_true(all(stream <= 1))
    }
  }
})
|
a6b8a9d1562ae5f7c575efa6343e7a0531999d6b
|
0b930de318aab67acc5256a6ffb3f9582c47d2d8
|
/DATASET/Analysis.R
|
51cd5aaedf5249fa2341b66cee0b2a40bf51f3ab
|
[] |
no_license
|
kautu/Citation
|
9f2c5f37627dd2cd7fc0446fccf4891141e6ccd3
|
4857e6b015feacdb2bc988b8c803c779a2fe6b1d
|
refs/heads/main
| 2023-08-12T08:46:43.166950
| 2021-10-07T05:43:29
| 2021-10-07T05:43:29
| 303,440,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,570
|
r
|
Analysis.R
|
## EPO citation analysis for triadic energy patent families.
## Requires (defined/loaded elsewhere): energy.triadic, node.centrality,
## category.cluster, tpf.core, plus data.table, tibble/dplyr, ggplot2, Cairo.
##
## FIX: tibble::as.tibble() is deprecated — replaced with as_tibble().
## Keep citations where either side belongs to an energy.triadic family,
## deduplicated on the (citing, cited) application pair.
epo.citations <- na.omit(subset(as_tibble(fread('201902_EP_Citations.txt',
                 select = c('Citing_pub_date', 'Citing_app_nbr', 'Citing_appln_id',
                            'Cited_pub_date', 'Cited_app_nbr', 'Cited_Appln_id',
                            'Citn_lag_month', 'Citn_Category'))),
                 Citing_appln_id %in% energy.triadic$Appln_id|Cited_Appln_id %in% energy.triadic$Appln_id) ) %>%
  distinct(Citing_appln_id, Cited_Appln_id, .keep_all = TRUE)

## Publication dates of citing applications (first three columns), deduplicated.
citing.date <- epo.citations[,1:3] %>%
  distinct(Citing_pub_date, Citing_app_nbr, Citing_appln_id, .keep_all = TRUE)

## Join dates, network centrality, cluster membership and TPF-core flags onto
## the energy families; derive the filing year from characters 3-6 of the
## application number (assumed to encode YYYY — TODO confirm against source data).
energy.date <- left_join(energy.triadic, citing.date, by = c('Appln_id' = 'Citing_appln_id')) %>%
  left_join(node.centrality, by = c('FC' = 'node')) %>%
  mutate(membership = as.character(membership)) %>%
  left_join(category.cluster, by = c('membership' = 'node')) %>%
  distinct(Cluster, membership, FC, Family_id, Appln_id, .keep_all = TRUE) %>%
  mutate(filing = as.numeric(substring(Citing_app_nbr, 3, 6))) %>%
  left_join(tpf.core, by = 'Family_id')

## Boxplot (per Cluster) of each community's earliest filing year.
## FIX: the summarised column was named `mean` although it holds min();
## renamed to `earliest` for clarity (only used by the aes() below).
CairoPDF('ClusterBoxplot.pdf', 8, 4.946)
energy.date %>%
  group_by(Cluster, membership, Family_id) %>%
  summarise(filing = min(filing, na.rm = TRUE)) %>%
  group_by(Cluster, membership) %>%
  summarise(earliest = min(filing, na.rm = TRUE)) %>%
  ggplot(aes(Cluster, earliest)) +
  geom_boxplot() +
  #theme(axis.text.x = element_text(angle = 25, hjust = 1))
  coord_flip()
dev.off()
|
d4a7817df7aa52649e4857746e49dc8a55db3649
|
cfb642c4568a403e7cd39b66e16dcaed0d08bd49
|
/man/Lakenames.Rd
|
a9d82dccd19ac1f2c9e2a2a93ab11ebb1ae95d04
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JVAdams/EchoNet2Fish
|
5223bcdb98a43bb61cd629cb33f590cba9fd1fed
|
6e397345e55a13a0b3fca70df3701f79290d30b6
|
refs/heads/master
| 2023-06-22T17:56:41.457893
| 2021-02-08T16:08:09
| 2021-02-08T16:08:09
| 32,336,396
| 4
| 1
| null | 2023-06-09T17:36:08
| 2015-03-16T15:59:18
|
R
|
UTF-8
|
R
| false
| true
| 257
|
rd
|
Lakenames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc-data.R
\name{Lakenames}
\alias{Lakenames}
\title{Great Lakes Names}
\format{
A character vector, length 5.
}
\description{
A vector with the names of the five Great Lakes.
}
|
f4ff5857f802603c86e61c8b161c2a8cdc7f86d2
|
e3a672b5c02a6226133301fc258ba12ca48ffd41
|
/bin/main_wrapper/subsamples_gene_classes_and_runs_enrichment_scripts.R
|
8d395a562b36430c42ae631925fe2cd2cb9f442f
|
[
"MIT"
] |
permissive
|
akhileshkaushal/scRNAseq_cell_cluster_labeling
|
a96bdae8bf4ac63f21df7a2fb6eef78d79a2dbc6
|
a2b8a95e0b77ee7cff64826f7370799e4e1ecbde
|
refs/heads/master
| 2020-06-06T14:25:03.577309
| 2019-05-02T20:35:55
| 2019-05-02T20:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,880
|
r
|
subsamples_gene_classes_and_runs_enrichment_scripts.R
|
####################################
### Script 'subsamples_gene_classes_and_runs_enrichment_scripts.R' is a *wrapper* to run implementations of
### methods to predict cell types using cell clusters from scRNA-seq data and cell type gene expression signatures
###
### It computes:
### a) generating Receiver Operating Characteristic (ROC) and Precision-Recall (PR) curves for each method predictions
### b) getting ROC and PR Area Under the Curves (ROC AUC, and PR AUC)
### c) running robustness analyses by subsampling marker genes from cell type signatures and repeating ROC AUC and PR AUC analyses
###
### Uses three types of infiles:
### (1) a matrix with average gene expression for each gene (rows) cell clusters from scRNA-seq (columns)
### (2) cell type gene expression signatures
### (3) a reference paired table with gold standard cell type predictions for each cell cluster
###
### The general flow of this script is the following:
### (i) Formats inputs for each requested cell type prediction method: CIBERSORT, GSEA, GSVA, and ORA
### (ii) Subsamples cell type gene expression signatures using infiles (2)
### (iii) Runs cell type predictions methods using infile (1) and subsampled signatures from step (ii)
### (iv) Gather results from step (iii)
### (v) Runs ROC, PR and AUC analyses using results from step (iv) and infile (3)
### (vi) Runs robustness analyses (optional), including violin plots of AUC distributions with results from step (v)
###
### Notes:
### Note 1
### In the case of CIBERSORT, two forms of cell type signature infiles can be provided:
### - in the form of gene sets (2a) or in the form of gene expression profiles (2b)
### - Subsampling genes from is always conducted using 2a type files, which can be propagated to 2b type files using script:
### 'propagates_permuted_gmt_files_to_profile.R'
###
### Note 2
### To add new methods to evaluate you can:
### a) add the path to the executable in 'Dependencies:'
### b) provide inputs and parameters adding commands indicated by 'Runs NEW_METHOD and gather its outputs'
### c) add program name to this script 'option_list' parameters --software_to_run and --software_to_auc
### d) add program name to script 'obtains_performance_plots_from_cluster_labelings.pl' so that it can take a 'Generic_ES' infile
###
####################################
### Questions/comments to Javier Diaz - javier.diazmejia@gmail.com
####################################
####################################
### HOW TO RUN THIS SCRIPT
### Using one-line-commands in a console or terminal type:
### 'Rscript ~/path_to_this_file/subsamples_gene_classes_and_runs_enrichment_scripts.R -h'
### for help
####################################
####################################
### Dependencies:
####################################
### R and Rscript (tested with version 3.5.1)
###
### R libraries:
suppressPackageStartupMessages(library(optparse)) # (CRAN) to handle one-line-commands
suppressPackageStartupMessages(library(vioplot)) # (CRAN) to generate violin plots of permutation results
###
### External scripts (check each script for their own dependencies)
### and change their path as needed here.
### NOTE: the leading '~' is expanded manually later via UserHomeDirectory.
PathNameToPermuteGmtRscript <- "~/r_programs/obtains_permuted_samples_from_gmt.R"
PathNameToPermuteProfileRscript <- "~/r_programs/propagates_permuted_gmt_files_to_profile.R"
PathNameToCibersortscript <- "~/bin/obtains_CIBERSORT_for_MatrixColumns.pl"
PathNameToGSEAscript <- "~/bin/obtains_GSEA_for_MatrixColumns.pl"
PathNameToGSVAscript <- "~/r_programs/obtains_GSVA_for_MatrixColumns.R"
PathNameToORAscript <- "~/bin/obtains_ORA_for_MatrixColumns.pl"
PathNameToPerformancePlotsAndAucs <- "~/bin/obtains_performance_plots_from_cluster_labelings.pl"
####################################
####################################
### Turning warnings off for the sake of a cleaner output; the previous
### setting is saved in 'oldw' and restored at the end of the script.
####################################
oldw <- getOption("warn")
options( warn = -1 )
####################################
### Get inputs from command line argumets
####################################
### One make_option() entry per command-line flag; all defaults are strings
### ("NA"/"n"/"NONE") that are later tested literally, never R's NA constant.
option_list <- list(
make_option(c("-i", "--infile_mat"), default="NA",
help="Path/name to a <tab> delimited *file* with average gene expression per cell cluster from scRNA-seq,
genes in rows and cell clusters in columns, like:
genes clust1 clust2 clust3 ... etc
Gene1 0 0.0045 0.0008
Gene2 0.0077 0.0175 0.0082
Gene3 0.0800 0.1532 0.0745
...etc"),
make_option(c("-c", "--infile_signature_gmt"), default="NA",
help="Path/name to a <tab> delimited cell type signature *file* in the form of gene lists (*gmt format), like:
GeneSet1_ID GeneSet1_Name Gene1 Gene2 ... etc
GeneSet2_ID GeneSet2_Name Gene2 Gene3 ... etc"),
make_option(c("-g", "--infile_gold"), default="NA",
help="A path/name to a <tab> delimited *file* of gold standard cluster labels in format, like:
clust1 GeneSet1_ID
clust2 GeneSet3_ID
clust3 GeneSet5_ID
... etc"),
make_option(c("-t", "--permute_gmt"), default="n",
help="Indicates if permutation of --infile_signature_gmt should be conducted [use y/Y] or permuted files exist already [type n/N]. Default 'n'."),
make_option(c("-u", "--propagate_permuted_gmt_to_profiles"), default="n",
help="Only needed by 'CIBERSORT_PROFILE'
Indicates if permutation of --infile_signature_gmt files should be propagated to profiles [use y/Y] or permuted files exist already [type n/N]. Default 'n'."),
make_option(c("-d", "--infile_signature_profile"), default="NA",
help="Only needed by 'CIBERSORT_PROFILE'
Path/name to a <tab> delimited cell type signature *file* in the form of gene expression profile matrix, like:
GENE CellType1 CellType2 CellType3 ... etc
Gene1 0.1 0.04 0.6
Gene2 0.02 0.1 0.01
Gene3 0.04 0.3 0.06
...etc"),
make_option(c("-s", "--sample_percentages"), default="10,100,10",
help="Indicates three values: minimum_percentage, maximum_percentage, increment to be used to sample --infile_signature_gmt. Default '10,100,10'.
For example, to subsample percentages from 10 up to 50, increasing by 5 (i. e. 10, 15, 20, ... 50) use '10,50,5'
Note: if using '-n N' then this script will take previously permuted *gmt infiles, stored at /path_to/--outdir/GMT_PERMUTATIONS"),
make_option(c("-r", "--iterations"), default="1-100",
help="Indicates the number of times to subsample --infile_signature_gmt, e.g. '1-100' to run from iteration 1 to 100,
or '50-100' to run from iteration 50 to 100. Default '1-100'.
Note: if using '-t N', then this script will use previously permuted *gmt infiles"),
make_option(c("-m", "--software_to_run"), default="NONE",
help="Indicates <comma> delimited name(s) of enrichment sofware to *run*:
CIBERSORT_BINARY,CIBERSORT_PROFILE,GSEA,GSVA,ORA
Default 'NONE'."),
make_option(c("-n", "--software_to_auc"), default="NONE",
help="Indicates <comma> delimited name(s) of enrichment sofware results to *gather* together:
CIBERSORT_BINARY,CIBERSORT_PROFILE,GSEA,GSVA,ORA
This may be useful to gather results from separate runs
Default 'NONE'."),
make_option(c("-v", "--generate_violin_plots"), default="n",
help="Indicates if violin plots should be generated from --software_to_run and --software_to_auc steps. Type [y/Y] or [n/N]. Default 'n'."),
make_option(c("-a", "--roc_auc_violin_plot_y_axes_limits"), default="0.5,1",
help="Indicates the min,max values for the ROC AUC violin plot y-axes of enrichment software. Default '0.5,1'."),
make_option(c("-b", "--pr_auc_violin_plot_y_axes_limits"), default="0,1",
help="Indicates the min,max values for the PR AUC violin plot y-axes of enrichment software. Default '0,1'."),
make_option(c("-k", "--roc_horizontal_guide_line"), default="NA",
help="Indicates a value in the y-axis of the ROC AUC violin plots to place a horizontal guide line. Or type 'NA' to omit. Default 'NA'."),
make_option(c("-l", "--pr_horizontal_guide_line"), default="NA",
help="Indicates a value in the y-axis of the PR AUC violin plots to place a horizontal guide line. Or type 'NA' to omit. Default 'NA'."),
make_option(c("-p", "--prefix_outfiles"), default="NA",
help="A prefix for outfile names, e.g. your project ID
Note this script will automatically add 'percentXX.permYY' to the outfile name indicating -s value and iteration number. It can't be 'NA'."),
make_option(c("-o", "--outdir"), default="NA",
help="A path/name for the results directory. It can't be 'NA'."),
make_option(c("-y", "--ora_use_values_or_ranks"), default="NA",
help="This applies only to the ORA test:
Indicates if a cutoff applied to --infile_mat to get 'white/drawn' balls should be based on scores 'ranks' or 'values'
For example, using '-y values -z 1.5' will use all genes with values >= 1.5 in each column of --infile_mat as 'drawn' balls
Whereas, using '-y ranks -z 100' will sort each column of --infile_mat and use the top 100 ranked genes as 'drawn' balls
Or type 'NA' if ORA method is not being used"),
make_option(c("-z", "--ora_mat_cutoff"), default="NA",
help="This applies only to the ORA test:
Indicates the cutoff to be applied to --infile_mat to get 'white/drawn' balls
Or type 'NA' if ORA method is not being used")
)
### Parse the command line and copy each option into a short global used
### throughout the rest of the script.
opt <- parse_args(OptionParser(option_list=option_list))
InfileMat <- opt$infile_mat
InfileGmt <- opt$infile_signature_gmt
InfileProfile <- opt$infile_signature_profile
InfileGold <- opt$infile_gold
Outdir <- opt$outdir
PrefixOutfiles <- opt$prefix_outfiles
Iterations <- opt$iterations
SamplePercentages <- opt$sample_percentages
OraValOrRank <- opt$ora_use_values_or_ranks
OraCutoff <- opt$ora_mat_cutoff
SoftwareToRun <- opt$software_to_run
SoftwareToAuc <- opt$software_to_auc
PermuteGmt <- opt$permute_gmt
PermuteProfile <- opt$propagate_permuted_gmt_to_profiles
PlotViolin <- opt$generate_violin_plots
RocAucAxes <- opt$roc_auc_violin_plot_y_axes_limits
PrAucAxes <- opt$pr_auc_violin_plot_y_axes_limits
RocAucAbbline <- opt$roc_horizontal_guide_line
PrAucAbbline <- opt$pr_horizontal_guide_line
# Wall-clock start, reported in the *CPUusage.txt outfile at the end.
StartTimeOverall<-Sys.time()
####################################
### Check that mandatory parameters are not 'NA' (default)
####################################
### Parameters the user must supply; their optparse default is the literal
### string "NA", so finding that value means the flag was omitted.
ListMandatory<-list("infile_mat", "infile_signature_gmt", "infile_gold", "outdir", "prefix_outfiles", "ora_use_values_or_ranks", "ora_mat_cutoff")
for (param in ListMandatory) {
  # identical() on the scalar option value replaces the original
  # length(grep('^NA$', ..., perl = T)) truthiness test; for the single
  # string values optparse produces the behavior is the same, but the
  # intent is explicit and TRUE is no longer abbreviated as T.
  if (identical(opt[[param]], "NA")) {
    stop(paste("Parameter -", param, " can't be 'NA' (default). Use option -h for help.", sep = "", collapse = ""))
  }
}
####################################
### Check that --sample_percentages and --iterations options are defined correctly
####################################
### --sample_percentages must look like "min,max,step"; the three fields are
### split out into MinPercent/MaxPercent/PercentBy for the seq() loops below.
if (grepl(pattern = "^[0-9]+,[0-9]+,[0-9]+$", ignore.case = T, x = SamplePercentages) == T) {
ListPercentLimits<-unlist(strsplit(SamplePercentages, ","))
MinPercent<-as.numeric(ListPercentLimits[1])
MaxPercent<-as.numeric(ListPercentLimits[2])
PercentBy <-as.numeric(ListPercentLimits[3])
}else{
stop(paste("Unexpected format of --sample_percentages '-s ", SamplePercentages, "'. Expected format is like '-s 10,100,10'", sep = "", collapse = ""))
}
### --iterations must look like "first-last"; split into Min/MaxIterations.
if (grepl(pattern = "^[0-9]+-[0-9]+$", ignore.case = T, x = Iterations) == T) {
IterationLimits<-unlist(strsplit(Iterations, "-"))
MinIterations<-as.numeric(IterationLimits[1])
MaxIterations<-as.numeric(IterationLimits[2])
}else{
stop(paste("Unexpected format of --iterations '-r ", Iterations, "'. Expected format is like '-r 1-100'", sep = "", collapse = ""))
}
####################################
### Create outdirs and define outfiles and global variables
####################################
writeLines("\n*** Create outdirs ***\n")
### Resolve the user's home directory via the shell so a leading '~/' in
### --outdir (and in the script paths) can be expanded manually.
CommandsToGetUserHomeDirectory<-("eval echo \"~$USER\"")
UserHomeDirectory<-system(command = CommandsToGetUserHomeDirectory, input = NULL, wait = T, intern = T)
#
# Expand '~/' and strip any trailing slash from the outdir path.
Outdir<-gsub("^~/",paste(c(UserHomeDirectory,"/"), sep = "", collapse = ""), Outdir)
Outdir<-gsub("/$", "", Outdir)
### This file will contain the list of outfiles to be used to get AUCs;
### each run/gather section below appends one line per permutation result.
ListExpectedOutfiles<-list()
OutfileListToGetAucs<-paste(Outdir, "/", PrefixOutfiles, ".PERMUTE.listToGetAuc.tsv", sep="", collapse = "")
file.create(OutfileListToGetAucs)
####################################
### Define external script paths
####################################
writeLines("\n*** Search for external dependencies ***\n")
### Collect the external script paths under stable names so later sections
### can look them up with ListExternalScripts["..."].
ListExternalScripts<-list("PathNameToPermuteGmtRscript" = PathNameToPermuteGmtRscript,
"PathNameToPermuteProfileRscript" = PathNameToPermuteProfileRscript,
"PathNameToCibersortscript" = PathNameToCibersortscript,
"PathNameToGSEAscript" = PathNameToGSEAscript,
"PathNameToGSVAscript" = PathNameToGSVAscript,
"PathNameToORAscript" = PathNameToORAscript,
"PathNameToPerformancePlotsAndAucs" = PathNameToPerformancePlotsAndAucs
)
### Expand '~/' in each path and fail fast if any dependency is missing.
for (s in names(ListExternalScripts)) {
ListExternalScripts[s]<-gsub("^~/",paste(c(UserHomeDirectory,"/"), sep = "", collapse = ""), ListExternalScripts[s])
if (file.exists(ListExternalScripts[[s]])) {
print(paste("Ok found: ", ListExternalScripts[[s]], sep = "", collapse = ""))
} else {
stop(paste("Couldn't find ", ListExternalScripts[[s]], " . Check section 'Define external script paths'", sep="", collapse = ""))
}
}
####################################
### Get GMT file permutations
####################################
### Either generate the subsampled *gmt signature files by invoking the
### external permutation Rscript, or assume they already exist in the outdir.
writeLines("\n*** Get GMT file permutations ***\n")
if (grepl(pattern = "y", ignore.case = T, x = PermuteGmt) == T) {
CommandsToGetIterations<-paste("Rscript ", ListExternalScripts["PathNameToPermuteGmtRscript"],
" -c ", InfileGmt,
" -o ", Outdir,
" -r ", Iterations,
" -s ", SamplePercentages,
" -p ", PrefixOutfiles,
sep = "", collapse = "")
system(CommandsToGetIterations, wait = T)
}else{
print("Not permutting --infile_signature_gmt. Assuming permuted *gmt files already exist")
}
####################################
### Propagate GMT permutations to Profiles
####################################
### Needed only by CIBERSORT_PROFILE: replays the GMT subsampling (recorded
### in the *_UsedOptions.txt log) onto the expression-profile signature file.
if (grepl(pattern = "y", ignore.case = T, x = PermuteProfile) == T) {
writeLines("\n*** Propagate GMT permutations to Profiles ***\n")
InfileGmtPermutationLog<-paste(Outdir, "/", PrefixOutfiles, ".PERMUTE_GMT_UsedOptions.txt", sep="", collapse = "")
if(file.exists(InfileGmtPermutationLog)) {
CommandsToRunPropagation<-paste("Rscript ", ListExternalScripts["PathNameToPermuteProfileRscript"],
" -c ", InfileProfile,
" -l ", InfileGmtPermutationLog,
" -v ", "min",
" -o ", Outdir,
sep = "", collapse = "")
### Runs GMT propagations to Profiles
system(CommandsToRunPropagation, wait = T)
}else{
### BUGFIX(review): the original stop() referenced 'InfileGmtLocal', which is
### not defined at this point in the script (it is only created later inside
### the CIBERSORT/GSEA/GSVA/ORA loops); the file being checked here is the
### permutation log, so report that path instead.
stop(paste("Couldn't find file: '", InfileGmtPermutationLog, "'",sep="", collapse = ""))
}
}else if (grepl(pattern = "CIBERSORT_PROFILE", ignore.case = T, x = SoftwareToRun) == T) {
print("Not permutting --infile_signature_profile. Assuming permuted gene expression profile files already exist")
}
####################################
### Runs CIBERSORT_BINARY and gather its outputs
####################################
### For every (percent, iteration) pair, run the external CIBERSORT wrapper
### on the matching permuted *gmt signature file ("binary" gene-set mode).
if (grepl(pattern = "CIBERSORT_BINARY", ignore.case = T, x = SoftwareToRun) == T) {
writeLines("\n*** Runing CIBERSORT_BINARY ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Running CIBERSORT_BINARY: with ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
#
InfileGmtLocal<-paste(Outdir,"/GMT_PERMUTATIONS/", PrefixOutfiles, ".percent", percent, ".perm", perm, ".gmt", sep="", collapse = "")
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
CommandsToRunCIBERSORT_BINARY<-paste(ListExternalScripts["PathNameToCibersortscript"],
" -infile_matrix ", InfileMat,
" -infile_classes ", InfileGmtLocal,
" -path_outfiles ", Outdir, "/CIBERSORT_BINARY_PERMUTATIONS/",
" -prefix_outfiles ", PrefixOutfileLocal,
" -nperm ", 1000,
" -classes_type gmt ",
sep = "", collapse = "")
### Runs CIBERSORT_BINARY
system(CommandsToRunCIBERSORT_BINARY, wait = T)
}
}
}
### Gather step: verify each expected CIBERSORT_BINARY score table exists and
### register it (with a "Generic_ES" score-type tag) in the AUC list file.
if (grepl(pattern = "CIBERSORT_BINARY", ignore.case = T, x = SoftwareToAuc) == T) {
writeLines("\n*** Gathering CIBERSORT_BINARY results ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Gathering CIBERSORT_BINARY results: from ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
### Adds expected outfile to list for compilation
ExpectedOutfilePathName<-paste(Outdir,"/CIBERSORT_BINARY_PERMUTATIONS/CIBERSORT/", PrefixOutfileLocal, ".CIBERSORT_enrichment_scores.tsv", sep="", collapse = "")
ExpectedOutfileKey <-paste("CIBERSORT_BINARY", percent, perm, sep="\t", collapse = "\t")
if(file.exists(ExpectedOutfilePathName)) {
ListExpectedOutfiles[ExpectedOutfileKey]<-ExpectedOutfilePathName
write(file = OutfileListToGetAucs, append = T, x=paste(ExpectedOutfilePathName, paste("CIBERSORT_BINARY", "__", percent, "__", perm, sep = "", collapse = ""), "Generic_ES", sep = "\t", collapse = "\t"))
}else{
stop(paste("Couldn't find file: '", ExpectedOutfilePathName, "'",sep="", collapse = ""))
}
}
}
}
####################################
### Runs CIBERSORT_PROFILE and gather its outputs
####################################
### Same as above but in "profile" mode: signatures are the permuted gene
### expression profile matrices produced by the propagation step.
if (grepl(pattern = "CIBERSORT_PROFILE", ignore.case = T, x = SoftwareToRun) == T) {
writeLines("\n*** Runing CIBERSORT_PROFILE ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Running CIBERSORT_PROFILE: with ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
#
InfileProfileLocal<-paste(Outdir,"/PROFILE_PERMUTATIONS/", PrefixOutfiles, ".percent", percent, ".perm", perm, ".profile.tsv", sep="", collapse = "")
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
CommandsToRunCIBERSORT_PROFILE<-paste(ListExternalScripts["PathNameToCibersortscript"],
" -infile_matrix ", InfileMat,
" -infile_classes ", InfileProfileLocal,
" -path_outfiles ", Outdir, "/CIBERSORT_PROFILE_PERMUTATIONS/",
" -prefix_outfiles ", PrefixOutfileLocal,
" -nperm ", 1000,
" -classes_type profile ",
sep = "", collapse = "")
### Runs CIBERSORT_PROFILE
system(CommandsToRunCIBERSORT_PROFILE, wait = T)
}
}
}
### Gather step for CIBERSORT_PROFILE outputs, mirroring the binary variant.
if (grepl(pattern = "CIBERSORT_PROFILE", ignore.case = T, x = SoftwareToAuc) == T) {
writeLines("\n*** Gathering CIBERSORT_PROFILE results ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Gathering CIBERSORT_PROFILE results: from ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
### Adds expected outfile to list for compilation
ExpectedOutfilePathName<-paste(Outdir,"/CIBERSORT_PROFILE_PERMUTATIONS/CIBERSORT/", PrefixOutfileLocal, ".CIBERSORT_enrichment_scores.tsv", sep="", collapse = "")
ExpectedOutfileKey <-paste("CIBERSORT_PROFILE", percent, perm, sep="\t", collapse = "\t")
if(file.exists(ExpectedOutfilePathName)) {
ListExpectedOutfiles[ExpectedOutfileKey]<-ExpectedOutfilePathName
write(file = OutfileListToGetAucs, append = T, x=paste(ExpectedOutfilePathName, paste("CIBERSORT_PROFILE", "__", percent, "__", perm, sep = "", collapse = ""), "Generic_ES", sep = "\t", collapse = "\t"))
}else{
stop(paste("Couldn't find file: '", ExpectedOutfilePathName, "'",sep="", collapse = ""))
}
}
}
}
####################################
### Runs GSEA and gather its outputs
####################################
### Run the external GSEA wrapper for every (percent, iteration) pair using
### the matching permuted *gmt signature file.
if (grepl(pattern = "GSEA", ignore.case = T, x = SoftwareToRun) == T) {
writeLines("\n*** Runing GSEA ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Running GSEA: with ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
#
InfileGmtLocal<-paste(Outdir,"/GMT_PERMUTATIONS/", PrefixOutfiles, ".percent", percent, ".perm", perm, ".gmt", sep="", collapse = "")
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
CommandsToRunGSEA<-paste(ListExternalScripts["PathNameToGSEAscript"],
" -infile_matrix ", InfileMat,
" -infile_classes ", InfileGmtLocal,
" -path_outfiles ", Outdir, "/GSEA_PERMUTATIONS/",
" -prefix_outfiles ", PrefixOutfileLocal,
" -cutoff_print_p ", 0.01,
" -cutoff_print_q ", 0.25,
" -nperm ", 1000,
sep = "", collapse = "")
### Runs GSEA
system(CommandsToRunGSEA, wait = T)
}
}
}
### Gather step: register each GSEA unfiltered p-value matrix (score type
### "GSEA_Pvalues") in the AUC list file, failing if any is missing.
if (grepl(pattern = "GSEA", ignore.case = T, x = SoftwareToAuc) == T) {
writeLines("\n*** Gathering GSEA results ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Gathering GSEA results: from ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
### Adds expected outfile to list for compilation
ExpectedOutfilePathName<-paste(Outdir, "/GSEA_PERMUTATIONS/GSEA/", PrefixOutfileLocal, "/ClassLevel/", PrefixOutfileLocal, ".Pval.Unfiltered.mat.txt", sep="", collapse = "")
ExpectedOutfileKey <-paste("GSEA", percent, perm, sep="\t", collapse = "\t")
if(file.exists(ExpectedOutfilePathName)) {
ListExpectedOutfiles[ExpectedOutfileKey]<-ExpectedOutfilePathName
write(file = OutfileListToGetAucs, append = T, x=paste(ExpectedOutfilePathName, paste("GSEA", "__", percent, "__", perm, sep = "", collapse = ""), "GSEA_Pvalues", sep = "\t", collapse = "\t"))
}else{
stop(paste("Couldn't find file: '", ExpectedOutfilePathName, "'",sep="", collapse = ""))
}
}
}
}
####################################
### Runs GSVA and gather its outputs
####################################
### Run the external GSVA Rscript for every (percent, iteration) pair.
if (grepl(pattern = "GSVA", ignore.case = T, x = SoftwareToRun) == T) {
writeLines("\n*** Runing GSVA ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Running GSVA: with ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
#
InfileGmtLocal<-paste(Outdir,"/GMT_PERMUTATIONS/", PrefixOutfiles, ".percent", percent, ".perm", perm, ".gmt", sep="", collapse = "")
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
CommandsToRunGSVA<-paste("Rscript ", ListExternalScripts["PathNameToGSVAscript"],
" -i ", InfileMat,
" -c ", InfileGmtLocal,
" -o ", Outdir, "/GSVA_PERMUTATIONS/",
" -p ", PrefixOutfileLocal,
" -e 0.05 -f 0.1",
sep = "", collapse = "")
### Runs GSVA
system(CommandsToRunGSVA, wait = T)
}
}
}
### Gather step: register each GSVA score table (score type "GSVA_ES").
if (grepl(pattern = "GSVA", ignore.case = T, x = SoftwareToAuc) == T) {
writeLines("\n*** Gathering GSVA results ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Gathering GSVA results: from ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, sep = "", collapse = "")
### Adds expected outfile to list for compilation
ExpectedOutfilePathName<-paste(Outdir,"/GSVA_PERMUTATIONS/GSVA/", PrefixOutfileLocal, ".GSVA_all_scores_table.tsv", sep="", collapse = "")
ExpectedOutfileKey <-paste("GSVA", percent, perm, sep="\t", collapse = "\t")
if(file.exists(ExpectedOutfilePathName)) {
ListExpectedOutfiles[ExpectedOutfileKey]<-ExpectedOutfilePathName
write(file = OutfileListToGetAucs, append = T, x=paste(ExpectedOutfilePathName, paste("GSVA", "__", percent, "__", perm, sep = "", collapse = ""), "GSVA_ES", sep = "\t", collapse = "\t"))
}else{
stop(paste("Couldn't find file: '", ExpectedOutfilePathName, "'",sep="", collapse = ""))
}
}
}
}
####################################
### Runs ORA and gather its outputs
####################################
### Run the external ORA wrapper for every (percent, iteration) pair; the
### local outfile prefix additionally encodes the ORA cutoff settings
### (values/ranks mode and threshold) so distinct cutoffs do not collide.
if (grepl(pattern = "ORA", ignore.case = T, x = SoftwareToRun) == T) {
writeLines("\n*** Runing ORA ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Running ORA: with ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
#
InfileGmtLocal<-paste(Outdir,"/GMT_PERMUTATIONS/", PrefixOutfiles, ".percent", percent, ".perm", perm, ".gmt", sep="", collapse = "")
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, ".", OraValOrRank, OraCutoff, sep = "", collapse = "")
CommandsToRunORA<-paste(ListExternalScripts["PathNameToORAscript"],
" -infile_matrix ", InfileMat,
" -infile_classes ", InfileGmtLocal,
" -path_outfiles ", Outdir, "/ORA_PERMUTATIONS/",
" -prefix_outfiles ", PrefixOutfileLocal,
" -cutoff_pos ", OraCutoff,
" -use_values_or_ranks ", OraValOrRank,
" -cutoff_ora ", "0.05",
" -cutoff_neg ", "NA",
" -numb_cols ", "ALL",
" -p_correction_test ", "BH",
" -use_ora ", "pc",
" -restrict_classes ", "ALL",
" -set_max ", "1000",
" -set_min ", "1",
" -use_universe ", "i",
sep = "", collapse = "")
### Runs ORA
system(CommandsToRunORA, wait = T)
}
}
}
### Gather step: register each ORA uncorrected p-value matrix ("ORA_Pvalues").
if (grepl(pattern = "ORA", ignore.case = T, x = SoftwareToAuc) == T) {
writeLines("\n*** Gathering ORA results ***\n")
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
for (perm in c(MinIterations:MaxIterations)) {
print(paste("Gathering ORA results: from ", percent, "% of data - iteration ", perm, sep = "", collapse = ""))
PrefixOutfileLocal<-paste(PrefixOutfiles, ".percent", percent, ".perm", perm, ".", OraValOrRank, OraCutoff, sep = "", collapse = "")
### Adds expected outfile to list for compilation
ExpectedOutfilePathName<-paste(Outdir,"/ORA_PERMUTATIONS/ORA/", PrefixOutfileLocal, ".ORA.PvalUncorrected.All.mat", sep="", collapse = "")
### NOTE(review): unlike the other methods' keys, this key tab-joins a
### literal "." element plus the ORA cutoff settings. The keys are only used
### as names of ListExpectedOutfiles (never parsed downstream), so this is
### cosmetic, but the stray "." looks unintended — confirm with author.
ExpectedOutfileKey <-paste("ORA", percent, perm, ".", OraValOrRank, OraCutoff, sep="\t", collapse = "\t")
if(file.exists(ExpectedOutfilePathName)) {
ListExpectedOutfiles[ExpectedOutfileKey]<-ExpectedOutfilePathName
write(file = OutfileListToGetAucs, append = T, x=paste(ExpectedOutfilePathName, paste("ORA", "__", percent, "__", perm, sep = "", collapse = ""), "ORA_Pvalues", sep = "\t", collapse = "\t"))
}else{
stop(paste("Couldn't find file: '", ExpectedOutfilePathName, "'",sep="", collapse = ""))
}
}
}
}
####################################
### Get AUC's from tested software
####################################
### Run the external performance script on the gathered result list, read the
### ROC/PR AUC tables it produces, and draw one violin-plot panel per method
### (x-axis = subsampling percentage, y-axis = AUC across iterations).
if (grepl(pattern = "Y", ignore.case = T, x = PlotViolin) == T) {
writeLines("\n*** Get AUC from tested software ***\n")
ListSoftwareToPlot<-unlist(strsplit(SoftwareToAuc, ","))
CommandToGetPerformancePlotsAndAucs<-paste(ListExternalScripts["PathNameToPerformancePlotsAndAucs"],
" -path_outfiles ", Outdir, "/BENCHMARK/MERGED_TABLES/ ",
" -infile_list_infiles ", OutfileListToGetAucs,
" -infile_gold_standards ", InfileGold,
" -use_graphics_device ", "NA",
sep = "", collapse = "")
system(CommandToGetPerformancePlotsAndAucs, wait = T)
#
# Load the ROC and PR AUC tables written by the performance script.
ExpectedAucROCOutfile <-paste(Outdir, "/BENCHMARK/MERGED_TABLES/PERFORMANCE_PLOTS/", PrefixOutfiles, ".PERMUTE.listToGetAuc.ROCcurves.tsv", sep="", collapse = "")
ExpectedAucPROutfile <-paste(Outdir, "/BENCHMARK/MERGED_TABLES/PERFORMANCE_PLOTS/", PrefixOutfiles, ".PERMUTE.listToGetAuc.PRcurves.tsv", sep="", collapse = "")
auc.roc.mat<-read.table(file = ExpectedAucROCOutfile, header = T, row.names = NULL)
auc.pr.mat <-read.table(file = ExpectedAucPROutfile, header = T, row.names = NULL)
####################################
### Define colours, labels and ylim for violin plots
####################################
writeLines("\n*** Define colours, labels and ylim for violin plots ***\n")
### One fill colour per method (comment names corrected to match the hex
### codes: #E69F00 orange, #009E73 bluish green, #CC79A7 reddish purple,
### #1E90FF dodger blue, #D55E00 vermillion).
ListColoursViolin<-list("CIBERSORT_BINARY" = "#E69F00", # orange
"CIBERSORT_PROFILE" = "#009E73", # bluishgreen
"GSEA" = "#CC79A7", # reddishpurple
"GSVA" = "#1E90FF", # dodgerblue
"ORA" = "#D55E00" # vermillion
)
# Split "min,max" y-axis limit strings into numeric bounds.
RocAucAxesLimits<-unlist(strsplit(RocAucAxes, ","))
RocAucMinYLimit <-as.numeric(RocAucAxesLimits[1])
RocAucMaxYLimit <-as.numeric(RocAucAxesLimits[2])
PrAucAxesLimits<-unlist(strsplit(PrAucAxes, ","))
PrAucMinYLimit <-as.numeric(PrAucAxesLimits[1])
PrAucMaxYLimit <-as.numeric(PrAucAxesLimits[2])
####################################
### Generate violin plots
####################################
writeLines("\n*** Generate violin plots ***\n")
graphics.off()
### Two PDF devices are opened: device 2 receives ROC panels, device 3 PR
### panels; dev.set() below switches between them inside the loop.
pdf(paste(Outdir,"/",PrefixOutfiles,".ROBUSTNESS_ROC.pdf", sep = "", collapse = ""), width = 7, height = 8) ## It can be called with dev.set(2)
par(mfrow=c(length(ListSoftwareToPlot),1), mar= c(3,4,0.1,2) + 0.1)
pdf(paste(Outdir,"/",PrefixOutfiles,".ROBUSTNESS_PR.pdf", sep = "", collapse = ""), width = 7, height = 8) ## It can be called with dev.set(3)
par(mfrow=c(length(ListSoftwareToPlot),1), mar= c(3,4,0.1,2) + 0.1)
for (program in ListSoftwareToPlot) {
PlotPosition<-0
DataToPlotRoc<-list()
DataToPlotPR <-list()
XaxisLabels <-list()
# Collect, for each subsampling percentage, the AUC values of all iterations
# whose 'Dataset' label matches "<program>__<percent>__".
for (percent in seq(from = MinPercent, to = MaxPercent, by=PercentBy)) {
PlotPosition<-PlotPosition+1
KeyToLookFor<-paste(program, "__", percent, "__", sep = "", collapse = "")
#
matchesRoc<-grepl(pattern = KeyToLookFor, x = auc.roc.mat[,"Dataset"])
DataToPlotRoc[[PlotPosition]] <- as.vector(auc.roc.mat[matchesRoc,"ROC_AUC"])
#
matchesPR<-grepl(pattern = KeyToLookFor, x = auc.pr.mat[,"Dataset"])
DataToPlotPR[[PlotPosition]] <- as.vector(auc.pr.mat[matchesPR,"PR_AUC"])
#
XaxisLabels[[PlotPosition]]<-percent
}
### ROC AUC's
dev.set(2)
plot(0, type='n', xlim=c(0.5, length(DataToPlotRoc)+0.5), ylim=c(RocAucMinYLimit,RocAucMaxYLimit), xaxt='n', ylab = "")
lapply(seq_along(DataToPlotRoc), function(percent) vioplot(DataToPlotRoc[[percent]], at=percent, add=TRUE, col = ListColoursViolin[[program]]))
axis(side = 1, at=c(1:length(DataToPlotRoc)), labels = XaxisLabels)
text(labels = program, x = length(XaxisLabels)-1, y = RocAucMinYLimit+0.1)
# NOTE(review): when --roc_horizontal_guide_line is left at the default "NA",
# abline() receives the character string "NA"; warnings are suppressed
# globally, but confirm this silently draws nothing rather than erroring.
abline(h=RocAucAbbline, lty=2, col="gray60", lwd=1)
mtext(text = "ROC AUC", side=2, line = 2.2, cex=0.75)
### PR AUC's
dev.set(3)
plot(0, type='n', xlim=c(0.5, length(DataToPlotPR)+0.5), ylim=c(PrAucMinYLimit,PrAucMaxYLimit), xaxt='n', ylab = "")
lapply(seq_along(DataToPlotPR), function(percent) vioplot(DataToPlotPR[[percent]], at=percent, add=TRUE, col = ListColoursViolin[[program]]))
axis(side = 1, at=c(1:length(DataToPlotPR)), labels = XaxisLabels)
text(labels = program, x = length(XaxisLabels)-1, y = PrAucMinYLimit+0.1)
# NOTE(review): same "NA"-string concern as the ROC guide line above.
abline(h=PrAucAbbline, lty=2, col="gray60", lwd=1)
mtext(text = "PR AUC", side=2, line = 2.2, cex=0.75)
}
graphics.off()
}
####################################
### Delete temporary files
####################################
### The AUC list file was only needed as input for the performance script.
writeLines("\n*** Delete temporary files ***\n")
file.remove(OutfileListToGetAucs)
####################################
### Report used options
####################################
### Dump every parsed option (short flag, long name, value) to a log file;
### make_option objects are S4, hence the @slot accessors.
writeLines("\n*** Report used options ***\n")
OutfileOptionsUsed<-paste(Outdir,"/",PrefixOutfiles,".PERMUTE_UsedOptions.txt", sep="")
TimeOfRun<-format(Sys.time(), "%a %b %d %Y %X")
write(file = OutfileOptionsUsed, x=c(TimeOfRun,"\n","Options used:"))
for (optionInput in option_list) {
write(file = OutfileOptionsUsed, x=(paste(optionInput@short_flag, optionInput@dest, opt[optionInput@dest], sep="\t", collapse="\t")),append = T)
}
####################################
### Report time used
####################################
### Overall wall-clock time since StartTimeOverall, written to *CPUusage.txt.
writeLines("\n*** Report time used ***\n")
EndTimeOverall<-Sys.time()
TookTimeOverall <-format(difftime(EndTimeOverall, StartTimeOverall, units = "secs"))
OutfileCPUusage<-paste(Outdir,"/",PrefixOutfiles,".PERMUTE_CPUusage.txt", sep="")
ReportTime<-c(
paste("overall",TookTimeOverall,collapse = "\t")
)
write(file = OutfileCPUusage, x=c(ReportTime))
####################################
### Turning warnings on
####################################
### Restore the warning level saved at the top of the script.
options(warn = oldw)
####################################
### Finish
####################################
print("END - All done!!! Took time:")
print(ReportTime)
quit()
|
022e90f9641e5a82c76b18fc538cda448a8cc6f5
|
48e3dfe542ced2bc70ede697938f0fea85517f75
|
/man/calcgroup.Rd
|
ca55bf3925d68308a2be7ad3acb024d9f8e21bd8
|
[] |
no_license
|
cran/SSrat
|
a4920a7eefbe543ab7afe99cf6f906118f425cbd
|
f003eb7cd174ac86a1583b5c8bab80694b254d6b
|
refs/heads/master
| 2021-06-07T17:41:30.050716
| 2018-04-03T21:36:44
| 2018-04-03T21:36:44
| 27,231,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,641
|
rd
|
calcgroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcgroup.R
\name{calcgroup}
\alias{calcgroup}
\title{Calculates sociometric status determinations of a specified single group
from a SSrat compliant dataframe}
\usage{
calcgroup(schoolid = 1, groupid = 1, dataframe, scalelength = c(5, 3, 7,
9), alpha = c(0.1, 0.05, 0.01), NBcriteria = F, printresult = F)
}
\arguments{
\item{schoolid}{The schoolnumber that identifies the school. Default = 1.}
\item{groupid}{The groupnumber that identifies the group. Default = 1.}
\item{dataframe}{The dataframe with the rating data. This dataframe should
have columns schoolid, groupid, respid, and for n raters columns r01 to rn,
with a maximum of r99. Function readratdatfixed can be used to create such a
dataframe from a prepared text file.}
\item{scalelength}{Either 3, 5, 7 or 9. Default = 5.}
\item{alpha}{The significance levels to be applied to the probability
distributions of the four total scores that have been derived from the
ratings. By choosing an appropriate alpha, the user can fine tune the
criterion for the status determination. A list of various alphas can be
supplied. Default is the list (.10, .05, .01).}
\item{NBcriteria}{A boolean. When TRUE, the classification criteria of
Newcomb & Bukowski (1983) will be applied, instead of the SSrat criteria.
These criteria are applicable to rating scales of length 3. When this option
is selected with longer scales, the midscore is converted to 2, all scores
larger than the midscore are converted to 3 and all scores lower than the
midscore are converted to 1. When another recoding scheme of the scores is
preferred, the input ratings should be recoded to 1, 2 and 3 before the use
of this function (use \code{car::recode}).}
\item{printresult}{Boolean which identifies whether the calculated results
should be shown. Default is False.}
}
\value{
\item{dataframe}{dataframe with the most relevant results, as
calculated for each respondent by SSrat} \item{dataframe$schoolid}{school id
as entered} \item{dataframe$groupid}{group id as entered}
\item{dataframe$respid}{respondent id as entered}
\item{dataframe$nrAss}{number of assessors who have given a valid rating}
\item{dataframe$tr.S}{total rating Sympathy} \item{dataframe$tr.A}{total
rating Antipathy} \item{dataframe$tr.P}{total rating Preference}
\item{dataframe$tr.I}{total rating Impact} \item{dataframe$SS.xx}{Social
Determination as attributed by SSrat, applying alpha = .xx. Defaults to
SS.10, SS.05 and SS.01 } \item{S}{matrix of Sympathy ratings}
\item{A}{matrix of Antipathy ratings} \item{P}{matrix of Preferences}
\item{I}{matrix of Impact scores} \item{intermediate$pls}{probability
referring to left-sided testing of tr.S} \item{intermediate$prs}{probability
referring to right-sided testing of tr.S} \item{intermediate$es}{expected
value of tr.S} \item{intermediate$pla}{probability referring to left-sided
testing of tr.A} \item{intermediate$pra}{probability referring to
right-sided testing of tr.A} \item{intermediate$ea}{expected value of tr.A}
\item{intermediate$plp}{probability referring to left-sided testing of tr.P}
\item{intermediate$prp}{probability referring to right-sided testing of
tr.P} \item{intermediate$ep}{expected value of tr.P}
\item{intermediate$pli}{probability referring to left-sided testing of tr.I}
\item{intermediate$pri}{probability referring to right-sided testing of
tr.I} \item{intermediate$ei}{expected value of tr.I}
}
\description{
In a group, all group members are asked to rate all other group members on a
rating scale. This rating scale can have 3 (1..3), 5 (1..5), 7 (1..7) or 9
(1..9) rating points. The rating scale has a neutral mid point (respectively
2, 3, 4 and 5). \cr Application of SSrat::calcgroup calculates a
classification into five categories of sociometric status, which is labeled
as follows: (1) popular, (2) rejected, (3) neglected, (4) controversial and
(5) average.
}
\details{
It is assumed that the data are gathered on a single (2R+1)-point rating
scale. For a 7-point scale, R = 3. The scale midpoint must represent a
neutral judgment. The data are arranged in a matrix P, with rows belonging
to assessors and columns belonging to the assessed.Let P[i, k] denote the
rating given by assessor i to group member k.\cr First P* scores are
calculated by subtracting R+1 from the values of P. The Sympathy scores S
are calculated by replacing all negative scores of P* by zero. The Antipathy
score A are calculated by replacing all positive scores of P* by zero and
taking the absolute value. P* = S - A. The Preference scores P are equal to
the ratings obtained. The Impact scores I are calculated by taking the
absolute values of P*. I = S + A.\cr In the next step, sum scores are
calculated over columns. Then the distributions of these sumscores are
calculated, using the principle of convolution. Lastly, the positions in the
distributions are calculated to identify persons with scores in the areas of
the lower and higher alpha percent. This allows us to translate the criteria
of Coie et al. (1983) into probability terms. A person k obtains the
following social determinations (E is expected value): Popular = sum(P[,k])
significantly high, sum(S[,k]) > E(sum(S[,k])) and sum(A[,k]) <
E(sum(A[,k])); Rejected = sum(P[,k]) significantly low, sum(S[,k]) <
E(sum(S[,k])) and sum(A[,k]) > E(sum(A[,k])); Neglected = sum(I[,k])
significantly low, sum(S[,k]) < E(sum(S[,k])) and sum(A[,k]) <
E(sum(A[,k])); Controversial = sum(I[,k]) significantly high, sum(S[,k]) >
E(sum(S[,k])) and sum(A[,k]) > E(sum(A[,k])); Average = remaining group
members.
When the criteria of Newcomb & Bukowski (1983) are applied, the most liked
nominations LM are the ratings > R and the least liked nominations LL are
the ratings < R, and the impact score SI = LM + LL. The criteria for a
person k are: Popular = sum(LM[,k]) significantly high, sum(LL[,k]) <
E(sum(LL[,k])); Rejected = sum(LL[,k]) significantly high, sum(LM[,k]) <
E(sum(LM[,k])); Neglected = sum(SI[,k]) significantly low; Controversial =
sum(LM[,k]) significantly high and sum(LL[,k] > E(sum(LL[,k])) or
sum(LL[,k]) significantly high and sum(LM[,k] > E(sum(LM[,k])); Average =
remaining group members.
}
\examples{
data(example5.rat)
# calc SSRAT results for this group
out =calcgroup (school=1, group=23, data=example5.rat, scalelength="3")
out$dataframe
# calc Newcomb & Bukowski results for this group
out =calcgroup (school=1, group=23, data=example5.rat, scalelength="3", NBcriteria=TRUE)
out$dataframe
# calc Newcomb & Bukowski results for example1
data(example1.rat)
out =calcgroup (school=1, group=1, data=example1.rat, scalelength="7", NBcriteria=TRUE)
out$dataframe
# calc SSrat results for example1
out =calcgroup (school=1, group=1, data=example1.rat, scalelength="7")
out$dataframe
}
\references{
Coie, J.D., & Dodge, K.A. (1983). Continuities and changes in
children's social status: A five-year longitudinal study. Merrill-Palmer
Quarterly, 29, 261-282.\cr Newcomb, A. F., & Bukowski, W. M. (1983). Social
impact and social preference as determinants of children's peer group
status. Developmental Psychology, 19, 856-867.\cr Maassen, G. H. and
Landsheer, J. A. (1998). SSRAT: The processing of rating scales for the
determination of two-dimensional sociometric status. Behavior Research
Methods Instruments & Computers, 30(4), 674-679.
}
\seealso{
\code{\link{readratdatafixed}}
}
\author{
Hans Landsheer
}
\keyword{List}
\keyword{list}
|
1630b6a21cddefc5aa90f9355f018839bd0f2af7
|
67330c00d19fe4c9c38885227dcb3cbbd9817f3a
|
/man/clustord.fit.Rd
|
88e5a0e10e5c20f07daf40f86b944d0b00a44717
|
[] |
no_license
|
vuw-clustering/clustord
|
5f3965a79a374f266cc9b8c7965927ba454b2aa2
|
5c42a5d2edd7e900613f7795e9bcd617a7b35fb3
|
refs/heads/master
| 2023-08-03T21:21:41.885481
| 2023-07-26T05:25:35
| 2023-07-26T05:25:35
| 187,764,285
| 1
| 5
| null | 2023-09-09T01:33:56
| 2019-05-21T04:55:58
|
R
|
UTF-8
|
R
| false
| true
| 46,950
|
rd
|
clustord.fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{clustord.fit}
\alias{clustord.fit}
\title{Likelihood-based clustering using Ordered Stereotype Models (OSM), Proportional
Odds Models (POM) or Binary Models}
\usage{
clustord.fit(
formula,
model,
nclus.row = NULL,
nclus.column = NULL,
long.df,
initvect = NULL,
pi.init = NULL,
kappa.init = NULL,
EM.control = default.EM.control(),
optim.method = "L-BFGS-B",
optim.control = default.optim.control(),
constraint_sum_zero = TRUE,
start_from_simple_model = TRUE,
nstarts = 5
)
}
\arguments{
\item{formula}{model formula (see 'Details').}
\item{model}{\code{"OSM"} for Ordered Stereotype Model or \code{"POM"} for
Proportional Odds Model or \code{"Binary"} for binary data model.}
\item{nclus.row}{number of row clustering groups.}
\item{nclus.column}{number of column clustering groups.}
\item{long.df}{data frame with at least three columns, \code{Y} and \code{ROW}
and \code{COL}. Each row in the data frame corresponds to a single cell
in the original data matrix; the response value in that cell is given by
\code{Y}, and the row and column indices of that cell in the matrix are
given by \code{ROW} and \code{COL}. Use \code{\link{mat2df}} to create
this data frame from your data matrix of responses.
\code{\link{mat2df}} also allows you to supply data frames of row or
column covariates which will be incorporated into \code{long.df}.}
\item{initvect}{(default NULL) vector of starting parameter values for the model.
Note: if the user enters an initial vector of parameter values, it is
\strong{strongly recommend} that the user also check the values of
\code{parlist.init} in the output object, to \strong{make sure that the
constructed parameters are as expected}.
If \code{NULL}, starting parameter values will be generated automatically.
See 'Details' for definitions of the parameters used for different models.}
\item{pi.init}{(default \code{NULL}) starting parameter values for the proportions
of observations in the different row clusters.
If \code{NULL}, starting values will be generated automatically.
User-specified values of \code{pi.init} must be of length \code{(nclus.row-1)}
because the final value will be automatically calculated so that the
values of \code{pi} sum to 1.}
\item{kappa.init}{(default \code{NULL}) starting parameter values for the
proportions of observations in the different column clusters.
If \code{NULL}, starting values will be generated automatically.
User-specified values of \code{kappa.init} must be of length
\code{(nclus.column-1)} because the final value will be automatically
calculated so that the values of \code{kappa} sum to 1.}
\item{EM.control}{(default = \code{list(EMcycles=50, EMlikelihoodtol=1e-4,
EMparamtol=1e-2, paramstopping=TRUE, startEMcycles=10, keepallparams=FALSE,
epsilon=1e-6)})
list of parameters controlling the EM algorithm.
\code{EMcycles} controls how many EM iterations of the main EM algorithm
are used to fit the chosen submodel.
\code{EMlikelihoodtol} is the tolerance for the stopping criterion for
the \strong{log-likelihood} in the EM algorithm. The criterion is the
absolute change in the \strong{incomplete} log-likelihood since the
previous iteration, scaled by the size of the dataset \code{n*p}, where
\code{n} is the number of rows in the data matrix and \code{p} is the
number of columns in the data matrix. The scaling is applied because the
incomplete log-likelihood is predominantly affected by the dataset size.
\code{EMparamtol} is the tolerance for the stopping criterion for the
\strong{parameters} in the EM algorithm. This is a tolerance for the
\strong{sum} of the scaled parameter changes from the last iteration,
i.e. the tolerance is not for any individual parameter but for the sum of
changes in all the parameters. Thus the default tolerance is 1e-2.
The individual parameter criteria are the absolute differences between
the exponentiated absolute parameter value at the current timestep and
the exponentiated absolute parameter value at the previous timestep, as a
proportion of the exponentiated absolute parameter value at the current
timestep. The exponentiation is to rescale parameter values that are
close to zero.
For example, if there are around 5 independent parameter values, then at the
point of convergence using default tolerances for the log-likelihood and the
parameters, each parameter will have a scaled absolute change since the
previous iteration of about 1e-4; if there are 20 or 30 independent
parameters, then each will have a scaled absolute change of about 1e-6.
\code{paramstopping}: if \code{FALSE}, indicates that the EM algorithm
should only check convergence based on the change in incomplete-data
log-likelihood, relative to the current difference between the
complete-data and incomplete-data log-likelihoods, i.e.
\code{abs(delta_lli)/abs(llc[iter] - lli[iter])};
if \code{TRUE}, indicates that as well as checking the likelihood
criterion, the EM algorithm should also check whether the relative change
in the exponentials of the absolute values of the current parameters is
below the tolerance \code{EMstoppingpar}, to see whether the parameters
and the likelihood have both converged.
\code{startEMcycles} controls how many EM iterations are used when
fitting the simpler submodels to get starting values for fitting models
with interaction.
\code{keepallparams}: if true, keep a record of parameter values
(including pi_r and kappa_c) for every EM iteration.
For \code{columnclustering}, the parameters saved from each iteration
will NOT be converted to column clustering format, and will be in the row
clustering format, so \code{alpha} in
\code{EM.status$params.every.iteration} will correspond to beta_c and
\code{pi} will correspond to kappa.
\code{epsilon}: default 1e-6, small value used to adjust values of pi,
kappa and theta that are too close to zero so that taking logs of them
does not create infinite values.}
\item{optim.method}{(default "L-BFGS-B") method to use in optim within the M
step of the EM algorithm. Must be one of 'L-BFGS-B', 'BFGS', 'CG' or
'Nelder-Mead' (i.e. not the SANN method).}
\item{optim.control}{control list for the \code{optim} call within the M step
of the EM algorithm. See the control list Details in the \code{optim}
manual for more info.}
\item{constraint_sum_zero}{(default \code{TRUE}) if \code{TRUE}, use constraints
that cluster effects sum to zero; if \code{FALSE}, use constraints that
the cluster effect for the first cluster will be 0.
Both versions have the same constraints for joint row-column cluster
effects: these effects are described by a matrix of parameters gamma_rc,
indexed by row cluster and column cluster indices, and the constraints
are that the final column of gamma_rc is equal to the negative sum of the
other columns (so \code{gamma} columns sum to zero) and first row of
gamma_rc is equal to the negative sum of the other rows (so \code{gamma}
rows sum to zero).}
\item{start_from_simple_model}{(default \code{TRUE}) if \code{TRUE}, fit a
simpler clustering model first and use that to provide starting values for
all parameters for the model with interactions;
if \code{FALSE}, use the more basic models to provide starting values only
for \code{pi.init} and \code{kappa.init}.
If the full model has interaction terms, then simpler models are ones
without the interactions. If the model has individual row/column effects
alongside the clusters, then simpler models are ones without the individual
row/column effects. If the full model has covariates, then simpler models
are ones without the covariates (to get starting values for the cluster
parameters), and ones with the covariates but no clustering (to get
starting values for the covariates).}
\item{nstarts}{(default 5) number of random starts to generate, if generating
random starting points for the EM algorithm.}
}
\value{
A list with components:
\code{info}: Basic info n, p, q, the number of parameters, the number of
row clusters and the number of column clusters, where relevant.
\code{model}: The model used for fitting, "OSM" for Ordered Stereotype
Model, "POM" for Proportional Odds Model, or "Binary" for Binary model.
\code{EM.status}: a list containing the latest iteration \code{iter},
latest incomplete-data and complete-data log-likelihoods \code{new.lli}
and \code{new.llc}, the best incomplete-data log-likelihood \code{best.lli}
and the corresponding complete-data log-likelihood, \code{llc.for.best.lli},
and the parameters for the best incomplete-data log-likelihood,
\code{params.for.best.lli}, indicator of whether the algorithm converged
\code{converged}, and if the user chose to keep all parameter values from
every iteration, also \code{params.every.iteration}.
Note that for \strong{biclustering}, i.e. when \code{ROWCLUST} and
\code{COLCLUST} are both included in the model, the \strong{incomplete}
log-likelihood is calculated using the entropy approximation, and this
may be \strong{inaccurate} unless the algorithm has converged or is close
to converging. So beware of using the incomplete log-likelihood and the
corresponding AIC value \strong{unless the EM algorithm has converged}.
\code{criteria}: the calculated values of AIC, BIC,
etc. from the best incomplete-data log-likelihood.
\code{epsilon}: the very small value (default 1e-6) used to adjust values
of pi and kappa and theta that are too close to zero, so that taking logs
of them does not produce infinite values. Use the EM.control argument to
adjust epsilon.
\code{constraints_sum_zero}: the chosen value of constraints_sum_zero.
\code{param_lengths}: vector of total number of parameters/coefficients
for each part of the model, labelled with the names of the components.
The value is 0 for each component that is not included in the model, e.g.
if there are no covariates interacting with row clusters then the
\code{rowc_cov_coef} value will be 0. If the component is included, then
the value given will include any dependent parameter/coefficient values,
so if column clusters are included then the \code{colc_coef} value will
be \code{nclus.column}, whereas the number of independent values will be
\code{nclus.column - 1}.
\code{initvect}: the initial \emph{vector} of parameter values, either
specified by the user or generated automatically. This vector has only
the \strong{independent} values of the parameters, not the full set.
\code{outvect}: the final \emph{vector} of parameter values, containing
only the independent parameter values from \code{parlist.out}.
\code{parlist.init}: the initial list of parameters, constructed from
the initial parameter vector \code{initvect}. Note that if the initial
vector has been incorrectly specified, the values of \code{parlist.init}
may not be as expected, and they should be checked by the user.
\code{parlist.out}: fitted values of parameters.
\code{pi}, \code{kappa}: fitted values of pi and kappa, where relevant.
\code{ppr}, \code{ppc}: the posterior probabilities of membership of the
row clusters and the column clusters, where relevant.
\code{rowc_mm}, \code{colc_mm}, \code{cov_mm}: the model matrices for,
respectively, the covariates interacting with row clusters, the covariates
interacting with column clusters, and the covariates not interacting with
row or column clusters (i.e. the covariates with constant coefficients).
Note that one row of each model matrix corresponds to one row of long.df.
\code{RowClusters}, \code{ColumnClusters}: the assigned row and column
clusters, where relevant, where each row/column is assigned to a cluster
based on maximum posterior probability of cluster membership (\code{ppr}
and \code{ppc}).
}
\description{
Likelihood-based clustering with parameters fitted using the EM algorithm.
You can perform clustering on rows or columns of a data matrix, or biclustering
on both rows and columns simultaneously. You can include any number of
covariates for rows and covariates for columns.
Ordinal models used in the package are Ordered Stereotype Model (OSM),
Proportional Odds Model (POM) and a dedicated Binary Model for binary data.
}
\details{
You can select your own input parameters, or starting values will be
generated by running kmeans or by fitting simpler models and feeding the
outputs into the final model as starting values.
The starting point for clustering is a data matrix of response values that
are binary or categorical. You may also have a data frame of covariates that
are linked to the rows of the data matrix, and may also have a data frame of
covariates that are linked to the columns of the data matrix.
For example, if clustering data from fishing trawls, where the rows are trawl
events and columns are species caught, then you could also supply a gear
covariate linked to the rows, representing gear used on each trawl event, and
could additionally supply species covariates linked to the columns,
representing auxiliary information about each species. There is no
requirement to provide any covariates, and you can provide only row
covariates, or only column covariates.
Before running \code{clustord.fit}, you need to run \code{\link{mat2df}} to
convert the data matrix into a long form data frame. The data frame needs to
have at least three columns, \code{Y} and \code{ROW} and \code{COL}. Each row
in the data frame corresponds to a single cell in the original data matrix;
the response value in that cell is given by \code{Y}, and the row and column
indices of that cell in the matrix are given by \code{ROW} and \code{COL}.
\code{\link{mat2df}} also allows you to supply data frames of row or column
covariates which will be incorporated into \code{long.df}.
Then, to run the \code{clustord.fit} function, you need to enter your chosen
formula and model, and the number of clusters you want to fit. The formula
structure is akin to that for \code{glm}, but with a few restrictions. You
can include any number of covariates in the same way as for a multiple
regression model, though unlike for \code{glm}, you can include both row and
column covariates.
Note that, unlike \code{glm}, you should not specify a \code{family}
argument; the \code{model} argument is used instead.
\code{formula} \strong{argument details}
In the following description of different models, the Binary model is used
for simplicity when giving the mathematical descriptions of the models, but
you can use any of the following models with the Ordered Stereotype or
Proportional Odds Models as well.
In the \code{formula} argument, the response must be exactly \code{Y}. You
cannot use any functions of \code{Y} as the response, nor can you include
\code{Y} in any terms on the right hand side of the formula. \code{Y} is the
name in \code{clustord.fit} of the response values in the original data matrix.
The \code{formula} argument has 4 special variables: \code{ROWCLUST},
\code{COLCLUST}, \code{ROW} and \code{COL}. There are some restrictions on
how these can be used in the formula, as they are not covariates, but instead
act as indicators of the clustering structure you want to use.
All other variables in the formula will be any covariates that you want to
include in the model, and these are unrestricted, and can be used in the same
way as in \code{glm}.
\code{ROWCLUST} and \code{COLCLUST} are used to indicate what row clustering
structure you want, and what column clustering structure you want,
respectively. The inclusion of \code{ROWCLUST} as a single term indicates
that you want to include a row clustering effect in the model. In the
simplest row clustering model, for Binary data with \strong{row clustering}
effects only, the basic function call would be
\code{clustord.fit(Y ~ ROWCLUST, model="Binary", long.df=long.df)}
and the model fitted would have the form:
Logit(P(Y = 1)) = mu + rowc_coef_r
where mu is the intercept term, and rowc_coef_r is the row cluster effect
that will be applied to every row from the original data matrix that is a
member of row cluster r. The inclusion of \code{ROWCLUST} corresponds to the
inclusion of rowc_coef_r.
Note that we are not using notation involving greek letters, because (a) we
ran out of letters for all the different types of parameters in the model and
(b) with this many parameters, it would be difficult to remember which ones
are which.
Similarly to row clustering, the formula \code{Y ~ COLCLUST} would perform
\strong{column clustering}, with model Logit(P(Y = 1)) = mu + colc_coef_c,
where colc_coef_c is the column cluster effect that will be applied to every
column from the original data matrix that is a member of column cluster c.
Including both \code{ROWCLUST} and \code{COLCLUST} in the same formula
indicates that you want to perform biclustering, i.e. you want to cluster the
rows and the columns of the original data matrix simultaneously. If included
without interaction, then the terms just correspond to including rowc_coef_r
and colc_coef_c in the model:
The formula
\code{Y ~ ROWCLUST + COLCLUST}
is the simplest possible \strong{biclustering} model,
Logit(P(Y = 1)) = mu + rowc_coef_r + colc_coef_c
If you want to include interaction between the rows and columns, i.e. you
want to perform block biclustering where each block corresponds to a row
cluster r and a column cluster c, then that model has a matrix of parameters
indexed by r and c.
\code{clustord.fit(Y ~ ROWCLUST*COLCLUST, model="Binary", ...)} has the model
Logit(P(Y = 1)) = mu + rowc_colc_coef_rc
This model can instead be called using the equivalent formula
\code{Y ~ ROWCLUST + COLCLUST + ROWCLUST:COLCLUST}.
You can instead use the formula \code{Y ~ ROWCLUST:COLCLUST}. Mathematically,
this is equivalent to the previous two. In regression, the models would not
be equivalent but in clustering, they are equivalent, and have the same
number of independent parameters overall. If you include the main effects,
then that reduces the number of independent parameters in the interaction
term compared to if you just use the interaction term (see below section about
\code{initvect}).
You cannot include just one of the main effects alongside the interaction
term, i.e. you cannot use \code{Y ~ ROWCLUST + ROWCLUST:COLCLUST} or
\code{Y ~ COLCLUST + ROWCLUST:COLCLUST}. This is for simplicity in the code,
and to avoid confusion when interpreting the results.
However, \code{clustord.fit} allows a lot more flexibility than this. The
variables \code{ROW} and \code{COL} are used to indicate that you want to
also include \strong{individual row or column effects}, respectively.
For example, if you are clustering binary data that indicates the presence/
absence of different species (columns) at different trawl events (rows), and
you know that one particular species is incredibly common, then you can
include column effects in the model, which will allow for the possibility
that two columns may correspond to species with different probabilities of
appearing in the trawl.
You can add individual column effects along with
row clustering, or you can add individual row effects along with column clustering.
The formula for row clustering with individual column effects (without
interaction) is
\code{Y ~ ROWCLUST + COL}
which corresponds to Binary model
Logit(P(Y = 1)) = mu + rowc_coef_r + col_coef_j
So if two cells from the data matrix are in the same row cluster, but in
different columns, they will not have the same probability of Y = 1.
You can also add interaction between the individual row/column effects and
the clustering effects.
If you still want to be able to see the row cluster and column effects
separately, then you use \code{Y ~ ROWCLUST*COL} or
\code{Y ~ ROWCLUST + COL + ROWCLUST:COL} (these are both the same), which
have model
Logit(P(Y = 1)) = mu + rowc_coef_r + col_coef_j + rowc_col_coef_rj
As before, rowc_coef_r and col_coef_j are the row cluster effects and
individual column effects, and rowc_col_coef_rj are the interaction terms.
Alternatively, you can use the mathematically-equivalent formula
\code{Y ~ ROWCLUST:COL} which has model
Logit(P(Y = 1)) = mu + rowc_col_coef_rj
where the row cluster effects and individual column effects are absorbed into
the matrix rowc_col_coef_rj. These models are the same mathematically, the
only differences between them are in how they are constrained (see below in
the section about the \code{initvect} argument) and how they should be
interpreted.
Note that if you were using covariates, then it would not be equivalent to
leave out the main effects and just use the interaction terms, but the
clustering models don't work quite the same as regression models with
covariates.
Equivalently, if you want to cluster the columns, you can include individual
row effects alongside the column clusters, i.e.
\code{Y ~ COLCLUST + ROW} or \code{Y ~ COLCLUST + ROW + COLCLUST:ROW},
depending on whether you want the interaction terms or not.
You are \strong{not} able to include individual row effects with row clusters,
or include individual column effects with column clusters, because there is
not enough information in ordinal or binary data to fit these models. As a
consequence, you cannot include individual row or column effects if you are
doing biclustering, e.g.
\code{Y ~ ROWCLUST + COLCLUST + ROW} or \code{Y ~ ROWCLUST + COLCLUST + COL}
are not permitted.
From version 1 of the package, you can now also include \strong{covariates}
alongside the clustering patterns. The basic way to do this is include them
as additions to the clustering model_structure. For example, including one row
covariate \code{xr} to a row clustering model would have the formula
\code{Y ~ ROWCLUST + xr}
with Binary model Logit(P(Y = 1)) = mu + rowc_coef_r + row_coef_1*xr_i
where row_coef_1 is the coefficient of xr_i, just as in a typical regression
model.
Additional row covariates can also be included, and you can include
interactions between them, and functions of them, as in regression models, e.g.
\code{Y ~ ROWCLUST + xr1*log(xr2)}
which would have the Binary model
Logit(P(Y = 1)) = mu + rowc_coef_r + row_coef1*xr1_i + row_coef2*log(xr2_i) +
row_coef3*xr1_i*log(xr2_i)
If instead you want to add column covariates to the model, they work in the
same way after they've been added to the \code{long.df} data frame using
\code{\link{mat2df}}, but they are indexed by j instead of i. Simplest model,
with one single column covariate xc, would have formula
\code{Y ~ ROWCLUST + xc}
with Binary model Logit(P(Y = 1)) = mu + rowc_coef_r + col_coef1*xc_j
You can use any functions of or interactions between column covariates, just
as with row covariates. You can similarly add row or column covariates to
column clustering or biclustering models.
You can include \strong{interactions between covariates} and \code{ROWCLUST}
or \code{COLCLUST} in the formula. But these are \strong{not quite} the same
as interactions between covariates. The formula
\code{Y ~ ROWCLUST*xr}
where \code{xr} is some row covariate, corresponds to the Binary model
Logit(P(Y = 1)) = mu + rowc_coef_r + rowc_row_coef_r1*xr_i
What this means is that there is a term in the linear predictor that involves
the row covariate xr (which has the index i because it is a row covariate),
and each cluster (indexed by r) has a different coefficient for
that covariate (as distinct from the non-interaction covariate models above,
which have the same coefficients for the covariates regardless of which
cluster the row is in).
This is different from interaction terms involving only covariates, where two
or more covariates appear multiplied together in the model and then have a
shared coefficient term. In a clustering/covariate interaction, the row or
column clustering pattern controls the coefficients rather than adding a
different type of covariate.
Note that the pure cluster effect rowc_coef_r is also included in the model
automatically, in the same way that a regression formula \code{Y ~ x1*x2}
would include the individual x1 and x2 effects as well as the interaction
between x1 and x2.
The coefficients for row clusters interacting with row coefficients are named
\code{row.cluster.row.coef} in the output of \code{clustord.fit} because you
can also have coefficients for interactions between row clustering and column
covariates, or column clustering and row covariates, or column clustering and
column covariates. Row clustering interacting with column covariates would
look something like
\code{Y ~ ROWCLUST*xc}
with Binary model Logit(P(Y = 1)) = mu + rowc_coef_r + rowc_col_coef_r1*xc_j
The other combinations of clustering and covariates work similarly.
\code{rowc_col_coef_rl} and the other similar coefficients have two indices.
Their first index is the index of the cluster, and their second index is the
index of the covariate among the list of covariates interacting with that
direction of clustering. So if there are two row covariates \code{xr1} and
\code{xr2} interacting with three row clusters, that gives you 6
coefficients:
\code{rowc_col_coef_11, rowc_col_coef_12,
rowc_col_coef_21, rowc_col_coef_22,
rowc_col_coef_31, rowc_col_coef_32}.
and you can also have a three-way interaction between row cluster and those
two covariates, which would add the coefficients \code{rowc_col_coef_r3}
for the \code{xr1:xr2} term.
You can instead add covariates that interact with column clusters, which will
have parameters \code{colc_row_coef_cm}, where \code{m} here indexes just the
covariates interacting with column cluster.
If you have covariates interacting with row clusters and other covariates
interacting with column clusters, then you will have parameters
\code{rowc_cov_coef_rl} \strong{and} \code{colc_cov_coef_cm}.
An example of this is the model
\code{Y ~ ROWCLUST + xr1 + ROWCLUST:xr1 + xc1 + COLCLUST + COLCLUST:log(xc1)}
This has main effects for row clusters and column clusters, i.e.
\code{ROWCLUST} and \code{COLCLUST}. It also has two covariate terms not
interacting with clusters, \code{xr1} and \code{xc1}. It also has 1 covariate
term interacting with row clusters, \code{xr1}, with coefficients
\code{rowc_cov_coef_r1}, and 1 covariate term interacting with column
clusters, \code{log(xc1)}, with coefficients \code{colc_cov_coef_c1}.
\strong{Restrictions on \code{formula}}
The primary restriction on the \code{formula} argument is that that you
\strong{cannot} use functions of \code{ROW}, \code{COL}, \code{ROWCLUST} or
\code{COLCLUST}, such as \code{log(ROW)} or I(COLCLUST^2). That is because
they are not covariates, and cannot be manipulated like that; instead, they
are indicators for particular elements of the clustering model structure.
If performing biclustering, i.e. if \code{ROWCLUST} and \code{COLCLUST} are
both in the model, and you want to include the interaction between them, then
you can use the interaction between them on its own, or you can include both
main effects, but you are not allowed to use just one main effect alongside
the interaction. That is, you can use
\code{Y ~ ROWCLUST + COLCLUST + ROWCLUST:COLCLUST} or \code{Y ~ ROWCLUST*COLCLUST},
or you can use \code{Y ~ ROWCLUST:COLCLUST}, and these two types of
biclustering model will have different parameter constraints (see below under
\code{initvect} details), but you \strong{cannot} use
\code{Y ~ ROWCLUST + ROWCLUST:COLCLUST} or \code{Y ~ COLCLUST + ROWCLUST:COLCLUST}
As stated above, you also cannot include individual row effects alongside
row clustering, and you cannot use individual column effects alongside
column clustering, i.e. if \code{ROWCLUST} is in the formula, then \code{ROW}
\strong{cannot} be in the formula, and if \code{COLCLUST} is in the formula
then \code{COL} \strong{cannot} be in the formula.
If you are including \code{COL} with \code{ROWCLUST}, then you can include
the interaction between them but that is the \strong{only} permitted interaction
term that involves \code{COL}, and similarly the interaction between
\code{ROW} and \code{COLCLUST} is the \strong{only} permitted interaction
term that involves \code{ROW}. But you can include those interactions in the
form
\code{Y ~ ROWCLUST + COL + ROWCLUST:COL} or as \code{Y ~ ROWCLUST*COL}, or as
\code{Y ~ ROWCLUST:COL}.
These are the only permitted uses of the \code{COL} term, and there are
equivalent constraints on the inclusion of \code{ROW}.
As stated above, you can include interactions between \code{ROWCLUST} or
\code{COLCLUST} and covariates, but three-way interactions between
\code{ROWCLUST}, \code{COLCLUST} and one or more covariates are
\strong{not permitted} in \code{clustord.fit}, mostly because
of the prohibitive number of parameter values that would need to be fitted,
and the difficulty of interpreting such a model. That is, you cannot use
formulae such as \code{Y ~ ROWCLUST*COLCLUST*xr}, which would have Binary
model Logit(P(Y = 1)) = mu + bi_cluster_row_coef_rc1*xr_i.
\code{model} \strong{argument details}
The three models available in \code{clustord.fit} are the Binary model, which
is a Bernoulli model equivalent to the binary model in the package
\code{clustglm}, the Proportional Odds Model (POM) and the Ordered Stereotype
Model (OSM).
Many Binary model examples have been given above, which have the general
form
logit(P(Y = 1)) = mu + <<linear terms>>
where the linear terms can include row or column clustering effects,
individual row or column effects, and row or column covariates, with or
without interactions with row or column clustering.
The Proportional Odds Model and the Ordered Stereotype Model have the same
model structure for the linear terms, but the overall model equation is different.
The Proportional Odds Model (\code{model = "POM"}) has the form
logit(P(Y <= k)) = log(P(Y <= k)/P(Y > k)) = mu_k - <<linear terms>>
So the simplest POM for row clustering would be
logit(P(Y <= k)) = mu_k - rowc_coef_r
and the model including individual column effects and no interactions would be
logit(P(Y <= k)) = mu_k - rowc_coef_r - col_coef_j
Note that the linear-term coefficients have negative signs for the
Proportional Odds Models. This is so that as the row cluster index increases,
or as the column index increases, Y is more likely to fall at higher values
(see Ch4 of Agresti, 2010).
The Ordered Stereotype model (\code{model = "OSM"}) has the form
log(P(Y = k)/P(Y = 1)) = mu_k + phi_k(<<linear terms>>)
So the simplest OSM for row clustering would be
log(P(Y = k)/P(Y = 1)) = mu_k + phi_k*rowc_coef_r
and the model including individual column effects and no interactions would be
log(P(Y = k)/P(Y = 1)) = mu_k + phi_k(rowc_coef_r + col_coef_j)
Note that the OSM is \strong{not} a cumulative logit model, unlike the POM.
The model describes the log of the kth level relative to the first level,
which is the baseline category, but the patterns for k = 2 may be different
than the patterns for k = 3. They are linked, because the linear terms will
be the same, but they may not have the same shape. In this sense, the OSM is
more flexible/less restrictive than the POM.
See Anderson (1984) for the original definition of the ordered stereotype
model, and see Fernández et al. (2016) for the application to clustering.
The phi_k parameters may be treated as "score" parameters. After fitting the
OSM, the fitted phi_k values can give some indication of what the true
separation is between the categories. Even if the default labelling of the
categories is from 1 to n, that doesn't mean that the categories are actually
equally spaced in reality. But the fitted phi_k values from the OSM can be
treated as data-derived numerical labels for the categories. Moreover, if two
categories have very similar fitted phi_k values, e.g. if phi_2 = 0.11 and
phi_3 = 0.13, that suggests that there is not enough information in the data
to distinguish between categories 2 and 3, and so you might as well merge
them into a single category to simplify the model-fitting process and the
interpretation of the results.
\code{initvect} \strong{argument details}
Initvect is the vector of starting values for the parameters, made up of
sections for each different type of parameter in the model. Note that the
length of each section of initvect is the number of \strong{independent}
parameter values, not the overall number of parameter values of that type.
If you want to supply a vector of starting values for the EM algorithm, you
need to be careful how many values you supply, and the order in which you
include them in \code{initvect}, and you should \strong{CHECK} the output
list of parameters (which is the full set of parameter values, including
dependent ones, broken up into each type of parameter) to check that your
initvect structure is correct for the formula you have specified.
For example, the number of \code{mu} values will always be 1 fewer than the
number of categories in the data, and the remaining value of mu is dependent
on those q-1 values. In the OSM for data with 3 categories, the first value
of mu for category 1 will be 0, and then the other 2 values of mu for
categories 2 and 3 will be the independent values of mu. For the POM for data
with 5 categories, the first 4 values of mu will be the independent values
and then the last value of mu is infinity, because the probability of Y being
in category 5 is defined as 1 minus the sum of the probabilities for the
other 4 levels.
\code{q} is the number of levels in the values of y, \code{n} is the number
of rows in the original data matrix, and \code{p} is the number of columns in
the original data matrix.
For Binary,
There is one independent value for \code{mu}, i.e. q = 2.
Ignore \code{phi}, which is not used in the Binary model.
For OSM,
The starting values for \code{mu_k} are length \code{q-1}, and the model
has \code{mu_1} = 0 always, so the initvect values for \code{mu} will become
\code{mu_2}, \code{mu_3}, etc. up to \code{mu_q}.
The starting values for \code{phi_k} are length \code{q-2}.
Note that the starting values for \code{phi} do not correspond directly to
\code{phi}, because \code{phi} is restricted to being increasing and between
0 and 1, so instead the starting values are treated as elements
\code{u[2:q-1]} of a vector \code{u} which can be between \code{-Inf} and
\code{+Inf}, and then
\code{phi[2] <- expit(u[2])} and
\code{phi[k] <- expit(u[2] + sum(exp(u[3:k])))} for k between 3 and q-1
\code{(phi[1] = 0 and phi[q] = 1)}.
For POM,
The starting values for \code{mu_k} are length \code{q-1}, but the starting
values do not correspond directly to \code{mu_k}, because \code{mu_k} is
restricted to being increasing, i.e. the model has to have
\code{mu_1} <= \code{mu_2} <= ... \code{mu_q} = \code{+Inf}
So instead of using the initvect values directly for \code{mu_k}, the 2nd to
(q-1)th elements of initvect are used to construct \code{mu_k} as follows:
\code{mu_1 <- initvect[1]}
\code{mu_2 <- initvect[1] + exp(initvect[2])}
\code{mu_3 <- initvect[1] + exp(initvect[2]) + exp(initvect[3])}
... and so on up to \code{mu_{k-1}}, and \code{mu_k} is infinity, because
it is not used directly to construct the probability of Y = q.
Thus the values that are used to construct \code{mu_k} can be unconstrained,
which makes it easier to specify initvect and easier to optimize the parameter
values.
Ignore \code{phi}, which is not used in POM.
For \strong{all three models},
The starting values for \code{rowc_coef_r} are length \code{nclus.row-1},
where \code{nclus.row} is the number of row clusters. The final row cluster
parameter is dependent on the others (see the input parameter info for
constraint_sum_zero), whereas if it were independent it would be colinear
with the \code{mu_k} parameters and thus not identifiable.
Similarly the starting values for \code{colc_coef_c} are length
\code{nclus.column-1}, where \code{nclus.column} is the number of column
clusters, to avoid problems of colinearity and nonidentifiability.
If you have biclustering with an interaction term between row clusters and
column clusters, then the number of independent values in the matrix of
interaction terms depends on whether you include the main effects of row and
column clusters separately. That is, if you use the biclustering model
\code{Y ~ ROWCLUST + COLCLUST + ROWCLUST:COLCLUST}, or equivalently
\code{Y ~ ROWCLUST*COLCLUST},
then the main effect term \code{ROWCLUST} has \code{nclus.row-1} independent
parameters in \code{initvect}, and \code{COLCLUST} has \code{nclus.column-1}
independent parameters in \code{initvect}, and \code{ROWCLUST:COLCLUST} will
have \code{(nclus.row - 1)*(nclus.column - 1)} independent parameter values.
The final matrix of interaction terms will be constrained to have its last
row equal to the negative sum of the other rows, and the last column equal to
the negative sum of the other columns.
On the other hand, if you want to use only the interaction term and not the
main effects (which for the clustering model is mathematically equivalent),
i.e.
\code{Y ~ ROWCLUST:COLCLUST},
then that matrix of interaction terms will have \code{nclus.row*nclus.column - 1}
independent parameters, i.e. more independent parameters than if you included
the main effects.
If you have column effects alongside row clusters (they are not permitted
alongside column clusters), without interactions, i.e. the formula
\code{Y ~ ROWCLUST + COL} with Binary model Logit(P(Y = 1)) = mu +
rowc_coef_r + col_coef_j
then the row cluster coefficients have \code{nclus.row - 1} independent
parameters, and the column effect coefficients have \code{p - 1} independent
parameters, where p is the number of columns in the original data matrix,
i.e. the maximum value of \code{long.df$COL}.
If you include the interaction term, then the number of independent parameters
again depends on whether you just use the interaction term, or include the
main effects.
In the formula \code{Y ~ ROWCLUST + COL + ROWCLUST:COL} or its equivalent with
"*", the interaction term will have \code{(nclus.row - 1)*(p-1)} independent
parameters.
If you instead use the formula \code{Y ~ ROWCLUST:COL}, then the interaction
term will have \code{nclus.row*p - 1} independent parameters. Either way, the
total number of independent parameters in the model will be \code{nclus.row*p}.
Similarly, if you have row effects alongside column clusters, without
interactions, i.e. the formula
\code{Y ~ COLCLUST + ROW},
with Binary model Logit(P(Y = 1)) = mu + colc_coef_c + row_coef_i
then the column cluster coefficients will have \code{nclus.column - 1}
independent parameters, and the row coefficients will have \code{n-1}
independent parameters, where n is the number of rows in the original data
matrix, i.e. the maximum value of \code{long.df$ROW}.
If you include the interaction term alongside the main effects, i.e.
\code{Y ~ COLCLUST + ROW + COLCLUST:ROW}, or its equivalent with "*", the
interaction term will have \code{(nclus.column - 1)*(n-1)} independent
parameters.
If you instead use the formula \code{Y ~ COLCLUST:ROW}, that interaction
coefficient matrix will have \code{nclus.column*n - 1} independent parameters.
Any covariate terms included in the formula will be split up by
\code{clustord.fit} into the covariates that interact with row clusters, the
covariates that interact with column clusters, and the covariates that do not
interact with row or column clusters.
The number of independent parameters for row-cluster-interacting covariates
will be \code{nclus.row*L}, where \code{L} is the number of terms involving
row clusters and covariates after any "*" terms have been expanded.
So in this formula, for example,
\code{Y ~ ROWCLUST*xr1 + xr2 + ROWCLUST:log(xc1)}
where xr1 and xr2 are row covariates, and xc1 is a column covariate, the fully
expanded formula would be
\code{Y ~ ROWCLUST + xr1 + xr2 + ROWCLUST:xr1 + ROWCLUST:log(xc1)}
and the terms interacting with \code{ROWCLUST} would be \code{ROWCLUST:xr1}
and \code{ROWCLUST:log(xc1)}, so there would be \code{nclus.row*2}
independent coefficients for those covariates.
The number of independent parameters for column-cluster-interacting
covariates will be \code{nclus.column*M}, where \code{M} is the number of
terms involving column clusters and covariates after any "*" terms have been
expanded.
So this formula, for example,
\code{Y ~ I(xr1^2) + COLCLUST*xc1 + COLCLUST:xc2:xc3 + COLCLUST*xr1}
would be expanded as
\code{Y ~ COLCLUST + xr1 + I(xr1^2) + xc1 + COLCLUST:xc1 + COLCLUST:xc2:xc3 + COLCLUST:xr1}
and the terms interacting with \code{COLCLUST} would be \code{COLCLUST:xc1},
\code{COLCLUST:xc2:xc3} and \code{COLCLUST:xr1}, so there would be
\code{nclus.column*3} independent coefficients for those covariates.
The number of independent parameters for covariates that do not interact with
row or column clusters will be the same as the number of those covariate terms,
after any "*" terms have been expanded.
So this formula, for example,
\code{Y ~ ROWCLUST*xr1 + xr2 + ROWCLUST:log(xc1) + COLCLUST*xc1}
would be expanded as
\code{Y ~ ROWCLUST + COLCLUST + xr1 + xr2 + xc1 + ROWCLUST:xr1 + ROWCLUST:log(xc1) + COLCLUST:xc1},
so there would be 3 independent coefficients for the terms \code{xr1, xr2, xc1}.
Note that there are \strong{no intercept} terms for the coefficients,
because those are incorporated into the parameters \code{mu_k}.
The \strong{order of the} \code{initvect} \strong{entries} is as follows, and
any entries that are not included in the formula will be ignored and not
included in \code{initvect}. That is, you should NOT provide values in
\code{initvect} for components that are not included in the formula.
1) mu (or values used to construct mu, POM only)
2) values used to construct phi (OSM only)
3) row cluster coefficients
4) column cluster coefficients
5) [matrix] bicluster coefficients (i.e. interaction between row and column clusters)
6) individual row coefficients
7) individual column coefficients
8) [matrix] interactions between row clusters and individual column coefficients
9) [matrix] interactions between column clusters and individual row coefficients
10) [matrix] row-cluster-specific coefficients for covariates interacting with row clusters
11) [matrix] column-cluster-specific coefficients for covariates interacting with column clusters
12) coefficients for covariates that do not interact with row or column clusters
Any entries marked as [matrix] will be constructed into matrices by filling
those matrices row-wise, e.g. if you want starting values 1:6 for a matrix of
2 row clusters and 3 covariates interacting with those row clusters, the
matrix of coefficients will become
\code{1 2 3
4 5 6}
For the formula \code{Y ~ ROWCLUST*COLCLUST}, where the matrix of interactions
between row and column clusters has \code{(nclus.row - 1)*(nclus.column - 1)}
independent parameters, the last row and column of the matrix will be the
negative sums of the rest, so e.g. if you have 2 row clusters and 3 column
clusters, there will only be 2 independent values, so if you provide the
starting values -0.5 and 1.2, the final matrix of parameters will be:
\code{ column cluster 1 column cluster 2 column cluster 3
row cluster 1 -0.5 1.2 -0.7
row cluster 2 0.5 -1.2 0.7}
If the matrix is a matrix relating to row clusters, then the row clusters are
in the rows, and if it's a matrix relating to column clusters but not row
clusters, then the column clusters are in the rows, i.e. the matrix of
coefficients for column clusters interacting with individual row effects will
have the rows of the matrix corresponding to the clusters, i.e. the matrix
would be indexed colc_row_coef_ci, c being the column cluster index and i
being the row index.
Similarly, if the matrix is a matrix relating to column clusters and covariates,
then the rows of the matrix will correspond to the column clusters, i.e. the
matrix would be indexed colc_cov_coef_cl, c being the column cluster index
and l being the covariate index.
If using biclustering with interaction between row and column clusters, then
the row clusters will be the rows and the column clusters will be the columns,
i.e. the matrix would be indexed rowc_colc_coef_rc, r being the row cluster
index and c being the column cluster index.
}
\examples{
long.df <- data.frame(Y=factor(sample(1:3,5*20,replace=TRUE)),
ROW=factor(rep(1:20,times=5)),COL=rep(1:5,each=20))
# Model Log(P(Y=k)/P(Y=1))=mu_k+phi_k*rowc_coef_r with 3 row clustering groups:
clustord.fit(Y~ROWCLUST,model="OSM",3,long.df=long.df)
# Model Log(P(Y=k)/P(Y=1))=mu_k+phi_k*(rowc_coef_r + col_coef_j) with 3 row clustering groups:
clustord.fit(Y~ROWCLUST+COL,model="OSM",3,long.df=long.df)
# Model Logit(P(Y <= k))=mu_k-rowc_coef_r-col_coef_j-rowc_col_coef_rj with 2 row clustering groups:
clustord.fit(Y~ROWCLUST*COL,model="POM",nclus.row=2,long.df=long.df)
# Model Log(P(Y=k)/P(Y=1))=mu_k+phi_k*(colc_coef_c) with 3 column clustering groups:
clustord.fit(Y~COLCLUST,model="OSM",nclus.column=3,long.df=long.df)
# Model Log(P(Y=k)/P(Y=1))=mu_k+phi_k*(colc_coef_c + row_coef_i) with 3 column clustering groups:
clustord.fit(Y~COLCLUST+ROW,model="OSM",nclus.column=3,long.df=long.df)
# Model Log(P(Y=k)/P(Y=1))=mu_k+phi_k*(rowc_coef_r + colc_coef_c)
# with 3 row clustering groups and 2 column clustering groups:
clustord.fit(Y~ROWCLUST+COLCLUST,model="OSM",nclus.row=3,nclus.column=2,long.df=long.df,
EM.control=list(EMcycles=5), nstarts=1)
# Model Logit(P(Y<=k))=mu_k-rowc_coef_r-colc_coef_c-rowc_colc_coef_rc
# with 2 row clustering groups and 4 column clustering groups, and
# interactions between them:
clustord.fit(Y~ROWCLUST*COLCLUST, model="POM", nclus.row=2, nclus.column=4,
long.df=long.df,EM.control=list(EMcycles=5), nstarts=1,
start_from_simple_model=FALSE)
}
\references{
Fernandez, D., Arnold, R., & Pledger, S. (2016). Mixture-based clustering for the ordered stereotype model. *Computational Statistics & Data Analysis*, 93, 46-75.
Anderson, J. A. (1984). Regression and ordered categorical variables. *Journal of the Royal Statistical Society: Series B (Methodological)*, 46(1), 1-22.
Agresti, A. (2010). *Analysis of ordinal categorical data* (Vol. 656). John Wiley & Sons.
}
|
116a9f6e522e1f6649b2be6996acdd04bae4a359
|
c2061964216f76ad0f440c76dbfe1119e0279a22
|
/man/API-methods.Rd
|
2b5382da3f976dc349710fddc61f802981ac433a
|
[] |
no_license
|
cran/antaresRead
|
046829e05e411adfb55fc652ad49ea84f2610264
|
f6a182b21854e12c5c470afcd38c26f44fb2b8d5
|
refs/heads/master
| 2023-04-16T10:45:23.521378
| 2023-04-06T16:20:02
| 2023-04-06T16:20:02
| 87,090,660
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,095
|
rd
|
API-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/API-methods.R
\name{API-methods}
\alias{API-methods}
\alias{api_get}
\alias{api_post}
\alias{api_put}
\alias{api_delete}
\title{API methods}
\usage{
api_get(opts, endpoint, ..., default_endpoint = "v1/studies")
api_post(opts, endpoint, ..., default_endpoint = "v1/studies")
api_put(opts, endpoint, ..., default_endpoint = "v1/studies")
api_delete(opts, endpoint, ..., default_endpoint = "v1/studies")
}
\arguments{
\item{opts}{Antares simulation options or a `list` with an `host = ` slot.}
\item{endpoint}{API endpoint to interrogate, it will be added after `default_endpoint`.
Can be a full URL (by wrapping in [I()]); in that case `default_endpoint` is ignored.}
\item{...}{Additional arguments passed to API method.}
\item{default_endpoint}{Default endpoint to use.}
}
\value{
Response from the API.
}
\description{
API methods
}
\examples{
\dontrun{
# List studies with local API
api_get(
opts = list(host = "http://0.0.0.0:8080"),
endpoint = NULL
)
}
}
|
bc103e5c431a05a670d2dcf7e99ba2e593ee654c
|
afb9e6263d2625fc37137cd5dae18dcb3abdf9c4
|
/summarizeBed
|
39fca2693bae3c96cfcab9da94f4b2d855a6e574
|
[] |
no_license
|
raivivek/omics-utils
|
65e114fd0c2da79a8c98bca40b99e3537c9f395f
|
f5bdf4f9eb576f47c0f11d3de97bad5035a15d62
|
refs/heads/master
| 2020-03-23T15:32:35.132606
| 2018-10-16T14:47:22
| 2018-10-16T14:47:22
| 141,755,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,247
|
summarizeBed
|
#! /usr/bin/env Rscript
#
# The Parker Lab, 2018
# theparkerlab.org
#
# (c) Vivek Rai

# Summarizes a BED file using the following metrics:
#   1. Total peaks
#   2. Total span (bp)
#   3. Average width
#   4. Median width
#   5. Max width
#   6. Min width
#
# Optionally, prints the same statistics per chromosome if `--chr` is supplied
# as the second argument. Input may be a file path, or "stdin"/"-"/nothing to
# read from standard input.

library(magrittr)

args <- commandArgs(trailingOnly = TRUE)
args_bed <- args[1]

# NA-safe scalar check: a missing second argument means no per-chromosome
# grouping (`&&` short-circuits before the NA comparison can propagate).
is_chr <- !is.na(args[2]) && args[2] == "--chr"

# Check is.na() first: comparing NA with "==" yields NA, which would make the
# `if` condition invalid if it were evaluated on a missing argument.
if (is.na(args_bed) || args_bed == "stdin" || args_bed == "-") {
  args_bed <- "cat /dev/stdin"
}

bed_file <- data.table::fread(args_bed, header = FALSE, showProgress = TRUE)

# Name only the first three (mandatory) BED columns; unlike a full
# `colnames<-` assignment this does not error when the file carries
# additional columns (name, score, strand, ...).
data.table::setnames(bed_file, 1:3, c("chr", "start", "end"))
bed_file$lengths <- bed_file$end - bed_file$start

# dplyr is used via `::` and never attached, so n() and desc() must be
# namespace-qualified too — bare n() would be "could not find function".
# The backtick-`if` trick conditionally inserts a group_by() step; because
# `.` appears as a direct argument, magrittr does not also prepend the LHS.
summary_df <- bed_file %>%
  `if`(is_chr, dplyr::group_by(., chr), .) %>%
  dplyr::summarize(
    total_peaks   = dplyr::n(),
    total_span    = sum(as.numeric(lengths)),  # as.numeric avoids int overflow
    average_width = mean(lengths),
    median_width  = median(lengths),
    max_width     = max(lengths),
    min_width     = min(lengths)
  ) %>%
  dplyr::arrange(dplyr::desc(total_peaks))

# Default file = "" writes the table to stdout, as expected for a CLI filter.
write.table(summary_df, row.names = FALSE, quote = FALSE, sep = "\t")
|
|
8e8be0215e199c3c5eec62808a3cbe0e145acb56
|
6d13ee8b1393bfead43267f083e73dece9c751e8
|
/R/hello.R
|
04f3c1bbf0a68323deb76f219e66e375d8eefcc1
|
[
"Apache-2.0"
] |
permissive
|
PabRod/pyR
|
40f7f0e5ab295db159c093034701a5d30c4e50c7
|
1dc97651b710cfbfefbbbcc8d81f6b41e799009b
|
refs/heads/master
| 2022-11-19T08:51:23.838049
| 2020-07-09T14:31:37
| 2020-07-09T14:31:37
| 277,819,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
hello.R
|
#' Check Python core
#'
#' Defines a variable on the Python side and reads it back through
#' reticulate, verifying that the embedded interpreter is alive.
#'
#' @return A dummy text generated in Python
#' @export
#'
python_core <- function() {
  reticulate::py_run_string("text = 'Python core working correctly'")
  reticulate::py$text
}
#' Load and use a Python module
#'
#' @return The number pi, loaded from numpy
#' @export
#'
python_module <- function() {
  # `np` is the numpy module handle imported in R/zzz.R#.onLoad
  np$pi
}
#' Load and use custom Python module
#'
#' Evaluates the pendulum derivative function from the custom Python
#' module on the initial state (pi, 0).
#'
#' @export
#'
python_custom <- function() {
  # `np` and `pndmdl` are module handles loaded in R/zzz.R#.onLoad.
  initial_state <- c(np$pi, 0)
  derivatives <- pndmdl$dpendulum(initial_state)
  # The original ends in an assignment, whose value is returned invisibly;
  # invisible() preserves that visibility behaviour.
  invisible(derivatives)
}
|
ed7e3524e60e28f043b88fb9936ae5e80e76dffa
|
5f221c1c9cb891db2164f8acb2b38e7ec839c390
|
/FinalCode.R
|
ed621a2a286d5fd11652d4b030fa7756c5eb4605
|
[] |
no_license
|
qk831019/PracticalMachineLearning
|
a3049c9aa9f95ca95b662bdf4ec434ea9cd55e09
|
fa732ca0f14e3c40c492469afc1c769cf0a08811
|
refs/heads/master
| 2021-01-02T08:13:59.104215
| 2015-01-25T23:46:36
| 2015-01-25T23:46:36
| 29,835,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,592
|
r
|
FinalCode.R
|
library(caret)

set.seed(825)

trainUrl <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
testUrl <- "http://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"

### Load data, mapping the dataset's sentinel strings to NA
training <- read.csv(url(trainUrl), na.strings = c("NA", "#DIV/0!", "", "."))
test <- read.csv(url(testUrl), na.strings = c("NA", "#DIV/0!", "", "."))

### Clean the data by removing features that have 80% or more missing values
noMissCol <- colSums(is.na(training)) / nrow(training) < 0.8
training1 <- training[, noMissCol]
training1 <- training1[, -1]  # drop the first (row-index) column

### Check missing values after cleaning
colSums(is.na(training1))

## Fit model using the rf method
### Fit control using 10-fold repeated cross-validation.
### (Note: the original call was left unclosed — a syntax error — and this
### object is not actually passed to train() below, which builds its own
### 5-fold control; kept for reference.)
fitControl <- trainControl(method = "repeatedcv", number = 10)

### Fit model with random forest method and 5-fold cross-validation
rf_model <- train(classe ~ ., data = training1, method = "rf",
                  trControl = trainControl(method = "cv", number = 5),
                  prox = TRUE, allowParallel = TRUE)

### Check final model fit
print(rf_model$finalModel)

### Check variable importance and plot
varImp(rf_model)
plot(varImp(rf_model))

### Predict on the cleaned training set (the model was fit on training1)
predtrain <- predict(rf_model, training1)

### Provide error report (in-sample confusion matrix)
confusionMatrix(predtrain, training1$classe)

## Conclusions and Test Data Submit
### Write one answer file per prediction, as required by the submission page.
pml_write_files <- function(x) {
  for (i in seq_along(x)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}

# Predict on the downloaded test set ("test1" in the original was never
# defined; the object loaded above is called "test").
predtest <- predict(rf_model, test)
answer <- as.character(predtest)
pml_write_files(answer)
|
22064252dd6d7af5b6c889960c3447b96eb7fb21
|
229bf9f5443566993bfd9ba16153c1ad0aada67f
|
/gapminder/gapminder.R
|
0c7663e3f914cc4fd9655b6dfa3cca12fb5ffaae
|
[] |
no_license
|
GiulSposito/R-x
|
fc096199ca2efb483d164ba42b92a7a77281f39f
|
902aad081c5b7961983234f183ed1df4bf621e8b
|
refs/heads/master
| 2021-06-27T05:41:46.789209
| 2019-05-21T17:42:01
| 2019-05-21T17:42:01
| 115,006,654
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
gapminder.R
|
library(gapminder)
library(tidyverse)
library(broom)

# Center year on 1950 so the model intercept is interpretable as life
# expectancy in 1950, then nest one data frame per country.
gap <- gapminder %>%
  mutate(year1950 = year - 1950) %>%
  group_by(continent, country) %>%
  nest()

# Linear trend of life expectancy over (centered) year for one country.
fit_model <- function(gapdata) {
  lm(lifeExp ~ year1950, data = gapdata)
}

# Fit a model per country and collect broom summaries alongside the data.
# ("gancle" in the original was a misspelling of "glance".)
gap <- gap %>%
  mutate(model = map(data, fit_model)) %>%
  mutate(
    glance  = map(model, broom::glance),
    tidy    = map(model, broom::tidy),
    augment = map(model, broom::augment),
    rsq     = map_dbl(glance, "r.squared")
  )

# Model fit (R^2) per country, coloured by continent.
gap %>%
  ggplot(aes(rsq, reorder(country, rsq))) +
  geom_point(aes(color = continent))

# Intercept (life expectancy in 1950, x-axis) vs slope (yearly
# improvement, y-axis). The original called ylab() twice, silently
# discarding the first label — the x-axis label must use xlab().
gap %>%
  unnest(tidy) %>%
  select(continent, country, term, estimate, rsq) %>%
  spread(term, estimate) %>%  # superseded by pivot_wider() in tidyr >= 1.0
  ggplot(aes(`(Intercept)`, year1950)) +
  geom_point(aes(color = continent, size = rsq)) +
  geom_smooth(se = FALSE) +
  xlab("Life Expectancy in 1950 (intercept)") +
  ylab("Yearly Improvement (slope)") +
  scale_size_area()
|
178b7f2d6dff2539589f67cfa35871ddf1d34472
|
f6e62f39395654658c1f71ed1e28cc689d140f6c
|
/middle/rgaspi_middle_mpi.R
|
da987924a225af726fb8b0e1877cd0cd91a7bb3c
|
[] |
no_license
|
jamitzky/rgaspi
|
0890f5f2388c685f51afd61b4c34a188369a8b06
|
b0b4058b6d9f3589bf85aa8a48818f3e14aadc42
|
refs/heads/master
| 2020-06-26T13:28:38.544572
| 2017-07-12T16:21:53
| 2017-07-12T16:21:53
| 97,024,786
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,234
|
r
|
rgaspi_middle_mpi.R
|
# S4 class "rgaspi": holds this process's GASPI rank, the total number of
# ranks, and an external pointer to the C-side GASPI info struct.
#define class rgaspi
setClass(Class="rgaspi",representation(rank="integer",nranks="integer",extp="ANY"),prototype(rank=NULL, nranks=NULL,extp=NULL))
# Constructor: rgaspi("master") triggers GASPI initialisation on every MPI
# worker and then locally; rgaspi("worker") initialises the calling process
# only and returns the populated object.
#define constructor rgaspi_init
setGeneric("rgaspi", function(mode) standardGeneric("rgaspi"))
setMethod('rgaspi',c('character'),function(mode){
if(mode=='master'){
# Each process (workers first, then master) stores its own `rgaspi_obj`
# in its .GlobalEnv by calling the worker-mode constructor.
mpi.remote.exec(eval(expression(rgaspi_obj<-rgaspi('worker')),envir=.GlobalEnv),ret=F)
eval(expression(rgaspi_obj<-rgaspi('worker')),envir=.GlobalEnv)
return(TRUE)
} else{
# Initialise GASPI via the compiled binding; the external pointer is then
# queried for the info vector: [2] = rank, [3] = number of ranks
# (elsewhere in this file [1] is used as a segment-id counter).
rgaspiinfop<-.Call("rgaspi_init",hptr=list("a"))
info<-.Call("rgaspi_show_info",extptr=rgaspiinfop)
rank<-info[2]
nranks<-info[3]
new_rgaspi<-new('rgaspi',rank=rank, nranks=nranks,extp=rgaspiinfop)
return(new_rgaspi)
}
})
# S4 class "rg_segment": a GASPI memory segment, identified by an integer id,
# with its length, element type tag, and an external pointer to the C-side
# segment info struct.
#define class rgaspi_segment
setClass(Class="rg_segment",representation(id="integer",length="integer",type="character",extp = "ANY" ),prototype(id=NULL,length=NULL,type=NULL,extp=NULL))
# Constructor: in "master" mode the segment is first created on every MPI
# worker (as a global named seg<id>obj), then locally; "worker" mode creates
# it on the calling process only. Only type 'db' (double) is wired up here.
#define constructor 'rgaspi_segment'
setGeneric("rg_segment", function(length,type,mode) standardGeneric("rg_segment"))
setMethod('rg_segment',c('numeric','character','character'),
function(length,type='db',mode='master'){
len<-as.integer(length)
# The first element of the info vector serves as the next segment id.
id<-.Call("rgaspi_show_info",extptr=rgaspi_obj@extp)[1]
if(mode=='master'){
# Build and eval the worker-side call as text so the global object name can
# embed the segment id (seg<id>obj). eval(parse()) is normally an
# anti-pattern, but here the dynamic variable name requires it.
ctxt<-paste("mpi.remote.exec(eval(expression(seg",id,"obj<-rg_segment(length=",len,",type='db',mode='worker')),envir=.GlobalEnv),ret=F)",sep='')
eval(parse(text=ctxt))
#print(ctxt)
}
# Allocate the segment via the compiled binding (r_seg_type 1 = double).
segp<-.Call("rgaspi_segment_create_db",rgaspi_info_ptr=rgaspi_obj@extp,r_seg_ptrh=list("a"),r_seg_size=len,r_seg_type=as.integer(1))
new_seg<-new('rg_segment',id=id,length=len,type=type,extp=segp)
return(new_seg)
})
# Collective wrappers: each runs the compiled GASPI call on every MPI worker
# first, then on the master, so all ranks participate.

# Shut down GASPI on all ranks.
rg_term<-function(){
mpi.remote.exec(eval(expression(.Call("rgaspi_term")),envir=.GlobalEnv),ret=F)
.Call("rgaspi_term")
}
# Synchronisation barrier across all ranks.
rg_barrier<-function(){
mpi.remote.exec(eval(expression(.Call("rgaspi_barrier")),envir=.GlobalEnv),ret=F)
.Call("rgaspi_barrier")
}
# Wait for outstanding one-sided communication to complete on all ranks.
rg_wait<-function(){
mpi.remote.exec(eval(expression(.Call("rgaspi_wait")),envir=.GlobalEnv),ret=F)
.Call("rgaspi_wait")
}
# Local write: copy `values` (doubles) into the local part of `segment`
# starting at `offset`. Returns whatever the compiled binding returns.
#local write
setGeneric("rg_write", function(segment,values,offset) standardGeneric("rg_write"))
setMethod('rg_write',c('rg_segment','numeric', 'numeric'),function(segment,values,offset){
len<-as.integer(length(values))
res<-.Call("rgaspi_segment_write_db",rgaspi_seginfo_ptr=segment@extp,values=values,offset=as.integer(offset),length=len)
return(res)
})
# One-sided remote write: push `length` doubles from the local segment
# `segment_from` (at `offset_from`) into `segment_to` on rank `rank_to`
# (at `offset_to`). Argument names in .Call must match the C binding.
setGeneric("rg_remote_write", function(segment_from,segment_to,rank_to,length, offset_from, offset_to) standardGeneric("rg_remote_write"))
setMethod('rg_remote_write',c('rg_segment','rg_segment','numeric','numeric', 'numeric','numeric'),function(segment_from,segment_to,rank_to,length, offset_from, offset_to){
len<-as.integer(length)
res<-.Call("rgaspi_segment_remote_write_db",rgaspi_seginfo_extptr1=segment_from@extp,rgaspi_seginfo_extptr2=segment_to@extp,
rgaspi_info_extptr=rgaspi_obj@extp,length=len,rank=as.integer(rank_to), offset_1=as.integer(offset_from), offset_2=as.integer(offset_to))
return(res)
})
#rg_read
# Read `length` doubles from a local segment, starting at `offset`.
setGeneric("rg_read", function(segment, offset, length) standardGeneric("rg_read"))
setMethod('rg_read',c('rg_segment', 'numeric', 'numeric'),function(segment,offset,length){
res<-.Call("rgaspi_segment_read_db",rgaspi_seginfo_ptr=segment@extp,offset=as.integer(offset),length=as.integer(length))
return(res)
})
#rg_remote_read
# One-sided remote read: copy `length` doubles from `segment_from` on rank
# `rank_from` (starting at `offset_from`) into the local segment
# `segment_to` (starting at `offset_to`).
setGeneric("rg_remote_read", function(segment_to,segment_from,rank_from,length,offset_to,offset_from) standardGeneric("rg_remote_read"))
setMethod('rg_remote_read',c('rg_segment','rg_segment','numeric','numeric', 'numeric', 'numeric'),function(segment_to,segment_from,rank_from,length,offset_to,offset_from){
len<-as.integer(length)
res<-.Call("rgaspi_segment_remote_read_db",rgaspi_seginfo_extptr1=segment_to@extp,rgaspi_seginfo_extptr2=segment_from@extp,
rgaspi_info_extptr=rgaspi_obj@extp,len=len,rank=as.integer(rank_from),offset_1=as.integer(offset_to),offset_2=as.integer(offset_from))
return(res)
})
# Return the GASPI runtime info vector for this process (rank id etc.,
# as exposed by the C binding).
rg_info<-function(){
res<-.Call("rgaspi_show_info",extptr=rgaspi_obj@extp)
return(res)
}
|
5a0dfa3de07879ac57ac13ee3c42e5d48ecc709f
|
ba554894bfb9289742ae7fb03ea1b10508f0be87
|
/plot2.R
|
c5a014fe44d041ab0d120a7a24a7757ba96af400
|
[] |
no_license
|
EPBaron/ExData_Plotting1
|
40e7664d7f609441e3e73f90b5a50fbc1f5cc0a9
|
748dd981cb5e46e190b54310df9bf660d8395118
|
refs/heads/master
| 2021-01-21T19:27:39.726278
| 2014-05-06T05:20:26
| 2014-05-06T05:20:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
plot2.R
|
## Code for generating plots in fulfillment of requirements
## for Course Project 1 of Exploratory Data Analysis course
##
## Produces plot2.png: Global Active Power (kW) over 1-2 Feb 2007 for the
## UCI household power consumption dataset.
# Download data
# NOTE(review): the zip is re-downloaded on every run; consider guarding
# with file.exists("powerconsumption.zip").
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, "powerconsumption.zip")
# record date file was downloaded and store for reference
datedownloaded <- date()
# Read directly from the zip; missing values are coded "?" in the raw file.
data <- read.table(unz("powerconsumption.zip", "household_power_consumption.txt"), header=T, sep=";",
na.strings="?",colClasses=c("character","character","numeric","numeric",
"numeric","numeric","numeric","numeric","numeric"))
# Set date range for data subsetting
startDate <- as.Date("2007-2-1")
endDate <- as.Date("2007-2-2")
# Convert Date
data$Date <- as.Date(data$Date, format='%d/%m/%Y')
# Subset data
projectdata <- subset(data, data$Date %in% c(startDate, endDate))
# Paste Date and Time fields into new column
projectdata$datetime <- paste(projectdata$Date, projectdata$Time)
# Convert to Posix formatted date-time
projectdata$datetime <- strptime(projectdata$datetime, format="%Y-%m-%d %H:%M:%S")
# Generate Plot 2
png(file = "plot2.png", width = 480, height = 480, units = "px")
par(mfrow = c(1, 1))
with(projectdata, plot(datetime, Global_active_power, type="l",
xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
0e33f6ef7e15838d2c5b6c740864b851c3373a0d
|
0409c24e7034afcc960f30f3ff22fc91a3c81999
|
/project.r
|
cbfcb5d1fe43e7689414009898e196b7cf7f9312
|
[] |
no_license
|
320221756-suresh/bayesian-network--disease-prediction
|
a05f86fdadb9d6b52d6bd7633456989c3a1e1862
|
3d329a015015970f212b08a95f0d7f757315d16c
|
refs/heads/master
| 2023-08-17T15:32:16.439763
| 2020-06-28T07:22:30
| 2020-06-28T07:22:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
project.r
|
## Fit a Bayesian network (hill-climbing structure learning via bnlearn) to a
## chest X-ray "Support Devices" dataset and evaluate prediction accuracy.
library(bnlearn)
library(visNetwork)
library(ggplot2)

## Render a learned network structure as an interactive visNetwork graph.
## `structure` is a bnlearn object whose $arcs matrix lists directed edges;
## `ht` is the plot height passed through to visNetwork.
plot.network <- function(structure, ht = "400px"){
  nodes.uniq <- unique(c(structure$arcs[,1], structure$arcs[,2]))
  nodes <- data.frame(id = nodes.uniq,
                      label = nodes.uniq,
                      color = "darkturquoise",
                      shadow = TRUE)
  edges <- data.frame(from = structure$arcs[,1],
                      to = structure$arcs[,2],
                      arrows = "to",
                      smooth = TRUE,
                      shadow = TRUE,
                      color = "black")
  return(visNetwork(nodes, edges, height = ht, width = "100%"))
}

df <- read.csv('G:\\m.tech 2ndsem\\pgm\\New folder\\Support Devices_cut_df.csv')
str(df)
## FIX: the original called japply() (from the 'taRifx' package, never
## loaded), which errored at runtime. Convert integer columns to numeric
## with base R instead.
int_cols <- vapply(df, is.integer, logical(1))
df[int_cols] <- lapply(df[int_cols], as.numeric)
df <- data.frame(df)
## Treat every column as a discrete variable for the network.
df <- sapply(df, as.factor)
df <- data.frame(df)
res <- hc(df)                   # hill-climbing structure search
str(res)
model <- bn.fit(res, data = df) # fit conditional probability tables
plot.network(res)
summary(model)
## FIX: a bare str() call with no argument (present in the original)
## always errors; removed.
df$Support.Devices
## Predict the target node from all remaining variables and evaluate.
test <- df[, !names(df) %in% c('Support.Devices')]
pred <- predict(model, node = 'Support.Devices', data = test)
confusion <- table(df$Support.Devices, pred)
confusion
Accuracy <- sum(diag(confusion) / sum(confusion))
Accuracy
saveRDS(res, "G:\\m.tech 2ndsem\\pgm\\Support.Devices.rds")
super_model <- readRDS("G:\\m.tech 2ndsem\\pgm\\final_model1.rds")
print(super_model)
super_model$Age
|
1dd5d371d529208759368841c9c3f50ced997ad5
|
2e26bccfa670b18368e66a4f6fa9fddbb2918993
|
/section2/2-2-4_assessment.R
|
1d4e7df992e4cf6c1bd1d2a37c8cfd290d85193a
|
[] |
no_license
|
A-Harders/PH125.6x_Wrangling
|
0c3145738d4fa39c5c42f1b3454d4903e03418bd
|
866be0fe04c75e3bb04f7d24626a21b6df0db503
|
refs/heads/master
| 2020-09-05T14:25:56.074145
| 2019-12-03T10:08:04
| 2019-12-03T10:08:04
| 220,132,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 885
|
r
|
2-2-4_assessment.R
|
# config
library(tidyverse)
library(ggrepel)
library(dslabs)

# Assessment q5-7: Lahman dataset
# FIX: only install Lahman when it is actually missing, instead of
# unconditionally re-installing on every run.
if (!requireNamespace("Lahman", quietly = TRUE)) install.packages("Lahman")
library(Lahman)

# Top 10 home-run hitters of 2016.
top <- Batting %>%
  filter(yearID == 2016) %>%
  arrange(desc(HR)) %>% # arrange by descending HR count
  slice(1:10) # take entries 1-10
top %>% as_tibble()
Master %>% as_tibble()
Salaries %>% as_tibble()

# Assessment Q5: create a top 10 HR hitter table (join player names on).
top_names <- top %>% left_join(Master) %>%
  select(playerID, nameFirst, nameLast, HR)
top_players <- top_names %>% select(playerID)

# Assessment Q6: create a top 10 salary table for 2016
top_salary <- Salaries %>% filter(yearID == 2016) %>%
  right_join(top_names) %>%
  select(playerID, nameFirst, nameLast, HR, salary)
top_salary

# Assessment Q7: top-10 HR hitters who also won an award in 2016.
AwardsPlayers %>% filter(yearID == 2016) %>%
  select(playerID) %>%
  intersect(top_players)
|
ef01ec21f1f97749f59d9154f9c2641f97d83594
|
b20110dc8a9cb952fea513d738da144e9a13eb8a
|
/rankhospital.R
|
6ad8e257e326bf735aa8686fb0db5d779d7daa57
|
[] |
no_license
|
brentmcleod/ProgrammingAssignment3
|
055b1e79a78e250c8783389a7c46944c0a4246b3
|
8e8fd0f46de68babb4e0d85d03d6385b55a933c4
|
refs/heads/master
| 2020-04-02T09:50:32.307960
| 2018-10-23T10:59:01
| 2018-10-23T10:59:01
| 154,312,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,989
|
r
|
rankhospital.R
|
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital in `state` at the requested rank for the
  ## 30-day mortality rate of `outcome`.
  ##  - state:   2-character state abbreviation.
  ##  - outcome: "heart attack", "heart failure" or "pneumonia".
  ##  - num:     integer rank, or "best"/"worst" for the first/last hospital.
  ## Ties in rate are broken alphabetically by hospital name; NA is returned
  ## when `num` exceeds the number of ranked hospitals.

  ## Install/load required packages on first use (may take several minutes
  ## the first time this is run).
  packages <- c("readr", "dplyr")
  package.check <- lapply(packages, FUN = function(x) {
    if (!require(x, character.only = TRUE)) {
      install.packages(x, dependencies = TRUE)
      library(x, character.only = TRUE)
    }
  })

  ## !!IMPORTANT!! -- set the working directory to where the data is stored.
  outcomeData <- read.csv("outcome-of-care-measures.csv")

  ## Check that state, outcome and rank are valid.
  validStates <- levels(outcomeData$State)
  validOutcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!state %in% validStates) stop("invalid state")
  if (!outcome %in% validOutcomes) stop("invalid outcome")
  if (!(num == "best" | num == "worst" | is.numeric(num))) stop("invalid rank")

  ## Build the data column name from the outcome: capitalise each word and
  ## join with "." (e.g. "...Rates.from.Heart.Attack").
  tmpVect1 <- strsplit(outcome, " ")[[1]]
  tmpVect2 <- paste0(toupper(substr(tmpVect1, 1, 1)), substr(tmpVect1, 2, nchar(tmpVect1)), collapse = ".")
  outColName <- paste0("Hospital.30.Day.Death..Mortality..Rates.from.", tmpVect2)

  ## Clean and rank the data.
  ## FIX: `rename(Rate = outColName)` asked tidy eval for a column literally
  ## named "outColName"; all_of() resolves the character variable instead.
  rankedData <- outcomeData %>%
    rename(Rate = all_of(outColName)) %>%
    filter(State == state, Rate != "Not Available") %>%
    select(c(Hospital.Name, Rate)) %>%
    mutate(Rate = as.numeric(as.character(.[["Rate"]]))) %>%
    arrange(Rate, Hospital.Name) %>%
    mutate(Rank = 1:nrow(.))

  ## Convert "best" and "worst" to numeric ranks.
  if (num == "best") num <- 1
  if (num == "worst") num <- nrow(rankedData)

  ## Return the hospital name at the given rank (NA when out of range).
  if (num > nrow(rankedData)) {
    NA
  } else {
    paste0(filter(rankedData, Rank == num)[[1]][[1]])
  }
}
|
a5fdbacfa5e456fb96a04a3a295950794f10b9ef
|
f145e7805330fa4dfead51375502eb74a5086e20
|
/monarchs.R
|
d759e37280a4212f4f9fdeec21c0381b53cc4396
|
[] |
no_license
|
RussellGrayxd/Random-Ecology-Analyses
|
9502f3ad3d214211cbc7b29391fb140a93a8f300
|
fd7bdd1d5e3979252a32480939702072f29cf3ff
|
refs/heads/master
| 2021-01-02T01:32:36.307716
| 2020-08-02T04:57:23
| 2020-08-02T04:57:23
| 239,433,960
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
monarchs.R
|
library(ggplot2)
library(ggmap)
library(RColorBrewer) # for color selection
library(lubridate)
## Read the input spatiotemporal data
## (`monarchs` is assumed to be a data frame already in the workspace with
## Tracking_month, longitude and latitude columns -- TODO confirm.)
animal.data <- monarchs
## Break down by month
## NOTE(review): this loop builds a `month` vector by splitting
## Tracking_month on "/", but the result is never used below -- the month
## column is taken directly from Tracking_month instead. Apparent dead
## code; confirm before removing.
month <- vector()
for(i in 1:nrow(animal.data)) {
dateStr <- toString(animal.data$Tracking_month[i])
dateStrSplit <- strsplit(dateStr, "/")[[1]]
month[i] <- as.Date(dateStrSplit[3])
}
## Create a month attribute
animal.data$month <- animal.data$Tracking_month
## Convert month to factor (ordered Jan..Dec so facet panels sort correctly)
animal.data$month <- factor(animal.data$Tracking_month, levels=month.name, ordered=TRUE)
## Specify a map with center at the center of all the coordinates
## NOTE(review): these means are computed but the get_map() call below uses
## a hard-coded center (lon=-99, lat=27) instead.
mean.longitude <- mean(animal.data$longitude)
mean.latitude <- mean(animal.data$latitude)
## SECURITY: a Google Maps API key is hard-coded and committed to source.
## It should be revoked and loaded from an environment variable instead.
register_google(key = "AIzaSyCq_aqS9pfXOqSFtCit4PvwC0wQL5ZL_eg")
animal.map <- get_map(location = c(lon=-99, lat=27), maptype = "toner", color = "bw", zoom = 4, scale = 2)
## Convert into ggmap object
animal.map <- ggmap(animal.map, extent="device", legend="none")
## Plot a heat map layer: Polygons with fill colors based on
## relative frequency of events
animal.map <- animal.map + stat_density2d(data=animal.data,
aes(x=longitude, y=latitude, fill=..level.., alpha=..level..), geom="polygon")+
scale_fill_gradientn(colours="orange")
## Remove any legends
animal.map <- animal.map + guides(size=FALSE, alpha = FALSE)
## Give the map a title
animal.map <- animal.map + ggtitle("Inaturalist Monthly Monarch Butterfly Observations 2007-2019")
## Plot animals by each month
animal.map <- animal.map + facet_wrap(~month)
print(animal.map) # this is necessary to display the plot
|
d51c38e0a3e1c8d1833d3ccd9b5561b78d0ef1d2
|
44d803b4ce510d4a16a466a46895bd59a87a87a2
|
/MFB/pbp_extract.R
|
03eef6c1e8e815fe790aa72a1130657a6b8efe04
|
[] |
no_license
|
meysubb/Temp_CollegeballR_Work
|
36aa68f9df28e8362450208e353a57c5d49d0f07
|
2138c26008a400aa1432535586ec7e4f52d8dc13
|
refs/heads/master
| 2021-07-25T13:35:24.007275
| 2018-10-21T13:46:23
| 2018-10-21T13:46:23
| 146,840,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,230
|
r
|
pbp_extract.R
|
library(dplyr)
##play-by-play scrape
## Store this as a data object
## Fetch the CFB play-type lookup table once at load time; pbp_data() below
## uses it to translate a user-supplied play type into the API's numeric id.
## NOTE(review): fromJSON is used but jsonlite is not attached in this
## file's visible imports -- presumably loaded elsewhere; confirm.
pt_url <- "https://api.collegefootballdata.com/play/types"
play_types <- fromJSON(pt_url)
cfb_play_type_df <- play_types %>% mutate(text = tolower(text))
library(assertthat)
pbp_data <- function(year,
                     week = 1,
                     team = NULL,
                     play_type = NULL,
                     drive = NULL) {
  ## Pull college football play-by-play (or drive) data from the
  ## collegefootballdata.com API.
  ##  - year/week:  season and week to query.
  ##  - team:       optional team name (URL-encoded into the query string).
  ##  - play_type:  optional play type, matched against cfb_play_type_df by
  ##                its text or abbreviation and translated to the API id.
  ##  - drive:      when non-NULL, query the /drives endpoint instead of
  ##                /plays (the same year/week/team/playType parameters are
  ##                appended either way).
  ## Returns a data.frame built from the parsed JSON response.
  options(stringsAsFactors = FALSE)

  if (!is.null(play_type)) {
    text <- play_type %in% cfb_play_type_df$text
    abbr <- play_type %in% cfb_play_type_df$abbreviation
    pt <-
      assert_that((text |
                     abbr) == T, msg = "Incorrect play type selected, please look at the available options in the Play Type DF.")
    ## Translate the matched text/abbreviation to the numeric API id.
    if (text) {
      pt_id = cfb_play_type_df$id[which(cfb_play_type_df$text == play_type)]
    } else {
      pt_id = cfb_play_type_df$id[which(cfb_play_type_df$abbreviation == play_type)]
    }
  }

  ## Choose the endpoint: plays by default, drives when requested.
  if (is.null(drive)) {
    play_base_url <- "https://api.collegefootballdata.com/plays?"
  } else {
    play_base_url <- "https://api.collegefootballdata.com/drives?"
  }

  if (is.null(play_type) && is.null(team)) {
    ## no play type, no team
    full_url <- paste0(play_base_url, "year=", year, "&week=", week)
  } else if (is.null(play_type)) {
    ## FIX: these two branches were swapped in the original -- when
    ## play_type was NULL it built a "&playType=" URL from the undefined
    ## pt_id (runtime error), and when team was NULL it tried to URL-encode
    ## the NULL team. This branch is the team-only query:
    full_url <-
      paste0(
        play_base_url,
        "year=", year,
        "&week=", week,
        "&team=", URLencode(team, reserved = T)
      )
  } else if (is.null(team)) {
    ## play-type-only query
    full_url <-
      paste0(
        play_base_url,
        "year=", year,
        "&week=", week,
        "&playType=", pt_id
      )
  } else {
    ## team & play type
    full_url <-
      paste0(
        play_base_url,
        "year=", year,
        "&week=", week,
        "&team=", URLencode(team, reserved = T),
        "&playType=", pt_id
      )
  }

  raw_play_df <- fromJSON(full_url)
  raw_play_df <- do.call(data.frame, raw_play_df)
  play_df <- raw_play_df
  return(play_df)
}
|
c3fd746a9494ec8f051a78a147fb8ce622f29b20
|
da8bec4da38b9bd635a4ec6489e264e65e5d1b60
|
/dev.r
|
baef56b4bc1d2d3a9ae3d5a35e4a599c630472e1
|
[] |
no_license
|
madcap1090/blog
|
15dc632f4d718fd7ae57f1b0016e6b1bee59c7f0
|
a84b1f54b2db06a71d95e5ca7db6c643184493c8
|
refs/heads/master
| 2021-06-13T18:59:31.453304
| 2021-04-20T17:01:25
| 2021-04-20T17:01:25
| 180,212,253
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
dev.r
|
#test at 697 & test that at end we have 150
## Ad-hoc sanity checks on globals one_date/two_dates/three_dates: filter
## each to the latest end_date and compare row counts before/after.
## NOTE(review): relies on one_date, two_dates, three_dates and reps54
## existing in the workspace -- this is scratch/dev code, not a function.
max_date <- max(one_date$end_date)
one_test <- one_date %>%
filter(end_date == max_date)
dim(one_date)
dim(one_test)
two_test <- two_dates %>%
filter(end_date == max_date)
dim(two_dates)
dim(two_test)
three_test <- three_dates %>%
filter(end_date == max_date)
dim(three_dates)
dim(three_test)
dim(three_dates)
dim(reps54)
# Check for duplicate first/last-name combinations in reps54.
length(unique(reps54$fn_ln))
|
5d7291947549d1918800fa97cd63d464944f46d4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PTXQC/examples/LCSn.Rd.R
|
639fd75e3334b161044423700d2604fb5aba344d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
LCSn.Rd.R
|
## Extracted example code for PTXQC::LCSn (auto-generated from the Rd file).
library(PTXQC)
### Name: LCSn
### Title: Find longest common substring from 'n' strings.
### Aliases: LCSn
### ** Examples
LCSn(c("1_abcde...", "2_abcd...", "x_abc...")) ## result: "_abc"
|
5d90ab4c18562dd4d3421dcdfb026da27e1e426f
|
a040f39f3f1be7983684f0225e3e6f12db789af6
|
/R/play_track_save_with_id.R
|
1277e1ff9053fb50881b4ea3cb02cf59bc1c1258
|
[
"Apache-2.0"
] |
permissive
|
dimitreOliveira/RecsysChallenge_Spotify
|
aaecf41262e9db181faae3020cb495819b5f2828
|
3b7cd4e4d12a51049573cca1dd86ef66603fddcb
|
refs/heads/master
| 2021-07-09T23:17:03.060501
| 2018-12-19T22:30:37
| 2018-12-19T22:30:37
| 134,984,353
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 551
|
r
|
play_track_save_with_id.R
|
library(dplyr)
## Merge the training playlist-track table with the challenge set, assign
## each distinct track_uri a stable numeric track_id (numbered in order of
## first appearance), and write the combined table back out.
playlists <- read.csv2("../data/play_track.csv", header = T, sep = ";", stringsAsFactors = F)
p_challenge <- read.csv2("../data/challenge/play_track_challenge.csv", header = T, sep = ";", stringsAsFactors = F)
# adding challenge to train data
playlists <- rbind(playlists, p_challenge)
# factor levels fixed to first-appearance order so ids are reproducible
playlists$track_id <- as.numeric(factor(playlists$track_uri,
levels=unique(playlists$track_uri)))
write.table(playlists, file = "../data/play_track_id.csv",row.names=FALSE, na="",col.names=TRUE, sep=";")
|
ab5b1f5f347621e1524988bae2ec8e715f733118
|
8731390d274cfa4d490847a34474b24640fb3a48
|
/code/compareSimulations.R
|
330cf79f78c3fe550e116db0447e3eddd4b5a52a
|
[] |
no_license
|
seabbs/huisman.epinow2.evaluation
|
afbfe110f069d4f58099e586d180c94b74a1afa5
|
f58cb39c28ece7c45200beecb744456d84919925
|
refs/heads/main
| 2023-02-08T10:19:41.231972
| 2021-01-04T12:09:08
| 2021-01-04T12:09:08
| 323,299,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,510
|
r
|
compareSimulations.R
|
###########################################################
## compareSimulations.R
## author: J.S. Huisman
###########################################################
# this library is optional, as the DTW distance
# wasn't used for the paper
#library(dtwclust)
###### Compare Simulation and Estimation results ######
getReRMSE <- function(ReCompare){
  # Normalised root-mean-square error of the estimated Re time series
  # (column median_R_mean) against the true Re column, scaled by mean(Re).
  squared_err <- (ReCompare$median_R_mean - ReCompare$Re)^2
  rmse <- sqrt(sum(squared_err) / length(squared_err))
  rmse / mean(ReCompare$Re)
}
getRootDiff <- function(simulation, estimatedRe, all = FALSE){
# Compare the dates at which Re crosses 1 ("roots") between the simulated
# and the estimated Re time series. Returns the signed difference in days
# for the first crossing, or the sum of absolute differences over all
# crossings when all = TRUE.
# NOTE(review): assumes both series have the same number of crossings;
# otherwise the elementwise date subtraction recycles -- confirm upstream.
sim_change_points = diff(sign(simulation$Re -1))
sim_roots = simulation$date[which(sim_change_points != 0 )]
# Normalise raw estimates (cleanReTSestimate is defined elsewhere in the
# project) so a median_R_mean column is available.
if (! ('median_R_mean' %in% colnames(estimatedRe))){
estimatedRe <- cleanReTSestimate(estimatedRe)
}
est_change_points = diff(sign(estimatedRe$median_R_mean -1))
est_roots = estimatedRe$date[which(est_change_points != 0 )]
root_diffs <- as.numeric(est_roots - sim_roots)
if (all){
root_diff = sum(abs(root_diffs))
} else{
root_diff = root_diffs[1]
}
return( root_diff )
}
getEmpCoverage <- function(ReCompare){
  # Empirical coverage of the estimated HPD interval: the fraction of time
  # points where the true Re lies inside [median_R_lowHPD, median_R_highHPD],
  # together with the median width of that interval.
  inside <- (ReCompare$Re >= ReCompare$median_R_lowHPD) &
    (ReCompare$Re <= ReCompare$median_R_highHPD)
  widths <- ReCompare$median_R_highHPD - ReCompare$median_R_lowHPD
  list(frac_coverage = sum(inside) / nrow(ReCompare),
       CI_width = median(widths))
}
getOneTransitions <- function(simulation, estimatedRe){
# Fraction of dates where the estimate agrees with the simulation about
# whether Re is above 1. Dates where the HPD interval straddles 1 get
# EstOneTrans = NA (no case_when branch matches) and are excluded.
simulation <- simulation %>%
mutate(OneTrans = ifelse(Re >= 1, 1, 0))
# Normalise raw estimates (cleanReTSestimate is defined elsewhere in the
# project) so the HPD columns are available.
if (! ('median_R_mean' %in% colnames(estimatedRe))){
estimatedRe <- cleanReTSestimate(estimatedRe)
}
one_transitions = estimatedRe %>%
mutate(EstOneTrans = case_when(median_R_lowHPD >= 1 ~ 1,
median_R_highHPD < 1 ~ 0)) %>%
full_join(simulation, by = c('date')) %>%
dplyr::select(date, Re, OneTrans, EstOneTrans) %>%
filter(!is.na(EstOneTrans)) %>%
mutate(compare = OneTrans == EstOneTrans)
# Share of confidently-classified dates that match the simulation.
perc_transitions = sum(one_transitions$compare)/nrow(one_transitions)
return(perc_transitions)
}
getReError <- function(simulation, estimatedRe){
# Bundle the four error metrics (normalised RMSE, first-root date shift,
# empirical HPD coverage, above/below-1 agreement) for one simulated vs.
# estimated Re pair. Rows without an estimate are dropped before comparing.
if (! ('median_R_mean' %in% colnames(estimatedRe))){
estimatedRe <- cleanReTSestimate(estimatedRe)
}
#Compare
ReCompare <- simulation %>%
full_join(estimatedRe, by = c('date')) %>%
filter(!is.na(median_R_mean))
ReRMSE <- getReRMSE(ReCompare)
RootDiff <- getRootDiff(simulation, estimatedRe, all = FALSE)
EmpCoverage <- getEmpCoverage(ReCompare)
OneTrans <- getOneTransitions(simulation, estimatedRe)
return(list(ReRMSE = ReRMSE, RootDiff = RootDiff,
EmpCoverage = EmpCoverage$frac_coverage,
OneTrans = OneTrans
))
}
getSlopeError <- function(simulation, estimatedRe, valid_cond){
# Relative error of the estimated Re slope between two anchor time points
# (row indices t2 and t3 of the simulation, taken from valid_cond),
# computed per bootstrap replicate of the estimate. The window starts at
# the later of t2 and the first estimated date.
date_t3 = simulation$date[valid_cond$t3]
date_t2 = simulation$date[valid_cond$t2]
date_first = max(min(estimatedRe$date), date_t2)
date_int = as.numeric(date_t3 - date_first)
# True (simulated) slope over the window.
slope_sim = (simulation$Re[simulation$date == date_t3] -
simulation$Re[simulation$date == date_first])/date_int
# Per-replicate estimated slope over the same window.
meanEstRe <- estimatedRe %>%
filter(variable == 'R_mean') %>%
dplyr::select(date, replicate, value) %>%
group_by(replicate) %>%
summarise(slope_est = (value[date == date_t3] -
value[date == date_first])/date_int,
.groups = "drop")
abs_slope_error = slope_sim - meanEstRe$slope_est
rel_slope_error = (abs_slope_error/slope_sim)
return(rel_slope_error)
}
getMeanSlopeError <- function(valid_cond_grid){
# For every row of the condition grid, load the simulation and estimate
# files named in that row and record the mean relative slope error.
# NOTE(review): grows a data.frame inside the loop (bind_rows per
# iteration) -- fine for small grids, O(n^2) for large ones.
SlopeError = data.frame()
for (row_id in 1:nrow(valid_cond_grid)){
simulation <- read_csv(paste0(valid_cond_grid[row_id, 'simulationDir'], '/',
valid_cond_grid[row_id, 'filename']))
# estimatedInfections is read but not used below -- apparent leftover.
estimatedInfections <- read_csv(paste0(valid_cond_grid[row_id, 'estimationDir'],
valid_cond_grid[row_id, 'infection_file']))
estimatedRe <- read_csv(paste0(valid_cond_grid[row_id, 'estimationDir'],
valid_cond_grid[row_id, 're_file']))
new_error <- getSlopeError(simulation, estimatedRe, valid_cond_grid[row_id, ])
SlopeError <- bind_rows(SlopeError, list(slopeError = mean(new_error)) )
}
return(SlopeError)
}
|
d751278b4fc6bdce04921202616d5bc1f58b1bda
|
17f1b5b761a43ec178602a43f24ac72c2d5d01a9
|
/hmlasso/inst/testfiles/softThresholdC/libFuzzer_softThresholdC/softThresholdC_valgrind_files/1609897082-test.R
|
b0719d3beaf80a0109b2cbcc8ac83f161e61491f
|
[] |
no_license
|
akhikolla/newtestfiles-2
|
3e1882e7eea3091f45003c3abb3e55bc9c2f8f56
|
e539420696b7fdc05ce9bad66b5c7564c5b4dab2
|
refs/heads/master
| 2023-03-30T14:44:30.614977
| 2021-04-11T23:21:23
| 2021-04-11T23:21:23
| 356,957,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
1609897082-test.R
|
## Auto-generated fuzzer/valgrind regression input for hmlasso's internal
## soft-thresholding C routine: replay one recorded argument list.
testlist <- list(g = 8.24548651624444e+136, z = 8.24548651624435e+136)
result <- do.call(hmlasso:::softThresholdC,testlist)
str(result)
|
bc82a00fefd0963ce739e3f85d554559da0fd1ff
|
03542871c1966009b330bc29b4b1f36c96c79da1
|
/man/make_ranking_violin_plot.Rd
|
9b7d6d090b749bccadb87ba913e63284983c22bf
|
[] |
no_license
|
boxizhang/celaref
|
bc68f319aa436cb17f49ba1b27032cd2a9fba49f
|
585f2fb96f8d382803cebea3c6fc7adefa8d2054
|
refs/heads/master
| 2020-07-03T03:20:07.015095
| 2019-06-03T07:06:13
| 2019-06-03T07:06:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,284
|
rd
|
make_ranking_violin_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{make_ranking_violin_plot}
\alias{make_ranking_violin_plot}
\title{make_ranking_violin_plot}
\usage{
make_ranking_violin_plot(de_table.marked = NA, de_table.test = NA,
de_table.ref = NA, log10trans = FALSE, ...)
}
\arguments{
\item{de_table.marked}{The output of
\code{\link{get_the_up_genes_for_all_possible_groups}}
for the contrast of interest.}
\item{de_table.test}{A differential expression table of the
query experiment,
as generated from \code{\link{contrast_each_group_to_the_rest}}}
\item{de_table.ref}{A differential expression table of the
reference dataset,
as generated from \code{\link{contrast_each_group_to_the_rest}}}
\item{log10trans}{Plot on a log scale? Useful for distinguishing multiple
similar, yet distinct cell types that bunch at top of plot. Default=FALSE.}
\item{...}{Further options to be passed to
\code{\link{get_the_up_genes_for_all_possible_groups}},
e.g. rankmetric}
}
\value{
A ggplot object.
}
\description{
Plot a panel of violin plots showing the distribution of the 'top' genes of
each of query group, across the reference dataset.
}
\details{
In the plot output, each panel corresponds to a different group/cluster in
the query experiment. The x-axis has the groups in the reference dataset.
The y-axis is the rescaled rank of each 'top' gene from the query group,
within each reference group.
Only the 'top' genes for each query group are plotted, forming the violin
plots - each individual gene is shown as a tickmark. Some groups have few
top genes, and so their uncertainty can be seen on this plot.
The thick black lines represent the median gene rescaled ranking for each
query group / reference group combination. Having this fall above the dotted
median threshold marker is a quick indication of potential similarity.
A complete lack of similarity would have a median rank around 0.5. Median
rankings much less than 0.5 are common though (an 'anti-cell-groupA'
signature), because genes overrepresented in one group in an experiment,
are likely to be relatively 'underrepresented' in the other groups.
Taken to an
extreme, if there are only two reference groups, they'll be complete
opposites.
Input can be either the precomputed \emph{de_table.marked} object for the
comparison, OR both \emph{de_table.test} and \emph{de_table.ref}
differential expression results to compare from
\code{\link{contrast_each_group_to_the_rest}}
}
\examples{
# Make input
# de_table.demo_query <- contrast_each_group_to_the_rest(demo_query_se, "demo_query")
# de_table.demo_ref <- contrast_each_group_to_the_rest(demo_ref_se, "demo_ref")
# This:
make_ranking_violin_plot(de_table.test=de_table.demo_query,
de_table.ref=de_table.demo_ref )
# Is equivalent to this:
de_table.marked.query_vs_ref <-
get_the_up_genes_for_all_possible_groups( de_table.test=de_table.demo_query,
de_table.ref=de_table.demo_ref)
make_ranking_violin_plot(de_table.marked.query_vs_ref)
}
\seealso{
\code{\link{get_the_up_genes_for_all_possible_groups}} To make
the input data.
}
|
d9b0249c9fcaea87430a6598cac4384586a00b81
|
2c38fc71287efd16e70eb69cf44127a5f5604a81
|
/tests/performance/test-stems.R
|
c0575fbd36b4dc62ebd06c27b996bbd873c332d0
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ropensci/targets
|
4ceef4b2a3cf7305972c171227852338dd4f7a09
|
a906886874bc891cfb71700397eb9c29a2e1859c
|
refs/heads/main
| 2023-09-04T02:27:37.366455
| 2023-09-01T15:18:21
| 2023-09-01T15:18:21
| 200,093,430
| 612
| 57
|
NOASSERTION
| 2023-08-28T16:24:07
| 2019-08-01T17:33:25
|
R
|
UTF-8
|
R
| false
| false
| 2,418
|
r
|
test-stems.R
|
library(proffer)
library(targets)
px <- pprof(
targets <- lapply(
paste0("x", seq_len(1e3)), function(name) {
tar_target_raw(name, command = quote(1 + 1))
}
)
)
large_pipeline <- pipeline_init(targets)
px <- pprof(local_init(pipeline = large_pipeline)$run())
tar_destroy()
# With interface and priorities
tar_script({
x0 <- 1
targets <- lapply(seq_len(1e3), function(id) {
name <- paste0("x", as.character(id))
dep <- paste0("x", as.character(id - 1L))
command <- as.expression(rlang::sym(dep))
tar_target_raw(name, command = command)
})
list(targets, tar_target(y, 1, priority = 0.37))
})
px <- pprof(tar_make(reporter = "summary", callr_function = NULL))
tar_destroy()
unlink("_targets.R")
rm(x0)
# Same, but with a target chain that fails early.
# Should not see overhead due to topo_sort_by_priority()
# since all priorities are equal.
tar_script({
target_x0 <- tar_target(x0, stop())
out <- lapply(seq_len(1e3), function(id) {
name <- paste0("x", as.character(id))
dep <- paste0("x", as.character(id - 1L))
command <- as.expression(rlang::sym(dep))
tar_target_raw(name, command = command)
})
list(target_x0, out)
})
system.time(try(tar_make(reporter = "summary", callr_function = NULL)))
px <- pprof(try(tar_make(reporter = "summary", callr_function = NULL)))
tar_destroy()
unlink("_targets.R")
# Same, but with unequal priorities.
tar_script({
target_x0 <- tar_target(x0, stop(), priority = 1)
out <- lapply(seq_len(1e3), function(id) {
name <- paste0("x", as.character(id))
dep <- paste0("x", as.character(id - 1L))
command <- as.expression(rlang::sym(dep))
tar_target_raw(name, command = command, priority = 0)
})
list(target_x0, out)
})
system.time(try(tar_make(reporter = "summary", callr_function = NULL)))
px <- pprof(try(tar_make(reporter = "summary", callr_function = NULL)))
# Should not see topo sort overhead for tar_outdated().
system.time(try(tar_outdated(callr_function = NULL)))
px <- pprof(try(tar_outdated(callr_function = NULL)))
# Should not see topo sort overhead for tar_sitrep().
system.time(try(tar_sitrep(callr_function = NULL)))
px <- pprof(try(tar_sitrep(callr_function = NULL)))
# Should not see topo sort overhead for tar_make_future().
system.time(try(tar_make_future(callr_function = NULL)))
px <- pprof(try(tar_make_future(callr_function = NULL)))
tar_destroy()
unlink("_targets.R")
|
1a2a8973aa19ee678bd0e7c2ef179bd68a8cfb99
|
c63fc5e6607e2cd5d62464a72c78b06191277eb6
|
/R/movav.R
|
cdb17cbd7e7a9fe79c5efb092ebb084f08b182bd
|
[] |
no_license
|
SWS-Methodology/faoswsTrade
|
6ce400e545fc805fe1f87d5d3f9d5ba256a8a78c
|
2145d71a2fda7b63d17fa7461ec297f98b40756c
|
refs/heads/master
| 2023-02-17T08:40:21.308495
| 2023-02-09T13:53:56
| 2023-02-09T13:53:56
| 55,507,302
| 4
| 1
| null | 2020-05-15T14:45:59
| 2016-04-05T12:49:03
|
R
|
UTF-8
|
R
| false
| false
| 1,684
|
r
|
movav.R
|
#' Moving average
#'
#' Compute the moving average of a vector.
#'
#' With \code{pkg = "native"} only a window of \code{n = 3} is supported
#' (centered or trailing); with \code{pkg = "zoo"} a right-aligned average
#' of order \code{n} is computed via \code{zoo::rollapply}, and
#' \code{mode = "centered"} is not implemented.
#'
#' @param x a numeric vector.
#' @param pkg string: "native" (default) or "zoo" (uses
#'   \code{zoo::rollapply}).
#' @param mode String: "centered" (default) indicates that the moving
#'   average should be centered; any other value gives a trailing window.
#' @param n integer window width. NOTE: the default (5) is rejected by the
#'   "native" backend, which only accepts \code{n = 3} -- pass it
#'   explicitly when \code{pkg = "native"}.
#' @param na.rm logical: drop NAs when averaging each window.
#'
#' @return A vector with the moving average of the input, the same length
#'   as \code{x}; positions where \code{x} is NA stay NA.
#'
#' @export
movav <- function(x, pkg = 'native', mode = 'centered', n = 5, na.rm = TRUE) {
  if (pkg == 'zoo') {
    if (mode == 'centered') {
      ## FIX: the original message read 'Only "mode = centered" is
      ## implemented with zoo' -- the opposite of what this branch enforces.
      stop('"mode = centered" is not implemented with zoo')
    } else {
      res <- zoo::rollapply(lag(x), n, mean, fill = NA,
                            align = 'right', na.rm = na.rm)
    }
  } else if (pkg == 'native') {
    if (n != 3) stop('Only n = 3 can be used with pkg = "native"')
    if (mode == 'centered') {
      if (length(x) > 1) {
        ## Rows hold (x_i, x_{i-1}, x_{i+1}); average across each row.
        ## FIX: the original hard-coded na.rm = TRUE here, silently
        ## ignoring the na.rm argument.
        res <- cbind(x, c(NA, x[1:(length(x)-1)]), c(x[2:length(x)], NA))
        res <- apply(res, 1, mean, na.rm = na.rm)
      } else {
        res <- x
      }
    } else {
      if (length(x) > 2) {
        ## Trailing window (x_i, x_{i-1}, x_{i-2}); the first two entries
        ## are back-filled with the first fully-defined value.
        res <- cbind(x, c(NA, x[1:(length(x)-1)]), c(NA, NA, x[1:(length(x)-2)]))
        res <- apply(res, 1, mean, na.rm = na.rm)
        res[1:2] <- res[3]
      } else {
        res <- x
      }
    }
    ## All-NA windows yield NaN; normalise to NA, and keep NA wherever the
    ## input itself was NA.
    res[is.nan(res)] <- NA
    res <- ifelse(!is.na(x), res, NA)
  } else {
    stop('"pkg" should be "native" or "zoo"')
  }
  if (length(x) != length(res)) {
    stop('The lengths of input and output differ.')
  }
  return(res)
}
|
4abf5d870940c1f7d4748f52e6e623a28205457f
|
de8b22d92484dc5a456e5bfff9b4113f1761cf2f
|
/razvrscanjeQ.R
|
40baec746b8869696a8ade016285c7105e70252a
|
[
"MIT"
] |
permissive
|
MathBB/Ekipa-da-te-skipa
|
af669d303659254bb1c62e32d56dc0f89f684b05
|
e2d61473357b3493b6f5a9b06299006083634f54
|
refs/heads/master
| 2021-01-23T13:59:44.480188
| 2015-09-04T07:01:31
| 2015-09-04T07:01:31
| 32,923,816
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,800
|
r
|
razvrscanjeQ.R
|
razvrscanjeQ <- function(xts, xts1, xts2, xts3){
# Takes an xts matrix `xts` used as the reference for the available dates
# and stocks (ideally an xts matrix of daily prices or returns), plus xts
# matrices xts1, xts2 and xts3 holding the fundamental figures by which
# the stocks are sorted into 18 portfolios.
# Returns an xts matrix of values 1 to 18 which, for each trading day,
# identifies the portfolio the stock belongs to on that day.
len_r <- nrow(xts)
# Preallocate the output matrix with the same shape as the reference.
razvrsti <- xts
razvrsti[, ] <- NA
for (j in 1:len_r){
# Zeros mean "no data" in the inputs; convert them to NA.
m1 <- xts1[j, ]
m1[m1 == 0] <- NA
m2 <- xts2[j, ]
m2[m2 == 0] <- NA
m3 <- xts3[j, ]
m3[m3 == 0] <- NA
m4 <- xts[j, ]
m4[m4 == 0] <- NA
# Reserve space for matrix b first.
b <- razvrsti[1:4,]
# For each day, stack the four rows into a working matrix b.
b <- rbind(m1,m2,m3,m4)
# Drop stocks (columns) that are missing any of the required values.
b <- b[, order(b[4,], na.last = NA)]
b <- b[, order(b[3,], na.last = NA)]
b <- b[, order(b[2,], na.last = NA)]
# Split the stocks 50:50 into two portfolios by the fundamental stored in
# xts1; the boundary is the stock with the median value. Stocks in the
# first half get code 1 in row 1 of b, stocks in the second half get 10.
b <- b[, order(b[1,], na.last = NA)]
len <- ncol(b)
n1 <- floor(len/2)
b[1, ] <- rep(c(1,10), c(n1, (len - n1)))
# Split each of the two portfolios into three (cut at the 30% and 70%
# points, i.e. 30:40:30) by the fundamental stored in xts2, giving six
# portfolios. Row 2 of b gets 0, 3 or 6 for the first, second and third
# sub-portfolio respectively.
b <- b[, order(b[1,], b[2,])]
n2 <- floor(n1*0.3)
n3 <- ceiling(n1*0.7)
n4 <- floor(n1*1.3)
n5 <- ceiling(n1*1.7)
b[2, ] <- rep(c(0,3,6,0,3,6), c(n2, (n3-n2), (n1-n3), (n4-n1), (n5-n4), (len-n5)))
# Similarly split each of the six portfolios into three more (30:40:30)
# by the fundamental stored in xts3, yielding 18 portfolios. Row 3 of b
# gets 0, 1 or 2 for the first, second and third sub-portfolio.
b <- b[, order(b[1,], b[2,], b[3,])]
n6 <- floor(n2*0.3)
n7 <- ceiling(n2*0.7)
n8 <- floor((n3-n2)*0.3)
n9 <- ceiling((n3-n2)*0.7)
n10 <- floor((n1-n3)*0.3)
n11 <- ceiling((n1-n3)*0.7)
n12 <- floor((n4-n1)*0.3)
n13 <- ceiling((n4-n1)*0.7)
n14 <- floor((n5-n4)*0.3)
n15 <- ceiling((n5-n4)*0.7)
n16 <- floor((len-n5)*0.3)
n17 <- ceiling((len-n5)*0.7)
b[3, ] <- rep(rep(0:2, 6), c(n6, (n7-n6), (n2-n7), n8, (n9-n8), (n3-n2-n9), n10, (n11-n10), (n1-n3-n11),
n12, (n13-n12), (n4-n1-n13), n14, (n15-n14), (n5-n4-n15), n16, (n17-n16), (len-n5-n17)))
# Zero out row 4, then sum the codes down each column: the sums encode the
# 18 distinct portfolio labels. Transpose, restore alphabetical column
# order, and write the result into row j of the output matrix.
b[4, ] <- rep(0, len)
b_new <- t(apply(b, 2, sum))
b_new <- t(b_new[, order(colnames(b_new))])
razvrsti[j, colnames(b_new)] <- b_new
}
return(razvrsti)
}
|
015e700c68f3a464cfa7d5ced3116a7b36c54c19
|
c262aa9d1819623e627386fd43e61e0d988d405a
|
/pipeline/scripts/AddOnFunctions/generateGaussian.R
|
da6dec2cd1269b0f8ca68afffb695a93415e833d
|
[
"MIT"
] |
permissive
|
UMCUGenetics/DIMS
|
bf818ebefd272f2b4726b9db26b6326a5070911f
|
dd98c1e4fb3cf8fbe0a08761b6583e7930696e21
|
refs/heads/master
| 2023-08-08T03:11:34.213700
| 2023-03-28T09:23:11
| 2023-03-28T09:23:11
| 175,600,531
| 1
| 3
|
MIT
| 2023-08-25T15:27:21
| 2019-03-14T10:34:21
|
R
|
UTF-8
|
R
| false
| false
| 1,432
|
r
|
generateGaussian.R
|
generateGaussian <- function(x, y, resol, plot, scanmode, int.factor, width, height) {
  # Fit a Gaussian to the tallest point of the (x, y) trace: the peak m/z
  # becomes the mean, the resolution-derived FWHM fixes the fitting window,
  # and the scale is optimised against the observed peak intensity.
  #
  # Args:
  #   x, y       : m/z values and intensities (only the maximum of y is used)
  #   resol      : instrument resolution, passed to getFwhm()/getArea()
  #   plot       : if TRUE, write a diagnostic PNG of the fit
  #   scanmode   : "positive" or anything else (selects the plot file suffix)
  #   int.factor : interpolation density for the fitted curve
  #   width, height : PNG dimensions
  # Returns a list with the fitted mean, area and the m/z window [min, max].
  spread <- 1.5
  peak <- which(y == max(y))[1]
  mu <- x[peak]
  intensity <- y[peak]
  fwhm <- getFwhm(mu, resol)
  # Three anchor points: zero intensity at both window edges plus the
  # observed intensity at the peak itself.
  x.p <- c(mu - spread * fwhm, mu, mu + spread * fwhm)
  y.p <- c(0, intensity, 0)
  if (plot) {
    plot_label <- if (scanmode == "positive") "pos_fit.png" else "neg_fit.png"
  }
  mz.range <- x.p[length(x.p)] - x.p[1]
  # Dense grid across the window for drawing/integrating the fitted curve.
  x2 <- seq(x.p[1], x.p[length(x.p)], length = mz.range * int.factor)
  sigma <- getSD(x.p, y.p)
  scale <- optimizeGauss(x.p, y.p, sigma, mu)
  if (plot) {
    # `sampname` is taken from the calling environment (project convention).
    CairoPNG(filename = paste("./results/Gaussian_fit", paste(sampname, mu, plot_label, sep = "_"), sep = "/"), width, height)
    plot(x.p, y.p, xlab = "m/z", ylab = "I", ylim = c(0, 1.5 * max(intensity)))
    fitted <- scale * dnorm(x2, mu, sigma)
    lines(x2, fitted, col = "green")
    # Mark the half-maximum segment (FWHM) and the fitted mean.
    half_max <- max(fitted) * 0.5
    lines(c(mu - 0.5 * fwhm, mu + 0.5 * fwhm), c(half_max, half_max), col = "orange")
    abline(v = mu, col = "green")
    legend("topright", legend = c(paste("mean =", mu, sep = " ")))
    dev.off()
  }
  area <- getArea(mu, resol, scale, sigma, int.factor)
  list("mean" = mu, "area" = area, "min" = x2[1], "max" = x2[length(x2)])
}
|
e4f86e8ac5c630794bc9d9eb27266293ce87b06a
|
160622f50fc2fe9a6aaa3095849f7a8bd2caa496
|
/man/plate_map_multiple.Rd
|
b3968e82fd3a26f66d096bd437990bdd3b3bf57c
|
[] |
no_license
|
jayhesselberth/platetools
|
53fd0d16deca84ec6efd0c1f838d7a9fed835a1b
|
617a33fc4a3482b85fc1fd8b38dcc82a53a10176
|
refs/heads/master
| 2020-03-22T01:45:56.557163
| 2018-06-25T13:36:13
| 2018-06-25T13:36:13
| 139,328,253
| 1
| 0
| null | 2018-07-01T12:42:49
| 2018-07-01T12:42:48
| null |
UTF-8
|
R
| false
| true
| 451
|
rd
|
plate_map_multiple.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plate_map.R
\name{plate_map_multiple}
\alias{plate_map_multiple}
\title{row, column for multiple features}
\usage{
plate_map_multiple(data, well)
}
\arguments{
\item{data}{vector or dataframe of numeric data}
\item{well}{vector of alphanumeric well IDs, e.g. 'A01'}
}
\description{
Generates a dataframe for multiple features, given a wellID column and multiple
features
}
|
6d039cfa7d967a3b5e7ef15db6ee42978b4178f3
|
7763fa473117434c7750d58de1472392d10e5f98
|
/old_code/analysis.R
|
e9334f8ccb10d7836aa78106611a4cc699da1779
|
[] |
no_license
|
afcarl/sincerity_detection
|
0f68acb0e2b113221c992ac1b6dae6e3e79f261c
|
0f8f50d514a7f56fbb9c14961c91a3d856173094
|
refs/heads/master
| 2020-03-21T03:16:14.081073
| 2015-02-05T20:24:59
| 2015-02-05T20:24:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,821
|
r
|
analysis.R
|
# This R script runs the analysis:
#   1. loads the feature table and drops incomplete rows;
#   2. runs factor analysis on selected mean-centred prosodic features;
#   3. binarises the sincerity ground truth at its quartile cut-offs;
#   4. classifies sincerity with a random forest and reports accuracy.
# NOTE(review): setwd() to an absolute path ties the script to one machine;
# kept to preserve behaviour, but consider a project-relative path.
setwd("/home/sidd/Documents/CS224s/Analysis")
library(randomForest)
dat <- read.table('ForAnalysis.csv', header = TRUE, sep = ';')
# Keep only rows with no missing values.
keep_indices <- complete.cases(dat)
dat <- dat[keep_indices, ] # Get rid of NAs
# Prosodic features. NOTE(review): column indices are positional in the
# export -- confirm they still line up if the feature extraction changes.
pro <- dat[, c(194, 195, 199, 203, (440:471))]
centered_pros <- scale(pro, scale = TRUE, center = TRUE) # Mean-centers data and makes unit variance
# Hand-picked prosodic columns: duration, pitch (p*), intensity (i*) and
# voicing-probability summary statistics.
j_columns <- c("tndur.Mean", 'tndur.SD', 'pmin.Mean', 'pmin.SD', 'pmax.Mean', 'pmax.SD', 'pmean.Mean', 'pmean.SD', 'psd.Mean', 'psd.SD', 'imin.Mean', 'imin.SD', 'imax.Mean', 'imax.SD', 'imean.Mean', 'imean.SD', 'voiceProb_sma_min', 'voiceProb_sma_amean', 'voiceProb_sma_max', 'voiceProb_sma_stddev')
fa_data <- centered_pros[, which(colnames(centered_pros) %in% j_columns)]
# How many factors to extract?
# Scree test with parallel analysis (nFactors package).
library(nFactors)
ev <- eigen(cor(fa_data))
ap <- parallel(subject = nrow(fa_data), var = ncol(fa_data), rep = 100, cent = .05)
nS <- nScree(x = ev$values, aparallel = ap$eigen$qevpea)
plotnScree(nS)
# Gap statistic on variance explained by PCA components -- kept for
# reference but disabled (author note: "Bad results! Don't want to use!").
if (FALSE) {
  objs <- princomp(fa_data)$sdev
  objs <- objs^2
  objs <- objs / sum(objs)
  objs <- cumsum(objs)
  vals <- matrix(rep(0, 100 * ncol(fa_data)), ncol = ncol(fa_data))
  for (i in 1:100) {
    null_dat <- matrix(runif(nrow(fa_data) * ncol(fa_data), min = -4, max = 4), nrow = nrow(fa_data))
    null_pca <- princomp(null_dat)
    vars <- (null_pca$sdev)^2
    vars <- vars / sum(vars)
    vals[i, ] <- log(1 - cumsum(vars))
  }
  avgs <- colSums(vals) / nrow(vals)
  sds <- apply(vals, 2, sd)
  gaps <- objs - avgs
}
# Factor Analysis.
# Drop any pitch-range ("prange") columns before fitting.
# BUGFIX: the original line was
#   fa_data <- fa_data[, -grepl('*prange*', colnames(centered_pros))]
# which (a) errors at runtime because '*prange*' is an invalid regular
# expression (a leading '*' repetition operator), and (b) built the mask
# from the wider centered_pros matrix rather than fa_data itself.
fa_data <- fa_data[, !grepl("prange", colnames(fa_data)), drop = FALSE]
fit <- factanal(fa_data, 5, rotation = 'varimax')
l <- fit$loadings
write.table(l, 'loadingsMatrix.csv', sep = ';', col.names = TRUE, row.names = TRUE)
# Project the data onto the factor loadings to obtain factor scores.
factors <- fa_data %*% l
# Extract 0-1 boundaries for the sincerity ratings: upper quartile -> 1,
# lower quartile -> 0, middle band dropped.
gt_sinc <- dat$o_sincre # Sincerity ground truth
q_sinc <- quantile(gt_sinc)
# We see cutoffs are 6 and 8 (from q_sinc) -- TODO confirm on fresh data.
ones <- which(gt_sinc > 8)
zeros <- which(gt_sinc < 6)
gt <- rep(0, length(ones) + length(zeros)) # Ground truth
# seq_len() instead of 1:length() so an empty `ones` assigns nothing
# (1:0 would wrongly touch position 1).
gt[seq_len(length(ones))] <- 1
split_set <- dat[c(ones, zeros), ] # Data set that corresponds to 0-1 sincerity values
split_factors <- factors[c(ones, zeros), ]
# Extract Lexical Features -- obsolete variant using factor scores plus
# lexical features (kept disabled, as in the original).
if (FALSE) {
  split_lex <- split_set[, 410:436]
  split_lex <- scale(split_lex, center = TRUE, scale = TRUE)
  predictor <- cbind(split_factors, split_lex) # Predictor matrix
  outcome <- as.factor(gt) # Outcome variable
  sample_rows <- sample(1:nrow(predictor), nrow(predictor) / 5)
  predict_train <- predictor[-sample_rows, ]
  predict_test <- predictor[sample_rows, ]
  outcome_train <- outcome[-sample_rows]
  outcome_test <- outcome[sample_rows]
  rf <- randomForest(predict_train, y = outcome_train, xtest = predict_test, ytest = outcome_test, ntree = 500)
}
# Random forest on all raw predictors (no replacement by factors).
split_set_all_predict <- split_set[, 26:ncol(split_set)] # All predictors in the split set (no output)
split_set_all_predict <- split_set_all_predict[, -(193:384)] # Get rid of deltas
split_set_all_predict <- scale(split_set_all_predict, center = TRUE, scale = TRUE)
predictor <- split_set_all_predict # Predictor matrix
outcome <- as.factor(gt) # Outcome variable
# 80/20 train/test split. NOTE(review): no set.seed(), so results vary from
# run to run -- kept to preserve behaviour.
sample_rows <- sample(1:nrow(predictor), nrow(predictor) / 5)
predict_train <- predictor[-sample_rows, ]
predict_test <- predictor[sample_rows, ]
outcome_train <- outcome[-sample_rows]
outcome_test <- outcome[sample_rows]
rf <- randomForest(predict_train, y = outcome_train, xtest = predict_test, ytest = outcome_test, ntree = 500, importance = TRUE)
sum(diag(rf$confusion)) / sum(rf$confusion) # Gets 67-70% accuracy
imp <- rf$importance
|
9d437ecb46ac53d19f3d494e2d85881997f6d831
|
545e38d58e1456a316b7e28f709faad39838af64
|
/notebook/2016-02-15-mixture-distribution/errors.R
|
610e07ae41bd01eba0cf6a017dcb801898a361df
|
[] |
no_license
|
attilagk/monoallelic-brain-research
|
686a2bf838f90f194e77eb652ab973b5b593e2e3
|
c171432bf119bb60023e25587df29d147b4a6984
|
refs/heads/master
| 2020-04-26T14:15:23.558193
| 2019-03-07T16:24:51
| 2019-03-07T16:24:51
| 173,606,964
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,858
|
r
|
errors.R
|
get.error.rates <- function(pi1, lambda, thrs) {
  # Closed-form error rates for thresholding p-values at `thrs` under a
  # two-component mixture: uniform nulls (weight 1 - pi1) and a trimmed
  # exponential alternative with rate `lambda` (weight pi1).
  #
  # Returns a list: ppv/fdr (precision and its complement), npv/fom
  # (negative predictive value and false omission rate), fpr and tpr.
  miss <- (1 - exp(- lambda)) * (exp(- lambda * thrs) - exp(- lambda))
  # Joint probabilities of the four test outcomes.
  tp <- pi1 * (1 - miss)
  fn <- pi1 * miss
  fp <- thrs * (1 - pi1)
  tn <- (1 - thrs) * (1 - pi1)
  prec <- tp / (tp + fp)
  npred <- tn / (tn + fn)
  list(
    ppv = prec,
    fdr = 1 - prec,
    npv = npred,
    fom = 1 - npred, # false omission rate
    fpr = thrs,      # nulls are uniform, so FPR equals the threshold
    tpr = tp / (tp + fn) # sensitivity, recall
  )
}
# Creates a probability density function for a mixture of two p-value
# densities: one uniform, one trimmed exponential. `pi1` is the prior
# probability of the exponential component, `lambda` its rate constant.
# The returned function carries `pi1` and `lambda` as attributes (read back
# by pval.sampler) and has S3 class "dpval".
#
# NOTE(review): for a properly normalised truncated-exponential component
# the (1 - exp(-lambda)) factor would usually be a divisor, i.e.
# lambda * exp(-lambda * p) / (1 - exp(-lambda)); left as-is to preserve
# behaviour -- confirm the intended normalisation.
dpval.maker <- function(pi1, lambda) {
  foo <- function(p)
    1 - pi1 + pi1 * lambda * (1 - exp(- lambda)) * exp(- lambda * p)
  # BUGFIX: the original set class(foo) first and then did
  # `attributes(foo) <- list(pi1=..., lambda=...)`, which replaces ALL
  # attributes -- including "class" -- so the "dpval" class was silently
  # lost. Attach the parameters individually and set the class last.
  attr(foo, "pi1") <- pi1
  attr(foo, "lambda") <- lambda
  class(foo) <- "dpval"
  foo
}
# Samples n p-values from a mixture density parametrized by pi1 and lambda.
# See dpval.maker for creating pdf.
# The returned vector has the truncated-exponential draws first, then the
# uniform draws; order within the vector is not randomised.
pval.sampler <- function(dpval, n) {
  # Mixture parameters travel as attributes on the density object built by
  # dpval.maker(); read them back here.
  pi1 <- attributes(dpval)$pi1
  lambda <- attributes(dpval)$lambda
  # function for sampling m1 points from a truncated exponential distribution.
  # Rejection sampling by recursion: draw m1 exponentials, keep those <= 1,
  # and recurse to redraw however many fell outside (0, 1]. `s1` accumulates
  # the accepted draws across recursive calls.
  trunc.exp.sampler <- function(m1, s1) {
    # Inline assignment: s = fresh draws, l = logical mask of rejects (> 1).
    l <- ((s <- rexp(m1, rate=lambda)) > 1)
    if(m2 <- length(which(l))) {
      # m2 draws exceeded 1: keep the accepted ones, redraw the rest.
      trunc.exp.sampler(m2, c(s1, s[!l]))
    }
    # Base case: every draw was accepted.
    else return(c(s1, s))
  }
  # Number of alternative (exponential) p-values is binomial(n, pi1);
  # the remainder are uniform nulls.
  numexp <- rbinom(1, size=n, prob=pi1)
  s.unif <- runif(n=n-numexp)
  s.trunc.exp <- trunc.exp.sampler(numexp, c())
  c(s.trunc.exp, s.unif)
}
|
95bf0f773e121d6817a3058c4acf1cbf7c0429e2
|
e1b973c582a68cb308e46b445381b457607e0791
|
/R/ltm/testr.R
|
4738cc54eca76f498e077a2be471c1b6fd4fcb5e
|
[] |
no_license
|
mishagam/progs
|
d21cb0b1a9523b6083ff77aca69e6a8beb6a3cff
|
3300640d779fa14aae15a672b803381c23285582
|
refs/heads/master
| 2021-08-30T15:51:05.890530
| 2017-12-18T13:54:43
| 2017-12-18T13:54:43
| 114,647,485
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
testr.R
|
# easy regression sample
# NOTE(review): this file looks like a deliberately broken fixture -- it
# ends with traceback() and a completion message -- so the syntax errors
# flagged below may be intentional. Confirm before "fixing" them.
r <- c(1,2,3,4)
r <- c(r[1:3], 7,6,5)
x <- 1:6
# Simple linear regression of r on x.
l <- lm(r ~ x)
# plot(l)
# data frames
fir <- c(1,2,3,4)
sec <- c(2,3,4,1)
f <- c(TRUE, TRUE, FALSE, TRUE)
df <- data.frame(fir=fir, sec=sec, f=f)
# Rows of df where the flag column f is TRUE.
s <- df[which(df$f),]
#loop
for (i in 1:3) { print (i) }
a <- function(x) {
  11 - b(x)
}
b <- function(y) {
  # NOTE(review): the trailing `c` makes this line a syntax error --
  # possibly an intentional parse-failure fixture, possibly a typo.
  y <- log(y - 0.5)c
  y <- y*5
  y + y
}
cat("a(5) = ")
print(a(5))
# NOTE(review): a(0) calls log(-0.5), which is NaN, so this condition is NA
# and the `if` would error at runtime even with the syntax fixed.
if (a(0) > 2) {
  cc <- "passed"
} else {
  # NOTE(review): the trailing `log` after the string literal is a syntax
  # error -- same question as above.
  cc <- "failed"log
}
print(a(0) > 2)
traceback()
print("testR finished running")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.